diff options
Diffstat (limited to 'ansible_collections/dellemc/powerflex')
61 files changed, 23159 insertions, 0 deletions
diff --git a/ansible_collections/dellemc/powerflex/CHANGELOG.rst b/ansible_collections/dellemc/powerflex/CHANGELOG.rst new file mode 100644 index 00000000..707509dc --- /dev/null +++ b/ansible_collections/dellemc/powerflex/CHANGELOG.rst @@ -0,0 +1,95 @@ +=============================== +Dellemc.PowerFlex Change Logs +=============================== + +.. contents:: Topics + + +v1.5.0 +====== + +Minor Changes +------------- + +- Info module is enhanced to support the listing replication consistency groups. +- Renamed gateway_host to hostname +- Renamed verifycert to validate_certs. +- Updated modules to adhere with ansible community guidelines. + +New Modules +----------- + +- dellemc.powerflex.replication_consistency_group - Manage replication consistency groups on Dell PowerFlex + +v1.4.0 +====== + +Minor Changes +------------- + +- Added support for 4.0.x release of PowerFlex OS. +- Info module is enhanced to support the listing volumes and storage pools with statistics data. +- Storage pool module is enhanced to get the details with statistics data. +- Volume module is enhanced to get the details with statistics data. + +v1.3.0 +====== + +Minor Changes +------------- + +- Added execution environment manifest file to support building an execution environment with ansible-builder. +- Enabled the check_mode support for info module + +New Modules +----------- + +- dellemc.powerflex.mdm_cluster - Manage MDM cluster on Dell PowerFlex + +v1.2.0 +====== + +Minor Changes +------------- + +- Names of previously released modules have been changed from dellemc_powerflex_\<module name> to \<module name>. + +New Modules +----------- + +- dellemc.powerflex.protection_domain - Manage Protection Domain on Dell PowerFlex + +v1.1.1 +====== + +Deprecated Features +------------------- + +- The dellemc_powerflex_gatherfacts module is deprecated and replaced with dellemc_powerflex_info + +v1.1.0 +====== + +Minor Changes +------------- + +- Added dual licensing. 
+- Gatherfacts module is enhanced to list devices. + +New Modules +----------- + +- dellemc.powerflex.device - Manage device on Dell PowerFlex +- dellemc.powerflex.sds - Manage SDS on Dell PowerFlex + +v1.0.0 +====== + +New Modules +----------- + +- dellemc.powerflex.info - Gathering information about Dell PowerFlex +- dellemc.powerflex.sdc - Manage SDCs on Dell PowerFlex +- dellemc.powerflex.snapshot - Manage Snapshots on Dell PowerFlex +- dellemc.powerflex.storagepool - Managing Dell PowerFlex storage pool +- dellemc.powerflex.volume - Manage volumes on Dell PowerFlex diff --git a/ansible_collections/dellemc/powerflex/FILES.json b/ansible_collections/dellemc/powerflex/FILES.json new file mode 100644 index 00000000..58a2dfd4 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/FILES.json @@ -0,0 +1,530 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "796d9d259e4da6c8655b665586ffaf095d1a5a45b18c768b5ba748385910cf8e", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b1ba204bb69a0ade2bfcf65ef294a920f6bb361b317dba43c7ef29d96332b9b", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3593e8970a00d95a557377bd7af2f50e5212620def3ed4134c989a33dfd8ec4f", + "format": 1 + }, + { + "name": "meta/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6275c6cd307d5afbdf72bd71abcffea19e755fd452f37be43d7036dc1ed4d5a4", + "format": 1 + }, + { + "name": "MODULE-LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4", + "format": 1 + }, + { + "name": 
"CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33b23a8f0c817a8e2648e255269cb51dc3882c613cc9e94c1c605d885a163f4c", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/powerflex.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "042fd3430b1ecc1ce01ce3efafa2f2d0fca1d814b891a756f686b5b543eb3bef", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/storage", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell/logging_handler.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fdd1b7ef81bde300864051fe060d5f393c074201279e3f739584a5c2c44153a2", + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell/utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87e9c4d3570ace6a236080e285e3b3c12b4e5c763064334b861ddb38ea37b264", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/sdc.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8104d04864a2270a5ceb5cf7e66f681125dec8510faf60d0054f4ae0e8739c2", + "format": 1 + }, + { + "name": 
"plugins/modules/protection_domain.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "53142b5ab071e1b841af1857e0408df1d2d29b9e5a261a04b0acf1e84a3eb851", + "format": 1 + }, + { + "name": "plugins/modules/mdm_cluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3bd51cc09a4f91e05d96d696600149275ffb2c1904b3ecdaf794212da1bb90a5", + "format": 1 + }, + { + "name": "plugins/modules/snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f653d2f3c1f0dc8a2d496cab89e240fe7a77e7d3d5e7f88a47f718ae0dbc07c", + "format": 1 + }, + { + "name": "plugins/modules/info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d8525b1e2b4a5ef671b93082d329193331181b4a85c37a0e8e5b172d8c9734c", + "format": 1 + }, + { + "name": "plugins/modules/replication_consistency_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e187923e495ab03d4f52f5001a450aed291500f0f9a8d5b03645bec0907131a6", + "format": 1 + }, + { + "name": "plugins/modules/sds.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc02c40e9b6d49c9df942e275de073a4854cfb6e0b27f2a876583e3d094b7803", + "format": 1 + }, + { + "name": "plugins/modules/storagepool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f9c9179a1cfe50510946135ee8ff0eb7b9b027e4f7d7afa53cc20e35f6a1b5d", + "format": 1 + }, + { + "name": "plugins/modules/device.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9531ecfeaa468b126d90e0932723d12d434dd604a089de4770b0b2dfcd5b9253", + "format": 1 + }, + { + "name": "plugins/modules/volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c8e0677e4071d2288a6b13969279e10873e41410eaabf80c344f56206dcedb9", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 
+ }, + { + "name": "tests/sanity/ignore-2.12.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1ef9cddda5a94dce31f5e8f4719e3e963adec5c2295f8f0c584faddcc0bf110", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.14.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1ef9cddda5a94dce31f5e8f4719e3e963adec5c2295f8f0c584faddcc0bf110", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.13.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1ef9cddda5a94dce31f5e8f4719e3e963adec5c2295f8f0c584faddcc0bf110", + "format": 1 + }, + { + "name": "tests/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65e6091d1c8d88a703555bd13590bb95248fb0b7376d3ed1d660e2b9d65581c8", + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_volume_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e61698f03d1a7ec229c5ffb6a4def656e806e5dd234a0e15b2136fba839a2d7", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_mdm_cluster_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a319347b17bfd7eca567a2a259313809b2ef3e3a33e545cc2b2a4b5187d27ee4", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"5bf628a051d160856652bda22e45d35303db612874edc75ed4e2e8b4a270fba3", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_api_exception.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a9639349df9561e15df73ad72a28fb0120121b9ef2f8f72e6a7ef8c01c1edeb", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_info_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c47b5eeb60987f156d5891f514ec891099e39e42b42fb7e9ec3f91125491ed1", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_sdk_response.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82b029313ed53922594cdabcf9129708a5a1ee8b4b00382994ed054e58637b89", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_protection_domain_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1005c8842b81ff4c5613992e5f80fb25cfa6ac36d1a9274e574caf04d7510584", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_storagepool_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f2552190a68b46919f44632fe65972825624ab201e330771e1992e57de253d27", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_storagepool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f78779e6b346b612915d92323ca0d1850e68da65b95f32205cc585b622b48be", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4742f5675d613a39bd5929b24e6f515d034bebf8defc1c98bb8fe69444785015", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/test_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66d4d0adaaa1197363ffe1f25e4c5f7def1b6996fdcf5678af40528c5652a64b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_replication_consistency_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7071e60cf4c57f36a59be9249964ceb9ef6ed0b8b6dca8a86a62515dbea087d3", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_mdm_cluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a242b57ed85421cb8823e0814484d077407f00c761e23169542ac34cc9aa0d3", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_protection_domain.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d2d1e320a857f994db4e48ce5406ca5bbfe21cd7c37d9f8d3bb1e07db8d333e", + "format": 1 + }, + { + "name": "tests/unit/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b02f408522b5f335ac6c1ef0d7ee5dd72a0a24492de516545ac4071d315882db", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "98ddfc70b930fb944fb944d6e981fd33da656495bca6b5f56562144546149252", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/.plugin-cache.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2969cb15eb1ae0ca0f5033d427a1ff70fe84bcf1a655c1e51059d8d03f34ebb3", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7ab69438be06910388bd76d2cc4cc60b31bf5b501513435673879d5df230a0a", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "f0fb1c23651bf566498150c8eda7e9baa2aa6f7e8436bfe6928dfc202b3e1817", + "format": 1 + }, + { + "name": "docs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "docs/Product Guide.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26906ca9341bd49c5ed3d7c8c633d570d7f92f91c5b595460e47003cc6a7fc89", + "format": 1 + }, + { + "name": "docs/CODE_OF_CONDUCT.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10e328fb91562dc4e0b6c5da59f213295b4f25af28a53235612698e4a8465acc", + "format": 1 + }, + { + "name": "docs/ISSUE_TRIAGE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "530744bbed97aad7ce03e7d367ab53882a53a69b2307f7a1ded46f19a6d410ab", + "format": 1 + }, + { + "name": "docs/SUPPORT.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c5ff5c4c06bcd6e7432653d13fab3fbf230685ae0337f6ecabec6f38aacb0ef7", + "format": 1 + }, + { + "name": "docs/Release Notes.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c83ab7f6bcbfe80040e285730b0309ce9457839880babfd1d14de8dd4bb73650", + "format": 1 + }, + { + "name": "docs/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a1579da61311237aaa707ceea81dff9269d6826fdf90f32c5fcf9b513d2c4513", + "format": 1 + }, + { + "name": "docs/INSTALLATION.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b07d28298ed19f6de063a772a607f4cb865d2e706c4bd676599ab7fc49c64dc", + "format": 1 + }, + { + "name": "docs/MAINTAINER_GUIDE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc7f5f908d7eb407ce9f93982892b7615e99a6abdb0f836b4b0e59ab2b3c1dcb", + "format": 1 + }, + { + "name": "docs/SECURITY.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "062552a1455d62b9e3c6683ad9d1b42343a82f7ffc33ac412fce7e40d1f52c28", + "format": 1 + }, + { + "name": "docs/BRANCHING.md", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "06567f4a7d818fbc89030b88b40ac7d7965e42c4a3408e7e8629662a8104e215", + "format": 1 + }, + { + "name": "docs/COMMITTER_GUIDE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b41b38fe09cfcbfb4499c39ed4822a9f8c3f5d562e68dad45b5f2389f18053b5", + "format": 1 + }, + { + "name": "docs/ADOPTERS.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c81933a41503275063d789f0685472e1603e4614376f3918b42c4bfb210a2c01", + "format": 1 + }, + { + "name": "docs/MAINTAINERS.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e748fd39a38ac2a61aa6f48eac2179dffcc5a3e8f261f54042946d969bbfadf6", + "format": 1 + } + ], + "format": 1 +}
\ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/LICENSE b/ansible_collections/dellemc/powerflex/LICENSE new file mode 100644 index 00000000..e72bfdda --- /dev/null +++ b/ansible_collections/dellemc/powerflex/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. 
If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>.
\ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/MANIFEST.json b/ansible_collections/dellemc/powerflex/MANIFEST.json new file mode 100644 index 00000000..eecd338a --- /dev/null +++ b/ansible_collections/dellemc/powerflex/MANIFEST.json @@ -0,0 +1,39 @@ +{ + "collection_info": { + "namespace": "dellemc", + "name": "powerflex", + "version": "1.5.0", + "authors": [ + "Akash Shendge <ansible.team@dell.com>", + "Arindam Datta <ansible.team@dell.com>", + "P Srinivas Rao <ansible.team@dell.com>", + "Rajshree Khare <ansible.team@dell.com>", + "Bhavneet Sharma <ansible.team@dell.com>", + "Ananthu S Kuttattu <ansible.team@dell.com>", + "Trisha Datta <ansible.team@dell.com>" + ], + "readme": "README.md", + "tags": [ + "storage" + ], + "description": "Ansible modules for PowerFlex", + "license": [ + "GPL-3.0-or-later", + "Apache-2.0" + ], + "license_file": null, + "dependencies": {}, + "repository": "https://github.com/dell/ansible-powerflex/tree/1.5.0", + "documentation": "https://github.com/dell/ansible-powerflex/tree/1.5.0/docs", + "homepage": "https://github.com/dell/ansible-powerflex/tree/1.5.0", + "issues": "https://www.dell.com/community/Automation/bd-p/Automation" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d8254015c7f5f969370d97d3badb243137e3e8a2f13cbfea625fc543f6f685b", + "format": 1 + }, + "format": 1 +}
\ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/MODULE-LICENSE b/ansible_collections/dellemc/powerflex/MODULE-LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/ansible_collections/dellemc/powerflex/MODULE-LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible_collections/dellemc/powerflex/README.md b/ansible_collections/dellemc/powerflex/README.md new file mode 100644 index 00000000..e15e7a54 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/README.md @@ -0,0 +1,60 @@ +# Ansible Modules for Dell Technologies PowerFlex + +The Ansible Modules for Dell Technologies (Dell) PowerFlex allow Data Center and IT administrators to use RedHat Ansible to automate and orchestrate the provisioning and management of Dell PowerFlex storage systems. + +The capabilities of the Ansible modules are managing SDCs, volumes, snapshots, storage pools, replication consistency groups, SDSs, devices, protection domains, MDM cluster, and to gather high level facts from the storage system. The options available are list, show, create, modify and delete. 
These tasks can be executed by running simple playbooks written in yaml syntax. The modules are written so that all the operations are idempotent, so making multiple identical requests has the same effect as making a single request. + +## Table of contents + +* [Code of conduct](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/CODE_OF_CONDUCT.md) +* [Maintainer guide](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/MAINTAINER_GUIDE.md) +* [Committer guide](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/COMMITTER_GUIDE.md) +* [Contributing guide](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/CONTRIBUTING.md) +* [Branching strategy](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/BRANCHING.md) +* [List of adopters](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/ADOPTERS.md) +* [Maintainers](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/MAINTAINERS.md) +* [Support](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/SUPPORT.md) +* [License](#license) +* [Security](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/SECURITY.md) +* [Prerequisites](#prerequisites) +* [List of Ansible modules for Dell PowerFlex](#list-of-ansible-modules-for-dell-powerflex) +* [Installation and execution of Ansible modules for Dell PowerFlex](#installation-and-execution-of-ansible-modules-for-dell-powerflex) +* [Releasing, Maintenance and Deprecation](#releasing-maintenance-and-deprecation) + +## License +The Ansible collection for PowerFlex is released and licensed under the GPL-3.0 license. See [LICENSE](https://github.com/dell/ansible-powerflex/blob/1.5.0/LICENSE) for the full terms. Ansible modules and modules utilities that are part of the Ansible collection for PowerFlex are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-powerflex/blob/1.5.0/MODULE-LICENSE) for the full terms. 
+ +## Prerequisites + +| **Ansible Modules** | **PowerFlex/VxFlex OS Version** | **SDK version** | **Python version** | **Ansible** | +|---------------------|-----------------------|-------|--------------------|--------------------------| +| v1.5.0 |3.5 <br> 3.6 <br> 4.0 | 1.6.0 | 3.9.x <br> 3.10.x <br> 3.11.x | 2.12 <br> 2.13 <br> 2.14 | + + * Please follow PyPowerFlex installation instructions on [PyPowerFlex Documentation](https://github.com/dell/python-powerflex) + +## Idempotency +The modules are written in such a way that all requests are idempotent and hence fault-tolerant. It essentially means that the result of a successfully performed request is independent of the number of times it is executed. + +## List of Ansible modules for Dell PowerFlex + * [Info module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#info-module) + * [Snapshot module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#snapshot-module) + * [SDC module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#sdc-module) + * [Storage pool module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#storage-pool-module) + * [Volume module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#volume-module) + * [SDS module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#sds-module) + * [Device Module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#device-module) + * [Protection Domain Module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#protection-domain-module) + * [MDM Cluster Module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#mdm-cluster-module) + +## Installation and execution of Ansible modules for Dell PowerFlex +The installation and execution steps of Ansible modules for Dell PowerFlex can be found 
[here](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/INSTALLATION.md). + +## Releasing, Maintenance and Deprecation + +Ansible Modules for Dell Technologies PowerFlex follows [Semantic Versioning](https://semver.org/). + +New versions will be released regularly if significant changes (bug fix or new feature) are made in the collection. + +Released code versions are located on "release" branches with names of the form "release-x.y.z" where x.y.z corresponds to the version number. More information on branching strategy followed can be found [here](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/BRANCHING.md). + +Ansible Modules for Dell Technologies PowerFlex deprecation cycle is aligned with that of [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html).
\ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml b/ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml new file mode 100644 index 00000000..733ca5d8 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml @@ -0,0 +1,67 @@ +objects: + role: {} +plugins: + become: {} + cache: {} + callback: {} + cliconf: {} + connection: {} + httpapi: {} + inventory: {} + lookup: {} + module: + device: + description: Manage device on Dell PowerFlex + name: device + namespace: '' + version_added: 1.1.0 + info: + description: Gathering information about Dell PowerFlex + name: info + namespace: '' + version_added: 1.0.0 + mdm_cluster: + description: Manage MDM cluster on Dell PowerFlex + name: mdm_cluster + namespace: '' + version_added: 1.3.0 + protection_domain: + description: Manage Protection Domain on Dell PowerFlex + name: protection_domain + namespace: '' + version_added: 1.2.0 + replication_consistency_group: + description: Manage replication consistency groups on Dell PowerFlex + name: replication_consistency_group + namespace: '' + version_added: 1.5.0 + sdc: + description: Manage SDCs on Dell PowerFlex + name: sdc + namespace: '' + version_added: 1.0.0 + sds: + description: Manage SDS on Dell PowerFlex + name: sds + namespace: '' + version_added: 1.1.0 + snapshot: + description: Manage Snapshots on Dell PowerFlex + name: snapshot + namespace: '' + version_added: 1.0.0 + storagepool: + description: Managing Dell PowerFlex storage pool + name: storagepool + namespace: '' + version_added: 1.0.0 + volume: + description: Manage volumes on Dell PowerFlex + name: volume + namespace: '' + version_added: 1.0.0 + netconf: {} + shell: {} + strategy: {} + vars: {} +version: 1.5.0 diff --git a/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml b/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml new file mode 100644 index 00000000..80c2934c --- /dev/null 
+++ b/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml @@ -0,0 +1,82 @@ +ancestor: null +releases: + 1.0.0: + modules: + - description: Gathering information about Dell PowerFlex + name: info + namespace: '' + - description: Manage SDCs on Dell PowerFlex + name: sdc + namespace: '' + - description: Manage Snapshots on Dell PowerFlex + name: snapshot + namespace: '' + - description: Managing Dell PowerFlex storage pool + name: storagepool + namespace: '' + - description: Manage volumes on Dell PowerFlex + name: volume + namespace: '' + release_date: '2021-03-24' + 1.1.0: + changes: + minor_changes: + - Added dual licensing. + - Gatherfacts module is enhanced to list devices. + modules: + - description: Manage device on Dell PowerFlex + name: device + namespace: '' + - description: Manage SDS on Dell PowerFlex + name: sds + namespace: '' + release_date: '2021-09-28' + 1.1.1: + changes: + deprecated_features: + - The dellemc_powerflex_gatherfacts module is deprecated and replaced with dellemc_powerflex_info + trivial: + - Product Guide, Release Notes and ReadMe updated as per community guidelines. + release_date: '2021-12-16' + 1.2.0: + changes: + minor_changes: + - Names of previously released modules have been changed from dellemc_powerflex_\<module + name> to \<module name>. + modules: + - description: Manage Protection Domain on Dell PowerFlex + name: protection_domain + namespace: '' + release_date: '2022-03-25' + 1.3.0: + changes: + minor_changes: + - Added execution environment manifest file to support building an execution + environment with ansible-builder. + - Enabled the check_mode support for info module + modules: + - description: Manage MDM cluster on Dell PowerFlex + name: mdm_cluster + namespace: '' + release_date: '2022-06-28' + 1.4.0: + changes: + minor_changes: + - Added support for 4.0.x release of PowerFlex OS. + - Info module is enhanced to support the listing volumes and storage pools with + statistics data. 
+ - Storage pool module is enhanced to get the details with statistics data. + - Volume module is enhanced to get the details with statistics data. + release_date: '2022-09-27' + 1.5.0: + changes: + minor_changes: + - Info module is enhanced to support the listing replication consistency groups. + - Renamed gateway_host to hostname + - Renamed verifycert to validate_certs. + - Updated modules to adhere with ansible community guidelines. + modules: + - description: Manage replication consistency groups on Dell PowerFlex + name: replication_consistency_group + namespace: '' + release_date: '2022-12-22' diff --git a/ansible_collections/dellemc/powerflex/changelogs/config.yaml b/ansible_collections/dellemc/powerflex/changelogs/config.yaml new file mode 100644 index 00000000..636258e1 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/changelogs/config.yaml @@ -0,0 +1,33 @@ +--- +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sanitize_changelog: true +sections: + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues +title: Dellemc.PowerFlex +trivial_section_name: trivial +use_fqcn: true diff --git a/ansible_collections/dellemc/powerflex/docs/ADOPTERS.md b/ansible_collections/dellemc/powerflex/docs/ADOPTERS.md new file mode 100644 index 00000000..826b5cd7 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/ADOPTERS.md @@ -0,0 +1,11 @@ +<!-- 
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +--> + +# List of adopters diff --git a/ansible_collections/dellemc/powerflex/docs/BRANCHING.md b/ansible_collections/dellemc/powerflex/docs/BRANCHING.md new file mode 100644 index 00000000..e244df70 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/BRANCHING.md @@ -0,0 +1,32 @@ +<!-- +Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +--> + +# Branching strategy + +Ansible modules for Dell PowerFlex follows a scaled trunk branching strategy where short-lived branches are created off of the main branch. When coding is complete, the branch is merged back into main after being approved in a pull request code review. + +## Branch naming convention + +| Branch Type | Example | Comment | +|--------------|-----------------------------------|-------------------------------------------| +| main | main | | +| Release | release-1.0 | hotfix: release-1.1 patch: release-1.0.1 | +| Feature | feature-9-vol-support | "9" referring to GitHub issue ID | +| Bug Fix | bugfix-110-fix-duplicates-issue | "110" referring to GitHub issue ID | + + +## Steps for working on a release branch + +1. Fork the repository. +2. Create a branch off of the main branch. The branch name should follow [branch naming convention](#branch-naming-convention). +3. Make your changes and commit them to your branch. +4. If other code changes have merged into the upstream main branch, perform a rebase of those changes into your branch. +5. 
Open a [pull request](https://github.com/dell/ansible-powerflex/pulls) between your branch and the upstream main branch. +6. Once your pull request has merged, your branch can be deleted. diff --git a/ansible_collections/dellemc/powerflex/docs/CODE_OF_CONDUCT.md b/ansible_collections/dellemc/powerflex/docs/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..cfc4993c --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/CODE_OF_CONDUCT.md @@ -0,0 +1,137 @@ +<!-- +Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +--> + +# Code of conduct - contributor covenant + +## Our pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at ansible.team@dell.com +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary ban + +**Community impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. 
Permanent ban + +**Community impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations.
\ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/docs/COMMITTER_GUIDE.md b/ansible_collections/dellemc/powerflex/docs/COMMITTER_GUIDE.md new file mode 100644 index 00000000..8af0752e --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/COMMITTER_GUIDE.md @@ -0,0 +1,49 @@ +<!-- +Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +--> + +# Committer guidelines + +These are the guidelines for people with commit privileges on the GitHub repository. Committers act as members of the Core Team and not necessarily employees of Dell. + +These guidelines apply to everyone and as Committers you have been given access to commit changes because you exhibit good judgment and have demonstrated your commitment to the vision of the project. We trust that you will use these privileges wisely and not abuse them. + +If these privileges are abused in any way and the quality of the project is compromised, our trust will be diminished and you may be asked to not commit or lose these privileges altogether. + +## General rules + +### Don't + +* Break the build. +* Commit directly. +* Compromise backward compatibility. +* Disrespect your Community Team members. Help them grow. +* Think it is someone else's job to test your code. Write tests for all the code you produce. +* Forget to keep things simple. +* Create technical debt. Fix-in-place and make it the highest priority above everything else. + +### Do + +* Keep it simple. +* Good work, your best every time. +* Keep the design of your software clean and maintainable. +* Squash your commits, avoid merges. +* Be active. Committers that are not active may have their permissions suspended. +* Write tests for all your deliverables. +* Automate everything. 
+* Maintain a high code coverage. +* Keep an open communication with other Committers. +* Ask questions. +* Document your contributions and remember to keep it simple. + +## People + +| Name | GitHub ID | Nickname | +|-------|-------------|------------| +| | | | diff --git a/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md b/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md new file mode 100644 index 00000000..b01639f3 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md @@ -0,0 +1,173 @@ +<!-- +Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +--> + +# How to contribute + +Become one of the contributors to this project! We thrive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/1.5.0/CODE_OF_CONDUCT.md). + +## Table of contents + +* [Become a contributor](#Become-a-contributor) +* [Submitting issues](#Submitting-issues) +* [Triage issues](#Triage-issues) +* [Your first contribution](#Your-first-contribution) +* [Branching](#Branching) +* [Signing your commits](#Signing-your-commits) +* [Pull requests](#Pull-requests) +* [Code reviews](#Code-reviews) +* [TODOs in the code](#TODOs-in-the-code) + +## Become a contributor + +You can contribute to this project in several ways. Here are some examples: + +* Contribute to the Ansible modules for Dell PowerFlex documentation and codebase. +* Report and triage bugs. +* Feature requests. 
+* Write technical documentation and blog posts, for users and contributors. +* Help others by answering questions about this project. + +## Submitting issues + +All issues related to Ansible modules for Dell PowerFlex, regardless of the service/repository the issue belongs to (see table above), should be submitted [here](https://github.com/dell/ansible-powerflex/issues). Issues will be triaged and labels will be used to indicate the type of issue. This section outlines the types of issues that can be submitted. + +### Report bugs + +We aim to track and document everything related to Ansible modules for Dell PowerFlex via the Issues page. The code and documentation are released with no warranties or SLAs and are intended to be supported through a community driven process. + +Before submitting a new issue, make sure someone hasn't already reported the problem. Look through the [existing issues](https://github.com/dell/ansible-powerflex/issues) for similar issues. + +Report a bug by submitting a [bug report](https://github.com/dell/ansible-powerflex/issues/new?labels=type%2Fbug%2C+needs-triage&template=bug_report.md&title=%5BBUG%5D%3A). Make sure that you provide as much information as possible on how to reproduce the bug. + +When opening a Bug please include this information to help with debugging: + +1. Version of relevant software: this software, Ansible, Python, SDK, etc. +2. Details of the issue explaining the problem: what, when, where +3. The expected outcome that was not met (if any) +4. Supporting troubleshooting information. __Note: Do not provide private company information that could compromise your company's security.__ + +An Issue __must__ be created before submitting any pull request. Any pull request that is created should be linked to an Issue. 
+ +### Feature request + +If you have an idea of how to improve this project, submit a [feature request](https://github.com/dell/ansible-powerflex/issues/new?labels=type%2Ffeature-request%2C+needs-triage&template=feature_request.md&title=%5BFEATURE%5D%3A). + +### Answering questions + +If you have a question and you can't find the answer in the documentation or issues, the next step is to submit a [question.](https://github.com/dell/ansible-powerflex/issues/new?labels=type%2Fquestion&template=ask-a-question.md&title=%5BQUESTION%5D%3A) + +We'd love your help answering questions being asked by other Ansible modules for Dell PowerFlex users. + +## Triage issues + +Triage helps ensure that issues resolve quickly by: + +* Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +* Giving a contributor the information they need before they commit to resolving an issue. +* Lowering the issue count by preventing duplicate issues. +* Streamlining the development process by preventing duplicate discussions. + +If you don't have the knowledge or time to code, consider helping with _issue triage_. The Ansible modules for Dell PowerFlex community will thank you for saving them time by spending some of yours. + +Read more about the ways you can [Triage issues](https://github.com/dell/ansible-powerflex/blob/1.5.0/ISSUE_TRIAGE.md). + +## Your first contribution + +Unsure where to begin contributing? Start by browsing issues labeled `beginner friendly` or `help wanted`. + +* [Beginner-friendly](https://github.com/dell/ansible-powerflex/issues?q=is%3Aopen+is%3Aissue+label%3A%22beginner+friendly%22) issues are generally straightforward to complete. 
+* [Help wanted](https://github.com/dell/ansible-powerflex/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) issues are problems we would like the community to help us with regardless of complexity. + +When you're ready to contribute, it's time to create a pull request. + +## Branching + +* [Branching Strategy for Ansible modules for Dell PowerFlex](https://github.com/dell/ansible-powerflex/blob/1.5.0/BRANCHING.md) + +## Signing your commits + +We require that developers sign off their commits to certify that they have permission to contribute the code in a pull request. This way of certifying is commonly known as the [Developer Certificate of Origin (DCO)](https://developercertificate.org/). We encourage all contributors to read the DCO text before signing a commit and making contributions. + +GitHub will prevent a pull request from being merged if there are any unsigned commits. + +### Signing a commit + +GPG (GNU Privacy Guard) will be used to sign commits. Follow the instructions [here](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/signing-commits) to create a GPG key and configure your GitHub account to use that key. + +Make sure you have your user name and e-mail set. This will be required for your signed commit to be properly verified. 
Check these references: + +* Setting up your github user name [reference](https://help.github.com/articles/setting-your-username-in-git/) +* Setting up your e-mail address [reference](https://help.github.com/articles/setting-your-commit-email-address-in-git/) + +Once Git and your GitHub account have been properly configured, you can add the -S flag to the git commits: + +```console +$ git commit -S -m your commit message +# Creates a signed commit +``` + +### Commit message format + +Ansible modules for Dell PowerFlex uses the guidelines for commit messages outlined in [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/) + +## Pull requests + +If this is your first time contributing to an open-source project on GitHub, make sure you read about [Creating a pull request](https://help.github.com/en/articles/creating-a-pull-request). + +A pull request must always link to at least one GitHub issue. If that is not the case, create a GitHub issue and link it. + +To increase the chance of having your pull request accepted, make sure your pull request follows these guidelines: + +* Title and description match the implementation. +* Commits within the pull request follow the formatting guidelines. +* The pull request closes one related issue. +* The pull request contains necessary tests that verify the intended behavior. +* If your pull request has conflicts, rebase your branch onto the main branch. + +If the pull request fixes a bug: + +* The pull request description must include `Fixes #<issue number>`. +* To avoid regressions, the pull request should include tests that replicate the fixed bug. + +The team _squashes_ all commits into one when we accept a pull request. The title of the pull request becomes the subject line of the squashed commit message. We still encourage contributors to write informative commit messages, as they become a part of the Git commit body. + +We use the pull request title when we generate change logs for releases. 
As such, we strive to make the title as informative as possible. + +Make sure that the title for your pull request uses the same format as the subject line in the commit message. + +### Quality gates for pull requests + +GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that gets checked into the repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out to the maintainers of the project for [support](https://github.com/dell/ansible-powerflex/blob/1.5.0/SUPPORT.md). + +#### Code sanitization + +[GitHub action](https://github.com/dell/ansible-powerflex/actions/workflows/ansible-test.yml) that analyzes source code to flag ansible sanity errors and runs Unit tests. + +## Code reviews + +All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests. + +A pull request must satisfy the following for it to be merged: + +* A pull request will require at least 2 maintainer approvals. +* Maintainers must perform a review to ensure the changes adhere to guidelines laid out in this document. +* If any commits are made after the PR has been approved, the PR approval will automatically be removed and the above process must happen again. + +## Code style + +Ensure the added code has the required documentation, examples and unit tests. + +### Sanity + +Run ansible-test sanity --docker default on your code to ensure sanity. Ensure the code does not have any Andersson script violations and does not break any existing unit test workflows. + +### TODOs in the code + +We don't like TODOs in the code or documentation. 
It is really best if you sort out all issues you can see with the changes before we check the changes in. diff --git a/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md b/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md new file mode 100644 index 00000000..7cf4da17 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md @@ -0,0 +1,106 @@ +<!-- +Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +--> + +# Installation and execution of Ansible modules for Dell PowerFlex + +## Installation of SDK +* Install the python SDK named [PyPowerFlex](https://pypi.org/project/PyPowerFlex/). It can be installed using pip, based on appropriate python version. Execute this command: + + pip install PyPowerFlex +* Alternatively, Clone the repo "https://github.com/dell/python-powerflex" + using command: + + git clone https://github.com/dell/python-powerflex.git + * Go to the root directory of setup. + * Execute this command: + + pip install . 
+ +## Building collections + * Use this command to build the collection from source code: + + ansible-galaxy collection build + + For more details on how to build a tar ball, please refer to: [Building the collection](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections_distributing.html#building-your-collection-tarball) + +## Installing collections + +#### Online installation of collections + * Use this command to install the latest collection hosted in [galaxy portal](https://galaxy.ansible.com/dellemc/powerflex): + + ansible-galaxy collection install dellemc.powerflex -p <install_path> + +#### Offline installation of collections + + * Download the latest tar build from any of the available distribution channel [Ansible Galaxy](https://galaxy.ansible.com/dellemc/powerflex) /[Automation Hub](https://console.redhat.com/ansible/automation-hub/repo/published/dellemc/powerflex) and use this command to install the collection anywhere in your system: + + ansible-galaxy collection install dellemc-powerflex-1.5.0.tar.gz -p <install_path> + + * Set the environment variable: + + export ANSIBLE_COLLECTIONS_PATHS=$ANSIBLE_COLLECTIONS_PATHS:<install_path> + +## Using collections + + * In order to use any Ansible module, ensure that the importing of proper FQCN (Fully Qualified Collection Name) must be embedded in the playbook. + This example can be referred to: + + collections: + - dellemc.powerflex + + * In order to use installed collection in a specific task use a proper FQCN (Fully Qualified Collection Name). Refer to this example: + + tasks: + - name: Get Volume details + dellemc.powerflex.volume + + * For generating Ansible documentation for a specific module, embed the FQCN before the module name. Refer to this example: + + ansible-doc dellemc.powerflex.volume + + +## Ansible modules execution + +The Ansible server must be configured with Python library for PowerFlex to run the Ansible playbooks. 
The [Documents](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/) provide information on different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on various parameters which needs to be configured before running the modules. + +## SSL certificate validation + +* Copy the CA certificate to the "/etc/pki/ca-trust/source/anchors" path of the host by any external means. +* Set the "REQUESTS_CA_BUNDLE" environment variable to the path of the SSL certificate using the command: + + export REQUESTS_CA_BUNDLE=/etc/pki/ca-trust/source/anchors/<<Certificate_Name>> +* Import the SSL certificate to host using the command: + + update-ca-trust extract +* If "TLS CA certificate bundle error" occurs, then follow these steps: + + cd /etc/pki/tls/certs/ + openssl x509 -in ca-bundle.crt -text -noout + +## Results +Each module returns the updated state and details of the entity, For example, if you are using the Volume module, all calls will return the updated details of the volume. Sample result is shown in each module's documentation. + +## Ansible execution environment +Ansible can also be installed in a container environment. Ansible Builder provides the ability to create reproducible, self-contained environments as container images that can be run as Ansible execution environments. 
+* Install the ansible builder package using: + + pip3 install ansible-builder +* Ensure the execution-environment.yml is at the root of collection and create the execution environment using: + + ansible-builder build --tag <tag_name> --container-runtime docker +* After the image is built, run the container using: + + docker run -it <tag_name> /bin/bash +* Verify collection installation using command: + + ansible-galaxy collection list +* The playbook can be run on the container using: + + docker run --rm -v $(pwd):/runner <tag_name> ansible-playbook info_test.yml diff --git a/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md b/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md new file mode 100644 index 00000000..f764df38 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md @@ -0,0 +1,306 @@ +<!-- +Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +--> + +# Triage issues + +The main goal of issue triage is to categorize all incoming issues and make sure each issue has all basic information needed for anyone else to understand and be able to start working on it. + +> **Note:** This information is for project Maintainers, Owners, and Admins. If you are a Contributor, then you will not be able to perform most of the tasks in this topic. + +The core maintainers of this project are responsible for categorizing all incoming issues and delegating any critical or important issue to other maintainers. Triage provides an important way to contribute to an open source project. + +Triage helps ensure issues resolve quickly by: + +- Ensuring the issue's intent and purpose is conveyed precisely. 
This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +- Giving a contributor the information they need before they commit to resolving an issue. +- Lowering the issue count by preventing duplicate issues. +- Streamlining the development process by preventing duplicate discussions. + +If you don't have the knowledge or time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. + +## 1. Find issues that need triage + +The easiest way to find issues that haven't been triaged is to search for issues with the `needs-triage` label. + +## 2. Ensure the issue contains basic information + +Make sure that the issue's author provided the standard issue information. This project utilizes GitHub issue templates to guide contributors to provide standard information that must be included for each type of template or type of issue. + +### Standard issue information that must be included + +This section describes the various issue templates and the expected content. + +#### Bug reports + +Should explain what happened, what was expected and how to reproduce it together with any additional information that may help giving a complete picture of what happened such as screenshots, output and any environment related information that's applicable and/or maybe related to the reported problem: + + - Ansible Version: [e.g. 2.14] + - Python Version [e.g. 3.11] + - Ansible modules for Dell PowerFlex Version: [e.g. 1.5.0] + - PowerFlex SDK version: [e.g. PyPowerFlex 1.6.0] + - Any other additional information... + +#### Feature requests + +Should explain what feature that the author wants to be added and why that is needed. + +#### Ask a question requests + +In general, if the issue description and title is perceived as a question no more information is needed. 
+ +### Good practices + +To make it easier for everyone to understand and find issues they're searching for it's suggested as a general rule of thumb to: + +- Make sure that issue titles are named to explain the subject of the issue, have correct spelling and don't include irrelevant information and/or sensitive information. +- Make sure that issue descriptions don't include irrelevant information. +- Make sure that issues do not contain sensitive information. +- Make sure that issues have all relevant fields filled in. +- Do your best effort to change title and description or request suggested changes by adding a comment. + +> **Note:** Above rules are applicable to both new and existing issues. + +### Dealing with missing information + +Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem. Label issue with `triage/needs-information`. + +If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time. Label issue with `triage/needs-information`. + +If the author does not respond to the requested information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +If you receive a notification with additional information provided but you are no longer on issue triage and you feel you do not have time to handle it, you should delegate it to the current person on issue triage. + +## 3. Categorizing an issue + +### Duplicate issues + +Make sure it's not a duplicate by searching existing issues using related terms from the issue title and description. 
If you think you know there is an existing issue, but can't find it, please reach out to one of the maintainers and ask for help. If you identify that the issue is a duplicate of an existing issue: + +1. Add a comment `duplicate of #<issue number>` +2. Add the `triage/duplicate` label + +### Bug reports + +If it's not perfectly clear that it's an actual bug, quickly try to reproduce it. + +**It's a bug/it can be reproduced:** + +1. Add a comment describing detailed steps for how to reproduce it, if applicable. +2. If you know that maintainers wont be able to put any resources into it for some time then label the issue with `help wanted` and optionally `beginner friendly` together with pointers on which code to update to fix the bug. This should signal to the community that we would appreciate any help we can get to resolve this. +3. Move on to [prioritizing the issue](#4-prioritization-of-issues). + +**It can't be reproduced:** + +1. Either [ask for more information](#2-ensure-the-issue-contains-basic-information) needed to investigate it more thoroughly. Provide details in a comment. +2. Either [delegate further investigations](#investigation-of-issues) to someone else. Provide details in a comment. + +**It works as intended/by design:** + +1. Kindly and politely add a comment explaining briefly why we think it works as intended and close the issue. +2. Label the issue `triage/works-as-intended`. +3. Remove the `needs-triage` label. + +**It does not work as intended/by design:** + +### Feature requests + +1. If the feature request does not align with the product vision, add a comment indicating so, remove the `needs-triage` label and close the issue +2. Otherwise, move on to [prioritizing the issue](#4-prioritization-of-issues). Assign the appropriate priority label to the issue, add the appropriate comments to the issue, and remove the `needs-triage` label. + +## 4. 
Prioritization of issues + +In general bugs and feature request issues should be labeled with a priority. + +Adding priority levels can be difficult. Ensure you have the knowledge, context, and the experience before prioritizing any issue. + +If you have any uncertainty as to which priority level to assign, please ask the maintainers for help. + +| Label | Description | +| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | +| `priority/critical` | Highest priority. Must be actively worked on as someone's top priority immediately. | +| `priority/high` | Must be worked on soon, ideally in time for the next release. | +| `priority/low` | Lowest priority. Possibly useful, but not yet enough interest in it. | + +### Critical priority + +1. If an issue has been categorized and any of this criteria apply, the issue should be labeled as critical and must be actively worked on as someone's top priority immediately. + + - Results in any data loss + - Critical security or performance issues + - Problem that makes a feature unusable + - Multiple users experience a severe problem affecting their business, users etc. + +2. Label the issue `priority/critical`. +3. Escalate the problem to the maintainers. +4. Assign or ask a maintainer for help assigning someone to make this issue their top priority immediately. +5. Add the issue to the next upcoming release milestone. + +### High priority + +1. Label the issue `priority/high`. +2. Add the issue to the next upcoming release milestone. +3. Prioritize it or assign someone to work on it now or very soon. +4. Consider requesting [help from the community](#5-requesting-help-from-the-community). + +### Low priority + +1. If the issue is deemed possibly useful but a low priority label the issue `priority/low`. +2. The amount of interest in the issue will determine if the priority elevated. +3. 
Consider requesting [help from the community](#5-requesting-help-from-the-community). + +## 5. Requesting help from the community + +Depending on the issue and/or priority, it's always a good idea to consider signalling to the community that help from community is appreciated and needed in case an issue is not prioritized to be worked on by maintainers. Use your best judgement. In general, requesting help from the community means that a contribution has a good chance of getting accepted and merged. + +In many cases the issue author or community as a whole is more suitable to contribute changes since they're experts in their domain. It's also quite common that someone has tried to get something to work using the documentation without success and made an effort to get it to work and/or reached out to the community to get the missing information. + +1. Kindly and politely add a comment to alert update subscribers. + - Explain the issue and need for resolution. Be sure and detail that the issue has not been prioritized and that the issue has not been scheduled for work by the maintainers. + - If possible or applicable, add pointers and references to the code/files that need to be revised. Provide any idea as to the solution. This will help the maintainers get started on resolving the issue. +2. Label the issue with `help wanted`. +3. If applicable, label the issue with `beginner friendly` to denote that the issue is suitable for a beginner to work on. + +## Investigation of issues + +When an issue has all basic information provided, but the reported problem cannot be reproduced at a first glance, the issue is labeled `triage/needs-information`. 
Depending on the perceived severity and/or number of [upvotes](https://help.github.com/en/articles/about-conversations-on-github#reacting-to-ideas-in-comments), the investigation will either be delegated to another maintainer for further investigation or put on hold until someone else (maintainer or contributor) picks it up and eventually starts investigating it. + +Even if you don't have the time or knowledge to investigate an issue we highly recommend that you [upvote](https://help.github.com/en/articles/about-conversations-on-github#reacting-to-ideas-in-comments) the issue if you happen to have the same problem. If you have further details that may help investigating the issue please provide as much information as possible. + +## External pull requests + +Part of issue triage should also be triaging of external PRs. Main goal should be to make sure PRs from external contributors have an owner/reviewer and are not forgotten. + +1. Check new external PRs which do not have a reviewer. +1. Check if there is a link to an existing issue. +1. If not and you know which issue it is solving, add the link yourself, otherwise ask the author to link the issue or create one. +1. Assign a reviewer based on who was handling the linked issue or what code or feature the PR touches (look at who was the last to make changes there if all else fails). + +## GitHub issue management workflow + +This section describes the triage workflow for new GitHub issues that get created. + +### GitHub Issue: Bug + +This workflow starts off with a GitHub issue of type bug being created. + +1. Collaborator or maintainer creates a GitHub bug using the appropriate GitHub issue template +2. By default a bug will be created with the `type/bug` and `needs-triage` labels + +The following flow chart outlines the triage process for bugs. 
+ +<!-- https://textik.com/#38ec14781648871c --> +``` + +--------------------------+ + | New bug issue opened/more| + | information added | + +-------------|------------+ + | + | + +----------------------------------+ NO +--------------|-------------+ + | label: triage/needs-information --------- All required information | + | | | contained in issue? | + +-----------------------------|----+ +--------------|-------------+ + | | YES + | | + +--------------------------+ | +---------------------+ YES +---------------------------------------+ + |label: | | | Dupicate Issue? ------- Comment `Duplicate of #<issue number>` + |triage/needs-investigation| | NO | | | Remove needs-triage label | + +------|-------------------+ | +----------|----------+ | label: triage/duplicate | + | | | NO +-----------------|---------------------+ + YES | | | | + | +---------------|----+ NO +------------|------------+ | + | |Needs investigation?|---------- Can it be reproduced? | | + |------- | +------------|------------+ | + +--------------------+ | YES | + | +----------|----------+ + +-------------------------+ +------------|------------+ | Close Issue | + | Add release-found label |------------------ Works as intended? 
| | | + | label: release-found/* | NO | | +----------|----------+ + +------------|------------+ +------------|------------+ | + | | | + | | YES | + +-----------------------------+ +----------------|----------------+ | + | Add area label | | Add comment | | + | label: area/* | | Remove needs-triage label ------------------| + +------------|----------------+ | label: triage/works-as-intended | + | +---------------------------------+ + | + +------------|-------------+ +----------+ + | Add priority label | | Done ---------------------------------------- + | label: priority/* | +----|-----+ | + +------------|-------------+ |NO | + | | +------------------|------------------+ + +------------|-------------+ +----|----------------+ YES | Add details to issue | + | ------------ Signal Community? ---------- label: help wanted | + |Remove needs-triage label | | | | label: beginner friendly (optional)| + +--------------------------+ +---------------------+ +-------------------------------------+ + +``` + +If the author does not respond to a request for more information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +### GitHub issue: feature request + +This workflow starts off with a GitHub issue of type feature request being created. + +1. Collaborator or maintainer creates a GitHub feature request using the appropriate GitHub issue template +2. By default a feature request will be created with the `type/feature-request` and `needs-triage` labels + +This flow chart outlines the triage process for feature requests. 
+ +<!-- https://textik.com/#81e81fc717f63429 --> +``` + +---------------------------------+ + |New feature request issue opened/| + |more information added | + +----------------|----------------+ + | + | + +---------------------------------+ NO +-------------|------------+ + | label: triage/needs-information ---------- All required information | + | | | contained in issue? | + +---------------------------------+ +-------------|------------+ + | + | + +---------------------------------------+ | + |Comment `Duplicate of #<issue number>` | YES +----------|----------+ + |Remove needs-triage label ------- Duplicate issue? | + |label: triage/duplicate | | | + +-----|---------------------------------+ +-----------|---------+ + | |NO + | +-------------------------+ NO +-----------------------------+ + | |Add comment |-------- Does feature request align | + | |Remove needs-triage label| | with product vision? | + | +------|------------------+ +--------------|--------------+ + | | | YES + | | +-----------------|----------------+ + | | |Change feature-request to feature | + | | |Remove label: type/feature-request| + | | |Add label: type/feature | + | | +-----------------|----------------+ + | | | + | | +--------------|--------------+ + | | | Add area label | + | | | label: area/* | + | | +--------------|--------------+ + | | | + +-|---------|---+ +--------+ +--------------|--------------+ + | Close issue | | Done --------- Add priority label | + | | | | | label: priority/* | + +---------------+ +--------+ +-----------------------------+ +``` + +If the author does not respond to a request for more information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +In some cases you may receive a request you do not wish to accept. Perhaps the request doesn't align with the project scope or vision. 
It is important to tactfully handle contributions that don't meet the project standards. + +1. Acknowledge the person behind the contribution and thank them for their interest and contribution +2. Explain why it didn't fit into the scope of the project or vision +3. Don't leave an unwanted contributions open. Immediately close the contribution you do not wish to accept diff --git a/ansible_collections/dellemc/powerflex/docs/MAINTAINERS.md b/ansible_collections/dellemc/powerflex/docs/MAINTAINERS.md new file mode 100644 index 00000000..24ab255d --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/MAINTAINERS.md @@ -0,0 +1,19 @@ +<!-- +Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +--> + +# Maintainers + +* Ananthu Kuttattu (kuttattz) +* Bhavneet Sharma (Bhavneet-Sharma) +* Jennifer John (Jennifer-John) +* Meenakshi Dembi (meenakshidembi691) +* Pavan Mudunuri (Pavan-Mudunuri) +* Previnkumar G (Previnkumar-G) +* Trisha Datta (trisha-dell) diff --git a/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md b/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md new file mode 100644 index 00000000..b99e5b22 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md @@ -0,0 +1,38 @@ +<!-- +Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +--> + +# Maintainer guidelines + +As a Maintainer of this project you have the responsibility of keeping true to the vision of the project with a high-degree quality. 
Being part of this group is a privilege that requires dedication and time to attend to the daily activities that are associated with the maintenance of this project. + +## Becoming a maintainer + +Most Maintainers started as Contributors that have demonstrated their commitment to the success of the project. Contributors wishing to become Maintainers, must demonstrate commitment to the success of the project by contributing code, reviewing others' work, and triaging issues on a regular basis for at least three months. + +The contributions alone don't make you a Maintainer. You need to earn the trust of the current Maintainers and other project Contributors, that your decisions and actions are in the best interest of the project. + +Periodically, the existing Maintainers curate a list of Contributors who have shown regular activity on the project over the prior months. It is from this list that Maintainer candidates are selected. + +After a candidate is selected, the existing Maintainers discuss the candidate over the next 5 business days, provide feedback, and vote. At least 75% of the current Maintainers must vote in the affirmative for a candidate to be moved to the role of Maintainer. + +If a candidate is approved, a Maintainer contacts the candidate to invite them to open a pull request that adds the contributor to the MAINTAINERS file. The candidate becomes a Maintainer once the pull request is merged. + +## Maintainer policies + +* Lead by example +* Follow the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/1.5.0/CODE_OF_CONDUCT.md) and the guidelines in the [Contributing](https://github.com/dell/ansible-powerflex/blob/1.5.0/CONTRIBUTING.md) and [Committer](https://github.com/dell/ansible-powerflex/blob/1.5.0/COMMITTER_GUIDE.md) guides +* Promote a friendly and collaborative environment within our community +* Be actively engaged in discussions, answering questions, updating defects, and reviewing pull requests +* Criticize code, not people. 
Ideally, tell the contributor a better way to do what they need. +* Clearly mark optional suggestions as such. As a best practice, start your comment with *At your option: …* + +## Project decision making + +All project decisions should contribute to successfully executing on the project roadmap. Project milestones are established for each release.
\ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/docs/Product Guide.md b/ansible_collections/dellemc/powerflex/docs/Product Guide.md new file mode 100644 index 00000000..b255917e --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/Product Guide.md @@ -0,0 +1,5437 @@ +# Ansible Modules for Dell Technologies PowerFlex +## Product Guide 1.5.0 +© 2022 Dell Inc. or its subsidiaries. All rights reserved. Dell, and other trademarks are trademarks of Dell Inc. or its subsidiaries. Other trademarks may be trademarks of their respective owners. + +-------------- +## Contents +* [Device Module](#device-module) + * [Synopsis](#synopsis) + * [Parameters](#parameters) + * [Notes](#notes) + * [Examples](#examples) + * [Return Values](#return-values) + * [Authors](#authors) +* [Info Module](#info-module) + * [Synopsis](#synopsis-1) + * [Parameters](#parameters-1) + * [Notes](#notes-1) + * [Examples](#examples-1) + * [Return Values](#return-values-1) + * [Authors](#authors-1) +* [MDM Cluster Module](#mdm-cluster-module) + * [Synopsis](#synopsis-2) + * [Parameters](#parameters-2) + * [Notes](#notes-2) + * [Examples](#examples-2) + * [Return Values](#return-values-2) + * [Authors](#authors-2) +* [Protection Domain Module](#protection-domain-module) + * [Synopsis](#synopsis-3) + * [Parameters](#parameters-3) + * [Notes](#notes-3) + * [Examples](#examples-3) + * [Return Values](#return-values-3) + * [Authors](#authors-3) +* [Replication Consistency Group Module](#replication-consistency-group-module) + * [Synopsis](#synopsis-4) + * [Parameters](#parameters-4) + * [Notes](#notes-4) + * [Examples](#examples-4) + * [Return Values](#return-values-4) + * [Authors](#authors-4) +* [SDC Module](#sdc-module) + * [Synopsis](#synopsis-5) + * [Parameters](#parameters-5) + * [Notes](#notes-5) + * [Examples](#examples-5) + * [Return Values](#return-values-5) + * [Authors](#authors-5) +* [SDS Module](#sds-module) + * [Synopsis](#synopsis-6) + * 
[Parameters](#parameters-6) + * [Notes](#notes-6) + * [Examples](#examples-6) + * [Return Values](#return-values-6) + * [Authors](#authors-6) +* [Storage Pool Module](#storage-pool-module) + * [Synopsis](#synopsis-7) + * [Parameters](#parameters-7) + * [Notes](#notes-7) + * [Examples](#examples-7) + * [Return Values](#return-values-7) + * [Authors](#authors-7) +* [Volume Module](#volume-module) + * [Synopsis](#synopsis-8) + * [Parameters](#parameters-8) + * [Notes](#notes-8) + * [Examples](#examples-8) + * [Return Values](#return-values-8) + * [Authors](#authors-8) + +-------------- + +# Device Module + +Manage device on Dell PowerFlex + +### Synopsis + Managing device on PowerFlex storage system includes adding new device, getting details of device, and removing a device. + +### Parameters + +<table> + <tr> + <th colspan=1>Parameter</th> + <th width="20%">Type</th> + <th>Required</th> + <th>Default</th> + <th>Choices</th> + <th width="80%">Description</th> + </tr> + <tr> + <td colspan=1 > current_pathname</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Full path of the device to be added. <br> Required while adding a device. </td> + </tr> + <tr> + <td colspan=1 > device_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Device name. <br> Mutually exclusive with device_id. </td> + </tr> + <tr> + <td colspan=1 > device_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Device ID. <br> Mutually exclusive with device_name. </td> + </tr> + <tr> + <td colspan=1 > sds_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The name of the SDS. <br> Required while adding a device. <br> Mutually exclusive with sds_id. </td> + </tr> + <tr> + <td colspan=1 > sds_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The ID of the SDS. <br> Required while adding a device. <br> Mutually exclusive with sds_name. 
</td> + </tr> + <tr> + <td colspan=1 > storage_pool_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Storage Pool name. <br> Used while adding a storage device. <br> Mutually exclusive with storage_pool_id, acceleration_pool_id and acceleration_pool_name. </td> + </tr> + <tr> + <td colspan=1 > storage_pool_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Storage Pool ID. <br> Used while adding a storage device. <br> Media type supported are SSD and HDD. <br> Mutually exclusive with storage_pool_name, acceleration_pool_id and acceleration_pool_name. </td> + </tr> + <tr> + <td colspan=1 > acceleration_pool_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Acceleration Pool Name. <br> Used while adding an acceleration device. <br> Media type supported are SSD and NVDIMM. <br> Mutually exclusive with storage_pool_id, storage_pool_name and acceleration_pool_name. </td> + </tr> + <tr> + <td colspan=1 > acceleration_pool_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Acceleration Pool ID. <br> Used while adding an acceleration device. <br> Media type supported are SSD and NVDIMM. <br> Mutually exclusive with acceleration_pool_name, storage_pool_name and storage_pool_id. </td> + </tr> + <tr> + <td colspan=1 > protection_domain_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Protection domain name. <br> Used while identifying a storage pool along with storage_pool_name. <br> Mutually exclusive with protection_domain_id. </td> + </tr> + <tr> + <td colspan=1 > protection_domain_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Protection domain ID. <br> Used while identifying a storage pool along with storage_pool_name. <br> Mutually exclusive with protection_domain_name. 
</td> + </tr> + <tr> + <td colspan=1 > external_acceleration_type</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>Invalid</li> <li>None</li> <li>Read</li> <li>Write</li> <li>ReadAndWrite</li> </ul></td> + <td> <br> Device external acceleration types. <br> Used while adding a device. </td> + </tr> + <tr> + <td colspan=1 > media_type</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>HDD</li> <li>SSD</li> <li>NVDIMM</li> </ul></td> + <td> <br> Device media types. <br> Required while adding a device. </td> + </tr> + <tr> + <td colspan=1 > state</td> + <td> str </td> + <td> True </td> + <td></td> + <td> <ul> <li>present</li> <li>absent</li> </ul></td> + <td> <br> State of the device. </td> + </tr> + <tr> + <td colspan=1 > hostname</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> IP or FQDN of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=1 > username</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The username of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=1 > password</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The password of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=1 > validate_certs</td> + <td> bool </td> + <td></td> + <td> True </td> + <td></td> + <td> <br> Boolean variable to specify whether or not to validate SSL certificate. <br> True - Indicates that the SSL certificate should be verified. <br> False - Indicates that the SSL certificate should not be verified. </td> + </tr> + <tr> + <td colspan=1 > port</td> + <td> int </td> + <td></td> + <td> 443 </td> + <td></td> + <td> <br> Port number through which communication happens with PowerFlex host. </td> + </tr> + <tr> + <td colspan=1 > timeout</td> + <td> int </td> + <td></td> + <td> 120 </td> + <td></td> + <td> <br> Time after which connection will get terminated. <br> It is to be mentioned in seconds. 
</td> + </tr> + </table> + +### Notes +* The value for device_id is generated only after successful addition of the device. +* To uniquely identify a device, either device_id can be passed or one of current_pathname or device_name must be passed with sds_id or sds_name. +* It is recommended to install Rfcache driver for SSD device on SDS in order to add it to an acceleration pool. +* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Add a device + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node1" + media_type: "HDD" + device_name: "device2" + storage_pool_name: "pool1" + protection_domain_name: "domain1" + external_acceleration_type: "ReadAndWrite" + state: "present" +- name: Get device details using device_id + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + device_id: "d7fe088900000000" + state: "present" +- name: Get device details using (current_pathname, sds_name) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node0" + state: "present" +- name: Get device details using (current_pathname, sds_id) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_id: "5717d71800000000" + state: "present" +- name: Remove a device using device_id + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: 
"{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + device_id: "76eb7e2f00010000" + state: "absent" +- name: Remove a device using (current_pathname, sds_name) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node1" + state: "absent" +``` + +### Return Values + +<table> + <tr> + <th colspan=3>Key</th> + <th>Type</th> + <th>Returned</th> + <th width="100%">Description</th> + </tr> + <tr> + <td colspan=3 > changed </td> + <td> bool </td> + <td> always </td> + <td> Whether or not the resource has changed. </td> + </tr> + <tr> + <td colspan=3 > device_details </td> + <td> dict </td> + <td> When device exists </td> + <td> Details of the device. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > accelerationPoolId </td> + <td> str </td> + <td>success</td> + <td> Acceleration pool ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > accelerationPoolName </td> + <td> str </td> + <td>success</td> + <td> Acceleration pool name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > accelerationProps </td> + <td> str </td> + <td>success</td> + <td> Indicates acceleration props. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > aggregatedState </td> + <td> str </td> + <td>success</td> + <td> Indicates aggregated state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > ataSecurityActive </td> + <td> bool </td> + <td>success</td> + <td> Indicates ATA security active state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > autoDetectMediaType </td> + <td> str </td> + <td>success</td> + <td> Indicates auto detection of media type. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > cacheLookAheadActive </td> + <td> bool </td> + <td>success</td> + <td> Indicates cache look ahead active state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > capacity </td> + <td> int </td> + <td>success</td> + <td> Device capacity. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > capacityLimitInKb </td> + <td> int </td> + <td>success</td> + <td> Device capacity limit in KB. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > deviceCurrentPathName </td> + <td> str </td> + <td>success</td> + <td> Device current path name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > deviceOriginalPathName </td> + <td> str </td> + <td>success</td> + <td> Device original path name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > deviceState </td> + <td> str </td> + <td>success</td> + <td> Indicates device state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > deviceType </td> + <td> str </td> + <td>success</td> + <td> Indicates device type. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > errorState </td> + <td> str </td> + <td>success</td> + <td> Indicates error state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > externalAccelerationType </td> + <td> str </td> + <td>success</td> + <td> Indicates external acceleration type. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > fglNvdimmMetadataAmortizationX100 </td> + <td> int </td> + <td>success</td> + <td> Indicates FGL NVDIMM meta data amortization value. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > fglNvdimmWriteCacheSize </td> + <td> int </td> + <td>success</td> + <td> Indicates FGL NVDIMM write cache size. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > firmwareVersion </td> + <td> str </td> + <td>success</td> + <td> Indicates firmware version. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > id </td> + <td> str </td> + <td>success</td> + <td> Device ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > ledSetting </td> + <td> str </td> + <td>success</td> + <td> Indicates LED setting. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > links </td> + <td> list </td> + <td>success</td> + <td> Device links. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=1 > href </td> + <td> str </td> + <td>success</td> + <td> Device instance URL. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=1 > rel </td> + <td> str </td> + <td>success</td> + <td> Relationship of device with different entities. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > logicalSectorSizeInBytes </td> + <td> int </td> + <td>success</td> + <td> Logical sector size in bytes. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > longSuccessfulIos </td> + <td> list </td> + <td>success</td> + <td> Indicates long successful IOs. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > maxCapacityInKb </td> + <td> int </td> + <td>success</td> + <td> Maximum device capacity limit in KB. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > mediaFailing </td> + <td> bool </td> + <td>success</td> + <td> Indicates media failing. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > mediaType </td> + <td> str </td> + <td>success</td> + <td> Indicates media type. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > modelName </td> + <td> str </td> + <td>success</td> + <td> Indicates model name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > name </td> + <td> str </td> + <td>success</td> + <td> Device name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > persistentChecksumState </td> + <td> str </td> + <td>success</td> + <td> Indicates persistent checksum state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > physicalSectorSizeInBytes </td> + <td> int </td> + <td>success</td> + <td> Physical sector size in bytes. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > protectionDomainId </td> + <td> str </td> + <td>success</td> + <td> Protection domain ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > protectionDomainName </td> + <td> str </td> + <td>success</td> + <td> Protection domain name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > raidControllerSerialNumber </td> + <td> str </td> + <td>success</td> + <td> RAID controller serial number. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > rfcacheErrorDeviceDoesNotExist </td> + <td> bool </td> + <td>success</td> + <td> Indicates RF cache error device does not exist. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > rfcacheProps </td> + <td> str </td> + <td>success</td> + <td> RF cache props. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > sdsId </td> + <td> str </td> + <td>success</td> + <td> SDS ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > sdsName </td> + <td> str </td> + <td>success</td> + <td> SDS name. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > serialNumber </td> + <td> str </td> + <td>success</td> + <td> Indicates Serial number. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > spSdsId </td> + <td> str </td> + <td>success</td> + <td> Indicates SPs SDS ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > ssdEndOfLifeState </td> + <td> str </td> + <td>success</td> + <td> Indicates SSD end of life state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > storagePoolId </td> + <td> str </td> + <td>success</td> + <td> Storage Pool ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > storagePoolName </td> + <td> str </td> + <td>success</td> + <td> Storage Pool name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > storageProps </td> + <td> list </td> + <td>success</td> + <td> Storage props. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > temperatureState </td> + <td> str </td> + <td>success</td> + <td> Indicates temperature state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > vendorName </td> + <td> str </td> + <td>success</td> + <td> Indicates vendor name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > writeCacheActive </td> + <td> bool </td> + <td>success</td> + <td> Indicates write cache active. </td> + </tr> + </table> + +### Authors +* Rajshree Khare (@khareRajshree) <ansible.team@dell.com> + +-------------------------------- +# Info Module + +Gathering information about Dell PowerFlex + +### Synopsis + Gathering information about Dell PowerFlex storage system includes getting the api details, list of volumes, SDSs, SDCs, storage pools, protection domains, snapshot policies, and devices. 
+ +### Parameters + +<table> + <tr> + <th colspan=2>Parameter</th> + <th width="20%">Type</th> + <th>Required</th> + <th>Default</th> + <th>Choices</th> + <th width="80%">Description</th> + </tr> + <tr> + <td colspan=2 > gather_subset</td> + <td> list <br> elements: str </td> + <td></td> + <td></td> + <td> <ul> <li>vol</li> <li>storage_pool</li> <li>protection_domain</li> <li>sdc</li> <li>sds</li> <li>snapshot_policy</li> <li>device</li> <li>rcg</li> </ul></td> + <td> <br> List of string variables to specify the Powerflex storage system entities for which information is required. <br> Volumes - vol. <br> Storage pools - storage_pool. <br> Protection domains - protection_domain. <br> SDCs - sdc. <br> SDSs - sds. <br> Snapshot policies - snapshot_policy. <br> Devices - device. <br> Replication consistency groups - rcg. </td> + </tr> + <tr> + <td colspan=2 > filters</td> + <td> list <br> elements: dict </td> + <td></td> + <td></td> + <td></td> + <td> <br> List of filters to support filtered output for storage entities. <br> Each filter is a list of filter_key, filter_operator, filter_value. <br> Supports passing of multiple filters. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > filter_key </td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> Name identifier of the filter. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > filter_operator </td> + <td> str </td> + <td> True </td> + <td></td> + <td> <ul> <li>equal</li> </ul></td> + <td> <br> Operation to be performed on filter key. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > filter_value </td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> Value of the filter key. </td> + </tr> + <tr> + <td colspan=2 > hostname</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> IP or FQDN of the PowerFlex host. 
</td> + </tr> + <tr> + <td colspan=2 > username</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The username of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > password</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The password of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > validate_certs</td> + <td> bool </td> + <td></td> + <td> True </td> + <td></td> + <td> <br> Boolean variable to specify whether or not to validate SSL certificate. <br> True - Indicates that the SSL certificate should be verified. <br> False - Indicates that the SSL certificate should not be verified. </td> + </tr> + <tr> + <td colspan=2 > port</td> + <td> int </td> + <td></td> + <td> 443 </td> + <td></td> + <td> <br> Port number through which communication happens with PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > timeout</td> + <td> int </td> + <td></td> + <td> 120 </td> + <td></td> + <td> <br> Time after which connection will get terminated. <br> It is to be mentioned in seconds. </td> + </tr> + </table> + +### Notes +* The check_mode is supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. 
+ +### Examples +``` +- name: Get detailed list of PowerFlex entities + dellemc.powerflex.info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - vol + - storage_pool + - protection_domain + - sdc + - sds + - snapshot_policy + - device + - rcg + +- name: Get a subset list of PowerFlex volumes + dellemc.powerflex.info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - vol + filters: + - filter_key: "name" + filter_operator: "equal" + filter_value: "ansible_test" +``` + +### Return Values + +<table> + <tr> + <th colspan=4>Key</th> + <th>Type</th> + <th>Returned</th> + <th width="100%">Description</th> + </tr> + <tr> + <td colspan=4 > API_Version </td> + <td> str </td> + <td> always </td> + <td> API version of PowerFlex API Gateway. </td> + </tr> + <tr> + <td colspan=4 > Array_Details </td> + <td> dict </td> + <td> always </td> + <td> System entities of PowerFlex storage array. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > addressSpaceUsage </td> + <td> str </td> + <td>success</td> + <td> Address space usage. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > authenticationMethod </td> + <td> str </td> + <td>success</td> + <td> Authentication method. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > capacityAlertCriticalThresholdPercent </td> + <td> int </td> + <td>success</td> + <td> Capacity alert critical threshold percentage. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > capacityAlertHighThresholdPercent </td> + <td> int </td> + <td>success</td> + <td> Capacity alert high threshold percentage. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > capacityTimeLeftInDays </td> + <td> str </td> + <td>success</td> + <td> Capacity time left in days. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > cliPasswordAllowed </td> + <td> bool </td> + <td>success</td> + <td> CLI password allowed. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > daysInstalled </td> + <td> int </td> + <td>success</td> + <td> Days installed. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > defragmentationEnabled </td> + <td> bool </td> + <td>success</td> + <td> Defragmentation enabled. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > enterpriseFeaturesEnabled </td> + <td> bool </td> + <td>success</td> + <td> Enterprise features enabled. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > id </td> + <td> str </td> + <td>success</td> + <td> The ID of the system. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > installId </td> + <td> str </td> + <td>success</td> + <td> installation Id. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > isInitialLicense </td> + <td> bool </td> + <td>success</td> + <td> Initial license. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > lastUpgradeTime </td> + <td> int </td> + <td>success</td> + <td> Last upgrade time. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > managementClientSecureCommunicationEnabled </td> + <td> bool </td> + <td>success</td> + <td> Management client secure communication enabled. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > maxCapacityInGb </td> + <td> dict </td> + <td>success</td> + <td> Maximum capacity in GB. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > mdmCluster </td> + <td> dict </td> + <td>success</td> + <td> MDM cluster details. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > mdmExternalPort </td> + <td> int </td> + <td>success</td> + <td> MDM external port. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > mdmManagementPort </td> + <td> int </td> + <td>success</td> + <td> MDM management port. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > mdmSecurityPolicy </td> + <td> str </td> + <td>success</td> + <td> MDM security policy. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > showGuid </td> + <td> bool </td> + <td>success</td> + <td> Show guid. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > swid </td> + <td> str </td> + <td>success</td> + <td> SWID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > systemVersionName </td> + <td> str </td> + <td>success</td> + <td> System version and name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > tlsVersion </td> + <td> str </td> + <td>success</td> + <td> TLS version. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > upgradeState </td> + <td> str </td> + <td>success</td> + <td> Upgrade state. </td> + </tr> + <tr> + <td colspan=4 > Devices </td> + <td> list </td> + <td> always </td> + <td> Details of devices. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > id </td> + <td> str </td> + <td>success</td> + <td> device id. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > name </td> + <td> str </td> + <td>success</td> + <td> device name. </td> + </tr> + <tr> + <td colspan=4 > Protection_Domains </td> + <td> list </td> + <td> always </td> + <td> Details of all protection domains. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > id </td> + <td> str </td> + <td>success</td> + <td> protection domain id. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > name </td> + <td> str </td> + <td>success</td> + <td> protection domain name. </td> + </tr> + <tr> + <td colspan=4 > Replication_Consistency_Groups </td> + <td> list </td> + <td> always </td> + <td> Details of rcgs. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > abstractState </td> + <td> str </td> + <td>success</td> + <td> The abstract state of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > activeLocal </td> + <td> bool </td> + <td>success</td> + <td> Whether the local replication consistency group is active. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > activeRemote </td> + <td> bool </td> + <td>success</td> + <td> Whether the remote replication consistency group is active </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > currConsistMode </td> + <td> str </td> + <td>success</td> + <td> The current consistency mode of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > disasterRecoveryState </td> + <td> str </td> + <td>success</td> + <td> The state of disaster recovery of the local replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > error </td> + <td> int </td> + <td>success</td> + <td> The error code of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > failoverState </td> + <td> str </td> + <td>success</td> + <td> The state of failover of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > failoverType </td> + <td> str </td> + <td>success</td> + <td> The type of failover of the replication consistency group. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > freezeState </td> + <td> str </td> + <td>success</td> + <td> The freeze state of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > id </td> + <td> str </td> + <td>success</td> + <td> The ID of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > inactiveReason </td> + <td> int </td> + <td>success</td> + <td> The reason for the inactivity of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > lastSnapCreationRc </td> + <td> int </td> + <td>success</td> + <td> The return code of the last snapshot of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > lastSnapGroupId </td> + <td> str </td> + <td>success</td> + <td> ID of the last snapshot of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > lifetimeState </td> + <td> str </td> + <td>success</td> + <td> The Lifetime state of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > localActivityState </td> + <td> str </td> + <td>success</td> + <td> The state of activity of the local replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > name </td> + <td> str </td> + <td>success</td> + <td> The name of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > pauseMode </td> + <td> str </td> + <td>success</td> + <td> The pause mode of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > peerMdmId </td> + <td> str </td> + <td>success</td> + <td> The ID of the peer MDM of the replication consistency group. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > protectionDomainId </td> + <td> str </td> + <td>success</td> + <td> The Protection Domain ID of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > remoteActivityState </td> + <td> str </td> + <td>success</td> + <td> The state of activity of the remote replication consistency group.. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > remoteDisasterRecoveryState </td> + <td> str </td> + <td>success</td> + <td> The state of disaster recovery of the remote replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > remoteId </td> + <td> str </td> + <td>success</td> + <td> The ID of the remote replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > remoteMdmId </td> + <td> str </td> + <td>success</td> + <td> The ID of the remote MDM of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > remoteProtectionDomainId </td> + <td> str </td> + <td>success</td> + <td> The ID of the remote Protection Domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > remoteProtectionDomainName </td> + <td> str </td> + <td>success</td> + <td> The Name of the remote Protection Domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > replicationDirection </td> + <td> str </td> + <td>success</td> + <td> The direction of the replication of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rpoInSeconds </td> + <td> int </td> + <td>success</td> + <td> The RPO value of the replication consistency group in seconds. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > snapCreationInProgress </td> + <td> bool </td> + <td>success</td> + <td> Whether the process of snapshot creation of the replication consistency group is in progress or not. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > targetVolumeAccessMode </td> + <td> str </td> + <td>success</td> + <td> The access mode of the target volume of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > type </td> + <td> str </td> + <td>success</td> + <td> The type of the replication consistency group. </td> + </tr> + <tr> + <td colspan=4 > SDCs </td> + <td> list </td> + <td> always </td> + <td> Details of storage data clients. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > id </td> + <td> str </td> + <td>success</td> + <td> storage data client id. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > name </td> + <td> str </td> + <td>success</td> + <td> storage data client name. </td> + </tr> + <tr> + <td colspan=4 > SDSs </td> + <td> list </td> + <td> always </td> + <td> Details of storage data servers. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > id </td> + <td> str </td> + <td>success</td> + <td> storage data server id. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > name </td> + <td> str </td> + <td>success</td> + <td> storage data server name. </td> + </tr> + <tr> + <td colspan=4 > Snapshot_Policies </td> + <td> list </td> + <td> always </td> + <td> Details of snapshot policies. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > id </td> + <td> str </td> + <td>success</td> + <td> snapshot policy id. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > name </td> + <td> str </td> + <td>success</td> + <td> snapshot policy name. 
</td> + </tr> + <tr> + <td colspan=4 > Storage_Pools </td> + <td> list </td> + <td> always </td> + <td> Details of storage pools. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > id </td> + <td> str </td> + <td>success</td> + <td> ID of the storage pool under protection domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > mediaType </td> + <td> str </td> + <td>success</td> + <td> Type of devices in the storage pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > name </td> + <td> str </td> + <td>success</td> + <td> Name of the storage pool under protection domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > protectionDomainId </td> + <td> str </td> + <td>success</td> + <td> ID of the protection domain in which pool resides. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > protectionDomainName </td> + <td> str </td> + <td>success</td> + <td> Name of the protection domain in which pool resides. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > statistics </td> + <td> dict </td> + <td>success</td> + <td> Statistics details of the storage pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > capacityInUseInKb </td> + <td> str </td> + <td>success</td> + <td> Total capacity of the storage pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > deviceIds </td> + <td> list </td> + <td>success</td> + <td> Device Ids of the storage pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > unusedCapacityInKb </td> + <td> str </td> + <td>success</td> + <td> Unused capacity of the storage pool. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > useRfcache </td> + <td> bool </td> + <td>success</td> + <td> Enable/Disable RFcache on a specific storage pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > useRmcache </td> + <td> bool </td> + <td>success</td> + <td> Enable/Disable RMcache on a specific storage pool. </td> + </tr> + <tr> + <td colspan=4 > Volumes </td> + <td> list </td> + <td> always </td> + <td> Details of volumes. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > id </td> + <td> str </td> + <td>success</td> + <td> The ID of the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > mappedSdcInfo </td> + <td> dict </td> + <td>success</td> + <td> The details of the mapped SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > accessMode </td> + <td> str </td> + <td>success</td> + <td> mapping access mode for the specified volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > limitBwInMbps </td> + <td> int </td> + <td>success</td> + <td> Bandwidth limit for the SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > limitIops </td> + <td> int </td> + <td>success</td> + <td> IOPS limit for the SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > sdcId </td> + <td> str </td> + <td>success</td> + <td> ID of the SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > sdcIp </td> + <td> str </td> + <td>success</td> + <td> IP of the SDC. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > sdcName </td> + <td> str </td> + <td>success</td> + <td> Name of the SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > name </td> + <td> str </td> + <td>success</td> + <td> Name of the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > protectionDomainId </td> + <td> str </td> + <td>success</td> + <td> ID of the protection domain in which volume resides. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > protectionDomainName </td> + <td> str </td> + <td>success</td> + <td> Name of the protection domain in which volume resides. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > sizeInGb </td> + <td> int </td> + <td>success</td> + <td> Size of the volume in Gb. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > sizeInKb </td> + <td> int </td> + <td>success</td> + <td> Size of the volume in Kb. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > snapshotPolicyId </td> + <td> str </td> + <td>success</td> + <td> ID of the snapshot policy associated with volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > snapshotPolicyName </td> + <td> str </td> + <td>success</td> + <td> Name of the snapshot policy associated with volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > snapshotsList </td> + <td> str </td> + <td>success</td> + <td> List of snapshots associated with the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > statistics </td> + <td> dict </td> + <td>success</td> + <td> Statistics details of the volume. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > numOfChildVolumes </td> + <td> int </td> + <td>success</td> + <td> Number of child volumes. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > numOfMappedSdcs </td> + <td> int </td> + <td>success</td> + <td> Number of mapped Sdcs of the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > storagePoolId </td> + <td> str </td> + <td>success</td> + <td> ID of the storage pool in which volume resides. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > storagePoolName </td> + <td> str </td> + <td>success</td> + <td> Name of the storage pool in which volume resides. </td> + </tr> + <tr> + <td colspan=4 > changed </td> + <td> bool </td> + <td> always </td> + <td> Whether or not the resource has changed. </td> + </tr> + </table> + +### Authors +* Arindam Datta (@dattaarindam) <ansible.team@dell.com> + +-------------------------------- +# MDM Cluster Module + +Manage MDM cluster on Dell PowerFlex + +### Synopsis + Managing MDM cluster and MDMs on PowerFlex storage system includes adding/removing standby MDM, modify MDM name and virtual interface. + It also includes getting details of MDM cluster, modify MDM cluster ownership, cluster mode, and performance profile. + +### Parameters + +<table> + <tr> + <th colspan=2>Parameter</th> + <th width="20%">Type</th> + <th>Required</th> + <th>Default</th> + <th>Choices</th> + <th width="80%">Description</th> + </tr> + <tr> + <td colspan=2 > mdm_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The name of the MDM. It is unique across the PowerFlex array. <br> Mutually exclusive with mdm_id. <br> If mdm_name passed in add standby operation, then same name will be assigned to the new standby mdm. 
</td> + </tr> + <tr> + <td colspan=2 > mdm_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The ID of the MDM. <br> Mutually exclusive with mdm_name. </td> + </tr> + <tr> + <td colspan=2 > mdm_new_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> To rename the MDM. </td> + </tr> + <tr> + <td colspan=2 > standby_mdm</td> + <td> dict </td> + <td></td> + <td></td> + <td></td> + <td> <br> Specifies add standby MDM parameters. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > mdm_ips </td> + <td> list <br> elements: str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> List of MDM IPs that will be assigned to new MDM. It can contain IPv4 addresses. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > role </td> + <td> str </td> + <td> True </td> + <td></td> + <td> <ul> <li>Manager</li> <li>TieBreaker</li> </ul></td> + <td> <br> Role of new MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > management_ips </td> + <td> list <br> elements: str </td> + <td></td> + <td></td> + <td></td> + <td> <br> List of management IPs to manage MDM. It can contain IPv4 addresses. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > port </td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> Specifies the port of new MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > allow_multiple_ips </td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Allow the added node to have different number of IPs from the primary node. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > virtual_interfaces </td> + <td> list <br> elements: str </td> + <td></td> + <td></td> + <td></td> + <td> <br> List of NIC interfaces that will be used for virtual IP addresses. 
</td> + </tr> + <tr> + <td colspan=2 > is_primary</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Set is_primary as True to change MDM cluster ownership from the current master MDM to different MDM. <br> Set is_primary as False, will return MDM cluster details. <br> New owner MDM must be an MDM with a manager role. </td> + </tr> + <tr> + <td colspan=2 > cluster_mode</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>OneNode</li> <li>ThreeNodes</li> <li>FiveNodes</li> </ul></td> + <td> <br> Mode of the cluster. </td> + </tr> + <tr> + <td colspan=2 > mdm</td> + <td> list <br> elements: dict </td> + <td></td> + <td></td> + <td></td> + <td> <br> Specifies parameters to add/remove MDMs to/from the MDM cluster. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > mdm_id </td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> ID of MDM that will be added/removed to/from the cluster. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > mdm_name </td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Name of MDM that will be added/removed to/from the cluster. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > mdm_type </td> + <td> str </td> + <td> True </td> + <td></td> + <td> <ul> <li>Secondary</li> <li>TieBreaker</li> </ul></td> + <td> <br> Type of the MDM. <br> Either mdm_id or mdm_name must be passed with mdm_type. </td> + </tr> + <tr> + <td colspan=2 > mdm_state</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>present-in-cluster</li> <li>absent-in-cluster</li> </ul></td> + <td> <br> Mapping state of MDM. </td> + </tr> + <tr> + <td colspan=2 > virtual_ip_interfaces</td> + <td> list <br> elements: str </td> + <td></td> + <td></td> + <td></td> + <td> <br> List of interfaces to be used for virtual IPs. <br> The order of interfaces must be matched with virtual IPs assigned to the cluster. 
<br> Interfaces of the primary and secondary type MDMs are allowed to modify. <br> The virtual_ip_interfaces is mutually exclusive with clear_interfaces. </td> + </tr> + <tr> + <td colspan=2 > clear_interfaces</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Clear all virtual IP interfaces. <br> The clear_interfaces is mutually exclusive with virtual_ip_interfaces. </td> + </tr> + <tr> + <td colspan=2 > performance_profile</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>Compact</li> <li>HighPerformance</li> </ul></td> + <td> <br> Apply performance profile to cluster MDMs. </td> + </tr> + <tr> + <td colspan=2 > state</td> + <td> str </td> + <td> True </td> + <td></td> + <td> <ul> <li>present</li> <li>absent</li> </ul></td> + <td> <br> State of the MDM cluster. </td> + </tr> + <tr> + <td colspan=2 > hostname</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> IP or FQDN of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > username</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The username of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > password</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The password of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > validate_certs</td> + <td> bool </td> + <td></td> + <td> True </td> + <td></td> + <td> <br> Boolean variable to specify whether or not to validate SSL certificate. <br> True - Indicates that the SSL certificate should be verified. <br> False - Indicates that the SSL certificate should not be verified. </td> + </tr> + <tr> + <td colspan=2 > port</td> + <td> int </td> + <td></td> + <td> 443 </td> + <td></td> + <td> <br> Port number through which communication happens with PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > timeout</td> + <td> int </td> + <td></td> + <td> 120 </td> + <td></td> + <td> <br> Time after which connection will get terminated. 
<br> It is to be mentioned in seconds. </td> + </tr> + </table> + +### Notes +* Parameters mdm_name or mdm_id are mandatory for rename and modify virtual IP interfaces. +* Parameters mdm_name or mdm_id are not required while modifying performance profile. +* For change MDM cluster ownership operation, only changed as True will be returned and for idempotency case MDM cluster details will be returned. +* Reinstall all SDC after changing ownership to some newly added MDM. +* To add manager standby MDM, MDM package must be installed with manager role. +* The check_mode is supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Add a standby MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + standby_mdm: + mdm_ips: + - "10.x.x.x" + role: "TieBreaker" + management_ips: + - "10.x.y.z" + state: "present" + +- name: Remove a standby MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + state: "absent" + +- name: Switch cluster mode from 3 node to 5 node MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + cluster_mode: "FiveNodes" + mdm: + - mdm_id: "5f091a8a013f1100" + mdm_type: "Secondary" + - mdm_name: "mdm_1" + mdm_type: "TieBreaker" + sdc_state: "present-in-cluster" + state: "present" + +- name: Switch cluster mode from 5 node to 3 node MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + cluster_mode: 
"ThreeNodes" + mdm: + - mdm_id: "5f091a8a013f1100" + mdm_type: "Secondary" + - mdm_name: "mdm_1" + mdm_type: "TieBreaker" + sdc_state: "absent-in-cluster" + state: "present" + +- name: Get the details of the MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + state: "present" + +- name: Change ownership of MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_2" + is_primary: True + state: "present" + +- name: Modify performance profile + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + performance_profile: "HighPerformance" + state: "present" + +- name: Rename the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + mdm_new_name: "new_mdm_1" + state: "present" + +- name: Modify virtual IP interface of the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + virtual_ip_interface: + - "ens224" + state: "present" + +- name: Clear virtual IP interface of the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + clear_interfaces: True + state: "present" +``` + +### Return Values + +<table> + <tr> + <th colspan=6>Key</th> + <th>Type</th> + <th>Returned</th> + <th width="100%">Description</th> + </tr> + <tr> + <td colspan=6 > changed </td> + <td> 
bool </td> + <td> always </td> + <td> Whether or not the resource has changed. </td> + </tr> + <tr> + <td colspan=6 > mdm_cluster_details </td> + <td> dict </td> + <td> When MDM cluster exists </td> + <td> Details of the MDM cluster. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > clusterMode </td> + <td> str </td> + <td>success</td> + <td> Mode of the MDM cluster. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > clusterState </td> + <td> str </td> + <td>success</td> + <td> State of the MDM cluster. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > goodNodesNum </td> + <td> int </td> + <td>success</td> + <td> Number of Nodes in MDM cluster. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > goodReplicasNum </td> + <td> int </td> + <td>success</td> + <td> Number of nodes for Replication. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > id </td> + <td> str </td> + <td>success</td> + <td> The ID of the MDM cluster. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > master </td> + <td> dict </td> + <td>success</td> + <td> The details of the master MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > id </td> + <td> str </td> + <td>success</td> + <td> ID of the MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > ips </td> + <td> list </td> + <td>success</td> + <td> List of IPs for master MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > managementIPs </td> + <td> list </td> + <td>success</td> + <td> List of management IPs for master MDM. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > name </td> + <td> str </td> + <td>success</td> + <td> Name of the MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > opensslVersion </td> + <td> str </td> + <td>success</td> + <td> OpenSSL version. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > port </td> + <td> str </td> + <td>success</td> + <td> Port of the MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > role </td> + <td> str </td> + <td>success</td> + <td> Role of MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > status </td> + <td> str </td> + <td>success</td> + <td> Status of MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > versionInfo </td> + <td> str </td> + <td>success</td> + <td> Version of MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > virtualInterfaces </td> + <td> list </td> + <td>success</td> + <td> List of virtual interfaces </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > name </td> + <td> str </td> + <td>success</td> + <td> Name of MDM cluster. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > slaves </td> + <td> list </td> + <td>success</td> + <td> The list of the secondary MDMs. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > id </td> + <td> str </td> + <td>success</td> + <td> ID of the MDM. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > ips </td> + <td> list </td> + <td>success</td> + <td> List of IPs for secondary MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > managementIPs </td> + <td> list </td> + <td>success</td> + <td> List of management IPs for secondary MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > name </td> + <td> str </td> + <td>success</td> + <td> Name of the MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > opensslVersion </td> + <td> str </td> + <td>success</td> + <td> OpenSSL version. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > port </td> + <td> str </td> + <td>success</td> + <td> Port of the MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > role </td> + <td> str </td> + <td>success</td> + <td> Role of MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > status </td> + <td> str </td> + <td>success</td> + <td> Status of MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > versionInfo </td> + <td> str </td> + <td>success</td> + <td> Version of MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > virtualInterfaces </td> + <td> list </td> + <td>success</td> + <td> List of virtual interfaces </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > standbyMDMs </td> + <td> list </td> + <td>success</td> + <td> The list of the standby MDMs. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > id </td> + <td> str </td> + <td>success</td> + <td> ID of the MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > ips </td> + <td> list </td> + <td>success</td> + <td> List of IPs for MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > managementIPs </td> + <td> list </td> + <td>success</td> + <td> List of management IPs for MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > name </td> + <td> str </td> + <td>success</td> + <td> Name of the MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > opensslVersion </td> + <td> str </td> + <td>success</td> + <td> OpenSSL version. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > port </td> + <td> str </td> + <td>success</td> + <td> Port of the MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > role </td> + <td> str </td> + <td>success</td> + <td> Role of MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > status </td> + <td> str </td> + <td>success</td> + <td> Status of MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > versionInfo </td> + <td> str </td> + <td>success</td> + <td> Version of MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > virtualInterfaces </td> + <td> list </td> + <td>success</td> + <td> List of virtual interfaces. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > tieBreakers </td> + <td> list </td> + <td>success</td> + <td> The list of the TieBreaker MDMs. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > id </td> + <td> str </td> + <td>success</td> + <td> ID of the MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > ips </td> + <td> list </td> + <td>success</td> + <td> List of IPs for tie-breaker MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > managementIPs </td> + <td> list </td> + <td>success</td> + <td> List of management IPs for tie-breaker MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > name </td> + <td> str </td> + <td>success</td> + <td> Name of the MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > opensslVersion </td> + <td> str </td> + <td>success</td> + <td> OpenSSL version. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > port </td> + <td> str </td> + <td>success</td> + <td> Port of the MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > role </td> + <td> str </td> + <td>success</td> + <td> Role of MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > status </td> + <td> str </td> + <td>success</td> + <td> Status of MDM. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > versionInfo </td> + <td> str </td> + <td>success</td> + <td> Version of MDM. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > virtualIps </td> + <td> list </td> + <td>success</td> + <td> List of virtual IPs. </td> + </tr> + </table> + +### Authors +* Bhavneet Sharma (@sharmb5) <ansible.team@dell.com> + +-------------------------------- +# Protection Domain Module + +Manage Protection Domain on Dell PowerFlex + +### Synopsis + Managing Protection Domain on PowerFlex storage system includes creating, modifying attributes, deleting and getting details of Protection Domain. + +### Parameters + +<table> + <tr> + <th colspan=2>Parameter</th> + <th width="20%">Type</th> + <th>Required</th> + <th>Default</th> + <th>Choices</th> + <th width="80%">Description</th> + </tr> + <tr> + <td colspan=2 > protection_domain_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The name of the protection domain. <br> Mandatory for create operation. <br> It is unique across the PowerFlex array. <br> Mutually exclusive with protection_domain_id. </td> + </tr> + <tr> + <td colspan=2 > protection_domain_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The ID of the protection domain. <br> Except for create operation, all other operations can be performed using protection_domain_id. <br> Mutually exclusive with protection_domain_name. </td> + </tr> + <tr> + <td colspan=2 > protection_domain_new_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Used to rename the protection domain. </td> + </tr> + <tr> + <td colspan=2 > is_active</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Used to activate or deactivate the protection domain. </td> + </tr> + <tr> + <td colspan=2 > network_limits</td> + <td> dict </td> + <td></td> + <td></td> + <td></td> + <td> <br> Network bandwidth limit used by all SDS in protection domain. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > rebuild_limit </td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> Limit the network bandwidth for rebuild. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > rebalance_limit </td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> Limit the network bandwidth for rebalance. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > vtree_migration_limit </td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> Limit the network bandwidth for vtree migration. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > overall_limit </td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> Limit the overall network bandwidth. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > bandwidth_unit </td> + <td> str </td> + <td></td> + <td> KBps </td> + <td> <ul> <li>KBps</li> <li>MBps</li> <li>GBps</li> </ul></td> + <td> <br> Unit for network bandwidth limits. </td> + </tr> + <tr> + <td colspan=2 > rf_cache_limits</td> + <td> dict </td> + <td></td> + <td></td> + <td></td> + <td> <br> Used to set the RFcache parameters of the protection domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > is_enabled </td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Used to enable or disable RFcache in the protection domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > page_size </td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> Used to set the cache page size in KB. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > max_io_limit </td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> Used to set cache maximum I/O limit in KB. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > pass_through_mode </td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>None</li> <li>Read</li> <li>Write</li> <li>ReadAndWrite</li> <li>WriteMiss</li> </ul></td> + <td> <br> Used to set the cache mode. </td> + </tr> + <tr> + <td colspan=2 > state</td> + <td> str </td> + <td> True </td> + <td></td> + <td> <ul> <li>present</li> <li>absent</li> </ul></td> + <td> <br> State of the protection domain. </td> + </tr> + <tr> + <td colspan=2 > hostname</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> IP or FQDN of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > username</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The username of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > password</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The password of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > validate_certs</td> + <td> bool </td> + <td></td> + <td> True </td> + <td></td> + <td> <br> Boolean variable to specify whether or not to validate SSL certificate. <br> True - Indicates that the SSL certificate should be verified. <br> False - Indicates that the SSL certificate should not be verified. </td> + </tr> + <tr> + <td colspan=2 > port</td> + <td> int </td> + <td></td> + <td> 443 </td> + <td></td> + <td> <br> Port number through which communication happens with PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > timeout</td> + <td> int </td> + <td></td> + <td> 120 </td> + <td></td> + <td> <br> Time after which connection will get terminated. <br> It is to be mentioned in seconds. </td> + </tr> + </table> + +### Notes +* The protection domain can only be deleted if all its related objects have been dissociated from the protection domain. +* If the protection domain set to inactive, then no operation can be performed on protection domain. 
+* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Create protection domain + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + state: "present" + +- name: Create protection domain with all parameters + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + is_active: true + network_limits: + rebuild_limit: 10 + rebalance_limit: 17 + vtree_migration_limit: 14 + overall_limit: 20 + bandwidth_unit: "MBps" + rf_cache_limits: + is_enabled: true + page_size: 16 + max_io_limit: 128 + pass_through_mode: "Read" + state: "present" + +- name: Get protection domain details using name + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + state: "present" + +- name: Get protection domain details using ID + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_id: "5718253c00000004" + state: "present" + +- name: Modify protection domain attributes + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + protection_domain_new_name: "domain1_new" + network_limits: + rebuild_limit: 14 + rebalance_limit: 20 + overall_limit: 25 + bandwidth_unit: "MBps" + rf_cache_limits: + 
page_size: 64 + pass_through_mode: "WriteMiss" + state: "present" + +- name: Delete protection domain using name + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1_new" + state: "absent" +``` + +### Return Values + +<table> + <tr> + <th colspan=6>Key</th> + <th>Type</th> + <th>Returned</th> + <th width="100%">Description</th> + </tr> + <tr> + <td colspan=6 > changed </td> + <td> bool </td> + <td> always </td> + <td> Whether or not the resource has changed. </td> + </tr> + <tr> + <td colspan=6 > protection_domain_details </td> + <td> dict </td> + <td> When protection domain exists </td> + <td> Details of the protection domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > fglDefaultMetadataCacheSize </td> + <td> int </td> + <td>success</td> + <td> FGL metadata cache size. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > fglDefaultNumConcurrentWrites </td> + <td> str </td> + <td>success</td> + <td> FGL concurrent writes. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > fglMetadataCacheEnabled </td> + <td> bool </td> + <td>success</td> + <td> Whether FGL cache enabled. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > id </td> + <td> str </td> + <td>success</td> + <td> Protection domain ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > links </td> + <td> list </td> + <td>success</td> + <td> Protection domain links. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > href </td> + <td> str </td> + <td>success</td> + <td> Protection domain instance URL. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > rel </td> + <td> str </td> + <td>success</td> + <td> Protection domain's relationship with different entities. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > mdmSdsNetworkDisconnectionsCounterParameters </td> + <td> dict </td> + <td>success</td> + <td> MDM's SDS counter parameter. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > longWindow </td> + <td> int </td> + <td>success</td> + <td> Long window for Counter Parameters. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > mediumWindow </td> + <td> int </td> + <td>success</td> + <td> Medium window for Counter Parameters. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > shortWindow </td> + <td> int </td> + <td>success</td> + <td> Short window for Counter Parameters. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > name </td> + <td> str </td> + <td>success</td> + <td> Name of the protection domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > overallIoNetworkThrottlingEnabled </td> + <td> bool </td> + <td>success</td> + <td> Whether overall network throttling enabled. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > overallIoNetworkThrottlingInKbps </td> + <td> int </td> + <td>success</td> + <td> Overall network throttling in KBps. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > protectedMaintenanceModeNetworkThrottlingEnabled </td> + <td> bool </td> + <td>success</td> + <td> Whether protected maintenance mode network throttling enabled. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > protectedMaintenanceModeNetworkThrottlingInKbps </td> + <td> int </td> + <td>success</td> + <td> Protected maintenance mode network throttling in KBps. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > protectionDomainState </td> + <td> int </td> + <td>success</td> + <td> State of protection domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > rebalanceNetworkThrottlingEnabled </td> + <td> int </td> + <td>success</td> + <td> Whether rebalance network throttling enabled. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > rebalanceNetworkThrottlingInKbps </td> + <td> int </td> + <td>success</td> + <td> Rebalance network throttling in KBps. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > rebuildNetworkThrottlingEnabled </td> + <td> int </td> + <td>success</td> + <td> Whether rebuild network throttling enabled. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > rebuildNetworkThrottlingInKbps </td> + <td> int </td> + <td>success</td> + <td> Rebuild network throttling in KBps. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > rfcacheAccpId </td> + <td> str </td> + <td>success</td> + <td> Id of RF cache acceleration pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > rfcacheEnabled </td> + <td> bool </td> + <td>success</td> + <td> Whether RF cache is enabled or not. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > rfcacheMaxIoSizeKb </td> + <td> int </td> + <td>success</td> + <td> RF cache maximum I/O size in KB. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > rfcacheOpertionalMode </td> + <td> str </td> + <td>success</td> + <td> RF cache operational mode. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > rfcachePageSizeKb </td> + <td> bool </td> + <td>success</td> + <td> RF cache page size in KB. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > sdrSdsConnectivityInfo </td> + <td> dict </td> + <td>success</td> + <td> Connectivity info of SDR and SDS. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > clientServerConnStatus </td> + <td> str </td> + <td>success</td> + <td> Connectivity status of client and server. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > disconnectedClientId </td> + <td> str </td> + <td>success</td> + <td> Disconnected client ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > disconnectedClientName </td> + <td> str </td> + <td>success</td> + <td> Disconnected client name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > disconnectedServerId </td> + <td> str </td> + <td>success</td> + <td> Disconnected server ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > disconnectedServerIp </td> + <td> str </td> + <td>success</td> + <td> Disconnected server IP. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > disconnectedServerName </td> + <td> str </td> + <td>success</td> + <td> Disconnected server name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > sdsSdsNetworkDisconnectionsCounterParameters </td> + <td> dict </td> + <td>success</td> + <td> Counter parameter for SDS-SDS network. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > longWindow </td> + <td> int </td> + <td>success</td> + <td> Long window for Counter Parameters. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > mediumWindow </td> + <td> int </td> + <td>success</td> + <td> Medium window for Counter Parameters. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=4 > shortWindow </td> + <td> int </td> + <td>success</td> + <td> Short window for Counter Parameters. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > storagePool </td> + <td> list </td> + <td>success</td> + <td> List of storage pools. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > systemId </td> + <td> str </td> + <td>success</td> + <td> ID of system. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > vtreeMigrationNetworkThrottlingEnabled </td> + <td> bool </td> + <td>success</td> + <td> Whether V-Tree migration network throttling enabled. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=5 > vtreeMigrationNetworkThrottlingInKbps </td> + <td> int </td> + <td>success</td> + <td> V-Tree migration network throttling in KBps. </td> + </tr> + </table> + +### Authors +* Bhavneet Sharma (@sharmb5) <ansible.team@dell.com> + +-------------------------------- +# Replication Consistency Group Module + +Manage replication consistency groups on Dell PowerFlex + +### Synopsis + Managing replication consistency groups on PowerFlex storage system includes getting details, creating, modifying, creating snapshots, pause, resume, freeze, unfreeze, activate, inactivate and deleting a replication consistency group. 
+ +### Parameters + +<table> + <tr> + <th colspan=2>Parameter</th> + <th width="20%">Type</th> + <th>Required</th> + <th>Default</th> + <th>Choices</th> + <th width="80%">Description</th> + </tr> + <tr> + <td colspan=2 > rcg_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The name of the replication consistency group. <br> It is unique across the PowerFlex array. <br> Mutually exclusive with rcg_id. </td> + </tr> + <tr> + <td colspan=2 > rcg_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The ID of the replication consistency group. <br> Mutually exclusive with rcg_name. </td> + </tr> + <tr> + <td colspan=2 > create_snapshot</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Whether to create the snapshot of the replication consistency group. </td> + </tr> + <tr> + <td colspan=2 > rpo</td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> Desired RPO in seconds. </td> + </tr> + <tr> + <td colspan=2 > protection_domain_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Protection domain id. <br> Mutually exclusive with protection_domain_name. </td> + </tr> + <tr> + <td colspan=2 > protection_domain_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Protection domain name. <br> Mutually exclusive with protection_domain_id. </td> + </tr> + <tr> + <td colspan=2 > activity_mode</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>Active</li> <li>Inactive</li> </ul></td> + <td> <br> Activity mode of RCG. <br> This parameter is supported for version 3.6 and above. </td> + </tr> + <tr> + <td colspan=2 > pause</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Pause or resume the RCG. </td> + </tr> + <tr> + <td colspan=2 > freeze</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Freeze or unfreeze the RCG. 
</td> + </tr> + <tr> + <td colspan=2 > pause_mode</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>StopDataTransfer</li> <li>OnlyTrackChanges</li> </ul></td> + <td> <br> Pause mode. <br> It is required if pause is set as True. </td> + </tr> + <tr> + <td colspan=2 > target_volume_access_mode</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>ReadOnly</li> <li>NoAccess</li> </ul></td> + <td> <br> Target volume access mode. </td> + </tr> + <tr> + <td colspan=2 > is_consistent</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Consistency of RCG. </td> + </tr> + <tr> + <td colspan=2 > new_rcg_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Name of RCG to rename to. </td> + </tr> + <tr> + <td colspan=2 > remote_peer</td> + <td> dict </td> + <td></td> + <td></td> + <td></td> + <td> <br> Remote peer system. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > hostname </td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> IP or FQDN of the remote peer host. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > username </td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The username of the remote peer host. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > password </td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The password of the remote peer host. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > validate_certs </td> + <td> bool </td> + <td></td> + <td> True </td> + <td></td> + <td> <br> Boolean variable to specify whether or not to validate SSL certificate. <br> True - Indicates that the SSL certificate should be verified. <br> False - Indicates that the SSL certificate should not be verified. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > port </td> + <td> int </td> + <td></td> + <td> 443 </td> + <td></td> + <td> <br> Port number through which communication happens with remote peer host. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > timeout </td> + <td> int </td> + <td></td> + <td> 120 </td> + <td></td> + <td> <br> Time after which connection will get terminated. <br> It is to be mentioned in seconds. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > protection_domain_id </td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Remote protection domain id. <br> Mutually exclusive with protection_domain_name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > protection_domain_name </td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Remote protection domain name. <br> Mutually exclusive with protection_domain_id. </td> + </tr> + <tr> + <td colspan=2 > state</td> + <td> str </td> + <td></td> + <td> present </td> + <td> <ul> <li>present</li> <li>absent</li> </ul></td> + <td> <br> State of the replication consistency group. </td> + </tr> + <tr> + <td colspan=2 > hostname</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> IP or FQDN of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > username</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The username of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > password</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The password of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > validate_certs</td> + <td> bool </td> + <td></td> + <td> True </td> + <td></td> + <td> <br> Boolean variable to specify whether or not to validate SSL certificate. <br> True - Indicates that the SSL certificate should be verified. 
<br> False - Indicates that the SSL certificate should not be verified. </td> + </tr> + <tr> + <td colspan=2 > port</td> + <td> int </td> + <td></td> + <td> 443 </td> + <td></td> + <td> <br> Port number through which communication happens with PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > timeout</td> + <td> int </td> + <td></td> + <td> 120 </td> + <td></td> + <td> <br> Time after which connection will get terminated. <br> It is to be mentioned in seconds. </td> + </tr> + </table> + +### Notes +* The check_mode is supported. +* Idempotency is not supported for create snapshot operation. +* There is a delay in reflection of final state of RCG after a few update operations on RCG. +* In 3.6 and above, the replication consistency group will return to consistent mode on changing to inconsistent mode if a consistency barrier arrives. Hence idempotency on setting to inconsistent mode will return changed as True. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. 
+ +### Examples +``` +- name: Get RCG details + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "{{rcg_name}}" + +- name: Create a snapshot of the RCG + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_id: "{{rcg_id}}" + create_snapshot: True + state: "present" + +- name: Create a replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + rpo: 60 + protection_domain_name: "domain1" + activity_mode: "active" + remote_peer: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + +- name: Modify replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + rpo: 60 + target_volume_access_mode: "ReadOnly" + activity_mode: "Inactive" + is_consistent: True + +- name: Rename replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + new_rcg_name: "rcg_test_rename" + +- name: Pause replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" 
+ action: "pause" + pause_mode: "StopDataTransfer" + +- name: Resume replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "resume" + +- name: Freeze replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "freeze" + +- name: UnFreeze replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "unfreeze" + +- name: Delete replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + state: "absent" +``` + +### Return Values + +<table> + <tr> + <th colspan=2>Key</th> + <th>Type</th> + <th>Returned</th> + <th width="100%">Description</th> + </tr> + <tr> + <td colspan=2 > changed </td> + <td> bool </td> + <td> always </td> + <td> Whether or not the resource has changed. </td> + </tr> + <tr> + <td colspan=2 > replication_consistency_group_details </td> + <td> dict </td> + <td> When replication consistency group exists </td> + <td> Details of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > abstractState </td> + <td> str </td> + <td>success</td> + <td> The abstract state of the replication consistency group. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > activeLocal </td> + <td> bool </td> + <td>success</td> + <td> Whether the local replication consistency group is active. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > activeRemote </td> + <td> bool </td> + <td>success</td> + <td> Whether the remote replication consistency group is active </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > currConsistMode </td> + <td> str </td> + <td>success</td> + <td> The current consistency mode of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > disasterRecoveryState </td> + <td> str </td> + <td>success</td> + <td> The state of disaster recovery of the local replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > error </td> + <td> int </td> + <td>success</td> + <td> The error code of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > failoverState </td> + <td> str </td> + <td>success</td> + <td> The state of failover of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > failoverType </td> + <td> str </td> + <td>success</td> + <td> The type of failover of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > freezeState </td> + <td> str </td> + <td>success</td> + <td> The freeze state of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > id </td> + <td> str </td> + <td>success</td> + <td> The ID of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > inactiveReason </td> + <td> int </td> + <td>success</td> + <td> The reason for the inactivity of the replication consistency group. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > lastSnapCreationRc </td> + <td> int </td> + <td>success</td> + <td> The return code of the last snapshot of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > lastSnapGroupId </td> + <td> str </td> + <td>success</td> + <td> ID of the last snapshot of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > lifetimeState </td> + <td> str </td> + <td>success</td> + <td> The Lifetime state of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > localActivityState </td> + <td> str </td> + <td>success</td> + <td> The state of activity of the local replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > name </td> + <td> str </td> + <td>success</td> + <td> The name of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > pauseMode </td> + <td> str </td> + <td>success</td> + <td> The pause mode of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > peerMdmId </td> + <td> str </td> + <td>success</td> + <td> The ID of the peer MDM of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > protectionDomainId </td> + <td> str </td> + <td>success</td> + <td> The Protection Domain ID of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > remoteActivityState </td> + <td> str </td> + <td>success</td> + <td> The state of activity of the remote replication consistency group. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > remoteDisasterRecoveryState </td> + <td> str </td> + <td>success</td> + <td> The state of disaster recovery of the remote replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > remoteId </td> + <td> str </td> + <td>success</td> + <td> The ID of the remote replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > remoteMdmId </td> + <td> str </td> + <td>success</td> + <td> The ID of the remote MDM of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > remoteProtectionDomainId </td> + <td> str </td> + <td>success</td> + <td> The ID of the remote Protection Domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > remoteProtectionDomainName </td> + <td> str </td> + <td>success</td> + <td> The Name of the remote Protection Domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > replicationDirection </td> + <td> str </td> + <td>success</td> + <td> The direction of the replication of the replication consistency group. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > rpoInSeconds </td> + <td> int </td> + <td>success</td> + <td> The RPO value of the replication consistency group in seconds. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > snapCreationInProgress </td> + <td> bool </td> + <td>success</td> + <td> Whether the process of snapshot creation of the replication consistency group is in progress or not. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > targetVolumeAccessMode </td> + <td> str </td> + <td>success</td> + <td> The access mode of the target volume of the replication consistency group. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > type </td> + <td> str </td> + <td>success</td> + <td> The type of the replication consistency group. </td> + </tr> + </table> + +### Authors +* Trisha Datta (@Trisha-Datta) <ansible.team@dell.com> +* Jennifer John (@Jennifer-John) <ansible.team@dell.com> + +-------------------------------- +# SDC Module + +Manage SDCs on Dell PowerFlex + +### Synopsis + Managing SDCs on PowerFlex storage system includes getting details of SDC and renaming SDC. + +### Parameters + +<table> + <tr> + <th colspan=1>Parameter</th> + <th width="20%">Type</th> + <th>Required</th> + <th>Default</th> + <th>Choices</th> + <th width="80%">Description</th> + </tr> + <tr> + <td colspan=1 > sdc_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Name of the SDC. <br> Specify either sdc_name, sdc_id or sdc_ip for get/rename operation. <br> Mutually exclusive with sdc_id and sdc_ip. </td> + </tr> + <tr> + <td colspan=1 > sdc_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> ID of the SDC. <br> Specify either sdc_name, sdc_id or sdc_ip for get/rename operation. <br> Mutually exclusive with sdc_name and sdc_ip. </td> + </tr> + <tr> + <td colspan=1 > sdc_ip</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> IP of the SDC. <br> Specify either sdc_name, sdc_id or sdc_ip for get/rename operation. <br> Mutually exclusive with sdc_id and sdc_name. </td> + </tr> + <tr> + <td colspan=1 > sdc_new_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> New name of the SDC. Used to rename the SDC. </td> + </tr> + <tr> + <td colspan=1 > state</td> + <td> str </td> + <td> True </td> + <td></td> + <td> <ul> <li>present</li> <li>absent</li> </ul></td> + <td> <br> State of the SDC. </td> + </tr> + <tr> + <td colspan=1 > hostname</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> IP or FQDN of the PowerFlex host. 
</td> + </tr> + <tr> + <td colspan=1 > username</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The username of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=1 > password</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The password of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=1 > validate_certs</td> + <td> bool </td> + <td></td> + <td> True </td> + <td></td> + <td> <br> Boolean variable to specify whether or not to validate SSL certificate. <br> True - Indicates that the SSL certificate should be verified. <br> False - Indicates that the SSL certificate should not be verified. </td> + </tr> + <tr> + <td colspan=1 > port</td> + <td> int </td> + <td></td> + <td> 443 </td> + <td></td> + <td> <br> Port number through which communication happens with PowerFlex host. </td> + </tr> + <tr> + <td colspan=1 > timeout</td> + <td> int </td> + <td></td> + <td> 120 </td> + <td></td> + <td> <br> Time after which connection will get terminated. <br> It is to be mentioned in seconds. </td> + </tr> + </table> + +### Notes +* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. 
+ +### Examples +``` +- name: Get SDC details using SDC ip + dellemc.powerflex.sdc: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + sdc_ip: "{{sdc_ip}}" + state: "present" + +- name: Rename SDC using SDC name + dellemc.powerflex.sdc: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + sdc_name: "centos_sdc" + sdc_new_name: "centos_sdc_renamed" + state: "present" +``` + +### Return Values + +<table> + <tr> + <th colspan=3>Key</th> + <th>Type</th> + <th>Returned</th> + <th width="100%">Description</th> + </tr> + <tr> + <td colspan=3 > changed </td> + <td> bool </td> + <td> always </td> + <td> Whether or not the resource has changed. </td> + </tr> + <tr> + <td colspan=3 > sdc_details </td> + <td> dict </td> + <td> When SDC exists </td> + <td> Details of the SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > id </td> + <td> str </td> + <td>success</td> + <td> The ID of the SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > mapped_volumes </td> + <td> list </td> + <td>success</td> + <td> The details of the mapped volumes. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=1 > id </td> + <td> str </td> + <td>success</td> + <td> The ID of the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=1 > name </td> + <td> str </td> + <td>success</td> + <td> The name of the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=1 > volumeType </td> + <td> str </td> + <td>success</td> + <td> Type of the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > name </td> + <td> str </td> + <td>success</td> + <td> Name of the SDC. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > osType </td> + <td> str </td> + <td>success</td> + <td> OS type of the SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > sdcApproved </td> + <td> bool </td> + <td>success</td> + <td> Indicates whether an SDC has approved access to the system. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > sdcIp </td> + <td> str </td> + <td>success</td> + <td> IP of the SDC. </td> + </tr> + </table> + +### Authors +* Akash Shendge (@shenda1) <ansible.team@dell.com> + +-------------------------------- +# SDS Module + +Manage SDS on Dell PowerFlex + +### Synopsis + Managing SDS on PowerFlex storage system includes creating new SDS, getting details of SDS, adding/removing IP to/from SDS, modifying attributes of SDS, and deleting SDS. + +### Parameters + +<table> + <tr> + <th colspan=2>Parameter</th> + <th width="20%">Type</th> + <th>Required</th> + <th>Default</th> + <th>Choices</th> + <th width="80%">Description</th> + </tr> + <tr> + <td colspan=2 > sds_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The name of the SDS. <br> Mandatory for create operation. <br> It is unique across the PowerFlex array. <br> Mutually exclusive with sds_id. </td> + </tr> + <tr> + <td colspan=2 > sds_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The ID of the SDS. <br> Except create operation, all other operations can be performed using sds_id. <br> Mutually exclusive with sds_name. </td> + </tr> + <tr> + <td colspan=2 > protection_domain_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The name of the protection domain. <br> Mutually exclusive with protection_domain_id. </td> + </tr> + <tr> + <td colspan=2 > protection_domain_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The ID of the protection domain. <br> Mutually exclusive with protection_domain_name. 
</td> + </tr> + <tr> + <td colspan=2 > sds_ip_list</td> + <td> list <br> elements: dict </td> + <td></td> + <td></td> + <td></td> + <td> <br> Dictionary of IPs and their roles for the SDS. <br> At least one IP-role is mandatory while creating a SDS. <br> IP-roles can be updated as well. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > ip </td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> IP address of the SDS. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > role </td> + <td> str </td> + <td> True </td> + <td></td> + <td> <ul> <li>sdsOnly</li> <li>sdcOnly</li> <li>all</li> </ul></td> + <td> <br> Role assigned to the SDS IP address. </td> + </tr> + <tr> + <td colspan=2 > sds_ip_state</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>present-in-sds</li> <li>absent-in-sds</li> </ul></td> + <td> <br> State of IP with respect to the SDS. </td> + </tr> + <tr> + <td colspan=2 > rfcache_enabled</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Whether to enable the Read Flash cache. </td> + </tr> + <tr> + <td colspan=2 > rmcache_enabled</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Whether to enable the Read RAM cache. </td> + </tr> + <tr> + <td colspan=2 > rmcache_size</td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> Read RAM cache size (in MB). <br> Minimum size is 128 MB. <br> Maximum size is 3911 MB. </td> + </tr> + <tr> + <td colspan=2 > sds_new_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> SDS new name. </td> + </tr> + <tr> + <td colspan=2 > performance_profile</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>Compact</li> <li>HighPerformance</li> </ul></td> + <td> <br> Performance profile to apply to the SDS. <br> The HighPerformance profile configures a predefined set of parameters for very high performance use cases. <br> Default value by API is HighPerformance. 
</td> + </tr> + <tr> + <td colspan=2 > state</td> + <td> str </td> + <td> True </td> + <td></td> + <td> <ul> <li>present</li> <li>absent</li> </ul></td> + <td> <br> State of the SDS. </td> + </tr> + <tr> + <td colspan=2 > hostname</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> IP or FQDN of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > username</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The username of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > password</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The password of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > validate_certs</td> + <td> bool </td> + <td></td> + <td> True </td> + <td></td> + <td> <br> Boolean variable to specify whether or not to validate SSL certificate. <br> True - Indicates that the SSL certificate should be verified. <br> False - Indicates that the SSL certificate should not be verified. </td> + </tr> + <tr> + <td colspan=2 > port</td> + <td> int </td> + <td></td> + <td> 443 </td> + <td></td> + <td> <br> Port number through which communication happens with PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > timeout</td> + <td> int </td> + <td></td> + <td> 120 </td> + <td></td> + <td> <br> Time after which connection will get terminated. <br> It is to be mentioned in seconds. </td> + </tr> + </table> + +### Notes +* The maximum limit for the IPs that can be associated with an SDS is 8. +* There needs to be at least 1 IP for SDS communication and 1 for SDC communication. +* If only 1 IP exists, it must be with role 'all'; else 1 IP can be with role 'all' and other IPs with role 'sdcOnly'; or 1 IP must be with role 'sdsOnly' and others with role 'sdcOnly'. +* There can be 1 or more IPs with role 'sdcOnly'. +* There must be only 1 IP with SDS role (either with role 'all' or 'sdsOnly'). 
+* SDS can be created with RF cache disabled, but, be aware that the RF cache is not always updated. In this case, the user should re-try the operation. +* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Create SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + protection_domain_name: "domain1" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "all" + sds_ip_state: "present-in-sds" + state: "present" + +- name: Create SDS with all parameters + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node1" + protection_domain_name: "domain1" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "present-in-sds" + rmcache_enabled: true + rmcache_size: 128 + performance_profile: "HighPerformance" + state: "present" + +- name: Get SDS details using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + state: "present" + +- name: Get SDS details using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_id: "5718253c00000004" + state: "present" + +- name: Modify SDS attributes using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_new_name: "node0_new" + rfcache_enabled: true + rmcache_enabled: true + rmcache_size: 256 + performance_profile: "HighPerformance" + 
state: "present" + +- name: Modify SDS attributes using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_id: "5718253c00000004" + sds_new_name: "node0_new" + rfcache_enabled: true + rmcache_enabled: true + rmcache_size: 256 + performance_profile: "HighPerformance" + state: "present" + +- name: Add IP and role to an SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "present-in-sds" + state: "present" + +- name: Remove IP and role from an SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "absent-in-sds" + state: "present" + +- name: Delete SDS using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + state: "absent" + +- name: Delete SDS using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_id: "5718253c00000004" + state: "absent" +``` + +### Return Values + +<table> + <tr> + <th colspan=4>Key</th> + <th>Type</th> + <th>Returned</th> + <th width="100%">Description</th> + </tr> + <tr> + <td colspan=4 > changed </td> + <td> bool </td> + <td> always </td> + <td> Whether or not the resource has changed. </td> + </tr> + <tr> + <td colspan=4 > sds_details </td> + <td> dict </td> + <td> When SDS exists </td> + <td> Details of the SDS. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > authenticationError </td> + <td> str </td> + <td>success</td> + <td> Indicates authentication error. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > certificateInfo </td> + <td> str </td> + <td>success</td> + <td> Information about certificate. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > configuredDrlMode </td> + <td> str </td> + <td>success</td> + <td> Configured DRL mode. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > drlMode </td> + <td> str </td> + <td>success</td> + <td> DRL mode. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > faultSetId </td> + <td> str </td> + <td>success</td> + <td> Fault set ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > fglMetadataCacheSize </td> + <td> int </td> + <td>success</td> + <td> FGL metadata cache size. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > fglMetadataCacheState </td> + <td> str </td> + <td>success</td> + <td> FGL metadata cache state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > fglNumConcurrentWrites </td> + <td> int </td> + <td>success</td> + <td> FGL concurrent writes. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > id </td> + <td> str </td> + <td>success</td> + <td> SDS ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > ipList </td> + <td> list </td> + <td>success</td> + <td> SDS IP list. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > ip </td> + <td> str </td> + <td>success</td> + <td> IP present in the SDS. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > role </td> + <td> str </td> + <td>success</td> + <td> Role of the SDS IP. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > lastUpgradeTime </td> + <td> str </td> + <td>success</td> + <td> Last time SDS was upgraded. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > links </td> + <td> list </td> + <td>success</td> + <td> SDS links. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > href </td> + <td> str </td> + <td>success</td> + <td> SDS instance URL. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > rel </td> + <td> str </td> + <td>success</td> + <td> SDS's relationship with different entities. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > maintenanceState </td> + <td> str </td> + <td>success</td> + <td> Maintenance state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > maintenanceType </td> + <td> str </td> + <td>success</td> + <td> Maintenance type. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > mdmConnectionState </td> + <td> str </td> + <td>success</td> + <td> MDM connection state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > membershipState </td> + <td> str </td> + <td>success</td> + <td> Membership state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > name </td> + <td> str </td> + <td>success</td> + <td> Name of the SDS. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > numOfIoBuffers </td> + <td> int </td> + <td>success</td> + <td> Number of IO buffers. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > numRestarts </td> + <td> int </td> + <td>success</td> + <td> Number of restarts. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > onVmWare </td> + <td> bool </td> + <td>success</td> + <td> Presence on VMware. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > perfProfile </td> + <td> str </td> + <td>success</td> + <td> Performance profile. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > port </td> + <td> int </td> + <td>success</td> + <td> SDS port. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > protectionDomainId </td> + <td> str </td> + <td>success</td> + <td> Protection Domain ID. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > protectionDomainName </td> + <td> str </td> + <td>success</td> + <td> Protection Domain Name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > raidControllers </td> + <td> int </td> + <td>success</td> + <td> Number of RAID controllers. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rfcacheEnabled </td> + <td> bool </td> + <td>success</td> + <td> Whether RF cache is enabled or not. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rfcacheErrorApiVersionMismatch </td> + <td> bool </td> + <td>success</td> + <td> RF cache error for API version mismatch. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rfcacheErrorDeviceDoesNotExist </td> + <td> bool </td> + <td>success</td> + <td> RF cache error for device does not exist. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rfcacheErrorInconsistentCacheConfiguration </td> + <td> bool </td> + <td>success</td> + <td> RF cache error for inconsistent cache configuration. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rfcacheErrorInconsistentSourceConfiguration </td> + <td> bool </td> + <td>success</td> + <td> RF cache error for inconsistent source configuration. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rfcacheErrorInvalidDriverPath </td> + <td> bool </td> + <td>success</td> + <td> RF cache error for invalid driver path. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rfcacheErrorLowResources </td> + <td> bool </td> + <td>success</td> + <td> RF cache error for low resources. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rmcacheEnabled </td> + <td> bool </td> + <td>success</td> + <td> Whether Read RAM cache is enabled or not. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rmcacheFrozen </td> + <td> bool </td> + <td>success</td> + <td> RM cache frozen. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rmcacheMemoryAllocationState </td> + <td> bool </td> + <td>success</td> + <td> RM cache memory allocation state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rmcacheSizeInKb </td> + <td> int </td> + <td>success</td> + <td> RM cache size in KB. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > rmcacheSizeInMb </td> + <td> int </td> + <td>success</td> + <td> RM cache size in MB. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > sdsConfigurationFailure </td> + <td> str </td> + <td>success</td> + <td> SDS configuration failure. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > sdsDecoupled </td> + <td> str </td> + <td>success</td> + <td> SDS decoupled. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > sdsReceiveBufferAllocationFailures </td> + <td> str </td> + <td>success</td> + <td> SDS receive buffer allocation failures. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > sdsState </td> + <td> str </td> + <td>success</td> + <td> SDS state. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > softwareVersionInfo </td> + <td> str </td> + <td>success</td> + <td> SDS software version information. </td> + </tr> + </table> + +### Authors +* Rajshree Khare (@khareRajshree) <ansible.team@dell.com> + +-------------------------------- +# Storage Pool Module + +Managing Dell PowerFlex storage pool + +### Synopsis + Dell PowerFlex storage pool module includes getting the details of storage pool, creating a new storage pool, and modifying the attribute of a storage pool. + +### Parameters + +<table> + <tr> + <th colspan=1>Parameter</th> + <th width="20%">Type</th> + <th>Required</th> + <th>Default</th> + <th>Choices</th> + <th width="80%">Description</th> + </tr> + <tr> + <td colspan=1 > storage_pool_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The name of the storage pool. <br> If more than one storage pool is found with the same name then protection domain id/name is required to perform the task. <br> Mutually exclusive with storage_pool_id. </td> + </tr> + <tr> + <td colspan=1 > storage_pool_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The id of the storage pool. <br> It is auto generated, hence should not be provided during creation of a storage pool. <br> Mutually exclusive with storage_pool_name. </td> + </tr> + <tr> + <td colspan=1 > protection_domain_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The name of the protection domain. <br> During creation of a pool, either protection domain name or id must be mentioned. 
<br> Mutually exclusive with protection_domain_id. </td> + </tr> + <tr> + <td colspan=1 > protection_domain_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The id of the protection domain. <br> During creation of a pool, either protection domain name or id must be mentioned. <br> Mutually exclusive with protection_domain_name. </td> + </tr> + <tr> + <td colspan=1 > media_type</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>HDD</li> <li>SSD</li> <li>TRANSITIONAL</li> </ul></td> + <td> <br> Type of devices in the storage pool. </td> + </tr> + <tr> + <td colspan=1 > storage_pool_new_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> New name for the storage pool can be provided. <br> This parameter is used for renaming the storage pool. </td> + </tr> + <tr> + <td colspan=1 > use_rfcache</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Enable/Disable RFcache on a specific storage pool. </td> + </tr> + <tr> + <td colspan=1 > use_rmcache</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Enable/Disable RMcache on a specific storage pool. </td> + </tr> + <tr> + <td colspan=1 > state</td> + <td> str </td> + <td> True </td> + <td></td> + <td> <ul> <li>present</li> <li>absent</li> </ul></td> + <td> <br> State of the storage pool. </td> + </tr> + <tr> + <td colspan=1 > hostname</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> IP or FQDN of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=1 > username</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The username of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=1 > password</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The password of the PowerFlex host. 
</td> + </tr> + <tr> + <td colspan=1 > validate_certs</td> + <td> bool </td> + <td></td> + <td> True </td> + <td></td> + <td> <br> Boolean variable to specify whether or not to validate SSL certificate. <br> True - Indicates that the SSL certificate should be verified. <br> False - Indicates that the SSL certificate should not be verified. </td> + </tr> + <tr> + <td colspan=1 > port</td> + <td> int </td> + <td></td> + <td> 443 </td> + <td></td> + <td> <br> Port number through which communication happens with PowerFlex host. </td> + </tr> + <tr> + <td colspan=1 > timeout</td> + <td> int </td> + <td></td> + <td> 120 </td> + <td></td> + <td> <br> Time after which connection will get terminated. <br> It is to be mentioned in seconds. </td> + </tr> + </table> + +### Notes +* TRANSITIONAL media type is supported only during modification. +* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. 
+ +### Examples +``` +- name: Get the details of storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "sample_pool_name" + protection_domain_name: "sample_protection_domain" + state: "present" + +- name: Get the details of storage pool by id + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_id: "abcd1234ab12r" + state: "present" + +- name: Create a new storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "ansible_test_pool" + protection_domain_id: "1c957da800000000" + media_type: "HDD" + state: "present" + +- name: Modify a storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "ansible_test_pool" + protection_domain_id: "1c957da800000000" + use_rmcache: True + use_rfcache: True + state: "present" + +- name: Rename storage pool by id + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_id: "abcd1234ab12r" + storage_pool_new_name: "new_ansible_pool" + state: "present" +``` + +### Return Values + +<table> + <tr> + <th colspan=3>Key</th> + <th>Type</th> + <th>Returned</th> + <th width="100%">Description</th> + </tr> + <tr> + <td colspan=3 > changed </td> + <td> bool </td> + <td> always </td> + <td> Whether or not the resource has changed. </td> + </tr> + <tr> + <td colspan=3 > storage_pool_details </td> + <td> dict </td> + <td> When storage pool exists </td> + <td> Details of the storage pool. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > id </td> + <td> str </td> + <td>success</td> + <td> ID of the storage pool under protection domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > mediaType </td> + <td> str </td> + <td>success</td> + <td> Type of devices in the storage pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > name </td> + <td> str </td> + <td>success</td> + <td> Name of the storage pool under protection domain. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > protectionDomainId </td> + <td> str </td> + <td>success</td> + <td> ID of the protection domain in which pool resides. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > protectionDomainName </td> + <td> str </td> + <td>success</td> + <td> Name of the protection domain in which pool resides. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > statistics </td> + <td> dict </td> + <td>success</td> + <td> Statistics details of the storage pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=1 > capacityInUseInKb </td> + <td> str </td> + <td>success</td> + <td> Total capacity of the storage pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=1 > deviceIds </td> + <td> list </td> + <td>success</td> + <td> Device Ids of the storage pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=1 > unusedCapacityInKb </td> + <td> str </td> + <td>success</td> + <td> Unused capacity of the storage pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > useRfcache </td> + <td> bool </td> + <td>success</td> + <td> Enable/Disable RFcache on a specific storage pool. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=2 > useRmcache </td> + <td> bool </td> + <td>success</td> + <td> Enable/Disable RMcache on a specific storage pool. </td> + </tr> + </table> + +### Authors +* Arindam Datta (@dattaarindam) <ansible.team@dell.com> +* P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com> + +-------------------------------- +# Volume Module + +Manage volumes on Dell PowerFlex + +### Synopsis + Managing volumes on PowerFlex storage system includes creating, getting details, modifying attributes and deleting volume. + It also includes adding/removing snapshot policy, mapping/unmapping volume to/from SDC and listing associated snapshots. + +### Parameters + +<table> + <tr> + <th colspan=2>Parameter</th> + <th width="20%">Type</th> + <th>Required</th> + <th>Default</th> + <th>Choices</th> + <th width="80%">Description</th> + </tr> + <tr> + <td colspan=2 > vol_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The name of the volume. <br> Mandatory for create operation. <br> It is unique across the PowerFlex array. <br> Mutually exclusive with vol_id. </td> + </tr> + <tr> + <td colspan=2 > vol_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The ID of the volume. <br> Except create operation, all other operations can be performed using vol_id. <br> Mutually exclusive with vol_name. </td> + </tr> + <tr> + <td colspan=2 > storage_pool_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The name of the storage pool. <br> Either name or the id of the storage pool is required for creating a volume. <br> During creation, if storage pool name is provided then either protection domain name or id must be mentioned along with it. <br> Mutually exclusive with storage_pool_id. </td> + </tr> + <tr> + <td colspan=2 > storage_pool_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The ID of the storage pool. 
<br> Either name or the id of the storage pool is required for creating a volume. <br> Mutually exclusive with storage_pool_name. </td> + </tr> + <tr> + <td colspan=2 > protection_domain_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The name of the protection domain. <br> During creation of a volume, if more than one storage pool exists with the same name then either protection domain name or id must be mentioned along with it. <br> Mutually exclusive with protection_domain_id. </td> + </tr> + <tr> + <td colspan=2 > protection_domain_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> The ID of the protection domain. <br> During creation of a volume, if more than one storage pool exists with the same name then either protection domain name or id must be mentioned along with it. <br> Mutually exclusive with protection_domain_name. </td> + </tr> + <tr> + <td colspan=2 > vol_type</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>THICK_PROVISIONED</li> <li>THIN_PROVISIONED</li> </ul></td> + <td> <br> Type of volume provisioning. </td> + </tr> + <tr> + <td colspan=2 > compression_type</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>NORMAL</li> <li>NONE</li> </ul></td> + <td> <br> Type of the compression method. </td> + </tr> + <tr> + <td colspan=2 > use_rmcache</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Whether to use RM Cache or not. </td> + </tr> + <tr> + <td colspan=2 > snapshot_policy_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Name of the snapshot policy. <br> To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type. </td> + </tr> + <tr> + <td colspan=2 > snapshot_policy_id</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> ID of the snapshot policy. 
<br> To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type. </td> + </tr> + <tr> + <td colspan=2 > auto_snap_remove_type</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>remove</li> <li>detach</li> </ul></td> + <td> <br> Whether to remove or detach the snapshot policy. <br> To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type. <br> If the snapshot policy name/id is passed empty then auto_snap_remove_type is defaulted to detach. </td> + </tr> + <tr> + <td colspan=2 > size</td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> The size of the volume. <br> Size of the volume will be assigned as higher multiple of 8 GB. </td> + </tr> + <tr> + <td colspan=2 > cap_unit</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>GB</li> <li>TB</li> </ul></td> + <td> <br> The unit of the volume size. It defaults to 'GB'. </td> + </tr> + <tr> + <td colspan=2 > vol_new_name</td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> New name of the volume. Used to rename the volume. </td> + </tr> + <tr> + <td colspan=2 > allow_multiple_mappings</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> Specifies whether to allow or not allow multiple mappings. <br> If the volume is mapped to one SDC then for every new mapping allow_multiple_mappings has to be passed as True. </td> + </tr> + <tr> + <td colspan=2 > sdc</td> + <td> list <br> elements: dict </td> + <td></td> + <td></td> + <td></td> + <td> <br> Specifies SDC parameters. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > sdc_name </td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> Name of the SDC. <br> Specify either sdc_name, sdc_id or sdc_ip. <br> Mutually exclusive with sdc_id and sdc_ip. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > sdc_id </td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> ID of the SDC. <br> Specify either sdc_name, sdc_id or sdc_ip. <br> Mutually exclusive with sdc_name and sdc_ip. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > sdc_ip </td> + <td> str </td> + <td></td> + <td></td> + <td></td> + <td> <br> IP of the SDC. <br> Specify either sdc_name, sdc_id or sdc_ip. <br> Mutually exclusive with sdc_id and sdc_name. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > access_mode </td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>READ_WRITE</li> <li>READ_ONLY</li> <li>NO_ACCESS</li> </ul></td> + <td> <br> Define the access mode for all mappings of the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > bandwidth_limit </td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> Limit of volume network bandwidth. <br> Need to mention in multiple of 1024 Kbps. <br> To set no limit, 0 is to be passed. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=1 > iops_limit </td> + <td> int </td> + <td></td> + <td></td> + <td></td> + <td> <br> Limit of volume IOPS. <br> Minimum IOPS limit is 11 and specify 0 for unlimited iops. </td> + </tr> + <tr> + <td colspan=2 > sdc_state</td> + <td> str </td> + <td></td> + <td></td> + <td> <ul> <li>mapped</li> <li>unmapped</li> </ul></td> + <td> <br> Mapping state of the SDC. </td> + </tr> + <tr> + <td colspan=2 > delete_snapshots</td> + <td> bool </td> + <td></td> + <td></td> + <td></td> + <td> <br> If True, the volume and all its dependent snapshots will be deleted. <br> If False, only the volume will be deleted. <br> It can be specified only when the state is absent. <br> It defaults to False, if not specified. 
</td> + </tr> + <tr> + <td colspan=2 > state</td> + <td> str </td> + <td> True </td> + <td></td> + <td> <ul> <li>present</li> <li>absent</li> </ul></td> + <td> <br> State of the volume. </td> + </tr> + <tr> + <td colspan=2 > hostname</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> IP or FQDN of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > username</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The username of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > password</td> + <td> str </td> + <td> True </td> + <td></td> + <td></td> + <td> <br> The password of the PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > validate_certs</td> + <td> bool </td> + <td></td> + <td> True </td> + <td></td> + <td> <br> Boolean variable to specify whether or not to validate SSL certificate. <br> True - Indicates that the SSL certificate should be verified. <br> False - Indicates that the SSL certificate should not be verified. </td> + </tr> + <tr> + <td colspan=2 > port</td> + <td> int </td> + <td></td> + <td> 443 </td> + <td></td> + <td> <br> Port number through which communication happens with PowerFlex host. </td> + </tr> + <tr> + <td colspan=2 > timeout</td> + <td> int </td> + <td></td> + <td> 120 </td> + <td></td> + <td> <br> Time after which connection will get terminated. <br> It is to be mentioned in seconds. </td> + </tr> + </table> + +### Notes +* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. 
+ +### Examples +``` +- name: Create a volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + storage_pool_name: "pool_1" + protection_domain_name: "pd_1" + vol_type: "THICK_PROVISIONED" + compression_type: "NORMAL" + use_rmcache: True + size: 16 + state: "present" + +- name: Map a SDC to volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + allow_multiple_mappings: True + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + access_mode: "READ_WRITE" + sdc_state: "mapped" + state: "present" + +- name: Unmap a SDC to volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + sdc_state: "unmapped" + state: "present" + +- name: Map multiple SDCs to a volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + protection_domain_name: "pd_1" + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + access_mode: "READ_WRITE" + bandwidth_limit: 2048 + iops_limit: 20 + - sdc_ip: "198.10.xxx.xxx" + access_mode: "READ_ONLY" + allow_multiple_mappings: True + sdc_state: "mapped" + state: "present" + +- name: Get the details of the volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_id: "fe6c8b7100000005" + state: "present" + +- name: Modify the details of the Volume + dellemc.powerflex.volume: + hostname: 
"{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + storage_pool_name: "pool_1" + vol_new_name: "new_sample_volume" + size: 64 + state: "present" + +- name: Delete the Volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + delete_snapshots: False + state: "absent" + +- name: Delete the Volume and all its dependent snapshots + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + delete_snapshots: True + state: "absent" +``` + +### Return Values + +<table> + <tr> + <th colspan=4>Key</th> + <th>Type</th> + <th>Returned</th> + <th width="100%">Description</th> + </tr> + <tr> + <td colspan=4 > changed </td> + <td> bool </td> + <td> always </td> + <td> Whether or not the resource has changed. </td> + </tr> + <tr> + <td colspan=4 > volume_details </td> + <td> dict </td> + <td> When volume exists </td> + <td> Details of the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > id </td> + <td> str </td> + <td>success</td> + <td> The ID of the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > mappedSdcInfo </td> + <td> dict </td> + <td>success</td> + <td> The details of the mapped SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > accessMode </td> + <td> str </td> + <td>success</td> + <td> Mapping access mode for the specified volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > limitBwInMbps </td> + <td> int </td> + <td>success</td> + <td> Bandwidth limit for the SDC. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > limitIops </td> + <td> int </td> + <td>success</td> + <td> IOPS limit for the SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > sdcId </td> + <td> str </td> + <td>success</td> + <td> ID of the SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > sdcIp </td> + <td> str </td> + <td>success</td> + <td> IP of the SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > sdcName </td> + <td> str </td> + <td>success</td> + <td> Name of the SDC. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > name </td> + <td> str </td> + <td>success</td> + <td> Name of the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > protectionDomainId </td> + <td> str </td> + <td>success</td> + <td> ID of the protection domain in which volume resides. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > protectionDomainName </td> + <td> str </td> + <td>success</td> + <td> Name of the protection domain in which volume resides. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > sizeInGb </td> + <td> int </td> + <td>success</td> + <td> Size of the volume in Gb. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > sizeInKb </td> + <td> int </td> + <td>success</td> + <td> Size of the volume in Kb. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > snapshotPolicyId </td> + <td> str </td> + <td>success</td> + <td> ID of the snapshot policy associated with volume. 
</td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > snapshotPolicyName </td> + <td> str </td> + <td>success</td> + <td> Name of the snapshot policy associated with volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > snapshotsList </td> + <td> str </td> + <td>success</td> + <td> List of snapshots associated with the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > statistics </td> + <td> dict </td> + <td>success</td> + <td> Statistics details of the storage pool. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > numOfChildVolumes </td> + <td> int </td> + <td>success</td> + <td> Number of child volumes. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td class="elbow-placeholder"> </td> + <td colspan=2 > numOfMappedSdcs </td> + <td> int </td> + <td>success</td> + <td> Number of mapped Sdcs of the volume. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > storagePoolId </td> + <td> str </td> + <td>success</td> + <td> ID of the storage pool in which volume resides. </td> + </tr> + <tr> + <td class="elbow-placeholder"> </td> + <td colspan=3 > storagePoolName </td> + <td> str </td> + <td>success</td> + <td> Name of the storage pool in which volume resides. </td> + </tr> + </table> + +### Authors +* P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com> + +-------------------------------- diff --git a/ansible_collections/dellemc/powerflex/docs/Release Notes.md b/ansible_collections/dellemc/powerflex/docs/Release Notes.md new file mode 100644 index 00000000..562937fb --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/Release Notes.md @@ -0,0 +1,73 @@ +**Ansible Modules for Dell Technologies PowerFlex** +========================================= +### Release notes 1.5.0 + +> © 2022 Dell Inc. or its subsidiaries. All rights reserved. 
Dell +> and other trademarks are trademarks of Dell Inc. or its +> subsidiaries. Other trademarks may be trademarks of their respective +> owners. + +Contents +-------- +These release notes contain supplemental information about Ansible +Modules for Dell Technologies (Dell) PowerFlex. + +- [Revision History](#revision-history) +- [Product Description](#product-description) +- [New Features](#new-features-and-enhancements) +- [Known issues](#known-issues) +- [Limitations](#limitations) +- [Distribution](#distribution) +- [Documentation](#documentation) + +Revision history +---------------- +The table in this section lists the revision history of this document. + +Table 1. Revision history + +| Revision | Date | Description | +|----------|----------------|-------------------------------------------------------------| +| 01 | December 2022 | Current release of Ansible Modules for Dell PowerFlex 1.5.0 | + +Product description +------------------- + +The Ansible modules for Dell PowerFlex are used to automate and orchestrate +the deployment, configuration, and management of Dell PowerFlex storage +systems. The capabilities of Ansible modules are managing volumes, +storage pools, SDCs, snapshots, SDSs, replication consistency groups, devices, protection domain and MDM +cluster, and obtaining high-level information about a PowerFlex system. +The modules use playbooks to list, show, create, delete, and modify +each of the entities. + +New features and enhancements +----------------------------- +Along with the previous release deliverables, this release supports the following features - +- Info module is enhanced to support listing of replication consistency groups. +- Added new module for replication consistency groups. +- Updated modules to adhere to Ansible community guidelines. +- Renamed gateway_host to hostname. +- Renamed verifycert to validate_certs.
+ +Known issues +------------ +- Setting the RF cache and performance profile of the SDS during its creation fails intermittently on PowerFlex version 3.5. + +Limitations +----------- +- The API accepts a negative integer value for overall_limit in the network_limits for a specific protection domain. + +Distribution +------------ +The software package is available for download from the [Ansible Modules +for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/1.5.0) page. + +Documentation +------------- +The documentation is available on [Ansible Modules for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/1.5.0/docs) +page. It includes the following: + + - README + - Release Notes (this document) + - Product Guide diff --git a/ansible_collections/dellemc/powerflex/docs/SECURITY.md b/ansible_collections/dellemc/powerflex/docs/SECURITY.md new file mode 100644 index 00000000..d8bf879f --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/SECURITY.md @@ -0,0 +1,22 @@ +<!-- +Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +--> + +# Security policy + +The Ansible modules for Dell PowerFlex repository is inspected for security vulnerabilities via Black Duck scans and static code analysis. + +In addition to this, there are various security checks that get executed against a branch when a pull request is created/updated. Please refer to [pull request](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/CONTRIBUTING.md#Pull-requests) for more information. + +## Reporting a vulnerability + +Have you discovered a security vulnerability in this project? +We ask you to alert the maintainers by sending an email, describing the issue, impact, and fix - if applicable.
+ +You can reach the Ansible modules for Dell PowerFlex maintainers at ansible.team@dell.com. diff --git a/ansible_collections/dellemc/powerflex/docs/SUPPORT.md b/ansible_collections/dellemc/powerflex/docs/SUPPORT.md new file mode 100644 index 00000000..26e6f159 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/SUPPORT.md @@ -0,0 +1,12 @@ +<!-- +Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +--> + +## Support +For all your support needs you can interact with us on [GitHub](https://github.com/dell/ansible-powerflex) by creating a [GitHub Issue](https://github.com/dell/ansible-powerflex/issues) or through the [Ansible Community](https://www.dell.com/community/Automation/bd-p/Automation). diff --git a/ansible_collections/dellemc/powerflex/meta/execution-environment.yml b/ansible_collections/dellemc/powerflex/meta/execution-environment.yml new file mode 100644 index 00000000..d2c0a3ea --- /dev/null +++ b/ansible_collections/dellemc/powerflex/meta/execution-environment.yml @@ -0,0 +1,5 @@ +--- +version: 1 +dependencies: + galaxy: requirements.yml #Absolute/relative path of requirements.yml + python: requirements.txt #Absolute/relative path of requirements.txt diff --git a/ansible_collections/dellemc/powerflex/meta/runtime.yml b/ansible_collections/dellemc/powerflex/meta/runtime.yml new file mode 100644 index 00000000..0e8263ed --- /dev/null +++ b/ansible_collections/dellemc/powerflex/meta/runtime.yml @@ -0,0 +1,39 @@ +--- +requires_ansible: ">=2.12" +plugin_routing: + modules: + dellemc_powerflex_gatherfacts: + redirect: dellemc.powerflex.info + deprecation: + removal_date: "2024-03-31" + warning_text: Use info module instead. 
+ dellemc_powerflex_device: + redirect: dellemc.powerflex.device + deprecation: + removal_date: "2024-03-31" + warning_text: Use device module instead. + dellemc_powerflex_sdc: + redirect: dellemc.powerflex.sdc + deprecation: + removal_date: "2024-03-31" + warning_text: Use sdc module instead. + dellemc_powerflex_sds: + redirect: dellemc.powerflex.sds + deprecation: + removal_date: "2024-03-31" + warning_text: Use sds module instead. + dellemc_powerflex_snapshot: + redirect: dellemc.powerflex.snapshot + deprecation: + removal_date: "2024-03-31" + warning_text: Use snapshot module instead. + dellemc_powerflex_storagepool: + redirect: dellemc.powerflex.storagepool + deprecation: + removal_date: "2024-03-31" + warning_text: Use storagepool module instead. + dellemc_powerflex_volume: + redirect: dellemc.powerflex.volume + deprecation: + removal_date: "2024-03-31" + warning_text: Use volume module instead. diff --git a/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py b/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py new file mode 100644 index 00000000..34968034 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py @@ -0,0 +1,61 @@ +# Copyright: (c) 2020, Dell Technologies. +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleDocFragment(object): + # Documentation fragment for PowerFlex + DOCUMENTATION = r''' + options: + hostname: + required: true + description: + - IP or FQDN of the PowerFlex host. + type: str + aliases: + - gateway_host + username: + type: str + required: true + description: + - The username of the PowerFlex host. + password: + type: str + required: true + description: + - The password of the PowerFlex host. 
+ validate_certs: + type: bool + default: true + aliases: + - verifycert + description: + - Boolean variable to specify whether or not to validate SSL + certificate. + - C(true) - Indicates that the SSL certificate should be verified. + - C(false) - Indicates that the SSL certificate should not be + verified. + port: + description: + - Port number through which communication happens with PowerFlex + host. + type: int + default: 443 + timeout: + description: + - Time after which connection will get terminated. + - It is to be mentioned in seconds. + type: int + required: False + default: 120 + requirements: + - A Dell PowerFlex storage system version 3.5 or later. + - Ansible-core 2.12 or later. + - PyPowerFlex 1.6.0. + - Python 3.9, 3.10 or 3.11. + notes: + - The modules present in the collection named as 'dellemc.powerflex' + are built to support the Dell PowerFlex storage platform. +''' diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/__init__.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/__init__.py diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/logging_handler.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/logging_handler.py new file mode 100644 index 00000000..7436cbb1 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/logging_handler.py @@ -0,0 +1,24 @@ +# Copyright: (c) 2022, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Custom rotating file handler for PowerFlex""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from datetime import datetime +from logging.handlers import RotatingFileHandler + + +class 
CustomRotatingFileHandler(RotatingFileHandler): + def rotation_filename(self, default_name): + """ + Modify the filename of a log file when rotating. + :param default_name: The default name of the log file. + """ + src_file_name = default_name.split('.') + dest_file_name = "{0}_{1}.{2}.{3}".format( + src_file_name[0], '{0:%Y%m%d}'.format(datetime.now()), + src_file_name[1], src_file_name[2] + ) + return dest_file_name diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py new file mode 100644 index 00000000..8503aeb0 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py @@ -0,0 +1,186 @@ +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import math +import re +from decimal import Decimal +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.logging_handler \ + import CustomRotatingFileHandler +import traceback +from ansible.module_utils.basic import missing_required_lib + +"""import PyPowerFlex lib""" +try: + from PyPowerFlex import PowerFlexClient + from PyPowerFlex.objects.sds import Sds + from PyPowerFlex.objects import protection_domain + from PyPowerFlex.objects import storage_pool + from PyPowerFlex.objects import sdc + from PyPowerFlex.objects import volume + from PyPowerFlex.objects import system + from PyPowerFlex.objects.system import SnapshotDef + + HAS_POWERFLEX_SDK, POWERFLEX_SDK_IMP_ERR = True, None +except ImportError: + HAS_POWERFLEX_SDK, POWERFLEX_SDK_IMP_ERR = False, traceback.format_exc() + +"""importing pkg_resources""" +try: + from pkg_resources import parse_version + import pkg_resources + + PKG_RSRC_IMPORTED, PKG_RSRC_IMP_ERR = True, None +except 
ImportError: + PKG_RSRC_IMPORTED, PKG_RSRC_IMP_ERR = False, traceback.format_exc() + +"""importing dateutil""" +try: + import dateutil.relativedelta + HAS_DATEUTIL, DATEUTIL_IMP_ERR = True, None +except ImportError: + HAS_DATEUTIL, DATEUTIL_IMP_ERR = False, traceback.format_exc() + + +def get_powerflex_gateway_host_parameters(): + """Provides common access parameters required for the + ansible modules on PowerFlex Storage System""" + + return dict( + hostname=dict(type='str', aliases=['gateway_host'], required=True), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', aliases=['verifycert'], required=False, default=True), + port=dict(type='int', required=False, default=443), + timeout=dict(type='int', required=False, default=120) + ) + + +def get_powerflex_gateway_host_connection(module_params): + """Establishes connection with PowerFlex storage system""" + + if HAS_POWERFLEX_SDK: + conn = PowerFlexClient( + gateway_address=module_params['hostname'], + gateway_port=module_params['port'], + verify_certificate=module_params['validate_certs'], + username=module_params['username'], + password=module_params['password'], + timeout=module_params['timeout']) + conn.initialize() + return conn + + +def ensure_required_libs(module): + """Check required libraries""" + + if not HAS_DATEUTIL: + module.fail_json(msg=missing_required_lib("python-dateutil"), + exception=DATEUTIL_IMP_ERR) + + if not PKG_RSRC_IMPORTED: + module.fail_json(msg=missing_required_lib("pkg_resources"), + exception=PKG_RSRC_IMP_ERR) + + if not HAS_POWERFLEX_SDK: + module.fail_json(msg=missing_required_lib("PyPowerFlex V 1.6.0 or above"), + exception=POWERFLEX_SDK_IMP_ERR) + + min_ver = '1.6.0' + try: + curr_version = pkg_resources.require("PyPowerFlex")[0].version + supported_version = (parse_version(curr_version) >= parse_version(min_ver)) + if not supported_version: + module.fail_json(msg="PyPowerFlex {0} is not supported. 
" + "Required minimum version is " + "{1}".format(curr_version, min_ver)) + except Exception as e: + module.fail_json(msg="Getting PyPowerFlex SDK version, failed with " + "Error {0}".format(str(e))) + + +def get_logger(module_name, log_file_name='ansible_powerflex.log', log_devel=logging.INFO): + """ + Initialize logger and return the logger object. + :param module_name: Name of module to be part of log message + :param log_file_name: Name of file in which the log messages get appended + :param log_devel: Log level + :return LOG object + """ + FORMAT = '%(asctime)-15s %(filename)s %(levelname)s : %(message)s' + max_bytes = 5 * 1024 * 1024 + logging.basicConfig(filename=log_file_name, format=FORMAT) + LOG = logging.getLogger(module_name) + LOG.setLevel(log_devel) + handler = CustomRotatingFileHandler(log_file_name, maxBytes=max_bytes, backupCount=5) + formatter = logging.Formatter(FORMAT) + handler.setFormatter(formatter) + LOG.addHandler(handler) + LOG.propagate = False + return LOG + + +KB_IN_BYTES = 1024 +MB_IN_BYTES = 1024 * 1024 +GB_IN_BYTES = 1024 * 1024 * 1024 +TB_IN_BYTES = 1024 * 1024 * 1024 * 1024 + + +def get_size_bytes(size, cap_units): + """Convert the given size to bytes""" + + if size is not None and size > 0: + if cap_units in ('kb', 'KB'): + return size * KB_IN_BYTES + elif cap_units in ('mb', 'MB'): + return size * MB_IN_BYTES + elif cap_units in ('gb', 'GB'): + return size * GB_IN_BYTES + elif cap_units in ('tb', 'TB'): + return size * TB_IN_BYTES + else: + return size + else: + return 0 + + +def convert_size_with_unit(size_bytes): + """Convert size in byte with actual unit like KB,MB,GB,TB,PB etc.""" + + if not isinstance(size_bytes, int): + raise ValueError('This method takes Integer type argument only') + if size_bytes == 0: + return "0B" + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size_bytes, 1024))) + p = math.pow(1024, i) + s = round(size_bytes / p, 2) + return "%s %s" % (s, 
size_name[i]) + + +def get_size_in_gb(size, cap_units): + """Convert the given size to size in GB, size is restricted to 2 decimal places""" + + size_in_bytes = get_size_bytes(size, cap_units) + size = Decimal(size_in_bytes / GB_IN_BYTES) + size_in_gb = round(size) + return size_in_gb + + +def is_version_less_than_3_6(version): + """Verifies if powerflex version is less than 3.6""" + version = re.search(r'R\s*([\d.]+)', version.replace('_', '.')).group(1) + return \ + pkg_resources.parse_version(version) < pkg_resources.parse_version('3.6') + + +def is_invalid_name(name): + """Validates string against regex pattern""" + if name is not None: + regexp = re.compile(r'^[a-zA-Z0-9!@#$%^~*_-]*$') + if not regexp.search(name): + return True diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/device.py b/ansible_collections/dellemc/powerflex/plugins/modules/device.py new file mode 100644 index 00000000..a321315e --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/device.py @@ -0,0 +1,1105 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing device on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: device +version_added: '1.1.0' +short_description: Manage device on Dell PowerFlex +description: +- Managing device on PowerFlex storage system includes + adding new device, getting details of device, and removing a device. +author: +- Rajshree Khare (@khareRajshree) <ansible.team@dell.com> +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + current_pathname: + description: + - Full path of the device to be added. + - Required while adding a device. + type: str + device_name: + description: + - Device name. + - Mutually exclusive with I(device_id). 
+ type: str + device_id: + description: + - Device ID. + - Mutually exclusive with I(device_name). + type: str + sds_name: + description: + - The name of the SDS. + - Required while adding a device. + - Mutually exclusive with I(sds_id). + type: str + sds_id: + description: + - The ID of the SDS. + - Required while adding a device. + - Mutually exclusive with I(sds_name). + type: str + storage_pool_name: + description: + - Storage Pool name. + - Used while adding a storage device. + - Mutually exclusive with I(storage_pool_id), I(acceleration_pool_id) and + I(acceleration_pool_name). + type: str + storage_pool_id: + description: + - Storage Pool ID. + - Used while adding a storage device. + - Media type supported are C(SSD) and C(HDD). + - Mutually exclusive with I(storage_pool_name), I(acceleration_pool_id) and + I(acceleration_pool_name). + type: str + acceleration_pool_name: + description: + - Acceleration Pool Name. + - Used while adding an acceleration device. + - Media type supported are C(SSD) and C(NVDIMM). + - Mutually exclusive with I(storage_pool_id), I(storage_pool_name) and + I(acceleration_pool_name). + type: str + acceleration_pool_id: + description: + - Acceleration Pool ID. + - Used while adding an acceleration device. + - Media type supported are C(SSD) and C(NVDIMM). + - Mutually exclusive with I(acceleration_pool_name), I(storage_pool_name) and + I(storage_pool_id). + type: str + protection_domain_name: + description: + - Protection domain name. + - Used while identifying a storage pool along with I(storage_pool_name). + - Mutually exclusive with I(protection_domain_id). + type: str + protection_domain_id: + description: + - Protection domain ID. + - Used while identifying a storage pool along with I(storage_pool_name). + - Mutually exclusive with I(protection_domain_name). + type: str + external_acceleration_type: + description: + - Device external acceleration types. + - Used while adding a device. 
+ type: str + choices: ['Invalid', 'None', 'Read', 'Write', 'ReadAndWrite'] + media_type: + description: + - Device media types. + - Required while adding a device. + type: str + choices: ['HDD', 'SSD', 'NVDIMM'] + state: + description: + - State of the device. + choices: ['present', 'absent'] + required: true + type: str +notes: + - The value for device_id is generated only after successful addition of the + device. + - To uniquely identify a device, either I(device_id) can be passed or one of + I(current_pathname) or I(device_name) must be passed with I(sds_id) or I(sds_name). + - It is recommended to install Rfcache driver for SSD device on SDS in + order to add it to an acceleration pool. + - The I(check_mode) is not supported. +''' + +EXAMPLES = r''' +- name: Add a device + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node1" + media_type: "HDD" + device_name: "device2" + storage_pool_name: "pool1" + protection_domain_name: "domain1" + external_acceleration_type: "ReadAndWrite" + state: "present" +- name: Get device details using device_id + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + device_id: "d7fe088900000000" + state: "present" +- name: Get device details using (current_pathname, sds_name) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node0" + state: "present" +- name: Get device details using (current_pathname, sds_id) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: 
"/dev/sdb" + sds_id: "5717d71800000000" + state: "present" +- name: Remove a device using device_id + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + device_id: "76eb7e2f00010000" + state: "absent" +- name: Remove a device using (current_pathname, sds_id) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node1" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +device_details: + description: Details of the device. + returned: When device exists + type: dict + contains: + accelerationPoolId: + description: Acceleration pool ID. + type: str + accelerationPoolName: + description: Acceleration pool name. + type: str + accelerationProps: + description: Indicates acceleration props. + type: str + aggregatedState: + description: Indicates aggregated state. + type: str + ataSecurityActive: + description: Indicates ATA security active state. + type: bool + autoDetectMediaType: + description: Indicates auto detection of media type. + type: str + cacheLookAheadActive: + description: Indicates cache look ahead active state. + type: bool + capacity: + description: Device capacity. + type: int + capacityLimitInKb: + description: Device capacity limit in KB. + type: int + deviceCurrentPathName: + description: Device current path name. + type: str + deviceOriginalPathName: + description: Device original path name. + type: str + deviceState: + description: Indicates device state. + type: str + deviceType: + description: Indicates device type. + type: str + errorState: + description: Indicates error state. + type: str + externalAccelerationType: + description: Indicates external acceleration type. 
+ type: str + fglNvdimmMetadataAmortizationX100: + description: Indicates FGL NVDIMM meta data amortization value. + type: int + fglNvdimmWriteCacheSize: + description: Indicates FGL NVDIMM write cache size. + type: int + firmwareVersion: + description: Indicates firmware version. + type: str + id: + description: Device ID. + type: str + ledSetting: + description: Indicates LED setting. + type: str + links: + description: Device links. + type: list + contains: + href: + description: Device instance URL. + type: str + rel: + description: Relationship of device with different + entities. + type: str + logicalSectorSizeInBytes: + description: Logical sector size in bytes. + type: int + longSuccessfulIos: + description: Indicates long successful IOs. + type: list + maxCapacityInKb: + description: Maximum device capacity limit in KB. + type: int + mediaFailing: + description: Indicates media failing. + type: bool + mediaType: + description: Indicates media type. + type: str + modelName: + description: Indicates model name. + type: str + name: + description: Device name. + type: str + persistentChecksumState: + description: Indicates persistent checksum state. + type: str + physicalSectorSizeInBytes: + description: Physical sector size in bytes. + type: int + protectionDomainId: + description: Protection domain ID. + type: str + protectionDomainName: + description: Protection domain name. + type: str + raidControllerSerialNumber: + description: RAID controller serial number. + type: str + rfcacheErrorDeviceDoesNotExist: + description: Indicates RF cache error device does not exist. + type: bool + rfcacheProps: + description: RF cache props. + type: str + sdsId: + description: SDS ID. + type: str + sdsName: + description: SDS name. + type: str + serialNumber: + description: Indicates Serial number. + type: str + spSdsId: + description: Indicates SPs SDS ID. + type: str + ssdEndOfLifeState: + description: Indicates SSD end of life state. 
+ type: str + storagePoolId: + description: Storage Pool ID. + type: str + storagePoolName: + description: Storage Pool name. + type: str + storageProps: + description: Storage props. + type: list + temperatureState: + description: Indicates temperature state. + type: str + vendorName: + description: Indicates vendor name. + type: str + writeCacheActive: + description: Indicates write cache active. + type: bool + sample: { + "accelerationPoolId": null, + "accelerationProps": null, + "aggregatedState": "NeverFailed", + "ataSecurityActive": false, + "autoDetectMediaType": "SSD", + "cacheLookAheadActive": false, + "capacity": 0, + "capacityLimitInKb": 365772800, + "deviceCurrentPathName": "/dev/sdb", + "deviceOriginalPathName": "/dev/sdb", + "deviceState": "Normal", + "deviceType": "Unknown", + "errorState": "None", + "externalAccelerationType": "None", + "fglNvdimmMetadataAmortizationX100": 150, + "fglNvdimmWriteCacheSize": 16, + "firmwareVersion": null, + "id": "b6efa59900000000", + "ledSetting": "Off", + "links": [ + { + "href": "/api/instances/Device::b6efa59900000000", + "rel": "self" + }, + { + "href": "/api/instances/Device::b6efa59900000000/relationships + /Statistics", + "rel": "/api/Device/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::8f3bb0ce00000000", + "rel": "/api/parent/relationship/sdsId" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "/api/parent/relationship/storagePoolId" + }, + { + "href": "/api/instances/SpSds::fedf6f2000000000", + "rel": "/api/parent/relationship/spSdsId" + } + ], + "logicalSectorSizeInBytes": 0, + "longSuccessfulIos": { + "longWindow": null, + "mediumWindow": null, + "shortWindow": null + }, + "maxCapacityInKb": 365772800, + "mediaFailing": false, + "mediaType": "HDD", + "modelName": null, + "name": "device230", + "persistentChecksumState": "Protected", + "physicalSectorSizeInBytes": 0, + "protectionDomainId": "9300c1f900000000", + "protectionDomainName": "domain1", + 
        "raidControllerSerialNumber": null,
        "rfcacheErrorDeviceDoesNotExist": false,
        "rfcacheProps": null,
        "sdsId": "8f3bb0ce00000000",
        "sdsName": "node1",
        "serialNumber": null,
        "slotNumber": null,
        "spSdsId": "fedf6f2000000000",
        "ssdEndOfLifeState": "NeverFailed",
        "storagePoolId": "e0d8f6c900000000",
        "storagePoolName": "pool1",
        "storageProps": {
            "destFglAccDeviceId": null,
            "destFglNvdimmSizeMb": 0,
            "fglAccDeviceId": null,
            "fglNvdimmSizeMb": 0
        },
        "temperatureState": "NeverFailed",
        "vendorName": null,
        "writeCacheActive": false
    }
'''

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\
    import utils

LOG = utils.get_logger('device')


class PowerFlexDevice(object):
    """Class with device operations"""

    def __init__(self):
        """ Define all parameters required by this module"""
        self.module_params = utils.get_powerflex_gateway_host_parameters()
        self.module_params.update(get_powerflex_device_parameters())

        # Parameter pairs the user must not supply together in one task;
        # AnsibleModule fails the run before any API call if they clash.
        mut_ex_args = [['sds_name', 'sds_id'],
                       ['device_name', 'device_id'],
                       ['protection_domain_name',
                        'protection_domain_id'],
                       ['storage_pool_name', 'storage_pool_id'],
                       ['acceleration_pool_name', 'acceleration_pool_id'],
                       ['acceleration_pool_id', 'storage_pool_id'],
                       ['acceleration_pool_name', 'storage_pool_name'],
                       ['device_id', 'sds_name'],
                       ['device_id', 'sds_id'],
                       ['device_id', 'current_pathname']]

        # initialize the Ansible module
        self.module = AnsibleModule(
            argument_spec=self.module_params,
            supports_check_mode=False,
            mutually_exclusive=mut_ex_args)

        utils.ensure_required_libs(self.module)

        try:
            self.powerflex_conn = utils.get_powerflex_gateway_host_connection(
                self.module.params)
            LOG.info("Got the PowerFlex system connection object instance")
        except Exception as e:
            LOG.error(str(e))
            self.module.fail_json(msg=str(e))

    def get_device_details(self, current_pathname=None, sds_id=None,
                           device_name=None, device_id=None):
        """Get device details
        :param current_pathname: Device path name
        :type current_pathname: str
        :param sds_id: ID of the SDS
        :type sds_id: str
        :param device_name: Name of the device
        :type device_name: str
        :param device_id: ID of the device
        :type device_id: str
        :return: Details of device if it exists, None when not found
        :rtype: dict
        """

        try:
            # Lookup priority: (path, SDS) first, then (name, SDS),
            # finally by device id alone.
            if current_pathname and sds_id:
                device_details = self.powerflex_conn.device.get(
                    filter_fields={'deviceCurrentPathName': current_pathname,
                                   'sdsId': sds_id})
            elif device_name and sds_id:
                device_details = self.powerflex_conn.device.get(
                    filter_fields={'name': device_name,
                                   'sdsId': sds_id})
            else:
                device_details = self.powerflex_conn.device.get(
                    filter_fields={'id': device_id})

            # An empty result list means the device does not exist; the
            # caller decides whether that is an error.
            if len(device_details) == 0:
                msg = "Device not found"
                LOG.info(msg)
                return None

            return device_details[0]

        except Exception as e:
            error_msg = "Failed to get the device with error '%s'" % str(e)
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def get_sds(self, sds_name=None, sds_id=None):
        """Get SDS details
        :param sds_name: Name of the SDS
        :param sds_id: ID of the SDS
        :return: SDS details
        :rtype: dict
        """
        name_or_id = sds_id if sds_id else sds_name
        try:
            sds_details = None
            if sds_id:
                sds_details = self.powerflex_conn.sds.get(
                    filter_fields={'id': sds_id})

            if sds_name:
                sds_details = self.powerflex_conn.sds.get(
                    filter_fields={'name': sds_name})

            # Unlike get_device_details, a missing SDS is always fatal:
            # fail_json exits the module here.
            if not sds_details:
                error_msg = "Unable to find the SDS with '%s'. Please " \
                            "enter a valid SDS name/id." % name_or_id
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)

            return sds_details[0]

        except Exception as e:
            error_msg = "Failed to get the SDS '%s' with error '%s'" \
                        % (name_or_id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def get_protection_domain(self, protection_domain_name=None,
                              protection_domain_id=None):
        """Get protection domain details
        :param protection_domain_name: Name of the protection domain
        :param protection_domain_id: ID of the protection domain
        :return: Protection domain details
        :rtype: dict
        """
        name_or_id = protection_domain_id if protection_domain_id \
            else protection_domain_name
        try:
            pd_details = None
            if protection_domain_id:
                pd_details = self.powerflex_conn.protection_domain.get(
                    filter_fields={'id': protection_domain_id})

            if protection_domain_name:
                pd_details = self.powerflex_conn.protection_domain.get(
                    filter_fields={'name': protection_domain_name})

            # Missing protection domain is fatal; fail_json exits.
            if not pd_details:
                error_msg = "Unable to find the protection domain with " \
                            "'%s'. Please enter a valid protection domain " \
                            "name/id." % name_or_id
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)

            return pd_details[0]

        except Exception as e:
            error_msg = "Failed to get the protection domain '%s' with " \
                        "error '%s'" % (name_or_id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def get_storage_pool(self, storage_pool_name=None,
                         storage_pool_id=None,
                         protection_domain_id=None):
        """Get storage pool details
        :param storage_pool_name: Name of the storage pool
        :param storage_pool_id: ID of the storage pool
        :param protection_domain_id: ID of the protection domain; needed to
            disambiguate a pool looked up by name
        :return: Storage pool details
        :rtype: dict
        """
        name_or_id = storage_pool_id if storage_pool_id else storage_pool_name
        try:
            storage_pool_details = None
            if storage_pool_id:
                storage_pool_details = self.powerflex_conn.storage_pool.get(
                    filter_fields={'id': storage_pool_id})

            if storage_pool_name:
                # Pool names are only unique within a protection domain,
                # hence the extra filter.
                storage_pool_details = self.powerflex_conn.storage_pool.get(
                    filter_fields={'name': storage_pool_name,
                                   'protectionDomainId': protection_domain_id}
                )

            if not storage_pool_details:
                error_msg = "Unable to find the storage pool with " \
                            "'%s'. Please enter a valid storage pool " \
                            "name/id." % name_or_id
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)

            return storage_pool_details[0]

        except Exception as e:
            error_msg = "Failed to get the storage_pool '%s' with " \
                        "error '%s'" % (name_or_id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def get_acceleration_pool(self, acceleration_pool_name=None,
                              acceleration_pool_id=None,
                              protection_domain_id=None):
        """Get acceleration pool details
        :param acceleration_pool_name: Name of the acceleration pool
        :param acceleration_pool_id: ID of the acceleration pool
        :param protection_domain_id: ID of the protection domain; needed to
            disambiguate a pool looked up by name
        :return: Acceleration pool details
        :rtype: dict
        """
        name_or_id = acceleration_pool_id \
            if acceleration_pool_id else acceleration_pool_name
        try:
            acceleration_pool_details = None
            if acceleration_pool_id:
                acceleration_pool_details = self.powerflex_conn.\
                    acceleration_pool.get(filter_fields={
                        'id': acceleration_pool_id})

            if acceleration_pool_name:
                acceleration_pool_details = self.powerflex_conn.\
                    acceleration_pool.get(filter_fields={
                        'name': acceleration_pool_name,
                        'protectionDomainId': protection_domain_id})

            if not acceleration_pool_details:
                error_msg = "Unable to find the acceleration pool with " \
                            "'%s'. Please enter a valid acceleration pool " \
                            "name/id." % name_or_id
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)

            return acceleration_pool_details[0]

        except Exception as e:
            error_msg = "Failed to get the acceleration pool '%s' with " \
                        "error '%s'" % (name_or_id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def add_device(self, device_name, current_pathname, sds_id,
                   storage_pool_id, media_type, acceleration_pool_id,
                   external_acceleration_type):
        """Add device
        :param device_name: Device name
        :type device_name: str
        :param current_pathname: Current pathname of device
        :type current_pathname: str
        :param sds_id: SDS ID
        :type sds_id: str
        :param storage_pool_id: Storage Pool ID
        :type storage_pool_id: str
        :param media_type: Media type of device
        :type media_type: str
        :param acceleration_pool_id: Acceleration pool ID
        :type acceleration_pool_id: str
        :param external_acceleration_type: External acceleration type
        :type external_acceleration_type: str
        return: Boolean indicating if add device operation is successful
        """
        try:
            # name, path and SDS are all mandatory for creation; each
            # check rejects None as well as blank/whitespace-only input.
            if device_name is None or len(device_name.strip()) == 0:
                error_msg = "Please provide valid device_name value for " \
                            "adding a device."
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)

            if current_pathname is None or len(current_pathname.strip()) == 0:
                error_msg = "Current pathname of device is a mandatory " \
                            "parameter for adding a device. Please enter a " \
                            "valid value."
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)

            if sds_id is None or len(sds_id.strip()) == 0:
                error_msg = "Please provide valid sds_id value " \
                            "for adding a device."
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)

            # The device must land in exactly one pool; the mutually
            # exclusive rules in __init__ already forbid supplying both.
            if storage_pool_id is None and acceleration_pool_id is None:
                error_msg = "Please provide either storage pool name/ID " \
                            "or acceleration pool name/ID for adding a " \
                            "device."
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)

            # Human-readable summary of the create arguments, for the log
            # only; the actual call below uses keyword arguments.
            add_params = ("current_pathname: %s, "
                          "sds_id: %s, "
                          "acceleration_pool_id: %s,"
                          "external_acceleration_type: %s,"
                          "media_type: %s,"
                          "device_name: %s,"
                          "storage_pool_id: %s,"
                          % (current_pathname, sds_id,
                             acceleration_pool_id,
                             external_acceleration_type,
                             media_type,
                             device_name,
                             storage_pool_id))
            LOG.info("Adding device with params: %s", add_params)

            self.powerflex_conn.device.create(
                current_pathname=current_pathname,
                sds_id=sds_id,
                acceleration_pool_id=acceleration_pool_id,
                external_acceleration_type=external_acceleration_type,
                media_type=media_type,
                name=device_name,
                storage_pool_id=storage_pool_id)
            return True
        except Exception as e:
            error_msg = "Adding device %s operation failed with " \
                        "error '%s'" % (device_name, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def remove_device(self, device_id):
        """Remove device
        :param device_id: Device ID
        :type device_id: str
        return: Boolean indicating if remove device operation is
                successful
        """
        try:
            LOG.info("Device to be removed: %s", device_id)
            self.powerflex_conn.device.delete(device_id=device_id)
            return True
        except Exception as e:
            error_msg = "Remove device '%s' operation failed with " \
                        "error '%s'" % (device_id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def validate_input_parameters(self, device_name=None, device_id=None,
                                  current_pathname=None, sds_name=None,
                                  sds_id=None):
        """Validate the input parameters"""

        # Unique ways to identify a device:
        # (current_pathname , sds_id)
        # (current_pathname , sds_name)
        # (device_name , sds_name)
        # (device_name , sds_id)
        # device_id.

        # current_pathname needs an SDS qualifier to be unambiguous.
        if current_pathname:
            if (sds_name is None or len(sds_name.strip()) == 0) \
                    and (sds_id is None or len(sds_id.strip()) == 0):
                error_msg = "sds_name or sds_id is mandatory along with " \
                            "current_pathname. Please enter a valid value."
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)
        elif current_pathname is not None \
                and len(current_pathname.strip()) == 0:
            error_msg = "Please enter a valid value for current_pathname."
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

        # device_name likewise needs an SDS qualifier.
        if device_name:
            if (sds_name is None or len(sds_name.strip()) == 0) \
                    and (sds_id is None or len(sds_id.strip()) == 0):
                error_msg = "sds_name or sds_id is mandatory along with " \
                            "device_name. Please enter a valid value."
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)
        elif device_name is not None and len(device_name.strip()) == 0:
            error_msg = "Please enter a valid value for device_name."
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

        # Conversely, an SDS reference alone cannot identify a device.
        if sds_name:
            if (current_pathname is None
                or len(current_pathname.strip()) == 0) \
                    and (device_name is None
                         or len(device_name.strip()) == 0):
                error_msg = "current_pathname or device_name is mandatory " \
                            "along with sds_name. Please enter a valid value."
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)
        elif sds_name is not None and len(sds_name.strip()) == 0:
            error_msg = "Please enter a valid value for sds_name."
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

        if sds_id:
            if (current_pathname is None
                or len(current_pathname.strip()) == 0) \
                    and (device_name is None
                         or len(device_name.strip()) == 0):
                error_msg = "current_pathname or device_name is mandatory " \
                            "along with sds_id. Please enter a valid value."
                LOG.error(error_msg)
                self.module.fail_json(msg=error_msg)
        elif sds_id is not None and len(sds_id.strip()) == 0:
            error_msg = "Please enter a valid value for sds_id."
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

        if device_id is not None and len(device_id.strip()) == 0:
            error_msg = "Please provide valid device_id value to identify " \
                        "a device."
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

        # At least one identifying parameter must be present.
        if current_pathname is None and device_name is None \
                and device_id is None:
            error_msg = "Please specify a valid parameter combination to " \
                        "identify a device."
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def validate_add_parameters(self, device_id=None,
                                external_acceleration_type=None,
                                storage_pool_id=None,
                                storage_pool_name=None,
                                acceleration_pool_id=None,
                                acceleration_pool_name=None):
        """Validate the add device parameters"""

        # Creation is only supported by name; a device_id cannot exist yet.
        if device_id:
            error_msg = "Addition of device is allowed using " \
                        "device_name only, device_id given."
            LOG.info(error_msg)
            self.module.fail_json(msg=error_msg)
        if external_acceleration_type and storage_pool_id is None \
                and storage_pool_name is None \
                and acceleration_pool_id is None \
                and acceleration_pool_name is None:
            error_msg = "Storage Pool ID/name or Acceleration Pool " \
                        "ID/name is mandatory along with " \
                        "external_acceleration_type."
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def perform_module_operation(self):
        """
        Perform different actions on device based on parameters passed in
        the playbook
        """
        current_pathname = self.module.params['current_pathname']
        device_name = self.module.params['device_name']
        device_id = self.module.params['device_id']
        sds_name = self.module.params['sds_name']
        sds_id = self.module.params['sds_id']
        storage_pool_name = self.module.params['storage_pool_name']
        storage_pool_id = self.module.params['storage_pool_id']
        acceleration_pool_id = self.module.params['acceleration_pool_id']
        acceleration_pool_name = self.module.params['acceleration_pool_name']
        protection_domain_name = self.module.params['protection_domain_name']
        protection_domain_id = self.module.params['protection_domain_id']
        external_acceleration_type = self.module.params[
            'external_acceleration_type']
        media_type = self.module.params['media_type']
        state = self.module.params['state']

        # result is a dictionary to contain end state and device details
        changed = False
        result = dict(
            changed=False,
            device_details={}
        )

        # validate input parameters
        self.validate_input_parameters(device_name, device_id,
                                       current_pathname, sds_name, sds_id)

        # get SDS ID from name
        if sds_name:
            sds_details = self.get_sds(sds_name)
            if sds_details:
                sds_id = sds_details['id']
                msg = "Fetched the SDS details with id '%s', name '%s'" \
                      % (sds_id, sds_name)
                LOG.info(msg)

        # get device details
        device_details = self.get_device_details(current_pathname,
                                                 sds_id, device_name,
                                                 device_id)

        if device_details:
            device_id = device_details['id']
            msg = "Fetched the device details %s" % (str(device_details))
            LOG.info(msg)

        # add operation
        add_changed = False
        if state == 'present' and not device_details:
            # get Protection Domain ID from name
            # it is needed to uniquely identify a storage pool or acceleration
            # pool using name
            if protection_domain_name \
                    and (storage_pool_name or acceleration_pool_name):
                pd_details = self.get_protection_domain(
                    protection_domain_name)
                if pd_details:
                    protection_domain_id = pd_details['id']
                    msg = "Fetched the protection domain details with id " \
                          "'%s', name '%s'" % (protection_domain_id,
                                               protection_domain_name)
                    LOG.info(msg)

            # get storage pool ID from name
            if storage_pool_name:
                if protection_domain_id:
                    storage_pool_details = self.get_storage_pool(
                        storage_pool_name=storage_pool_name,
                        protection_domain_id=protection_domain_id)
                    if storage_pool_details:
                        storage_pool_id = storage_pool_details['id']
                        msg = "Fetched the storage pool details with id " \
                              "'%s', name '%s'" % (storage_pool_id,
                                                   storage_pool_name)
                        LOG.info(msg)
                else:
                    error_msg = "Protection domain name/id is required to " \
                                "uniquely identify a storage pool, only " \
                                "storage_pool_name is given."
                    LOG.info(error_msg)
                    self.module.fail_json(msg=error_msg)

            # get acceleration pool ID from name
            if acceleration_pool_name:
                if protection_domain_id:
                    acceleration_pool_details = self.get_acceleration_pool(
                        acceleration_pool_name=acceleration_pool_name,
                        protection_domain_id=protection_domain_id)
                    if acceleration_pool_details:
                        acceleration_pool_id = \
                            acceleration_pool_details['id']
                        msg = "Fetched the acceleration pool details with " \
                              "id '%s', name '%s'" % (acceleration_pool_id,
                                                      acceleration_pool_name)
                        LOG.info(msg)
                else:
                    error_msg = "Protection domain name/id is required to " \
                                "uniquely identify a acceleration pool, " \
                                "only acceleration_pool_name is given."
                    LOG.info(error_msg)
                    self.module.fail_json(msg=error_msg)

            # validate input parameters
            self.validate_add_parameters(device_id,
                                         external_acceleration_type,
                                         storage_pool_id,
                                         storage_pool_name,
                                         acceleration_pool_id,
                                         acceleration_pool_name)
            add_changed = self.add_device(device_name, current_pathname,
                                          sds_id, storage_pool_id, media_type,
                                          acceleration_pool_id,
                                          external_acceleration_type)
            # Re-read the device after creation so the result reflects the
            # server-assigned id and state.
            if add_changed:
                device_details = self.get_device_details(
                    device_name=device_name, sds_id=sds_id)
                device_id = device_details['id']
                msg = "Device created successfully, fetched device details " \
                      "%s" % (str(device_details))
                LOG.info(msg)

        # remove operation
        remove_changed = False
        if state == 'absent' and device_details:
            remove_changed = self.remove_device(device_id)

        if add_changed or remove_changed:
            changed = True

        # modify operation
        # Attribute changes are detected only to be rejected explicitly
        # rather than silently ignored.
        if device_details and state == 'present':
            modify_dict = to_modify(device_details, media_type,
                                    external_acceleration_type)
            if modify_dict:
                error_msg = "Modification of device attributes is " \
                            "currently not supported by Ansible modules."
                LOG.info(error_msg)
                self.module.fail_json(msg=error_msg)

        # Returning the updated device details
        if state == 'present':
            device_details = self.show_output(device_id)
            result['device_details'] = device_details
        result['changed'] = changed
        self.module.exit_json(**result)

    def show_output(self, device_id):
        """Show device details
        :param device_id: ID of the device
        :type device_id: str
        :return: Details of device, enriched with the names of the related
            SDS, pools and protection domain; None when not found
        :rtype: dict
        """

        try:
            device_details = self.powerflex_conn.device.get(
                filter_fields={'id': device_id})

            if len(device_details) == 0:
                msg = "Device with identifier '%s' not found" % device_id
                LOG.error(msg)
                return None

            # Append SDS name
            if 'sdsId' in device_details[0] and device_details[0]['sdsId']:
                sds_details = self.get_sds(sds_id=device_details[0]['sdsId'])
                device_details[0]['sdsName'] = sds_details['name']

            # Append storage pool name and its protection domain name and ID
            if 'storagePoolId' in device_details[0] \
                    and device_details[0]['storagePoolId']:
                sp_details = self.get_storage_pool(
                    storage_pool_id=device_details[0]['storagePoolId'])
                device_details[0]['storagePoolName'] = sp_details['name']
                pd_id = sp_details['protectionDomainId']
                device_details[0]['protectionDomainId'] = pd_id
                pd_details = self.get_protection_domain(
                    protection_domain_id=pd_id)
                device_details[0]['protectionDomainName'] = pd_details['name']

            # Append acceleration pool name and its protection domain name
            # and ID
            if 'accelerationPoolId' in device_details[0] \
                    and device_details[0]['accelerationPoolId']:
                ap_details = self.get_acceleration_pool(
                    acceleration_pool_id=device_details[0][
                        'accelerationPoolId'])
                device_details[0]['accelerationPoolName'] = ap_details['name']
                pd_id = ap_details['protectionDomainId']
                device_details[0]['protectionDomainId'] = pd_id
                pd_details = self.get_protection_domain(
                    protection_domain_id=pd_id)
                device_details[0]['protectionDomainName'] = pd_details['name']

            return device_details[0]

        except Exception as e:
            error_msg = "Failed to get the device '%s' with error '%s'"\
                        % (device_id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)


def to_modify(device_details, media_type, external_acceleration_type):
    """Identify device attributes to be modified"""

    modify_dict = {}

    if media_type is not None and \
            device_details['mediaType'] != media_type:
        modify_dict['mediaType'] = media_type

    if external_acceleration_type is not None and \
            device_details['externalAccelerationType'] \
            != external_acceleration_type:
        modify_dict['externalAccelerationType'] \
            = external_acceleration_type

    if len(modify_dict) != 0:
        LOG.info("Attributes to be modified: %s", modify_dict)
    # NOTE(review): the sole caller only truth-tests the result, so an
    # empty dict and None are interchangeable here.
    return modify_dict


def get_powerflex_device_parameters():
    """Return the argument spec required by the device module on
    PowerFlex."""
    return dict(
        current_pathname=dict(),
        device_name=dict(),
        device_id=dict(),
        sds_name=dict(),
        sds_id=dict(),
        storage_pool_name=dict(),
        storage_pool_id=dict(),
        acceleration_pool_id=dict(),
        acceleration_pool_name=dict(),
        protection_domain_name=dict(),
        protection_domain_id=dict(),
        external_acceleration_type=dict(choices=['Invalid', 'None', 'Read',
                                                 'Write', 'ReadAndWrite']),
        media_type=dict(choices=['HDD', 'SSD', 'NVDIMM']),
        state=dict(required=True, type='str', choices=['present', 'absent'])
    )


def main():
    """ Create PowerFlex device object and perform actions on it
    based on user input from playbook"""
    obj = PowerFlexDevice()
    obj.perform_module_operation()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/info.py b/ansible_collections/dellemc/powerflex/plugins/modules/info.py
new file mode 100644
index 00000000..40bdfd92
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/info.py
@@ -0,0 +1,1495 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2021, Dell 
Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for Gathering information about Dell Technologies (Dell) PowerFlex""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: info + +version_added: '1.0.0' + +short_description: Gathering information about Dell PowerFlex + +description: +- Gathering information about Dell PowerFlex storage system includes + getting the api details, list of volumes, SDSs, SDCs, storage pools, + protection domains, snapshot policies, and devices. + +extends_documentation_fragment: + - dellemc.powerflex.powerflex + +author: +- Arindam Datta (@dattaarindam) <ansible.team@dell.com> + +options: + gather_subset: + description: + - List of string variables to specify the Powerflex storage system + entities for which information is required. + - Volumes - C(vol). + - Storage pools - C(storage_pool). + - Protection domains - C(protection_domain). + - SDCs - C(sdc). + - SDSs - C(sds). + - Snapshot policies - C(snapshot_policy). + - Devices - C(device). + - Replication consistency groups - C(rcg). + choices: [vol, storage_pool, protection_domain, sdc, sds, + snapshot_policy, device, rcg] + type: list + elements: str + filters: + description: + - List of filters to support filtered output for storage entities. + - Each filter is a list of I(filter_key), I(filter_operator), I(filter_value). + - Supports passing of multiple filters. + type: list + elements: dict + suboptions: + filter_key: + description: + - Name identifier of the filter. + type: str + required: true + filter_operator: + description: + - Operation to be performed on filter key. + type: str + choices: [equal] + required: true + filter_value: + description: + - Value of the filter key. + type: str + required: true +notes: + - The I(check_mode) is supported. 
+''' + +EXAMPLES = r''' +- name: Get detailed list of PowerFlex entities + dellemc.powerflex.info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - vol + - storage_pool + - protection_domain + - sdc + - sds + - snapshot_policy + - device + - rcg + +- name: Get a subset list of PowerFlex volumes + dellemc.powerflex.info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - vol + filters: + - filter_key: "name" + filter_operator: "equal" + filter_value: "ansible_test" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +Array_Details: + description: System entities of PowerFlex storage array. + returned: always + type: dict + contains: + addressSpaceUsage: + description: Address space usage. + type: str + authenticationMethod: + description: Authentication method. + type: str + capacityAlertCriticalThresholdPercent: + description: Capacity alert critical threshold percentage. + type: int + capacityAlertHighThresholdPercent: + description: Capacity alert high threshold percentage. + type: int + capacityTimeLeftInDays: + description: Capacity time left in days. + type: str + cliPasswordAllowed: + description: CLI password allowed. + type: bool + daysInstalled: + description: Days installed. + type: int + defragmentationEnabled: + description: Defragmentation enabled. + type: bool + enterpriseFeaturesEnabled: + description: Enterprise features enabled. + type: bool + id: + description: The ID of the system. + type: str + installId: + description: installation Id. + type: str + isInitialLicense: + description: Initial license. + type: bool + lastUpgradeTime: + description: Last upgrade time. + type: int + managementClientSecureCommunicationEnabled: + description: Management client secure communication enabled. 
+ type: bool + maxCapacityInGb: + description: Maximum capacity in GB. + type: dict + mdmCluster: + description: MDM cluster details. + type: dict + mdmExternalPort: + description: MDM external port. + type: int + mdmManagementPort: + description: MDM management port. + type: int + mdmSecurityPolicy: + description: MDM security policy. + type: str + showGuid: + description: Show guid. + type: bool + swid: + description: SWID. + type: str + systemVersionName: + description: System version and name. + type: str + tlsVersion: + description: TLS version. + type: str + upgradeState: + description: Upgrade state. + type: str + sample: { + "addressSpaceUsage": "Normal", + "authenticationMethod": "Native", + "capacityAlertCriticalThresholdPercent": 90, + "capacityAlertHighThresholdPercent": 80, + "capacityTimeLeftInDays": "24", + "cliPasswordAllowed": true, + "daysInstalled": 66, + "defragmentationEnabled": true, + "enterpriseFeaturesEnabled": true, + "id": "4a54a8ba6df0690f", + "installId": "38622771228e56db", + "isInitialLicense": true, + "lastUpgradeTime": 0, + "managementClientSecureCommunicationEnabled": true, + "maxCapacityInGb": "Unlimited", + "mdmCluster": { + "clusterMode": "ThreeNodes", + "clusterState": "ClusteredNormal", + "goodNodesNum": 3, + "goodReplicasNum": 2, + "id": "5356091375512217871", + "master": { + "id": "6101582c2ca8db00", + "ips": [ + "10.47.xxx.xxx" + ], + "managementIPs": [ + "10.47.xxx.xxx" + ], + "name": "node0", + "opensslVersion": "OpenSSL 1.0.2k-fips 26 Jan 2017", + "port": 9011, + "role": "Manager", + "status": "Normal", + "versionInfo": "R3_6.0.0", + "virtualInterfaces": [ + "ens160" + ] + }, + "slaves": [ + { + "id": "23fb724015661901", + "ips": [ + "10.47.xxx.xxx" + ], + "managementIPs": [ + "10.47.xxx.xxx" + ], + "opensslVersion": "OpenSSL 1.0.2k-fips 26 Jan 2017", + "port": 9011, + "role": "Manager", + "status": "Normal", + "versionInfo": "R3_6.0.0", + "virtualInterfaces": [ + "ens160" + ] + } + ], + "tieBreakers": [ + { + "id": 
"6ef27eb20d0c1202", + "ips": [ + "10.47.xxx.xxx" + ], + "managementIPs": [ + "10.47.xxx.xxx" + ], + "opensslVersion": "N/A", + "port": 9011, + "role": "TieBreaker", + "status": "Normal", + "versionInfo": "R3_6.0.0" + } + ] + }, + "mdmExternalPort": 7611, + "mdmManagementPort": 6611, + "mdmSecurityPolicy": "None", + "showGuid": true, + "swid": "", + "systemVersionName": "DellEMC PowerFlex Version: R3_6.0.354", + "tlsVersion": "TLSv1.2", + "upgradeState": "NoUpgrade" + } +API_Version: + description: API version of PowerFlex API Gateway. + returned: always + type: str + sample: "3.5" +Protection_Domains: + description: Details of all protection domains. + returned: always + type: list + contains: + id: + description: protection domain id. + type: str + name: + description: protection domain name. + type: str + sample: [ + { + "id": "9300e90900000001", + "name": "domain2" + }, + { + "id": "9300c1f900000000", + "name": "domain1" + } + ] +SDCs: + description: Details of storage data clients. + returned: always + type: list + contains: + id: + description: storage data client id. + type: str + name: + description: storage data client name. + type: str + sample: [ + { + "id": "07335d3d00000006", + "name": "LGLAP203" + }, + { + "id": "07335d3c00000005", + "name": "LGLAP178" + }, + { + "id": "0733844a00000003" + } + ] +SDSs: + description: Details of storage data servers. + returned: always + type: list + contains: + id: + description: storage data server id. + type: str + name: + description: storage data server name. + type: str + sample: [ + { + "id": "8f3bb0cc00000002", + "name": "node0" + }, + { + "id": "8f3bb0ce00000000", + "name": "node1" + }, + { + "id": "8f3bb15300000001", + "name": "node22" + } + ] +Snapshot_Policies: + description: Details of snapshot policies. + returned: always + type: list + contains: + id: + description: snapshot policy id. + type: str + name: + description: snapshot policy name. 
+ type: str + sample: [ + { + "id": "2b380c5c00000000", + "name": "sample_snap_policy" + }, + { + "id": "2b380c5d00000001", + "name": "sample_snap_policy_1" + } + ] +Storage_Pools: + description: Details of storage pools. + returned: always + type: list + contains: + mediaType: + description: Type of devices in the storage pool. + type: str + useRfcache: + description: Enable/Disable RFcache on a specific storage pool. + type: bool + useRmcache: + description: Enable/Disable RMcache on a specific storage pool. + type: bool + id: + description: ID of the storage pool under protection domain. + type: str + name: + description: Name of the storage pool under protection domain. + type: str + protectionDomainId: + description: ID of the protection domain in which pool resides. + type: str + protectionDomainName: + description: Name of the protection domain in which pool resides. + type: str + statistics: + description: Statistics details of the storage pool. + type: dict + contains: + capacityInUseInKb: + description: Total capacity of the storage pool. + type: str + unusedCapacityInKb: + description: Unused capacity of the storage pool. + type: str + deviceIds: + description: Device Ids of the storage pool. 
+ type: list + sample: [ + { + "addressSpaceUsage": "Normal", + "addressSpaceUsageType": "DeviceCapacityLimit", + "backgroundScannerBWLimitKBps": 3072, + "backgroundScannerMode": "DataComparison", + "bgScannerCompareErrorAction": "ReportAndFix", + "bgScannerReadErrorAction": "ReportAndFix", + "capacityAlertCriticalThreshold": 90, + "capacityAlertHighThreshold": 80, + "capacityUsageState": "Normal", + "capacityUsageType": "NetCapacity", + "checksumEnabled": false, + "compressionMethod": "Invalid", + "dataLayout": "MediumGranularity", + "externalAccelerationType": "None", + "fglAccpId": null, + "fglExtraCapacity": null, + "fglMaxCompressionRatio": null, + "fglMetadataSizeXx100": null, + "fglNvdimmMetadataAmortizationX100": null, + "fglNvdimmWriteCacheSizeInMb": null, + "fglOverProvisioningFactor": null, + "fglPerfProfile": null, + "fglWriteAtomicitySize": null, + "fragmentationEnabled": true, + "id": "e0d8f6c900000000", + "links": [ + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "self" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Statistics", + "rel": "/api/StoragePool/relationship/Statistics" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/SpSds", + "rel": "/api/StoragePool/relationship/SpSds" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Volume", + "rel": "/api/StoragePool/relationship/Volume" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Device", + "rel": "/api/StoragePool/relationship/Device" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/VTree", + "rel": "/api/StoragePool/relationship/VTree" + }, + { + "href": "/api/instances/ProtectionDomain::9300c1f900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "statistics": { + "BackgroundScannedInMB": 3466920, + "activeBckRebuildCapacityInKb": 0, + 
"activeEnterProtectedMaintenanceModeCapacityInKb": 0, + "aggregateCompressionLevel": "Uncompressed", + "atRestCapacityInKb": 1248256, + "backgroundScanCompareErrorCount": 0, + "backgroundScanFixedCompareErrorCount": 0, + "bckRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "bckRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "capacityAvailableForVolumeAllocationInKb": 369098752, + "capacityInUseInKb": 2496512, + "capacityInUseNoOverheadInKb": 2496512, + "capacityLimitInKb": 845783040, + "compressedDataCompressionRatio": 0.0, + "compressionRatio": 1.0, + "currentFglMigrationSizeInKb": 0, + "deviceIds": [ + ], + "enterProtectedMaintenanceModeCapacityInKb": 0, + "enterProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "enterProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exposedCapacityInKb": 0, + "failedCapacityInKb": 0, + "fwdRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "fwdRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "inMaintenanceCapacityInKb": 0, + "inMaintenanceVacInKb": 0, + "inUseVacInKb": 184549376, + "inaccessibleCapacityInKb": 0, + "logWrittenBlocksInKb": 0, + "maxCapacityInKb": 845783040, + "migratingVolumeIds": [ + ], + "migratingVtreeIds": [ + ], + "movingCapacityInKb": 0, + "netCapacityInUseInKb": 1248256, + "normRebuildCapacityInKb": 0, + "normRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "normRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "numOfDeviceAtFaultRebuilds": 0, + "numOfDevices": 
3, + "numOfIncomingVtreeMigrations": 0, + "numOfVolumes": 8, + "numOfVolumesInDeletion": 0, + "numOfVtrees": 8, + "overallUsageRatio": 73.92289, + "pendingBckRebuildCapacityInKb": 0, + "pendingEnterProtectedMaintenanceModeCapacityInKb": 0, + "pendingExitProtectedMaintenanceModeCapacityInKb": 0, + "pendingFwdRebuildCapacityInKb": 0, + "pendingMovingCapacityInKb": 0, + "pendingMovingInBckRebuildJobs": 0, + "persistentChecksumBuilderProgress": 100.0, + "persistentChecksumCapacityInKb": 414720, + "primaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryVacInKb": 92274688, + "primaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "protectedCapacityInKb": 2496512, + "protectedVacInKb": 184549376, + "provisionedAddressesInKb": 2496512, + "rebalanceCapacityInKb": 0, + "rebalanceReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rebalanceWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rfacheReadHit": 0, + "rfacheWriteHit": 0, + "rfcacheAvgReadTime": 0, + "rfcacheAvgWriteTime": 0, + "rfcacheIoErrors": 0, + "rfcacheIosOutstanding": 0, + "rfcacheIosSkipped": 0, + "rfcacheReadMiss": 0, + "rmPendingAllocatedInKb": 0, + "rmPendingThickInKb": 0, + "rplJournalCapAllowed": 0, + "rplTotalJournalCap": 0, + "rplUsedJournalCap": 0, + "secondaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryVacInKb": 92274688, + "secondaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + 
"semiProtectedCapacityInKb": 0, + "semiProtectedVacInKb": 0, + "snapCapacityInUseInKb": 0, + "snapCapacityInUseOccupiedInKb": 0, + "snapshotCapacityInKb": 0, + "spSdsIds": [ + "abdfe71b00030001", + "abdce71d00040001", + "abdde71e00050001" + ], + "spareCapacityInKb": 84578304, + "targetOtherLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "tempCapacityInKb": 0, + "tempCapacityVacInKb": 0, + "thickCapacityInUseInKb": 0, + "thinAndSnapshotRatio": 73.92289, + "thinCapacityAllocatedInKm": 184549376, + "thinCapacityInUseInKb": 0, + "thinUserDataCapacityInKb": 2496512, + "totalFglMigrationSizeInKb": 0, + "totalReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "totalWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "trimmedUserDataCapacityInKb": 0, + "unreachableUnusedCapacityInKb": 0, + "unusedCapacityInKb": 758708224, + "userDataCapacityInKb": 2496512, + "userDataCapacityNoTrimInKb": 2496512, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volumeAddressSpaceInKb": 922XXXXX, + "volumeAllocationLimitInKb": 3707XXXXX, 
+ "volumeIds": [ + "456afc7900XXXXXXXX" + ], + "vtreeAddresSpaceInKb": 92274688, + "vtreeIds": [ + "32b1681bXXXXXXXX", + ] + }, + "mediaType": "HDD", + "name": "pool1", + "numOfParallelRebuildRebalanceJobsPerDevice": 2, + "persistentChecksumBuilderLimitKb": 3072, + "persistentChecksumEnabled": true, + "persistentChecksumState": "Protected", + "persistentChecksumValidateOnRead": false, + "protectedMaintenanceModeIoPriorityAppBwPerDeviceThresholdInKbps": null, + "protectedMaintenanceModeIoPriorityAppIopsPerDeviceThreshold": null, + "protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps": 10240, + "protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice": 1, + "protectedMaintenanceModeIoPriorityPolicy": "limitNumOfConcurrentIos", + "protectedMaintenanceModeIoPriorityQuietPeriodInMsec": null, + "protectionDomainId": "9300c1f900000000", + "protectionDomainName": "domain1", + "rebalanceEnabled": true, + "rebalanceIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebalanceIoPriorityAppIopsPerDeviceThreshold": null, + "rebalanceIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebalanceIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebalanceIoPriorityPolicy": "favorAppIos", + "rebalanceIoPriorityQuietPeriodInMsec": null, + "rebuildEnabled": true, + "rebuildIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebuildIoPriorityAppIopsPerDeviceThreshold": null, + "rebuildIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebuildIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebuildIoPriorityPolicy": "limitNumOfConcurrentIos", + "rebuildIoPriorityQuietPeriodInMsec": null, + "replicationCapacityMaxRatio": 32, + "rmcacheWriteHandlingMode": "Cached", + "sparePercentage": 10, + "useRfcache": false, + "useRmcache": false, + "vtreeMigrationIoPriorityAppBwPerDeviceThresholdInKbps": null, + "vtreeMigrationIoPriorityAppIopsPerDeviceThreshold": null, + "vtreeMigrationIoPriorityBwLimitPerDeviceInKbps": 10240, + "vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice": 1, + 
"vtreeMigrationIoPriorityPolicy": "favorAppIos", + "vtreeMigrationIoPriorityQuietPeriodInMsec": null, + "zeroPaddingEnabled": true + } + ] +Volumes: + description: Details of volumes. + returned: always + type: list + contains: + id: + description: The ID of the volume. + type: str + mappedSdcInfo: + description: The details of the mapped SDC. + type: dict + contains: + sdcId: + description: ID of the SDC. + type: str + sdcName: + description: Name of the SDC. + type: str + sdcIp: + description: IP of the SDC. + type: str + accessMode: + description: mapping access mode for the specified volume. + type: str + limitIops: + description: IOPS limit for the SDC. + type: int + limitBwInMbps: + description: Bandwidth limit for the SDC. + type: int + name: + description: Name of the volume. + type: str + sizeInKb: + description: Size of the volume in Kb. + type: int + sizeInGb: + description: Size of the volume in Gb. + type: int + storagePoolId: + description: ID of the storage pool in which volume resides. + type: str + storagePoolName: + description: Name of the storage pool in which volume resides. + type: str + protectionDomainId: + description: ID of the protection domain in which volume resides. + type: str + protectionDomainName: + description: Name of the protection domain in which volume resides. + type: str + snapshotPolicyId: + description: ID of the snapshot policy associated with volume. + type: str + snapshotPolicyName: + description: Name of the snapshot policy associated with volume. + type: str + snapshotsList: + description: List of snapshots associated with the volume. + type: str + "statistics": + description: Statistics details of the storage pool. + type: dict + contains: + "numOfChildVolumes": + description: Number of child volumes. + type: int + "numOfMappedSdcs": + description: Number of mapped Sdcs of the volume. 
+ type: int + sample: [ + { + "accessModeLimit": "ReadWrite", + "ancestorVolumeId": null, + "autoSnapshotGroupId": null, + "compressionMethod": "Invalid", + "consistencyGroupId": null, + "creationTime": 1661234220, + "dataLayout": "MediumGranularity", + "id": "456afd7XXXXXXX", + "lockedAutoSnapshot": false, + "lockedAutoSnapshotMarkedForRemoval": false, + "managedBy": "ScaleIO", + "mappedSdcInfo": [ + { + "accessMode": "ReadWrite", + "isDirectBufferMapping": false, + "limitBwInMbps": 0, + "limitIops": 0, + "sdcId": "c42425cbXXXXX", + "sdcIp": "10.XXX.XX.XX", + "sdcName": null + } + ], + "name": "vol-1", + "notGenuineSnapshot": false, + "originalExpiryTime": 0, + "pairIds": null, + "replicationJournalVolume": false, + "replicationTimeStamp": 0, + "retentionLevels": [ + ], + "secureSnapshotExpTime": 0, + "sizeInKb": 8388608, + "snplIdOfAutoSnapshot": null, + "snplIdOfSourceVolume": null, + "statistics": { + "childVolumeIds": [ + ], + "descendantVolumeIds": [ + ], + "initiatorSdcId": null, + "mappedSdcIds": [ + "c42425XXXXXX" + ], + "numOfChildVolumes": 0, + "numOfDescendantVolumes": 0, + "numOfMappedSdcs": 1, + "registrationKey": null, + "registrationKeys": [ + ], + "replicationJournalVolume": false, + "replicationState": "UnmarkedForReplication", + "reservationType": "NotReserved", + "rplTotalJournalCap": 0, + "rplUsedJournalCap": 0, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + } + }, + "storagePoolId": "7630a248XXXXXXX", + "timeStampIsAccurate": false, + 
"useRmcache": false, + "volumeReplicationState": "UnmarkedForReplication", + "volumeType": "ThinProvisioned", + "vtreeId": "32b168bXXXXXX" + } + ] +Devices: + description: Details of devices. + returned: always + type: list + contains: + id: + description: device id. + type: str + name: + description: device name. + type: str + sample: [ + { + "id": "b6efa59900000000", + "name": "device230" + }, + { + "id": "b6efa5fa00020000", + "name": "device_node0" + }, + { + "id": "b7f3a60900010000", + "name": "device22" + } + ] +Replication_Consistency_Groups: + description: Details of rcgs. + returned: always + type: list + contains: + id: + description: The ID of the replication consistency group. + type: str + name: + description: The name of the replication consistency group. + type: str + protectionDomainId: + description: The Protection Domain ID of the replication consistency group. + type: str + peerMdmId: + description: The ID of the peer MDM of the replication consistency group. + type: str + remoteId: + description: The ID of the remote replication consistency group. + type: str + remoteMdmId: + description: The ID of the remote MDM of the replication consistency group. + type: str + currConsistMode: + description: The current consistency mode of the replication consistency group. + type: str + freezeState: + description: The freeze state of the replication consistency group. + type: str + lifetimeState: + description: The Lifetime state of the replication consistency group. + type: str + pauseMode: + description: The Lifetime state of the replication consistency group. + type: str + snapCreationInProgress: + description: Whether the process of snapshot creation of the replication consistency group is in progress or not. + type: bool + lastSnapGroupId: + description: ID of the last snapshot of the replication consistency group. + type: str + lastSnapCreationRc: + description: The return code of the last snapshot of the replication consistency group. 
+ type: int + targetVolumeAccessMode: + description: The access mode of the target volume of the replication consistency group. + type: str + remoteProtectionDomainId: + description: The ID of the remote Protection Domain. + type: str + remoteProtectionDomainName: + description: The Name of the remote Protection Domain. + type: str + failoverType: + description: The type of failover of the replication consistency group. + type: str + failoverState: + description: The state of failover of the replication consistency group. + type: str + activeLocal: + description: Whether the local replication consistency group is active. + type: bool + activeRemote: + description: Whether the remote replication consistency group is active. + type: bool + abstractState: + description: The abstract state of the replication consistency group. + type: str + localActivityState: + description: The state of activity of the local replication consistency group. + type: str + remoteActivityState: + description: The state of activity of the remote replication consistency group. + type: str + inactiveReason: + description: The reason for the inactivity of the replication consistency group. + type: int + rpoInSeconds: + description: The RPO value of the replication consistency group in seconds. + type: int + replicationDirection: + description: The direction of the replication of the replication consistency group. + type: str + disasterRecoveryState: + description: The state of disaster recovery of the local replication consistency group. + type: str + remoteDisasterRecoveryState: + description: The state of disaster recovery of the remote replication consistency group. + type: str + error: + description: The error code of the replication consistency group. + type: int + type: + description: The type of the replication consistency group.
+ type: str + sample: { + "protectionDomainId": "b969400500000000", + "peerMdmId": "6c3d94f600000000", + "remoteId": "2130961a00000000", + "remoteMdmId": "0e7a082862fedf0f", + "currConsistMode": "Consistent", + "freezeState": "Unfrozen", + "lifetimeState": "Normal", + "pauseMode": "None", + "snapCreationInProgress": false, + "lastSnapGroupId": "e58280b300000001", + "lastSnapCreationRc": "SUCCESS", + "targetVolumeAccessMode": "NoAccess", + "remoteProtectionDomainId": "4eeb304600000000", + "remoteProtectionDomainName": "domain1", + "failoverType": "None", + "failoverState": "None", + "activeLocal": true, + "activeRemote": true, + "abstractState": "Ok", + "localActivityState": "Active", + "remoteActivityState": "Active", + "inactiveReason": 11, + "rpoInSeconds": 30, + "replicationDirection": "LocalToRemote", + "disasterRecoveryState": "None", + "remoteDisasterRecoveryState": "None", + "error": 65, + "name": "test_rcg", + "type": "User", + "id": "aadc17d500000000" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('info') + + +class PowerFlexInfo(object): + """Class with Info operations""" + + filter_mapping = {'equal': 'eq.'} + + def __init__(self): + """ Define all parameters required by this module""" + + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_info_parameters()) + + self.filter_keys = sorted( + [k for k in self.module_params['filters']['options'].keys() + if 'filter' in k]) + + """ initialize the ansible module """ + self.module = AnsibleModule(argument_spec=self.module_params, + supports_check_mode=True) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info('Got the PowerFlex system connection object instance') + LOG.info('The check_mode flag %s', 
self.module.check_mode) + + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def get_api_details(self): + """ Get api details of the array """ + try: + LOG.info('Getting API details ') + api_version = self.powerflex_conn.system.api_version() + return api_version + + except Exception as e: + msg = 'Get API details from Powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_array_details(self): + """ Get system details of a powerflex array """ + + try: + LOG.info('Getting array details ') + entity_list = ['addressSpaceUsage', 'authenticationMethod', + 'capacityAlertCriticalThresholdPercent', + 'capacityAlertHighThresholdPercent', + 'capacityTimeLeftInDays', 'cliPasswordAllowed', + 'daysInstalled', 'defragmentationEnabled', + 'enterpriseFeaturesEnabled', 'id', 'installId', + 'isInitialLicense', 'lastUpgradeTime', + 'managementClientSecureCommunicationEnabled', + 'maxCapacityInGb', 'mdmCluster', + 'mdmExternalPort', 'mdmManagementPort', + 'mdmSecurityPolicy', 'showGuid', 'swid', + 'systemVersionName', 'tlsVersion', 'upgradeState'] + + sys_list = self.powerflex_conn.system.get() + sys_details_list = [] + for sys in sys_list: + sys_details = {} + for entity in entity_list: + if entity in sys.keys(): + sys_details.update({entity: sys[entity]}) + if sys_details: + sys_details_list.append(sys_details) + + return sys_details_list + + except Exception as e: + msg = 'Get array details from Powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_sdc_list(self, filter_dict=None): + """ Get the list of sdcs on a given PowerFlex storage system """ + + try: + LOG.info('Getting SDC list ') + if filter_dict: + sdc = self.powerflex_conn.sdc.get(filter_fields=filter_dict) + else: + sdc = self.powerflex_conn.sdc.get() + return result_list(sdc) + + except Exception as e: + msg = 'Get SDC list from powerflex array failed with' \ + ' 
error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_sds_list(self, filter_dict=None): + """ Get the list of sdses on a given PowerFlex storage system """ + + try: + LOG.info('Getting SDS list ') + if filter_dict: + sds = self.powerflex_conn.sds.get(filter_fields=filter_dict) + else: + sds = self.powerflex_conn.sds.get() + return result_list(sds) + + except Exception as e: + msg = 'Get sds list from powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_pd_list(self, filter_dict=None): + """ Get the list of Protection Domains on a given PowerFlex + storage system """ + + try: + LOG.info('Getting protection domain list ') + + if filter_dict: + pd = self.powerflex_conn.protection_domain.get(filter_fields=filter_dict) + else: + pd = self.powerflex_conn.protection_domain.get() + return result_list(pd) + + except Exception as e: + msg = 'Get protection domain list from powerflex array failed ' \ + 'with error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_storage_pool_list(self, filter_dict=None): + """ Get the list of storage pools on a given PowerFlex storage + system """ + + try: + LOG.info('Getting storage pool list ') + if filter_dict: + pool = self.powerflex_conn.storage_pool.get(filter_fields=filter_dict) + else: + pool = self.powerflex_conn.storage_pool.get() + + if pool: + statistics_map = self.powerflex_conn.utility.get_statistics_for_all_storagepools() + list_of_pool_ids_in_statistics = statistics_map.keys() + for item in pool: + item['statistics'] = statistics_map[item['id']] if item['id'] in list_of_pool_ids_in_statistics else {} + return result_list(pool) + + except Exception as e: + msg = 'Get storage pool list from powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_replication_consistency_group_list(self, filter_dict=None): + """ Get the list of replication 
consistency group on a given PowerFlex storage + system """ + + try: + LOG.info('Getting replication consistency group list ') + if filter_dict: + rcgs = self.powerflex_conn.replication_consistency_group.get(filter_fields=filter_dict) + else: + rcgs = self.powerflex_conn.replication_consistency_group.get() + if rcgs: + api_version = self.powerflex_conn.system.get()[0]['mdmCluster']['master']['versionInfo'] + statistics_map = \ + self.powerflex_conn.replication_consistency_group.get_all_statistics(utils.is_version_less_than_3_6(api_version)) + list_of_rcg_ids_in_statistics = statistics_map.keys() + for rcg in rcgs: + rcg.pop('links', None) + rcg['statistics'] = statistics_map[rcg['id']] if rcg['id'] in list_of_rcg_ids_in_statistics else {} + return result_list(rcgs) + + except Exception as e: + msg = 'Get replication consistency group list from powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_volumes_list(self, filter_dict=None): + """ Get the list of volumes on a given PowerFlex storage + system """ + + try: + LOG.info('Getting volumes list ') + if filter_dict: + volumes = self.powerflex_conn.volume.get(filter_fields=filter_dict) + else: + volumes = self.powerflex_conn.volume.get() + + if volumes: + statistics_map = self.powerflex_conn.utility.get_statistics_for_all_volumes() + list_of_vol_ids_in_statistics = statistics_map.keys() + for item in volumes: + item['statistics'] = statistics_map[item['id']] if item['id'] in list_of_vol_ids_in_statistics else {} + return result_list(volumes) + + except Exception as e: + msg = 'Get volumes list from powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_snapshot_policy_list(self, filter_dict=None): + """ Get the list of snapshot schedules on a given PowerFlex storage + system """ + + try: + LOG.info('Getting snapshot schedules list ') + if filter_dict: + snapshot_schedules = \ + 
self.powerflex_conn.snapshot_policy.get( + filter_fields=filter_dict) + else: + snapshot_schedules = \ + self.powerflex_conn.snapshot_policy.get() + + return result_list(snapshot_schedules) + + except Exception as e: + msg = 'Get snapshot schedules list from powerflex array failed ' \ + 'with error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_devices_list(self, filter_dict=None): + """ Get the list of devices on a given PowerFlex storage + system """ + + try: + LOG.info('Getting device list ') + if filter_dict: + devices = self.powerflex_conn.device.get(filter_fields=filter_dict) + else: + devices = self.powerflex_conn.device.get() + + return result_list(devices) + + except Exception as e: + msg = 'Get device list from powerflex array failed ' \ + 'with error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def validate_filter(self, filter_dict): + """ Validate given filter_dict """ + + is_invalid_filter = self.filter_keys != sorted(list(filter_dict)) + if is_invalid_filter: + msg = "Filter should have all keys: '{0}'".format( + ", ".join(self.filter_keys)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + is_invalid_filter = [filter_dict[i] is None for i in filter_dict] + if True in is_invalid_filter: + msg = "Filter keys: '{0}' cannot be None".format(self.filter_keys) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_filters(self, filters): + """Get the filters to be applied""" + + filter_dict = {} + for item in filters: + self.validate_filter(item) + f_op = item['filter_operator'] + if self.filter_mapping.get(f_op): + f_key = item['filter_key'] + f_val = item['filter_value'] + if f_key in filter_dict: + # multiple filters on same key + if isinstance(filter_dict[f_key], list): + # prev_val is list, so append new f_val + filter_dict[f_key].append(f_val) + else: + # prev_val is not list, + # so create list with prev_val & f_val + filter_dict[f_key] = [filter_dict[f_key], f_val] + else: + 
filter_dict[f_key] = f_val + else: + msg = "Given filter operator '{0}' is not supported." \ + "supported operators are : '{1}'".format( + f_op, + list(self.filter_mapping.keys())) + LOG.error(msg) + self.module.fail_json(msg=msg) + return filter_dict + + def perform_module_operation(self): + """ Perform different actions on info based on user input + in the playbook """ + + filters = self.module.params['filters'] + filter_dict = {} + if filters: + filter_dict = self.get_filters(filters) + LOG.info('filters: %s', filter_dict) + + api_version = self.get_api_details() + array_details = self.get_array_details() + sdc = [] + sds = [] + storage_pool = [] + vol = [] + snapshot_policy = [] + protection_domain = [] + device = [] + rcgs = [] + + subset = self.module.params['gather_subset'] + if subset is not None: + if 'sdc' in subset: + sdc = self.get_sdc_list(filter_dict=filter_dict) + if 'sds' in subset: + sds = self.get_sds_list(filter_dict=filter_dict) + if 'protection_domain' in subset: + protection_domain = self.get_pd_list(filter_dict=filter_dict) + if 'storage_pool' in subset: + storage_pool = self.get_storage_pool_list(filter_dict=filter_dict) + if 'vol' in subset: + vol = self.get_volumes_list(filter_dict=filter_dict) + if 'snapshot_policy' in subset: + snapshot_policy = self.get_snapshot_policy_list(filter_dict=filter_dict) + if 'device' in subset: + device = self.get_devices_list(filter_dict=filter_dict) + if 'rcg' in subset: + rcgs = self.get_replication_consistency_group_list(filter_dict=filter_dict) + + self.module.exit_json( + Array_Details=array_details, + API_Version=api_version, + SDCs=sdc, + SDSs=sds, + Storage_Pools=storage_pool, + Volumes=vol, + Snapshot_Policies=snapshot_policy, + Protection_Domains=protection_domain, + Devices=device, + Replication_Consistency_Groups=rcgs + ) + + +def result_list(entity): + """ Get the name and id associated with the PowerFlex entities """ + result = [] + if entity: + LOG.info('Successfully listed.') + for item in 
entity: + if item['name']: + result.append(item) + else: + result.append({"id": item['id']}) + return result + else: + return None + + +def get_powerflex_info_parameters(): + """This method provides parameters required for the ansible + info module on powerflex""" + return dict( + gather_subset=dict(type='list', required=False, elements='str', + choices=['vol', 'storage_pool', + 'protection_domain', 'sdc', 'sds', + 'snapshot_policy', 'device', 'rcg']), + filters=dict(type='list', required=False, elements='dict', + options=dict(filter_key=dict(type='str', required=True, no_log=False), + filter_operator=dict( + type='str', required=True, + choices=['equal']), + filter_value=dict(type='str', required=True) + ))) + + +def main(): + """ Create PowerFlex info object and perform action on it + based on user input from playbook""" + obj = PowerFlexInfo() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py b/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py new file mode 100644 index 00000000..25c5058a --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py @@ -0,0 +1,1333 @@ +#!/usr/bin/python + +# Copyright: (c) 2022, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing MDM Cluster on PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: mdm_cluster +version_added: '1.3.0' +short_description: Manage MDM cluster on Dell PowerFlex +description: +- Managing MDM cluster and MDMs on PowerFlex storage system includes + adding/removing standby MDM, modify MDM name and virtual interface. +- It also includes getting details of MDM cluster, modify MDM cluster + ownership, cluster mode, and performance profile. 
+author: +- Bhavneet Sharma (@sharmb5) <ansible.team@dell.com> +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + mdm_name: + description: + - The name of the MDM. It is unique across the PowerFlex array. + - Mutually exclusive with I(mdm_id). + - If mdm_name passed in add standby operation, then same name will be + assigned to the new standby mdm. + type: str + mdm_id: + description: + - The ID of the MDM. + - Mutually exclusive with I(mdm_name). + type: str + mdm_new_name: + description: + - To rename the MDM. + type: str + standby_mdm: + description: + - Specifies add standby MDM parameters. + type: dict + suboptions: + mdm_ips: + description: + - List of MDM IPs that will be assigned to new MDM. It can contain + IPv4 addresses. + required: true + type: list + elements: str + role: + description: + - Role of new MDM. + required: true + choices: ['Manager', 'TieBreaker'] + type: str + management_ips: + description: + - List of management IPs to manage MDM. It can contain IPv4 + addresses. + type: list + elements: str + port: + description: + - Specifies the port of new MDM. + type: int + allow_multiple_ips: + description: + - Allow the added node to have different number of IPs from the + primary node. + type: bool + virtual_interfaces: + description: + - List of NIC interfaces that will be used for virtual IP addresses. + type: list + elements: str + is_primary: + description: + - Set I(is_primary) as C(true) to change MDM cluster ownership from the current + master MDM to different MDM. + - Set I(is_primary) as C(false), will return MDM cluster details. + - New owner MDM must be an MDM with a manager role. + type: bool + cluster_mode: + description: + - Mode of the cluster. + choices: ['OneNode', 'ThreeNodes', 'FiveNodes'] + type: str + mdm: + description: + - Specifies parameters to add/remove MDMs to/from the MDM cluster. 
+ type: list + elements: dict + suboptions: + mdm_id: + description: + - ID of MDM that will be added/removed to/from the cluster. + type: str + mdm_name: + description: + - Name of MDM that will be added/removed to/from the cluster. + type: str + mdm_type: + description: + - Type of the MDM. + - Either I(mdm_id) or I(mdm_name) must be passed with mdm_type. + required: true + choices: ['Secondary', 'TieBreaker'] + type: str + mdm_state: + description: + - Mapping state of MDM. + choices: ['present-in-cluster', 'absent-in-cluster'] + type: str + virtual_ip_interfaces: + description: + - List of interfaces to be used for virtual IPs. + - The order of interfaces must be matched with virtual IPs assigned to the + cluster. + - Interfaces of the primary and secondary type MDMs are allowed to modify. + - The I(virtual_ip_interfaces) is mutually exclusive with I(clear_interfaces). + type: list + elements: str + clear_interfaces: + description: + - Clear all virtual IP interfaces. + - The I(clear_interfaces) is mutually exclusive with I(virtual_ip_interfaces). + type: bool + performance_profile: + description: + - Apply performance profile to cluster MDMs. + choices: ['Compact', 'HighPerformance'] + type: str + state: + description: + - State of the MDM cluster. + choices: ['present', 'absent'] + required: true + type: str +notes: + - Parameters I(mdm_name) or I(mdm_id) are mandatory for rename and modify virtual IP + interfaces. + - Parameters I(mdm_name) or I(mdm_id) are not required while modifying performance + profile. + - For change MDM cluster ownership operation, only changed as True will be + returned and for idempotency case MDM cluster details will be returned. + - Reinstall all SDC after changing ownership to some newly added MDM. + - To add manager standby MDM, MDM package must be installed with manager + role. + - The I(check_mode) is supported. 
+''' + +EXAMPLES = r''' +- name: Add a standby MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + standby_mdm: + mdm_ips: + - "10.x.x.x" + role: "TieBreaker" + management_ips: + - "10.x.y.z" + state: "present" + +- name: Remove a standby MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + state: "absent" + +- name: Switch cluster mode from 3 node to 5 node MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + cluster_mode: "FiveNodes" + mdm: + - mdm_id: "5f091a8a013f1100" + mdm_type: "Secondary" + - mdm_name: "mdm_1" + mdm_type: "TieBreaker" + sdc_state: "present-in-cluster" + state: "present" + +- name: Switch cluster mode from 5 node to 3 node MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + cluster_mode: "ThreeNodes" + mdm: + - mdm_id: "5f091a8a013f1100" + mdm_type: "Secondary" + - mdm_name: "mdm_1" + mdm_type: "TieBreaker" + sdc_state: "absent-in-cluster" + state: "present" + +- name: Get the details of the MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + state: "present" + +- name: Change ownership of MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_2" + is_primary: True + state: "present" + +- name: Modify performance 
profile + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + performance_profile: "HighPerformance" + state: "present" + +- name: Rename the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + mdm_new_name: "new_mdm_1" + state: "present" + +- name: Modify virtual IP interface of the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + virtual_ip_interfaces: + - "ens224" + state: "present" + +- name: Clear virtual IP interface of the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + clear_interfaces: True + state: "present" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +mdm_cluster_details: + description: Details of the MDM cluster. + returned: When MDM cluster exists + type: dict + contains: + id: + description: The ID of the MDM cluster. + type: str + name: + description: Name of MDM cluster. + type: str + clusterMode: + description: Mode of the MDM cluster. + type: str + master: + description: The details of the master MDM. + type: dict + contains: + id: + description: ID of the MDM. + type: str + name: + description: Name of the MDM. + type: str + port: + description: Port of the MDM. + type: str + ips: + description: List of IPs for master MDM. + type: list + managementIPs: + description: List of management IPs for master MDM. + type: list + role: + description: Role of MDM. 
+ type: str + status: + description: Status of MDM. + type: str + versionInfo: + description: Version of MDM. + type: str + virtualInterfaces: + description: List of virtual interfaces + type: list + opensslVersion: + description: OpenSSL version. + type: str + slaves: + description: The list of the secondary MDMs. + type: list + elements: dict + contains: + id: + description: ID of the MDM. + type: str + name: + description: Name of the MDM. + type: str + port: + description: Port of the MDM. + type: str + ips: + description: List of IPs for secondary MDM. + type: list + managementIPs: + description: List of management IPs for secondary MDM. + type: list + role: + description: Role of MDM. + type: str + status: + description: Status of MDM. + type: str + versionInfo: + description: Version of MDM. + type: str + virtualInterfaces: + description: List of virtual interfaces + type: list + opensslVersion: + description: OpenSSL version. + type: str + tieBreakers: + description: The list of the TieBreaker MDMs. + type: list + elements: dict + contains: + id: + description: ID of the MDM. + type: str + name: + description: Name of the MDM. + type: str + port: + description: Port of the MDM. + type: str + ips: + description: List of IPs for tie-breaker MDM. + type: list + managementIPs: + description: List of management IPs for tie-breaker MDM. + type: list + role: + description: Role of MDM. + type: str + status: + description: Status of MDM. + type: str + versionInfo: + description: Version of MDM. + type: str + opensslVersion: + description: OpenSSL version. + type: str + standbyMDMs: + description: The list of the standby MDMs. + type: list + elements: dict + contains: + id: + description: ID of the MDM. + type: str + name: + description: Name of the MDM. + type: str + port: + description: Port of the MDM. + type: str + ips: + description: List of IPs for MDM. + type: list + managementIPs: + description: List of management IPs for MDM. 
+ type: list + role: + description: Role of MDM. + type: str + status: + description: Status of MDM. + type: str + versionInfo: + description: Version of MDM. + type: str + virtualInterfaces: + description: List of virtual interfaces. + type: list + opensslVersion: + description: OpenSSL version. + type: str + clusterState: + description: State of the MDM cluster. + type: str + goodNodesNum: + description: Number of Nodes in MDM cluster. + type: int + goodReplicasNum: + description: Number of nodes for Replication. + type: int + virtualIps: + description: List of virtual IPs. + type: list + sample: { + "clusterState": "ClusteredNormal", + "clusterMode": "ThreeNodes", + "goodNodesNum": 3, + "master": { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + "10.x.y.z" + ], + "ips": [ + "10.x.y.z" + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "OpenSSL 1.0.2k-fips 26 Jan 2017", + "role": "Manager", + "status": "Normal", + "name": "sample_mdm", + "id": "5908d328581d1400", + "port": 9011 + }, + "perfProfile": "HighPerformance", + "slaves": [ + { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + "10.x.x.z" + ], + "ips": [ + "10.x.x.z" + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "OpenSSL 1.0.2k-fips 26 Jan 2017", + "role": "Manager", + "status": "Normal", + "name": "sample_mdm1", + "id": "5908d328581d1401", + "port": 9011 + } + ], + "tieBreakers": [ + { + "virtualInterfaces": [], + "managementIPs": [], + "ips": [ + "10.x.y.y" + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "id": "5908d328581d1402", + "port": 9011 + } + ], + "standbyMDMs": [ + { + "virtualInterfaces": [], + "managementIPs": [ + "10.x.z.z" + ], + "ips": [ + "10.x.z.z" + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "id": "5908d328581d1403", + "port": 9011 + } + ], + "goodReplicasNum": 2, + "id": "cdd883cf00000002" + } +''' + +from 
ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils +import copy + +LOG = utils.get_logger('mdm_cluster') + + +class PowerFlexMdmCluster(object): + """Class with MDM cluster operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_mdm_cluster_parameters()) + + mut_ex_args = [['mdm_name', 'mdm_id'], + ['virtual_ip_interfaces', 'clear_interfaces']] + + required_together_args = [['cluster_mode', 'mdm', 'mdm_state']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=True, + mutually_exclusive=mut_ex_args, + required_together=required_together_args) + + utils.ensure_required_libs(self.module) + + self.not_exist_msg = "MDM {0} does not exists in MDM cluster." + self.exist_msg = "MDM already exists in the MDM cluster" + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + LOG.info('Check Mode Flag %s', self.module.check_mode) + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def set_mdm_virtual_interface(self, mdm_id=None, mdm_name=None, + virtual_ip_interfaces=None, + clear_interfaces=None, + mdm_cluster_details=None): + """Modify the MDM virtual IP interface. + :param mdm_id: ID of MDM + :param mdm_name: Name of MDM + :param virtual_ip_interfaces: List of virtual IP interfaces + :param clear_interfaces: clear virtual IP interfaces of MDM. 
+ :param mdm_cluster_details: Details of MDM cluster + :return: True if modification of virtual interface or clear operation + successful + """ + + name_or_id = mdm_id if mdm_id else mdm_name + if mdm_name is None and mdm_id is None: + err_msg = "Please provide mdm_name/mdm_id to modify virtual IP" \ + " interfaces the MDM." + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + mdm_details = self.\ + is_mdm_name_id_exists(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=mdm_cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + + mdm_id = mdm_details['id'] + modify_list = [] + modify_list, clear = is_modify_mdm_virtual_interface( + virtual_ip_interfaces, clear_interfaces, mdm_details) + + if modify_list is None and not clear: + LOG.info("No change required in MDM virtual IP interfaces.") + return False + + try: + log_msg = "Modifying MDM virtual interfaces to %s " \ + "or %s" % (str(modify_list), clear) + LOG.info(log_msg) + if not self.module.check_mode: + self.powerflex_conn.system.modify_virtual_ip_interface( + mdm_id=mdm_id, virtual_ip_interfaces=modify_list, + clear_interfaces=clear) + return True + except Exception as e: + error_msg = "Failed to modify the virtual IP interfaces of MDM " \ + "{0} with error {1}".format(name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def set_performance_profile(self, performance_profile=None, + cluster_details=None): + """ Set the performance profile of Cluster MDMs + :param performance_profile: Specifies the performance profile of MDMs + :param cluster_details: Details of MDM cluster + :return: True if updated successfully + """ + + if self.module.params['state'] == 'present' and performance_profile: + if cluster_details['perfProfile'] != performance_profile: + try: + if not self.module.check_mode: + self.powerflex_conn.system.\ + 
set_cluster_mdm_performance_profile(performance_profile=performance_profile) + return True + except Exception as e: + error_msg = "Failed to update performance profile to {0} " \ + "with error {1}.".format(performance_profile, + str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + return False + return False + + def rename_mdm(self, mdm_name=None, mdm_id=None, mdm_new_name=None, + cluster_details=None): + """Rename the MDM + :param mdm_name: Name of the MDM. + :param mdm_id: ID of the MDM. + :param mdm_new_name: New name of the MDM. + :param cluster_details: Details of the MDM cluster. + :return: True if successfully renamed. + """ + + name_or_id = mdm_id if mdm_id else mdm_name + if mdm_name is None and mdm_id is None: + err_msg = "Please provide mdm_name/mdm_id to rename the MDM." + self.module.fail_json(msg=err_msg) + mdm_details = self.\ + is_mdm_name_id_exists(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + + mdm_id = mdm_details['id'] + try: + if ('name' in mdm_details and + mdm_new_name != mdm_details['name']) or \ + 'name' not in mdm_details: + log_msg = "Modifying the MDM name from %s to " \ + "%s." 
% (mdm_name, mdm_new_name) + LOG.info(log_msg) + if not self.module.check_mode: + self.powerflex_conn.system.rename_mdm( + mdm_id=mdm_id, mdm_new_name=mdm_new_name) + return True + except Exception as e: + error_msg = "Failed to rename the MDM {0} with error {1}.".\ + format(name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def is_none_name_id_in_switch_cluster_mode(self, mdm): + """ Check whether mdm dict have mdm_name and mdm_id or not""" + + for node in mdm: + if node['mdm_id'] and node['mdm_name']: + msg = "parameters are mutually exclusive: mdm_name|mdm_id" + self.module.fail_json(msg=msg) + + def change_cluster_mode(self, cluster_mode, mdm, cluster_details): + """change the MDM cluster mode. + :param cluster_mode: specifies the mode of MDM cluster + :param mdm: A dict containing parameters to change MDM cluster mode + :param cluster_details: Details of MDM cluster + :return: True if mode changed successfully + """ + + self.is_none_name_id_in_switch_cluster_mode(mdm=mdm) + + if cluster_mode == cluster_details['clusterMode']: + LOG.info("MDM cluster is already in required mode.") + return False + + add_secondary = [] + add_tb = [] + remove_secondary = [] + remove_tb = [] + if self.module.params['state'] == 'present' and \ + self.module.params['mdm_state'] == 'present-in-cluster': + add_secondary, add_tb = self.cluster_expand_list(mdm, cluster_details) + elif self.module.params['state'] == 'present' and \ + self.module.params['mdm_state'] == 'absent-in-cluster': + remove_secondary, remove_tb = self.\ + cluster_reduce_list(mdm, cluster_details) + try: + if not self.module.check_mode: + self.powerflex_conn.system.switch_cluster_mode( + cluster_mode=cluster_mode, add_secondary=add_secondary, + remove_secondary=remove_secondary, add_tb=add_tb, + remove_tb=remove_tb) + return True + except Exception as e: + err_msg = "Failed to change the MDM cluster mode with error " \ + "{0}".format(str(e)) + LOG.error(err_msg) + 
self.module.fail_json(msg=err_msg) + + def gather_secondarys_ids(self, mdm, cluster_details): + """ Prepare a list of secondary MDMs for switch cluster mode + operation""" + + secondarys = [] + + for node in mdm: + name_or_id = node['mdm_name'] if node['mdm_name'] else \ + node['mdm_id'] + + if node['mdm_type'] == 'Secondary' and node['mdm_id'] is not None: + mdm_details = self. \ + is_mdm_name_id_exists(mdm_id=node['mdm_id'], + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + secondarys.append(node['mdm_id']) + + elif node['mdm_type'] == 'Secondary' and node['mdm_name'] is not None: + mdm_details = self. \ + is_mdm_name_id_exists(mdm_name=node['mdm_name'], + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + secondarys.append(mdm_details['id']) + return secondarys + + def cluster_expand_list(self, mdm, cluster_details): + """Whether MDM cluster expansion is required or not. + """ + add_secondary = [] + add_tb = [] + + if 'standbyMDMs' not in cluster_details: + err_msg = "No Standby MDMs found. To expand cluster size, " \ + "first add standby MDMs." + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + add_secondary = self.gather_secondarys_ids(mdm, cluster_details) + for node in mdm: + name_or_id = node['mdm_name'] if node['mdm_name'] else \ + node['mdm_id'] + + if node['mdm_type'] == 'TieBreaker' and \ + node['mdm_id'] is not None: + add_tb.append(node['mdm_id']) + + elif node['mdm_type'] == 'TieBreaker' and \ + node['mdm_name'] is not None: + mdm_details = self. 
\ + is_mdm_name_id_exists(mdm_name=node['mdm_name'], + cluster_details=cluster_details) + + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + add_tb.append(mdm_details['id']) + + log_msg = "expand List are: %s, %s" % (add_secondary, add_tb) + LOG.info(log_msg) + return add_secondary, add_tb + + def cluster_reduce_list(self, mdm, cluster_details): + """Whether MDM cluster reduction is required or not. + """ + remove_secondary = [] + remove_tb = [] + + remove_secondary = self.gather_secondarys_ids(mdm, cluster_details) + for node in mdm: + name_or_id = node['mdm_name'] if node['mdm_name'] else \ + node['mdm_id'] + + if node['mdm_type'] == 'TieBreaker' and \ + node['mdm_id'] is not None: + mdm_details = self. \ + is_mdm_name_id_exists(mdm_id=node['mdm_id'], + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + remove_tb.append(mdm_details['id']) + + elif node['mdm_type'] == 'TieBreaker' and \ + node['mdm_name'] is not None: + mdm_details = self.\ + is_mdm_name_id_exists(mdm_name=node['mdm_name'], + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + remove_tb.append(mdm_details['id']) + + log_msg = "Reduce List are: %s, %s." 
% (remove_secondary, remove_tb) + LOG.info(log_msg) + return remove_secondary, remove_tb + + def perform_add_standby(self, mdm_name, standby_payload): + """ Perform SDK call to add a standby MDM + + :param mdm_name: Name of new standby MDM + :param standby_payload: Parameters dict to add a standby MDM + :return: True if standby MDM added successfully + """ + try: + if not self.module.check_mode: + self.powerflex_conn.system.add_standby_mdm( + mdm_ips=standby_payload['mdm_ips'], + role=standby_payload['role'], + management_ips=standby_payload['management_ips'], + mdm_name=mdm_name, port=standby_payload['port'], + allow_multiple_ips=standby_payload['allow_multiple_ips'], + virtual_interface=standby_payload['virtual_interfaces']) + return True + except Exception as e: + err_msg = "Failed to Add a standby MDM with error {0}.".format( + str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def is_id_new_name_in_add_mdm(self): + """ Check whether mdm_id or mdm_new_name present in Add standby MDM""" + + if self.module.params['mdm_id'] or self.module.params['mdm_new_name']: + err_msg = "Parameters mdm_id/mdm_new_name are not allowed while" \ + " adding a standby MDM. Please try with valid " \ + "parameters to add a standby MDM." 
+ LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def add_standby_mdm(self, mdm_name, standby_mdm, cluster_details): + """ Adding a standby MDM""" + + if self.module.params['state'] == 'present' and \ + standby_mdm is not None and \ + (self.check_mdm_exists(standby_mdm['mdm_ips'], + cluster_details)): + self.is_id_new_name_in_add_mdm() + mdm_details = self.\ + is_mdm_name_id_exists(mdm_name=mdm_name, + cluster_details=cluster_details) + if mdm_details: + LOG.info("Standby MDM %s exits in the system", mdm_name) + return False, cluster_details + + standby_payload = prepare_standby_payload(standby_mdm) + standby_add = self.perform_add_standby(mdm_name, standby_payload) + + if standby_add: + cluster_details = self.get_mdm_cluster_details() + msg = "Fetched the MDM cluster details {0} after adding a " \ + "standby MDM".format(str(cluster_details)) + LOG.info(msg) + return True, cluster_details + return False, cluster_details + + def remove_standby_mdm(self, mdm_name, mdm_id, cluster_details): + """ Remove the Standby MDM + :param mdm_id: ID of MDM that will become owner of MDM cluster + :param mdm_name: Name of MDM that will become owner of MDM cluster + :param cluster_details: Details of MDM cluster + :return: True if MDM removed successful + """ + + name_or_id = mdm_id if mdm_id else mdm_name + if mdm_id is None and mdm_name is None: + err_msg = "Either mdm_name or mdm_id is required while removing" \ + " the standby MDM." + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + mdm_details = self. 
\ + is_mdm_name_id_exists(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=cluster_details) + if mdm_details is None: + LOG.info("MDM %s not exists in MDM cluster.", name_or_id) + return False + mdm_id = mdm_details['id'] + + try: + if not self.module.check_mode: + self.powerflex_conn.system.remove_standby_mdm(mdm_id=mdm_id) + return True + except Exception as e: + error_msg = "Failed to remove the standby MDM {0} from the MDM " \ + "cluster with error {1}".format(name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def change_ownership(self, mdm_id=None, mdm_name=None, + cluster_details=None): + """ Change the ownership of MDM cluster. + :param mdm_id: ID of MDM that will become owner of MDM cluster + :param mdm_name: Name of MDM that will become owner of MDM cluster + :param cluster_details: Details of MDM cluster + :return: True if Owner of MDM cluster change successful + """ + + name_or_id = mdm_id if mdm_id else mdm_name + if mdm_id is None and mdm_name is None: + err_msg = "Either mdm_name or mdm_id is required while changing" \ + " ownership of MDM cluster." 
+ LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + mdm_details = self.\ + is_mdm_name_id_exists(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + + mdm_id = mdm_details['id'] + + if mdm_details['id'] == cluster_details['master']['id']: + LOG.info("MDM %s is already Owner of MDM cluster.", name_or_id) + return False + else: + try: + if not self.module.check_mode: + self.powerflex_conn.system.\ + change_mdm_ownership(mdm_id=mdm_id) + return True + except Exception as e: + error_msg = "Failed to update the Owner of MDM cluster to " \ + "MDM {0} with error {1}".format(name_or_id, + str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def find_mdm_in_secondarys(self, mdm_name=None, mdm_id=None, + cluster_details=None, name_or_id=None): + """Whether MDM exists with mdm_name or id in secondary MDMs""" + for mdm in cluster_details['slaves']: + if ('name' in mdm and mdm_name == mdm['name']) or \ + mdm_id == mdm['id']: + LOG.info("MDM %s found in Secondarys MDM.", name_or_id) + return mdm + + def find_mdm_in_tb(self, mdm_name=None, mdm_id=None, + cluster_details=None, name_or_id=None): + """Whether MDM exists with mdm_name or id in tie-breaker MDMs""" + + for mdm in cluster_details['tieBreakers']: + if ('name' in mdm and mdm_name == mdm['name']) or \ + mdm_id == mdm['id']: + LOG.info("MDM %s found in tieBreakers MDM.", name_or_id) + return mdm + + def find_mdm_in_standby(self, mdm_name=None, mdm_id=None, + cluster_details=None, name_or_id=None): + """Whether MDM exists with mdm_name or id in standby MDMs""" + + if 'standbyMDMs' in cluster_details: + for mdm in cluster_details['standbyMDMs']: + if ('name' in mdm and mdm_name == mdm['name']) or \ + mdm_id == mdm['id']: + LOG.info("MDM %s found in standby MDM.", name_or_id) + return mdm + + def is_mdm_name_id_exists(self, mdm_id=None, mdm_name=None, + 
cluster_details=None): + """Whether MDM exists with mdm_name or id """ + + name_or_id = mdm_id if mdm_id else mdm_name + # check in master MDM + if ('name' in cluster_details['master'] and mdm_name == cluster_details['master']['name']) \ + or mdm_id == cluster_details['master']['id']: + LOG.info("MDM %s is master MDM.", name_or_id) + return cluster_details['master'] + + # check in secondary MDMs + secondary_mdm = [] + secondary_mdm = self.\ + find_mdm_in_secondarys(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=cluster_details, + name_or_id=name_or_id) + if secondary_mdm is not None: + return secondary_mdm + + # check in tie-breaker MDMs + tb_mdm = [] + tb_mdm = self.find_mdm_in_tb(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=cluster_details, + name_or_id=name_or_id) + if tb_mdm is not None: + return tb_mdm + + # check in standby MDMs + standby_mdm = self.find_mdm_in_standby(mdm_name=mdm_name, + mdm_id=mdm_id, + cluster_details=cluster_details, + name_or_id=name_or_id) + if standby_mdm is not None: + return standby_mdm + + LOG.info("MDM %s does not exists in MDM Cluster.", name_or_id) + return None + + def get_mdm_cluster_details(self): + """Get MDM cluster details + :return: Details of MDM Cluster if existed. + """ + + try: + mdm_cluster_details = self.powerflex_conn.system.\ + get_mdm_cluster_details() + + if len(mdm_cluster_details) == 0: + msg = "MDM cluster not found" + LOG.error(msg) + self.module.fail_json(msg=msg) + + # Append Performance profile + resp = self.get_system_details() + if resp is not None: + mdm_cluster_details['perfProfile'] = resp['perfProfile'] + + return mdm_cluster_details + + except Exception as e: + error_msg = "Failed to get the MDM cluster with error {0}." 
+ error_msg = error_msg.format(str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def check_ip_in_secondarys(self, standby_ip, cluster_details): + """whether standby IPs present in secondary MDMs""" + + for secondary_mdm in cluster_details['slaves']: + current_secondary_ips = secondary_mdm['ips'] + for ips in standby_ip: + if ips in current_secondary_ips: + LOG.info(self.exist_msg) + return False + return True + + def check_ip_in_tbs(self, standby_ip, cluster_details): + """whether standby IPs present in tie-breaker MDMs""" + + for tb_mdm in cluster_details['tieBreakers']: + current_tb_ips = tb_mdm['ips'] + for ips in standby_ip: + if ips in current_tb_ips: + LOG.info(self.exist_msg) + return False + return True + + def check_ip_in_standby(self, standby_ip, cluster_details): + """whether standby IPs present in standby MDMs""" + + if 'standbyMDMs' in cluster_details: + for stb_mdm in cluster_details['tieBreakers']: + current_stb_ips = stb_mdm['ips'] + for ips in standby_ip: + if ips in current_stb_ips: + LOG.info(self.exist_msg) + return False + return True + + def check_mdm_exists(self, standby_ip=None, cluster_details=None): + """Check whether standby MDM exists in MDM Cluster""" + + # check in master node + current_master_ips = cluster_details['master']['ips'] + for ips in standby_ip: + if ips in current_master_ips: + LOG.info(self.exist_msg) + return False + + # check in secondary nodes + in_secondary = self.check_ip_in_secondarys(standby_ip=standby_ip, + cluster_details=cluster_details) + if not in_secondary: + return False + + # check in tie-breaker nodes + in_tbs = self.check_ip_in_tbs(standby_ip=standby_ip, + cluster_details=cluster_details) + if not in_tbs: + return False + + # check in Standby nodes + in_standby = self.check_ip_in_standby(standby_ip=standby_ip, + cluster_details=cluster_details) + if not in_standby: + return False + + LOG.info("New Standby MDM does not exists in MDM cluster") + return True + + def 
get_system_details(self): + """Get system details + :return: Details of PowerFlex system + """ + + try: + resp = self.powerflex_conn.system.get() + if len(resp) == 0: + self.module.fail_json(msg="No system exist on the given " + "host.") + if len(resp) > 1: + self.module.fail_json(msg="Multiple systems exist on the " + "given host.") + return resp[0] + except Exception as e: + msg = "Failed to get system id with error %s" % str(e) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def validate_parameters(self): + """Validate the input parameters""" + + name_params = ['mdm_name', 'mdm_id', 'mdm_new_name'] + msg = "Please provide the valid {0}" + + for n_item in name_params: + if self.module.params[n_item] is not None and \ + (len(self.module.params[n_item].strip()) or + self.module.params[n_item].count(" ") > 0) == 0: + err_msg = msg.format(n_item) + self.module.fail_json(msg=err_msg) + + def perform_module_operation(self): + """ + Perform different actions on MDM cluster based on parameters passed in + the playbook + """ + mdm_name = self.module.params['mdm_name'] + mdm_id = self.module.params['mdm_id'] + mdm_new_name = self.module.params['mdm_new_name'] + standby_mdm = copy.deepcopy(self.module.params['standby_mdm']) + is_primary = self.module.params['is_primary'] + cluster_mode = self.module.params['cluster_mode'] + mdm = copy.deepcopy(self.module.params['mdm']) + mdm_state = self.module.params['mdm_state'] + virtual_ip_interfaces = self.module.params['virtual_ip_interfaces'] + clear_interfaces = self.module.params['clear_interfaces'] + performance_profile = self.module.params['performance_profile'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and MDM cluster details + changed = False + result = dict( + changed=False, + mdm_cluster_details={} + ) + self.validate_parameters() + + mdm_cluster_details = self.get_mdm_cluster_details() + msg = "Fetched the MDM cluster details {0}".\ + format(str(mdm_cluster_details)) + 
LOG.info(msg) + + standby_changed = False + performance_changed = False + renamed_changed = False + interface_changed = False + remove_changed = False + mode_changed = False + owner_changed = False + + # Add standby MDM + standby_changed, mdm_cluster_details = self.\ + add_standby_mdm(mdm_name, standby_mdm, mdm_cluster_details) + + # Update performance profile + performance_changed = self.\ + set_performance_profile(performance_profile, mdm_cluster_details) + + # Rename MDM + if state == 'present' and mdm_new_name: + renamed_changed = self.rename_mdm(mdm_name, mdm_id, mdm_new_name, + mdm_cluster_details) + + # Change MDM virtual IP interfaces + if state == 'present' and (virtual_ip_interfaces or clear_interfaces): + interface_changed = self.\ + set_mdm_virtual_interface(mdm_id, mdm_name, + virtual_ip_interfaces, + clear_interfaces, + mdm_cluster_details) + # change cluster mode + if state == 'present' and cluster_mode and mdm and mdm_state: + mode_changed = self.change_cluster_mode(cluster_mode, mdm, + mdm_cluster_details) + + # Remove standby MDM + if state == 'absent': + remove_changed = self.remove_standby_mdm(mdm_name, mdm_id, + mdm_cluster_details) + + # change ownership of MDM cluster + if state == 'present' and is_primary: + owner_changed = self.change_ownership(mdm_id, mdm_name, + mdm_cluster_details) + + # Setting Changed Flag + changed = update_change_flag(standby_changed, performance_changed, + renamed_changed, interface_changed, + mode_changed, remove_changed, + owner_changed) + + # Returning the updated MDM cluster details + # Checking whether owner of MDM cluster has changed + if owner_changed: + mdm_cluster_details = {} + else: + mdm_cluster_details = self.get_mdm_cluster_details() + + result['mdm_cluster_details'] = mdm_cluster_details + result['changed'] = changed + self.module.exit_json(**result) + + +def update_change_flag(standby_changed, performance_changed, renamed_changed, + interface_changed, mode_changed, remove_changed, + owner_changed): + 
""" Update the changed flag based on the operation performed in the task""" + + if standby_changed or performance_changed or renamed_changed or \ + interface_changed or mode_changed or remove_changed or \ + owner_changed: + return True + return False + + +def prepare_standby_payload(standby_mdm): + """prepare the payload for add standby MDM""" + payload_dict = {} + for mdm_keys in standby_mdm: + if standby_mdm[mdm_keys]: + payload_dict[mdm_keys] = standby_mdm[mdm_keys] + else: + payload_dict[mdm_keys] = None + return payload_dict + + +def is_modify_mdm_virtual_interface(virtual_ip_interfaces, clear_interfaces, + mdm_details): + """Check if modification in MDM virtual IP interface required.""" + + modify_list = [] + clear = False + existing_interfaces = mdm_details['virtualInterfaces'] + + # Idempotency check for virtual IP interface + if clear_interfaces is None and \ + len(existing_interfaces) == len(virtual_ip_interfaces) and \ + set(existing_interfaces) == set(virtual_ip_interfaces): + LOG.info("No changes required for virtual IP interface.") + return None, False + + # Idempotency check for clear_interfaces + if clear_interfaces and len(mdm_details['virtualInterfaces']) == 0: + LOG.info("No change required for clear interface.") + return None, False + + # clearing all virtual IP interfaces of MDM + elif clear_interfaces and len(mdm_details['virtualInterfaces']) != 0 and \ + virtual_ip_interfaces is None: + LOG.info("Clear all interfaces of the MDM.") + clear = True + return None, clear + + if virtual_ip_interfaces and clear_interfaces is None: + for interface in virtual_ip_interfaces: + modify_list.append(interface) + return modify_list, clear + + +def get_powerflex_mdm_cluster_parameters(): + """This method provide parameter required for the MDM cluster + module on PowerFlex""" + return dict( + mdm_name=dict(), mdm_id=dict(), mdm_new_name=dict(), + virtual_ip_interfaces=dict(type='list', elements='str'), + clear_interfaces=dict(type='bool'), 
is_primary=dict(type='bool'), + standby_mdm=dict(type='dict', options=dict( + mdm_ips=dict(type='list', elements='str', required=True), + role=dict(required=True, choices=['Manager', 'TieBreaker']), + management_ips=dict(type='list', elements='str'), + port=dict(type='int'), allow_multiple_ips=dict(type='bool'), + virtual_interfaces=dict(type='list', elements='str'))), + cluster_mode=dict(choices=['OneNode', 'ThreeNodes', 'FiveNodes']), + mdm=dict(type='list', elements='dict', + options=dict(mdm_id=dict(), mdm_name=dict(), + mdm_type=dict(required=True, + choices=['Secondary', 'TieBreaker']))), + mdm_state=dict(choices=['present-in-cluster', 'absent-in-cluster']), + performance_profile=dict(choices=['Compact', 'HighPerformance']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Perform actions on MDM cluster based on user input from playbook""" + obj = PowerFlexMdmCluster() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py b/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py new file mode 100644 index 00000000..5ffdc6b6 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py @@ -0,0 +1,1122 @@ +#!/usr/bin/python + +# Copyright: (c) 2022, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing Protection Domain on Dell Technologies (Dell) PowerFlex""" +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: protection_domain +version_added: '1.2.0' +short_description: Manage Protection Domain on Dell PowerFlex +description: +- Managing Protection Domain on PowerFlex storage system includes creating, + modifying attributes, deleting and getting details of Protection Domain. 
+author: +- Bhavneet Sharma (@sharmb5) <ansible.team@dell.com> +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + protection_domain_name: + description: + - The name of the protection domain. + - Mandatory for create operation. + - It is unique across the PowerFlex array. + - Mutually exclusive with I(protection_domain_id). + type: str + protection_domain_id: + description: + - The ID of the protection domain. + - Except for create operation, all other operations can be performed + using protection_domain_id. + - Mutually exclusive with I(protection_domain_name). + type: str + protection_domain_new_name: + description: + - Used to rename the protection domain. + type: str + is_active: + description: + - Used to activate or deactivate the protection domain. + type: bool + network_limits: + description: + - Network bandwidth limit used by all SDS in protection domain. + type: dict + suboptions: + rebuild_limit: + description: + - Limit the network bandwidth for rebuild. + type: int + rebalance_limit: + description: + - Limit the network bandwidth for rebalance. + type: int + vtree_migration_limit: + description: + - Limit the network bandwidth for vtree migration. + type: int + overall_limit: + description: + - Limit the overall network bandwidth. + type: int + bandwidth_unit: + description: + - Unit for network bandwidth limits. + type: str + choices: ['KBps', 'MBps', 'GBps'] + default: 'KBps' + rf_cache_limits: + description: + - Used to set the RFcache parameters of the protection domain. + type: dict + suboptions: + is_enabled: + description: + - Used to enable or disable RFcache in the protection domain. + type: bool + page_size: + description: + - Used to set the cache page size in KB. + type: int + max_io_limit: + description: + - Used to set cache maximum I/O limit in KB. + type: int + pass_through_mode: + description: + - Used to set the cache mode. 
+ choices: ['None', 'Read', 'Write', 'ReadAndWrite', 'WriteMiss'] + type: str + state: + description: + - State of the protection domain. + required: true + type: str + choices: ['present', 'absent'] +notes: + - The protection domain can only be deleted if all its related objects have + been dissociated from the protection domain. + - If the protection domain set to inactive, then no operation can be + performed on protection domain. + - The I(check_mode) is not supported. +''' + +EXAMPLES = r''' +- name: Create protection domain + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + state: "present" + +- name: Create protection domain with all parameters + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + is_active: true + network_limits: + rebuild_limit: 10 + rebalance_limit: 17 + vtree_migration_limit: 14 + overall_limit: 20 + bandwidth_unit: "MBps" + rf_cache_limits: + is_enabled: true + page_size: 16 + max_io_limit: 128 + pass_through_mode: "Read" + state: "present" + +- name: Get protection domain details using name + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + state: "present" + +- name: Get protection domain details using ID + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_id: "5718253c00000004" + state: "present" + +- name: Modify protection domain attributes + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" 
+ username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + protection_domain_new_name: "domain1_new" + network_limits: + rebuild_limit: 14 + rebalance_limit: 20 + overall_limit: 25 + bandwidth_unit: "MBps" + rf_cache_limits: + page_size: 64 + pass_through_mode: "WriteMiss" + state: "present" + +- name: Delete protection domain using name + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1_new" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +protection_domain_details: + description: Details of the protection domain. + returned: When protection domain exists + type: dict + contains: + fglDefaultMetadataCacheSize: + description: FGL metadata cache size. + type: int + fglDefaultNumConcurrentWrites: + description: FGL concurrent writes. + type: str + fglMetadataCacheEnabled: + description: Whether FGL cache enabled. + type: bool + id: + description: Protection domain ID. + type: str + links: + description: Protection domain links. + type: list + contains: + href: + description: Protection domain instance URL. + type: str + rel: + description: Protection domain's relationship with + different entities. + type: str + mdmSdsNetworkDisconnectionsCounterParameters: + description: MDM's SDS counter parameter. + type: dict + contains: + longWindow: + description: Long window for Counter Parameters. + type: int + mediumWindow: + description: Medium window for Counter Parameters. + type: int + shortWindow: + description: Short window for Counter Parameters. + type: int + name: + description: Name of the protection domain. 
+ type: str + overallIoNetworkThrottlingEnabled: + description: Whether overall network throttling enabled. + type: bool + overallIoNetworkThrottlingInKbps: + description: Overall network throttling in KBps. + type: int + protectedMaintenanceModeNetworkThrottlingEnabled: + description: Whether protected maintenance mode network throttling + enabled. + type: bool + protectedMaintenanceModeNetworkThrottlingInKbps: + description: Protected maintenance mode network throttling in + KBps. + type: int + protectionDomainState: + description: State of protection domain. + type: int + rebalanceNetworkThrottlingEnabled: + description: Whether rebalance network throttling enabled. + type: int + rebalanceNetworkThrottlingInKbps: + description: Rebalance network throttling in KBps. + type: int + rebuildNetworkThrottlingEnabled: + description: Whether rebuild network throttling enabled. + type: int + rebuildNetworkThrottlingInKbps: + description: Rebuild network throttling in KBps. + type: int + rfcacheAccpId: + description: Id of RF cache acceleration pool. + type: str + rfcacheEnabled: + description: Whether RF cache is enabled or not. + type: bool + rfcacheMaxIoSizeKb: + description: RF cache maximum I/O size in KB. + type: int + rfcacheOpertionalMode: + description: RF cache operational mode. + type: str + rfcachePageSizeKb: + description: RF cache page size in KB. + type: bool + sdrSdsConnectivityInfo: + description: Connectivity info of SDR and SDS. + type: dict + contains: + clientServerConnStatus: + description: Connectivity status of client and server. + type: str + disconnectedClientId: + description: Disconnected client ID. + type: str + disconnectedClientName: + description: Disconnected client name. + type: str + disconnectedServerId: + description: Disconnected server ID. + type: str + disconnectedServerIp: + description: Disconnected server IP. + type: str + disconnectedServerName: + description: Disconnected server name. 
+ type: str + sdsSdsNetworkDisconnectionsCounterParameters: + description: Counter parameter for SDS-SDS network. + type: dict + contains: + longWindow: + description: Long window for Counter Parameters. + type: int + mediumWindow: + description: Medium window for Counter Parameters. + type: int + shortWindow: + description: Short window for Counter Parameters. + type: int + storagePool: + description: List of storage pools. + type: list + systemId: + description: ID of system. + type: str + vtreeMigrationNetworkThrottlingEnabled: + description: Whether V-Tree migration network throttling enabled. + type: bool + vtreeMigrationNetworkThrottlingInKbps: + description: V-Tree migration network throttling in KBps. + type: int + sample: { + "fglDefaultMetadataCacheSize": 0, + "fglDefaultNumConcurrentWrites": 1000, + "fglMetadataCacheEnabled": false, + "id": "7bd6457000000000", + "links": [ + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000", + "rel": "self" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/Statistics", + "rel": "/api/ProtectionDomain/relationship/Statistics" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/Sdr", + "rel": "/api/ProtectionDomain/relationship/Sdr" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/AccelerationPool", + "rel": "/api/ProtectionDomain/relationship/AccelerationPool" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/StoragePool", + "rel": "/api/ProtectionDomain/relationship/StoragePool" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/Sds", + "rel": "/api/ProtectionDomain/relationship/Sds" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/ReplicationConsistencyGroup", + "rel": "/api/ProtectionDomain/relationship/ + ReplicationConsistencyGroup" + }, + { + "href": 
"/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/FaultSet", + "rel": "/api/ProtectionDomain/relationship/FaultSet" + }, + { + "href": "/api/instances/System::0989ce79058f150f", + "rel": "/api/parent/relationship/systemId" + } + ], + "mdmSdsNetworkDisconnectionsCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "name": "domain1", + "overallIoNetworkThrottlingEnabled": false, + "overallIoNetworkThrottlingInKbps": null, + "protectedMaintenanceModeNetworkThrottlingEnabled": false, + "protectedMaintenanceModeNetworkThrottlingInKbps": null, + "protectionDomainState": "Active", + "rebalanceNetworkThrottlingEnabled": false, + "rebalanceNetworkThrottlingInKbps": null, + "rebuildNetworkThrottlingEnabled": false, + "rebuildNetworkThrottlingInKbps": null, + "rfcacheAccpId": null, + "rfcacheEnabled": true, + "rfcacheMaxIoSizeKb": 128, + "rfcacheOpertionalMode": "WriteMiss", + "rfcachePageSizeKb": 64, + "sdrSdsConnectivityInfo": { + "clientServerConnStatus": "CLIENT_SERVER_CONN_STATUS_ALL + _CONNECTED", + "disconnectedClientId": null, + "disconnectedClientName": null, + "disconnectedServerId": null, + "disconnectedServerIp": null, + "disconnectedServerName": null + }, + "sdsConfigurationFailureCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "sdsDecoupledCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "sdsReceiveBufferAllocationFailuresCounterParameters": { + "longWindow": { + "threshold": 2000000, + 
"windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 200000, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 20000, + "windowSizeInSec": 60 + } + }, + "sdsSdsNetworkDisconnectionsCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "storagePool": [ + { + "id": "8d1cba1700000000", + "name": "pool1" + } + ], + "systemId": "0989ce79058f150f", + "vtreeMigrationNetworkThrottlingEnabled": false, + "vtreeMigrationNetworkThrottlingInKbps": null + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('protection_domain') + + +class PowerFlexProtectionDomain(object): + """Class with protection domain operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_protection_domain_parameters()) + + mut_ex_args = [['protection_domain_name', 'protection_domain_id']] + + required_one_of_args = [['protection_domain_name', + 'protection_domain_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mut_ex_args, + required_one_of=required_one_of_args) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def validate_input_params(self): + """Validate the input parameters""" + + name_params = ['protection_domain_name', 'protection_domain_new_name', + 
'protection_domain_id'] + msg = "Please provide the valid {0}" + + for n_item in name_params: + if self.module.params[n_item] is not None and (len( + self.module.params[n_item].strip()) or self. + module.params[n_item].count(" ") > 0) == 0: + err_msg = msg.format(n_item) + self.module.fail_json(msg=err_msg) + + def is_id_or_new_name_in_create(self): + """Checking if protection domain id or new names present in create """ + + if self.module.params['protection_domain_new_name'] or \ + self.module.params['protection_domain_id']: + error_msg = "protection_domain_new_name/protection_domain_id " \ + "are not supported during creation of protection " \ + "domain. Please try with protection_domain_name." + LOG.info(error_msg) + self.module.fail_json(msg=error_msg) + + def get_storage_pool(self, protection_domain_id): + """ + Get Storage pools details + :param protection_domain_id: Name of the protection domain + :type protection_domain_id: str + :return: list containing storage pools which are present in + protection domain + """ + + try: + sps_list = [] + resp = self.powerflex_conn.protection_domain. 
\ + get_storage_pools(protection_domain_id=protection_domain_id) + for items in resp: + sp_name_id = dict() + sp_name_id['id'] = items['id'] + sp_name_id['name'] = items['name'] + sps_list.append(sp_name_id) + return sps_list + + except Exception as e: + errmsg = "Failed to get the storage pools present in protection" \ + " domain %s with error %s" % (protection_domain_id, str(e)) + LOG.error(errmsg) + self.module.fail_json(msg=errmsg) + + def get_protection_domain(self, protection_domain_name=None, + protection_domain_id=None): + """ + Get protection domain details + :param protection_domain_name: Name of the protection domain + :param protection_domain_id: ID of the protection domain + :return: Protection domain details if exists + :rtype: dict + """ + + name_or_id = protection_domain_id if protection_domain_id \ + else protection_domain_name + + try: + if protection_domain_id: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'id': protection_domain_id}) + + else: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'name': protection_domain_name}) + + if len(pd_details) == 0: + error_msg = "Unable to find the protection domain with " \ + "'%s'." 
% name_or_id + LOG.info(error_msg) + return None + + # Append storage pool list present in protection domain + pd_details[0]['storagePool'] = self.get_storage_pool(pd_details + [0]['id']) + return pd_details[0] + + except Exception as e: + error_msg = "Failed to get the protection domain '%s' with " \ + "error '%s'" % (name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def create_protection_domain(self, protection_domain_name): + """ + Create Protection Domain + :param protection_domain_name: Name of the protection domain + :type protection_domain_name: str + :return: Boolean indicating if create operation is successful + """ + # Creation of Protection domain + try: + LOG.info("Creating protection domain with name: %s ", + protection_domain_name) + self.powerflex_conn.protection_domain.\ + create(name=protection_domain_name) + return True + + except Exception as e: + error_msg = "Create protection domain '%s' operation failed" \ + " with error '%s'" % (protection_domain_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def perform_create_operation(self, state, pd_details, + protection_domain_name): + """performing creation of protection domain details""" + + if state == 'present' and not pd_details: + self.is_id_or_new_name_in_create() + create_change = self.\ + create_protection_domain(protection_domain_name) + if create_change: + pd_details = self. \ + get_protection_domain(protection_domain_name) + msg = "Protection domain created successfully, fetched" \ + " protection domain details {0}". 
\ + format(str(pd_details)) + LOG.info(msg) + return create_change, pd_details + + return False, pd_details + + def is_modify_required(self, pd_details, network_limits, rf_cache_limits, + protection_domain_new_name, is_active): + """Check if modification required""" + + if (self.module.params['state'] == 'present') and pd_details and \ + (network_limits is not None or rf_cache_limits is not None + or protection_domain_new_name is not None or is_active is + not None): + return True + + def modify_nw_limits(self, protection_domain_id, nw_modify_dict, + create_flag=False): + """ + Modify Protection domain attributes + :param protection_domain_id: ID of the protection domain + :type protection_domain_id: str + :param nw_modify_dict: Dictionary containing the attributes of + protection domain which are to be updated + :type nw_modify_dict: dict + :param create_flag: Flag to indicate whether modify operation is + followed by create operation or not + :type create_flag: bool + :return: Boolean indicating if the operation is successful + """ + try: + msg = "Dict containing network modify params {0}".\ + format(str(nw_modify_dict)) + LOG.info(msg) + if 'rebuild_limit' in nw_modify_dict or 'rebalance_limit' in \ + nw_modify_dict or 'vtree_migration_limit' in \ + nw_modify_dict or 'overall_limit' in nw_modify_dict: + self.powerflex_conn.protection_domain.network_limits( + protection_domain_id=protection_domain_id, + rebuild_limit=nw_modify_dict['rebuild_limit'], + rebalance_limit=nw_modify_dict['rebalance_limit'], + vtree_migration_limit=nw_modify_dict['vtree_migration_limit'], + overall_limit=nw_modify_dict['overall_limit']) + msg = "The Network limits are updated to {0}, {1}, {2}, " \ + "{3} successfully.". 
\ + format(nw_modify_dict['rebuild_limit'], + nw_modify_dict['rebalance_limit'], + nw_modify_dict['vtree_migration_limit'], + nw_modify_dict['overall_limit']) + LOG.info(msg) + return True + + except Exception as e: + if create_flag: + err_msg = "Create protection domain is successful," \ + " but failed to update the network limits" \ + " {0} with error {1}".format(protection_domain_id, + str(e)) + else: + err_msg = "Failed to update the network limits of " \ + "protection domain {0} with error {1}".\ + format(protection_domain_id, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def modify_rf_limits(self, protection_domain_id, rf_modify_dict, + create_flag): + """ + Modify Protection domain attributes + :param protection_domain_id: ID of the protection domain + :type protection_domain_id: str + :param rf_modify_dict: Dict containing the attributes of rf cache + which are to be updated + :type rf_modify_dict: dict + :param create_flag: Flag to indicate whether modify operation is + followed by create operation or not + :type create_flag: bool + :return: Boolean indicating if the operation is successful + """ + try: + msg = "Dict containing network modify params {0}". \ + format(str(rf_modify_dict)) + LOG.info(msg) + + if 'is_enabled' in rf_modify_dict and \ + rf_modify_dict['is_enabled'] is not None: + self.powerflex_conn.protection_domain.set_rfcache_enabled( + protection_domain_id, rf_modify_dict['is_enabled']) + msg = "The RFcache is enabled to '%s' successfully." 
\ + % rf_modify_dict['is_enabled'] + LOG.info(msg) + + if 'page_size' in rf_modify_dict or 'max_io_limit' in \ + rf_modify_dict or 'pass_through_mode' in rf_modify_dict: + self.powerflex_conn.protection_domain.rfcache_parameters( + protection_domain_id=protection_domain_id, + page_size=rf_modify_dict['page_size'], + max_io_limit=rf_modify_dict['max_io_limit'], + pass_through_mode=rf_modify_dict['pass_through_mode']) + msg = "The RFcache parameters are updated to {0}, {1},{2}.'" \ + .format(rf_modify_dict['page_size'], + rf_modify_dict['max_io_limit'], + rf_modify_dict['pass_through_mode']) + LOG.info(msg) + return True + + except Exception as e: + if create_flag: + err_msg = "Create protection domain is successful," \ + " but failed to update the rf cache limits" \ + " {0} with error {1}".format(protection_domain_id, + str(e)) + else: + err_msg = "Failed to update the rf cache limits of " \ + "protection domain {0} with error {1}". \ + format(protection_domain_id, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def modify_pd_attributes(self, protection_domain_id, modify_dict, + create_flag=False): + """ + Modify Protection domain attributes + :param protection_domain_id: ID of the protection domain + :type protection_domain_id: str + :param modify_dict: Dictionary containing the attributes of + protection domain which are to be updated + :type modify_dict: dict + :param create_flag: Flag to indicate whether modify operation is + followed by create operation or not + :type create_flag: bool + :return: Boolean indicating if the operation is successful + """ + try: + msg = "Dictionary containing attributes which need to be" \ + " updated are '%s'." % (str(modify_dict)) + LOG.info(msg) + + if 'protection_domain_new_name' in modify_dict: + self.powerflex_conn.protection_domain. \ + rename(protection_domain_id, + modify_dict['protection_domain_new_name']) + msg = "The name of the protection domain is updated to " \ + "'%s' successfully." 
% \ + modify_dict['protection_domain_new_name'] + LOG.info(msg) + + if 'is_active' in modify_dict and modify_dict['is_active']: + self.powerflex_conn.protection_domain. \ + activate(protection_domain_id, modify_dict['is_active']) + msg = "The protection domain is activated successfully, by " \ + "setting as is_active: '%s' " % \ + modify_dict['is_active'] + LOG.info(msg) + + if 'is_active' in modify_dict and not modify_dict['is_active']: + self.powerflex_conn.protection_domain. \ + inactivate(protection_domain_id, modify_dict['is_active']) + msg = "The protection domain is inactivated successfully, " \ + "by setting as is_active: '%s' " % \ + modify_dict['is_active'] + LOG.info(msg) + return True + + except Exception as e: + if create_flag: + err_msg = "Create protection domain is successful," \ + " but failed to update the protection domain" \ + " {0} with error {1}".format(protection_domain_id, + str(e)) + else: + err_msg = "Failed to update the protection domain {0}" \ + " with error {1}".format(protection_domain_id, + str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def delete_protection_domain(self, protection_domain_id): + """ + Delete Protection Domain + :param protection_domain_id: ID of the protection domain + :type protection_domain_id: str + :return: Boolean indicating if delete operation is successful + """ + try: + self.powerflex_conn.protection_domain.delete(protection_domain_id) + LOG.info("Protection domain deleted successfully.") + return True + except Exception as e: + error_msg = "Delete protection domain '%s' operation failed" \ + " with error '%s'" % (protection_domain_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def perform_module_operation(self): + """ + Perform different actions on protection domain based on parameters + passed in the playbook + """ + protection_domain_name = self.module.params['protection_domain_name'] + protection_domain_id = self.module.params['protection_domain_id'] + 
protection_domain_new_name = self.module.params[ + 'protection_domain_new_name'] + is_active = self.module.params['is_active'] + network_limits = self.convert_limits_in_kbps( + self.module.params['network_limits']) + rf_cache_limits = self.module.params['rf_cache_limits'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and protection domain + # details + changed = False + result = dict( + changed=False, + protection_domain_details=None + ) + + # Checking invalid value for id, name and rename + self.validate_input_params() + + # get Protection Domain details + pd_details = self.get_protection_domain(protection_domain_name, + protection_domain_id) + + if pd_details: + protection_domain_id = pd_details['id'] + msg = "Fetched the protection domain details with id '%s', name" \ + " '%s'" % (protection_domain_id, protection_domain_name) + LOG.info(msg) + + # create operation + create_changed = False + create_changed, pd_details = self.\ + perform_create_operation(state, pd_details, + protection_domain_name) + + # checking if basic protection domain parameters are modified or not + modify_dict = {} + nw_modify_dict = {} + rf_modify_dict = {} + + if self.is_modify_required(pd_details, network_limits, + rf_cache_limits, + protection_domain_new_name, is_active): + modify_dict = to_modify(pd_details, protection_domain_new_name, + is_active) + nw_modify_dict = to_nw_limit_modify(pd_details, network_limits) + rf_modify_dict = to_rf_limit_modify(pd_details, rf_cache_limits) + msg = "Parameters to be modified are as follows: %s %s, %s" \ + % (str(modify_dict), str(nw_modify_dict), + str(rf_modify_dict)) + LOG.info(msg) + + # modify operation + modify_changed = False + is_nw_limit = all(value is None for value in nw_modify_dict.values()) + is_rf_limit = all(value is None for value in rf_modify_dict.values()) + + if not is_nw_limit and state == 'present': + modify_changed = self.modify_nw_limits(pd_details['id'], + nw_modify_dict, + 
create_changed) + if not is_rf_limit and state == 'present': + modify_changed = self.modify_rf_limits(pd_details['id'], + rf_modify_dict, + create_changed) + if modify_dict and state == 'present': + modify_changed = self. \ + modify_pd_attributes(pd_details['id'], modify_dict, + create_changed) + if modify_changed: + pd_details = self.get_protection_domain( + protection_domain_id=pd_details['id']) + msg = "Protection domain details after modification:" \ + " '%s'" % str(pd_details) + LOG.info(msg) + + # delete operation + delete_changed = False + if state == 'absent' and pd_details: + delete_changed = self.\ + delete_protection_domain(pd_details['id']) + + if create_changed or modify_changed or delete_changed: + changed = True + + # Returning the updated Protection domain details + if state == 'present': + pd_details = self.get_protection_domain( + protection_domain_id=pd_details['id']) + result['protection_domain_details'] = pd_details + result['changed'] = changed + self.module.exit_json(**result) + + def convert_limits_in_kbps(self, network_limits): + """ + Convert the limits into KBps + + :param network_limits: dict containing all Network bandwidth limits + :rtype: converted network limits + """ + limit_params = ['rebuild_limit', 'rebalance_limit', + 'vtree_migration_limit', 'overall_limit'] + modified_limits = dict() + modified_limits['rebuild_limit'] = None + modified_limits['rebalance_limit'] = None + modified_limits['vtree_migration_limit'] = None + modified_limits['overall_limit'] = None + if network_limits is None: + return None + for limits in network_limits: + if network_limits[limits] is not None and limits in limit_params: + if network_limits['bandwidth_unit'] == "GBps": + modified_limits[limits] = \ + network_limits[limits] * 1024 * 1024 + elif network_limits['bandwidth_unit'] == "MBps": + modified_limits[limits] = network_limits[limits] * 1024 + else: + modified_limits[limits] = network_limits[limits] + + return modified_limits + + +def 
def to_nw_limit_modify(pd_details, network_limits):
    """Compute which network bandwidth limits need updating.

    :param pd_details: Details of the protection domain
    :type pd_details: dict
    :param network_limits: Desired network bandwidth limits (in KBps)
    :type network_limits: dict
    :return: dict mapping every limit parameter to its new value, or to
        None when no change is required; empty dict when *network_limits*
        is None
    :rtype: dict
    """
    # Playbook parameter name -> field name in the REST response.
    param_to_field = (
        ('rebuild_limit', 'rebuildNetworkThrottlingInKbps'),
        ('rebalance_limit', 'rebalanceNetworkThrottlingInKbps'),
        ('vtree_migration_limit', 'vtreeMigrationNetworkThrottlingInKbps'),
        ('overall_limit', 'overallIoNetworkThrottlingInKbps'),
    )

    modify_dict = {}
    if network_limits is not None:
        for param, field in param_to_field:
            desired = network_limits[param]
            modify_dict[param] = (
                desired
                if desired is not None and pd_details[field] != desired
                else None)
    return modify_dict
"""This method provides parameters required for the protection domain + module on PowerFlex""" + return dict( + protection_domain_name=dict(), + protection_domain_new_name=dict(), + protection_domain_id=dict(), + is_active=dict(type='bool'), + network_limits=dict( + type='dict', options=dict( + rebuild_limit=dict(type='int'), + rebalance_limit=dict(type='int'), + vtree_migration_limit=dict(type='int'), + overall_limit=dict(type='int'), + bandwidth_unit=dict(choices=['KBps', 'MBps', 'GBps'], + default='KBps') + ) + ), + rf_cache_limits=dict( + type='dict', options=dict( + is_enabled=dict(type='bool'), + page_size=dict(type='int'), + max_io_limit=dict(type='int'), + pass_through_mode=dict(choices=['None', 'Read', 'Write', + 'ReadAndWrite', 'WriteMiss']) + ) + ), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create PowerFlex protection domain object and perform actions on it + based on user input from playbook""" + obj = PowerFlexProtectionDomain() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py b/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py new file mode 100644 index 00000000..2520c143 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py @@ -0,0 +1,907 @@ +#!/usr/bin/python + +# Copyright: (c) 2022, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing replication consistency groups on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: replication_consistency_group +version_added: '1.5.0' +short_description: Manage replication consistency groups on Dell PowerFlex +description: +- Managing 
replication consistency groups on PowerFlex storage system includes + getting details, creating, modifying, creating snapshots, pausing, resuming, freezing, unfreezing, + activating, inactivating and deleting a replication consistency group. +author: +- Trisha Datta (@Trisha-Datta) <ansible.team@dell.com> +- Jennifer John (@Jennifer-John) <ansible.team@dell.com> +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + rcg_name: + description: + - The name of the replication consistency group. + - It is unique across the PowerFlex array. + - Mutually exclusive with I(rcg_id). + type: str + rcg_id: + description: + - The ID of the replication consistency group. + - Mutually exclusive with I(rcg_name). + type: str + create_snapshot: + description: + - Whether to create the snapshot of the replication consistency group. + type: bool + rpo: + description: + - Desired RPO in seconds. + type: int + protection_domain_id: + description: + - Protection domain id. + - Mutually exclusive with I(protection_domain_name). + type: str + protection_domain_name: + description: + - Protection domain name. + - Mutually exclusive with I(protection_domain_id). + type: str + activity_mode: + description: + - Activity mode of RCG. + - This parameter is supported for version 3.6 and above. + choices: ['Active', 'Inactive'] + type: str + pause: + description: + - Pause or resume the RCG. + type: bool + freeze: + description: + - Freeze or unfreeze the RCG. + type: bool + pause_mode: + description: + - Pause mode. + - It is required if pause is set as True. + choices: ['StopDataTransfer', 'OnlyTrackChanges'] + type: str + target_volume_access_mode: + description: + - Target volume access mode. + choices: ['ReadOnly', 'NoAccess'] + type: str + is_consistent: + description: + - Consistency of RCG. + type: bool + new_rcg_name: + description: + - Name of RCG to rename to. + type: str + remote_peer: + description: + - Remote peer system. 
+ type: dict + suboptions: + hostname: + required: true + description: + - IP or FQDN of the remote peer gateway host. + type: str + aliases: + - gateway_host + username: + type: str + required: true + description: + - The username of the remote peer gateway host. + password: + type: str + required: true + description: + - The password of the remote peer gateway host. + validate_certs: + type: bool + default: true + aliases: + - verifycert + description: + - Boolean variable to specify whether or not to validate SSL + certificate. + - C(true) - Indicates that the SSL certificate should be verified. + - C(false) - Indicates that the SSL certificate should not be verified. + port: + description: + - Port number through which communication happens with remote peer + gateway host. + type: int + default: 443 + timeout: + description: + - Time after which connection will get terminated. + - It is to be mentioned in seconds. + type: int + default: 120 + protection_domain_id: + description: + - Remote protection domain id. + - Mutually exclusive with I(protection_domain_name). + type: str + protection_domain_name: + description: + - Remote protection domain name. + - Mutually exclusive with I(protection_domain_id). + type: str + state: + description: + - State of the replication consistency group. + choices: ['present', 'absent'] + default: present + type: str +notes: +- The I(check_mode) is supported. +- Idempotency is not supported for create snapshot operation. +- There is a delay in reflection of final state of RCG after few update operations on RCG. +- In 3.6 and above, the replication consistency group will return back to consistent mode on changing to inconsistent mode + if consistence barrier arrives. Hence idempotency on setting to inconsistent mode will return changed as True. 
+''' + +EXAMPLES = r''' + +- name: Get RCG details + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "{{rcg_name}}" + +- name: Create a snapshot of the RCG + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_id: "{{rcg_id}}" + create_snapshot: True + state: "present" + +- name: Create a replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + rpo: 60 + protection_domain_name: "domain1" + activity_mode: "Active" + remote_peer: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + +- name: Modify replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + rpo: 60 + target_volume_access_mode: "ReadOnly" + activity_mode: "Inactive" + is_consistent: True + +- name: Rename replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + new_rcg_name: "rcg_test_rename" + +- name: Pause replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: 
"rcg_test" + action: "pause" + pause_mode: "StopDataTransfer" + +- name: Resume replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "resume" + +- name: Freeze replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "freeze" + +- name: UnFreeze replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "unfreeze" + +- name: Delete replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +replication_consistency_group_details: + description: Details of the replication consistency group. + returned: When replication consistency group exists + type: dict + contains: + id: + description: The ID of the replication consistency group. + type: str + name: + description: The name of the replication consistency group. + type: str + protectionDomainId: + description: The Protection Domain ID of the replication consistency group. + type: str + peerMdmId: + description: The ID of the peer MDM of the replication consistency group. + type: str + remoteId: + description: The ID of the remote replication consistency group. 
+ type: str + remoteMdmId: + description: The ID of the remote MDM of the replication consistency group. + type: str + currConsistMode: + description: The current consistency mode of the replication consistency group. + type: str + freezeState: + description: The freeze state of the replication consistency group. + type: str + lifetimeState: + description: The Lifetime state of the replication consistency group. + type: str + pauseMode: + description: The pause mode of the replication consistency group. + type: str + snapCreationInProgress: + description: Whether the process of snapshot creation of the replication consistency group is in progress or not. + type: bool + lastSnapGroupId: + description: ID of the last snapshot of the replication consistency group. + type: str + lastSnapCreationRc: + description: The return code of the last snapshot of the replication consistency group. + type: str + targetVolumeAccessMode: + description: The access mode of the target volume of the replication consistency group. + type: str + remoteProtectionDomainId: + description: The ID of the remote Protection Domain. + type: str + remoteProtectionDomainName: + description: The Name of the remote Protection Domain. + type: str + failoverType: + description: The type of failover of the replication consistency group. + type: str + failoverState: + description: The state of failover of the replication consistency group. + type: str + activeLocal: + description: Whether the local replication consistency group is active. + type: bool + activeRemote: + description: Whether the remote replication consistency group is active. + type: bool + abstractState: + description: The abstract state of the replication consistency group. + type: str + localActivityState: + description: The state of activity of the local replication consistency group. + type: str + remoteActivityState: + description: The state of activity of the remote replication consistency group. 
+ type: str + inactiveReason: + description: The reason for the inactivity of the replication consistency group. + type: int + rpoInSeconds: + description: The RPO value of the replication consistency group in seconds. + type: int + replicationDirection: + description: The direction of the replication of the replication consistency group. + type: str + disasterRecoveryState: + description: The state of disaster recovery of the local replication consistency group. + type: str + remoteDisasterRecoveryState: + description: The state of disaster recovery of the remote replication consistency group. + type: str + error: + description: The error code of the replication consistency group. + type: int + type: + description: The type of the replication consistency group. + type: str + sample: { + "protectionDomainId": "b969400500000000", + "peerMdmId": "6c3d94f600000000", + "remoteId": "2130961a00000000", + "remoteMdmId": "0e7a082862fedf0f", + "currConsistMode": "Consistent", + "freezeState": "Unfrozen", + "lifetimeState": "Normal", + "pauseMode": "None", + "snapCreationInProgress": false, + "lastSnapGroupId": "e58280b300000001", + "lastSnapCreationRc": "SUCCESS", + "targetVolumeAccessMode": "NoAccess", + "remoteProtectionDomainId": "4eeb304600000000", + "remoteProtectionDomainName": "domain1", + "failoverType": "None", + "failoverState": "None", + "activeLocal": true, + "activeRemote": true, + "abstractState": "Ok", + "localActivityState": "Active", + "remoteActivityState": "Active", + "inactiveReason": 11, + "rpoInSeconds": 30, + "replicationDirection": "LocalToRemote", + "disasterRecoveryState": "None", + "remoteDisasterRecoveryState": "None", + "error": 65, + "name": "test_rcg", + "type": "User", + "id": "aadc17d500000000" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('replication_consistency_group') + + +class 
PowerFlexReplicationConsistencyGroup(object): + """Class with replication consistency group operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_replication_consistency_group_parameters()) + + mut_ex_args = [['rcg_name', 'rcg_id'], ['protection_domain_id', 'protection_domain_name']] + + required_one_of_args = [['rcg_name', 'rcg_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=True, + mutually_exclusive=mut_ex_args, + required_one_of=required_one_of_args) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def get_rcg(self, rcg_name=None, rcg_id=None): + """Get rcg details + :param rcg_name: Name of the rcg + :param rcg_id: ID of the rcg + :return: RCG details + """ + name_or_id = rcg_id if rcg_id else rcg_name + try: + rcg_details = None + if rcg_id: + rcg_details = self.powerflex_conn.replication_consistency_group.get( + filter_fields={'id': rcg_id}) + + if rcg_name: + rcg_details = self.powerflex_conn.replication_consistency_group.get( + filter_fields={'name': rcg_name}) + + if rcg_details: + rcg_details[0]['statistics'] = \ + self.powerflex_conn.replication_consistency_group.get_statistics(rcg_details[0]['id']) + rcg_details[0].pop('links', None) + self.append_protection_domain_name(rcg_details[0]) + return rcg_details[0] + + except Exception as e: + errormsg = "Failed to get the replication consistency group {0} with" \ + " error {1}".format(name_or_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def create_rcg_snapshot(self, rcg_id): + """Create RCG snapshot + 
:param rcg_id: Unique identifier of the RCG. + :return: Boolean indicating if create snapshot operation is successful + """ + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.create_snapshot( + rcg_id=rcg_id) + return True + + except Exception as e: + errormsg = "Create RCG snapshot for RCG with id {0} operation failed with " \ + "error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def create_rcg(self, rcg_params): + """Create RCG""" + try: + resp = None + # Get remote system details + self.remote_powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params['remote_peer']) + LOG.info("Got the remote peer connection object instance") + protection_domain_id = rcg_params['protection_domain_id'] + if rcg_params['protection_domain_name']: + protection_domain_id = \ + self.get_protection_domain(self.powerflex_conn, rcg_params['protection_domain_name'])['id'] + + remote_protection_domain_id = rcg_params['remote_peer']['protection_domain_id'] + if rcg_params['remote_peer']['protection_domain_name']: + remote_protection_domain_id = \ + self.get_protection_domain(self.remote_powerflex_conn, + rcg_params['remote_peer']['protection_domain_name'])['id'] + + if not self.module.check_mode: + resp = self.powerflex_conn.replication_consistency_group.create( + rpo=rcg_params['rpo'], + protection_domain_id=protection_domain_id, + remote_protection_domain_id=remote_protection_domain_id, + destination_system_id=self.remote_powerflex_conn.system.get()[0]['id'], + name=rcg_params['rcg_name'], + activity_mode=rcg_params['activity_mode']) + return True, resp + + except Exception as e: + errormsg = "Create replication consistency group failed with error {0}".format(str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_rpo(self, rcg_id, rpo): + """Modify rpo + :param rcg_id: Unique identifier of the RCG. 
+ :param rpo: rpo value in seconds + :return: Boolean indicates if modify rpo is successful + """ + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.modify_rpo( + rcg_id, rpo) + return True + + except Exception as e: + errormsg = "Modify rpo for replication consistency group {0} failed with " \ + "error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_target_volume_access_mode(self, rcg_id, target_volume_access_mode): + """Modify target volume access mode + :param rcg_id: Unique identifier of the RCG. + :param target_volume_access_mode: Target volume access mode. + :return: Boolean indicates if modify operation is successful + """ + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.modify_target_volume_access_mode( + rcg_id, target_volume_access_mode) + return True + + except Exception as e: + errormsg = "Modify target volume access mode for replication consistency group {0} failed with " \ + "error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_activity_mode(self, rcg_id, rcg_details, activity_mode): + """Modify activity mode + :param rcg_id: Unique identifier of the RCG. + :param rcg_details: RCG details. + :param activity_mode: RCG activity mode. 
+ :return: Boolean indicates if modify operation is successful + """ + try: + if activity_mode == 'Active' and rcg_details['localActivityState'].lower() == 'inactive': + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.activate(rcg_id) + return True + elif activity_mode == 'Inactive' and rcg_details['localActivityState'].lower() == 'active': + if not self.module.check_mode: + rcg_details = self.powerflex_conn.replication_consistency_group.inactivate(rcg_id) + return True + except Exception as e: + errormsg = "Modify activity_mode for replication consistency group {0} failed with " \ + "error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def pause_or_resume_rcg(self, rcg_id, rcg_details, pause, pause_mode=None): + """Perform specified rcg action + :param rcg_id: Unique identifier of the RCG. + :param rcg_details: RCG details. + :param pause: Pause or resume RCG. + :param pause_mode: Specifies the pause mode if pause is True. + :return: Boolean indicates if rcg action is successful + """ + if pause and rcg_details['pauseMode'] == 'None': + if not pause_mode: + self.module.fail_json(msg="Specify pause_mode to perform pause on replication consistency group.") + return self.pause(rcg_id, pause_mode) + + if not pause and rcg_details['pauseMode'] != 'None': + return self.resume(rcg_id) + + def freeze_or_unfreeze_rcg(self, rcg_id, rcg_details, freeze): + """Perform specified rcg action + :param rcg_id: Unique identifier of the RCG. + :param rcg_details: RCG details. + :param freeze: Freeze or unfreeze RCG. 
+ :return: Boolean indicates if rcg action is successful + """ + if freeze and rcg_details['freezeState'].lower() == 'unfrozen': + return self.freeze(rcg_id) + + if not freeze and rcg_details['freezeState'].lower() == 'frozen': + return self.unfreeze(rcg_id) + + def freeze(self, rcg_id): + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.freeze(rcg_id) + return True + except Exception as e: + errormsg = "Freeze replication consistency group {0} failed with error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def unfreeze(self, rcg_id): + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.unfreeze(rcg_id) + return True + except Exception as e: + errormsg = "Unfreeze replication consistency group {0} failed with error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def pause(self, rcg_id, pause_mode): + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.pause(rcg_id, pause_mode) + return True + except Exception as e: + errormsg = "Pause replication consistency group {0} failed with error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def resume(self, rcg_id): + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.resume(rcg_id) + return True + except Exception as e: + errormsg = "Resume replication consistency group {0} failed with error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def set_consistency(self, rcg_id, rcg_details, is_consistent): + """Set rcg to specified mode + :param rcg_id: Unique identifier of the RCG. + :param rcg_details: RCG details. + :param is_consistent: RCG consistency. 
+ :return: Boolean indicates if set consistency is successful + """ + try: + if is_consistent and rcg_details['currConsistMode'].lower() not in ('consistent', 'consistentpending'): + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.set_as_consistent(rcg_id) + return True + elif not is_consistent and rcg_details['currConsistMode'].lower() not in ('inconsistent', 'inconsistentpending'): + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.set_as_inconsistent(rcg_id) + return True + except Exception as e: + errormsg = "Modifying consistency of replication consistency group failed with error {0}".format(str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def rename_rcg(self, rcg_id, rcg_details, new_name): + """Rename rcg + :param rcg_id: Unique identifier of the RCG. + :param rcg_details: RCG details + :param new_name: RCG name to rename to. + :return: Boolean indicates if rename is successful + """ + try: + if rcg_details['name'] != new_name: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.rename_rcg(rcg_id, new_name) + return True + except Exception as e: + errormsg = "Renaming replication consistency group to {0} failed with error {1}".format(new_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def delete_rcg(self, rcg_id): + """Delete RCG + :param rcg_id: Unique identifier of the RCG. 
+ :return: Boolean indicates if delete rcg operation is successful + """ + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.delete( + rcg_id=rcg_id) + return True + + except Exception as e: + errormsg = "Delete replication consistency group {0} failed with " \ + "error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_protection_domain(self, conn, protection_domain_name=None, protection_domain_id=None): + """ + Get protection domain details + :param conn: local or remote connection + :param protection_domain_name: Name of the protection domain + :param protection_domain_id: ID of the protection domain + :return: Protection domain id if exists + :rtype: str + """ + name_or_id = protection_domain_id if protection_domain_id \ + else protection_domain_name + try: + pd_details = [] + if protection_domain_id: + pd_details = conn.protection_domain.get( + filter_fields={'id': protection_domain_id}) + + if protection_domain_name: + pd_details = conn.protection_domain.get( + filter_fields={'name': protection_domain_name}) + + if len(pd_details) == 0: + error_msg = "Unable to find the protection domain with " \ + "'%s'." 
% name_or_id + self.module.fail_json(msg=error_msg) + + return pd_details[0] + except Exception as e: + error_msg = "Failed to get the protection domain '%s' with " \ + "error '%s'" % (name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def validate_create(self, rcg_params): + """Validate create RCG params""" + params = ['create_snapshot', 'new_rcg_name'] + for param in params: + if rcg_params[param] is not None: + self.module.fail_json(msg="%s cannot be specified while creating replication consistency group" % param) + if not rcg_params['rpo']: + self.module.fail_json(msg='Enter rpo to create replication consistency group') + if not rcg_params['remote_peer']: + self.module.fail_json(msg='Enter remote_peer to create replication consistency group') + if not rcg_params['protection_domain_id'] and not rcg_params['protection_domain_name']: + self.module.fail_json(msg='Enter protection_domain_name or protection_domain_id to create replication consistency group') + if (not rcg_params['remote_peer']['protection_domain_id'] and not rcg_params['remote_peer']['protection_domain_name']) or \ + (rcg_params['remote_peer']['protection_domain_id'] is not None and + rcg_params['remote_peer']['protection_domain_name'] is not None): + self.module.fail_json(msg='Enter remote protection_domain_name or protection_domain_id to create replication consistency group') + + def modify_rcg(self, rcg_id, rcg_details): + create_snapshot = self.module.params['create_snapshot'] + rpo = self.module.params['rpo'] + target_volume_access_mode = self.module.params['target_volume_access_mode'] + pause = self.module.params['pause'] + freeze = self.module.params['freeze'] + is_consistent = self.module.params['is_consistent'] + activity_mode = self.module.params['activity_mode'] + new_rcg_name = self.module.params['new_rcg_name'] + changed = False + + if create_snapshot is True: + changed = self.create_rcg_snapshot(rcg_id) + if rpo and rcg_details['rpoInSeconds'] and \ + 
rpo != rcg_details['rpoInSeconds']: + changed = self.modify_rpo(rcg_id, rpo) + if target_volume_access_mode and \ + rcg_details['targetVolumeAccessMode'] != target_volume_access_mode: + changed = \ + self.modify_target_volume_access_mode(rcg_id, target_volume_access_mode) + if activity_mode and \ + self.modify_activity_mode(rcg_id, rcg_details, activity_mode): + changed = True + rcg_details = self.get_rcg(rcg_id=rcg_details['id']) + if pause is not None and \ + self.pause_or_resume_rcg(rcg_id, rcg_details, pause, self.module.params['pause_mode']): + changed = True + if freeze is not None and \ + self.freeze_or_unfreeze_rcg(rcg_id, rcg_details, freeze): + changed = True + if is_consistent is not None and \ + self.set_consistency(rcg_id, rcg_details, is_consistent): + changed = True + if new_rcg_name and self.rename_rcg(rcg_id, rcg_details, new_rcg_name): + changed = True + + return changed + + def validate_input(self, rcg_params): + try: + api_version = self.powerflex_conn.system.get()[0]['mdmCluster']['master']['versionInfo'] + if rcg_params['activity_mode'] is not None and utils.is_version_less_than_3_6(api_version): + self.module.fail_json(msg='activity_mode is supported only from version 3.6 and above') + params = ['rcg_name', 'new_rcg_name'] + for param in params: + if rcg_params[param] and utils.is_invalid_name(rcg_params[param]): + self.module.fail_json(msg='Enter a valid %s' % param) + if rcg_params['pause_mode'] and rcg_params['pause'] is None: + self.module.fail_json(msg='Specify pause as True to pause replication consistency group') + except Exception as e: + error_msg = "Validating input parameters failed with " \ + "error '%s'" % (str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def append_protection_domain_name(self, rcg_details): + try: + # Append protection domain name + if 'protectionDomainId' in rcg_details \ + and rcg_details['protectionDomainId']: + pd_details = self.get_protection_domain( + conn=self.powerflex_conn, + 
protection_domain_id=rcg_details['protectionDomainId']) + rcg_details['protectionDomainName'] = pd_details['name'] + except Exception as e: + error_msg = "Updating replication consistency group details with protection domain name failed with " \ + "error '%s'" % (str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def perform_module_operation(self): + """ + Perform different actions on replication consistency group based on parameters passed in + the playbook + """ + self.validate_input(self.module.params) + rcg_name = self.module.params['rcg_name'] + new_rcg_name = self.module.params['new_rcg_name'] + rcg_id = self.module.params['rcg_id'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and RCG details + changed = False + result = dict( + changed=False, + replication_consistency_group_details=[] + ) + # get RCG details + rcg_details = self.get_rcg(rcg_name, rcg_id) + if rcg_details: + result['replication_consistency_group_details'] = rcg_details + rcg_id = rcg_details['id'] + msg = "Fetched the RCG details {0}".format(str(rcg_details)) + LOG.info(msg) + + # perform create + if state == "present": + if not rcg_details: + self.validate_create(self.module.params) + changed, rcg_details = self.create_rcg(self.module.params) + if rcg_details: + rcg_id = rcg_details['id'] + + if rcg_details and self.modify_rcg(rcg_id, rcg_details): + changed = True + + if state == "absent" and rcg_details: + changed = self.delete_rcg(rcg_id=rcg_details['id']) + + # Returning the RCG details + if changed: + result['replication_consistency_group_details'] = \ + self.get_rcg(new_rcg_name or rcg_name, rcg_id) + result['changed'] = changed + self.module.exit_json(**result) + + +def get_powerflex_replication_consistency_group_parameters(): + """This method provide parameter required for the replication_consistency_group + module on PowerFlex""" + return dict( + rcg_name=dict(), rcg_id=dict(), + create_snapshot=dict(type='bool'), + 
rpo=dict(type='int'), protection_domain_id=dict(), + protection_domain_name=dict(), new_rcg_name=dict(), + activity_mode=dict(choices=['Active', 'Inactive']), + pause=dict(type='bool'), freeze=dict(type='bool'), + pause_mode=dict(choices=['StopDataTransfer', 'OnlyTrackChanges']), + target_volume_access_mode=dict(choices=['ReadOnly', 'NoAccess']), + is_consistent=dict(type='bool'), + remote_peer=dict(type='dict', + options=dict(hostname=dict(type='str', aliases=['gateway_host'], required=True), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', aliases=['verifycert'], default=True), + port=dict(type='int', default=443), + timeout=dict(type='int', default=120), + protection_domain_id=dict(), + protection_domain_name=dict())), + state=dict(default='present', type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create PowerFlex Replication Consistency Group object and perform actions on it + based on user input from playbook""" + obj = PowerFlexReplicationConsistencyGroup() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py b/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py new file mode 100644 index 00000000..a2f05a31 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py @@ -0,0 +1,365 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing SDCs on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: sdc +version_added: '1.0.0' +short_description: Manage SDCs on Dell PowerFlex +description: +- Managing SDCs on PowerFlex storage system includes getting details of SDC + and renaming 
SDC. + +author: +- Akash Shendge (@shenda1) <ansible.team@dell.com> + +extends_documentation_fragment: + - dellemc.powerflex.powerflex + +options: + sdc_name: + description: + - Name of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip) for get/rename operation. + - Mutually exclusive with I(sdc_id) and I(sdc_ip). + type: str + sdc_id: + description: + - ID of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip) for get/rename operation. + - Mutually exclusive with I(sdc_name) and I(sdc_ip). + type: str + sdc_ip: + description: + - IP of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip) for get/rename operation. + - Mutually exclusive with I(sdc_id) and I(sdc_name). + type: str + sdc_new_name: + description: + - New name of the SDC. Used to rename the SDC. + type: str + state: + description: + - State of the SDC. + choices: ['present', 'absent'] + required: true + type: str +notes: + - The I(check_mode) is not supported. +''' + +EXAMPLES = r''' +- name: Get SDC details using SDC ip + dellemc.powerflex.sdc: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + sdc_ip: "{{sdc_ip}}" + state: "present" + +- name: Rename SDC using SDC name + dellemc.powerflex.sdc: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + sdc_name: "centos_sdc" + sdc_new_name: "centos_sdc_renamed" + state: "present" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' + +sdc_details: + description: Details of the SDC. + returned: When SDC exists + type: dict + contains: + id: + description: The ID of the SDC. + type: str + name: + description: Name of the SDC. + type: str + sdcIp: + description: IP of the SDC. + type: str + osType: + description: OS type of the SDC. 
+ type: str + mapped_volumes: + description: The details of the mapped volumes. + type: list + contains: + id: + description: The ID of the volume. + type: str + name: + description: The name of the volume. + type: str + volumeType: + description: Type of the volume. + type: str + sdcApproved: + description: Indicates whether an SDC has approved access to the + system. + type: bool + sample: { + "id": "07335d3d00000006", + "installedSoftwareVersionInfo": "R3_6.0.0", + "kernelBuildNumber": null, + "kernelVersion": "3.10.0", + "links": [ + { + "href": "/api/instances/Sdc::07335d3d00000006", + "rel": "self" + }, + { + "href": "/api/instances/Sdc::07335d3d00000006/relationships/ + Statistics", + "rel": "/api/Sdc/relationship/Statistics" + }, + { + "href": "/api/instances/Sdc::07335d3d00000006/relationships/ + Volume", + "rel": "/api/Sdc/relationship/Volume" + }, + { + "href": "/api/instances/System::4a54a8ba6df0690f", + "rel": "/api/parent/relationship/systemId" + } + ], + "mapped_volumes": [], + "mdmConnectionState": "Disconnected", + "memoryAllocationFailure": null, + "name": "LGLAP203", + "osType": "Linux", + "peerMdmId": null, + "perfProfile": "HighPerformance", + "sdcApproved": true, + "sdcApprovedIps": null, + "sdcGuid": "F8ECB844-23B8-4629-92BB-B6E49A1744CB", + "sdcIp": "N/A", + "sdcIps": null, + "sdcType": "AppSdc", + "sdrId": null, + "socketAllocationFailure": null, + "softwareVersionInfo": "R3_6.0.0", + "systemId": "4a54a8ba6df0690f", + "versionInfo": "R3_6.0.0" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ + import utils + +LOG = utils.get_logger('sdc') + + +class PowerFlexSdc(object): + """Class with SDC operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_sdc_parameters()) + + mutually_exclusive = [['sdc_id', 
'sdc_ip', 'sdc_name']] + + required_one_of = [['sdc_id', 'sdc_ip', 'sdc_name']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def rename_sdc(self, sdc_id, new_name): + """Rename SDC + :param sdc_id: The ID of the SDC + :param new_name: The new name of the SDC + :return: Boolean indicating if rename operation is successful + """ + + try: + self.powerflex_conn.sdc.rename(sdc_id=sdc_id, name=new_name) + return True + except Exception as e: + errormsg = "Failed to rename SDC %s with error %s" % (sdc_id, + str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_mapped_volumes(self, sdc_id): + """Get volumes mapped to SDC + :param sdc_id: The ID of the SDC + :return: List containing volume details mapped to SDC + """ + + try: + resp = self.powerflex_conn.sdc.get_mapped_volumes(sdc_id=sdc_id) + return resp + except Exception as e: + errormsg = "Failed to get the volumes mapped to SDC %s with " \ + "error %s" % (sdc_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_sdc(self, sdc_name=None, sdc_ip=None, sdc_id=None): + """Get the SDC Details + :param sdc_name: The name of the SDC + :param sdc_ip: The IP of the SDC + :param sdc_id: The ID of the SDC + :return: The dict containing SDC details + """ + + if sdc_name: + id_ip_name = sdc_name + elif sdc_ip: + id_ip_name = sdc_ip + else: + id_ip_name = sdc_id + + try: + if sdc_name: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'name': sdc_name}) + elif sdc_ip: + sdc_details = self.powerflex_conn.sdc.get( + 
filter_fields={'sdcIp': sdc_ip}) + else: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'id': sdc_id}) + + if len(sdc_details) == 0: + error_msg = "Unable to find SDC with identifier %s" \ + % id_ip_name + LOG.error(error_msg) + return None + sdc_details[0]['mapped_volumes'] = self.get_mapped_volumes( + sdc_details[0]['id']) + return sdc_details[0] + except Exception as e: + errormsg = "Failed to get the SDC %s with error %s" % ( + id_ip_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_parameters(self, sdc_name=None, sdc_id=None, sdc_ip=None): + """Validate the input parameters""" + + if all(param is None for param in [sdc_name, sdc_id, sdc_ip]): + self.module.fail_json(msg="Please provide sdc_name/sdc_id/sdc_ip " + "with valid input.") + + sdc_identifiers = ['sdc_name', 'sdc_id', 'sdc_ip'] + for param in sdc_identifiers: + if self.module.params[param] is not None and \ + len(self.module.params[param].strip()) == 0: + error_msg = "Please provide valid %s" % param + self.module.fail_json(msg=error_msg) + + def perform_module_operation(self): + """ + Perform different actions on SDC based on parameters passed in + the playbook + """ + sdc_name = self.module.params['sdc_name'] + sdc_id = self.module.params['sdc_id'] + sdc_ip = self.module.params['sdc_ip'] + sdc_new_name = self.module.params['sdc_new_name'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and SDC details + changed = False + result = dict( + changed=False, + sdc_details={} + ) + + self.validate_parameters(sdc_name, sdc_id, sdc_ip) + + sdc_details = self.get_sdc(sdc_name=sdc_name, sdc_id=sdc_id, + sdc_ip=sdc_ip) + if sdc_name: + id_ip_name = sdc_name + elif sdc_ip: + id_ip_name = sdc_ip + else: + id_ip_name = sdc_id + + if state == 'present' and not sdc_details: + error_msg = 'Could not find any SDC instance with ' \ + 'identifier %s.' 
def get_powerflex_sdc_parameters():
    """Return the argument spec specific to the Ansible SDC module on
    PowerFlex (merged by the caller with the common gateway parameters)."""
    return dict(
        sdc_id=dict(),
        sdc_ip=dict(),
        sdc_name=dict(),
        sdc_new_name=dict(),
        state=dict(required=True, type='str',
                   choices=['present', 'absent'])
    )


def main():
    """Entry point: build the PowerFlex SDC module object and perform the
    operation requested by the playbook."""
    PowerFlexSdc().perform_module_operation()


if __name__ == '__main__':
    main()
storage system includes + creating new SDS, getting details of SDS, adding/removing IP to/from SDS, + modifying attributes of SDS, and deleting SDS. +author: +- Rajshree Khare (@khareRajshree) <ansible.team@dell.com> +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + sds_name: + description: + - The name of the SDS. + - Mandatory for create operation. + - It is unique across the PowerFlex array. + - Mutually exclusive with I(sds_id). + type: str + sds_id: + description: + - The ID of the SDS. + - Except create operation, all other operations can be performed + using I(sds_id). + - Mutually exclusive with I(sds_name). + type: str + protection_domain_name: + description: + - The name of the protection domain. + - Mutually exclusive with I(protection_domain_id). + type: str + protection_domain_id: + description: + - The ID of the protection domain. + - Mutually exclusive with I(protection_domain_name). + type: str + sds_ip_list: + description: + - Dictionary of IPs and their roles for the SDS. + - At least one IP-role is mandatory while creating a SDS. + - IP-roles can be updated as well. + type: list + elements: dict + suboptions: + ip: + description: + - IP address of the SDS. + type: str + required: true + role: + description: + - Role assigned to the SDS IP address. + choices: ['sdsOnly', 'sdcOnly', 'all'] + type: str + required: true + sds_ip_state: + description: + - State of IP with respect to the SDS. + choices: ['present-in-sds', 'absent-in-sds'] + type: str + rfcache_enabled: + description: + - Whether to enable the Read Flash cache. + type: bool + rmcache_enabled: + description: + - Whether to enable the Read RAM cache. + type: bool + rmcache_size: + description: + - Read RAM cache size (in MB). + - Minimum size is 128 MB. + - Maximum size is 3911 MB. + type: int + sds_new_name: + description: + - SDS new name. + type: str + performance_profile: + description: + - Performance profile to apply to the SDS. 
+ - The HighPerformance profile configures a predefined set of parameters + for very high performance use cases. + - Default value by API is C(HighPerformance). + choices: ['Compact', 'HighPerformance'] + type: str + state: + description: + - State of the SDS. + choices: ['present', 'absent'] + required: true + type: str +notes: + - The maximum limit for the IPs that can be associated with an SDS is 8. + - There needs to be at least 1 IP for SDS communication and 1 for SDC + communication. + - If only 1 IP exists, it must be with role 'all'; else 1 IP + can be with role 'all' and other IPs with role 'sdcOnly'; or 1 IP must be + with role 'sdsOnly' and others with role 'sdcOnly'. + - There can be 1 or more IPs with role 'sdcOnly'. + - There must be only 1 IP with SDS role (either with role 'all' or + 'sdsOnly'). + - SDS can be created with RF cache disabled, but be aware that the RF cache + is not always updated. In this case, the user should re-try the operation. + - The I(check_mode) is not supported.
+''' + +EXAMPLES = r''' +- name: Create SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + protection_domain_name: "domain1" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "all" + sds_ip_state: "present-in-sds" + state: "present" + +- name: Create SDS with all parameters + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node1" + protection_domain_name: "domain1" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "present-in-sds" + rmcache_enabled: true + rmcache_size: 128 + performance_profile: "HighPerformance" + state: "present" + +- name: Get SDS details using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + state: "present" + +- name: Get SDS details using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_id: "5718253c00000004" + state: "present" + +- name: Modify SDS attributes using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_new_name: "node0_new" + rfcache_enabled: true + rmcache_enabled: true + rmcache_size: 256 + performance_profile: "HighPerformance" + state: "present" + +- name: Modify SDS attributes using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_id: "5718253c00000004" + sds_new_name: "node0_new" + rfcache_enabled: true + 
rmcache_enabled: true + rmcache_size: 256 + performance_profile: "HighPerformance" + state: "present" + +- name: Add IP and role to an SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "present-in-sds" + state: "present" + +- name: Remove IP and role from an SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "absent-in-sds" + state: "present" + +- name: Delete SDS using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + state: "absent" + +- name: Delete SDS using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_id: "5718253c00000004" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +sds_details: + description: Details of the SDS. + returned: When SDS exists + type: dict + contains: + authenticationError: + description: Indicates authentication error. + type: str + certificateInfo: + description: Information about certificate. + type: str + configuredDrlMode: + description: Configured DRL mode. + type: str + drlMode: + description: DRL mode. + type: str + faultSetId: + description: Fault set ID. + type: str + fglMetadataCacheSize: + description: FGL metadata cache size. + type: int + fglMetadataCacheState: + description: FGL metadata cache state. 
+ type: str + fglNumConcurrentWrites: + description: FGL concurrent writes. + type: int + id: + description: SDS ID. + type: str + ipList: + description: SDS IP list. + type: list + contains: + ip: + description: IP present in the SDS. + type: str + role: + description: Role of the SDS IP. + type: str + lastUpgradeTime: + description: Last time SDS was upgraded. + type: str + links: + description: SDS links. + type: list + contains: + href: + description: SDS instance URL. + type: str + rel: + description: SDS's relationship with different entities. + type: str + maintenanceState: + description: Maintenance state. + type: str + maintenanceType: + description: Maintenance type. + type: str + mdmConnectionState: + description: MDM connection state. + type: str + membershipState: + description: Membership state. + type: str + name: + description: Name of the SDS. + type: str + numOfIoBuffers: + description: Number of IO buffers. + type: int + numRestarts: + description: Number of restarts. + type: int + onVmWare: + description: Presence on VMware. + type: bool + perfProfile: + description: Performance profile. + type: str + port: + description: SDS port. + type: int + protectionDomainId: + description: Protection Domain ID. + type: str + protectionDomainName: + description: Protection Domain Name. + type: str + raidControllers: + description: Number of RAID controllers. + type: int + rfcacheEnabled: + description: Whether RF cache is enabled or not. + type: bool + rfcacheErrorApiVersionMismatch: + description: RF cache error for API version mismatch. + type: bool + rfcacheErrorDeviceDoesNotExist: + description: RF cache error for device does not exist. + type: bool + rfcacheErrorInconsistentCacheConfiguration: + description: RF cache error for inconsistent cache configuration. + type: bool + rfcacheErrorInconsistentSourceConfiguration: + description: RF cache error for inconsistent source configuration. 
+ type: bool + rfcacheErrorInvalidDriverPath: + description: RF cache error for invalid driver path. + type: bool + rfcacheErrorLowResources: + description: RF cache error for low resources. + type: bool + rmcacheEnabled: + description: Whether Read RAM cache is enabled or not. + type: bool + rmcacheFrozen: + description: RM cache frozen. + type: bool + rmcacheMemoryAllocationState: + description: RM cache memory allocation state. + type: bool + rmcacheSizeInKb: + description: RM cache size in KB. + type: int + rmcacheSizeInMb: + description: RM cache size in MB. + type: int + sdsConfigurationFailure: + description: SDS configuration failure. + type: str + sdsDecoupled: + description: SDS decoupled. + type: str + sdsReceiveBufferAllocationFailures: + description: SDS receive buffer allocation failures. + type: str + sdsState: + description: SDS state. + type: str + softwareVersionInfo: + description: SDS software version information. + type: str + sample: { + "authenticationError": "None", + "certificateInfo": null, + "configuredDrlMode": "Volatile", + "drlMode": "Volatile", + "faultSetId": null, + "fglMetadataCacheSize": 0, + "fglMetadataCacheState": "Disabled", + "fglNumConcurrentWrites": 1000, + "id": "8f3bb0cc00000002", + "ipList": [ + { + "ip": "10.47.xxx.xxx", + "role": "all" + } + ], + "lastUpgradeTime": 0, + "links": [ + { + "href": "/api/instances/Sds::8f3bb0cc00000002", + "rel": "self" + }, + { + "href": "/api/instances/Sds::8f3bb0cc00000002/relationships + /Statistics", + "rel": "/api/Sds/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::8f3bb0cc00000002/relationships + /SpSds", + "rel": "/api/Sds/relationship/SpSds" + }, + { + "href": "/api/instances/Sds::8f3bb0cc00000002/relationships + /Device", + "rel": "/api/Sds/relationship/Device" + }, + { + "href": "/api/instances/ProtectionDomain::9300c1f900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "maintenanceState": "NoMaintenance", + "maintenanceType": 
"NoMaintenance", + "mdmConnectionState": "Connected", + "membershipState": "Joined", + "name": "node0", + "numOfIoBuffers": null, + "numRestarts": 2, + "onVmWare": true, + "perfProfile": "HighPerformance", + "port": 7072, + "protectionDomainId": "9300c1f900000000", + "protectionDomainName": "domain1", + "raidControllers": null, + "rfcacheEnabled": true, + "rfcacheErrorApiVersionMismatch": false, + "rfcacheErrorDeviceDoesNotExist": false, + "rfcacheErrorInconsistentCacheConfiguration": false, + "rfcacheErrorInconsistentSourceConfiguration": false, + "rfcacheErrorInvalidDriverPath": false, + "rfcacheErrorLowResources": false, + "rmcacheEnabled": true, + "rmcacheFrozen": false, + "rmcacheMemoryAllocationState": "AllocationPending", + "rmcacheSizeInKb": 131072, + "rmcacheSizeInMb": 128, + "sdsConfigurationFailure": null, + "sdsDecoupled": null, + "sdsReceiveBufferAllocationFailures": null, + "sdsState": "Normal", + "softwareVersionInfo": "R3_6.0.0" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ + import utils +import copy + +LOG = utils.get_logger('sds') + + +class PowerFlexSDS(object): + """Class with SDS operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_sds_parameters()) + + mut_ex_args = [['sds_name', 'sds_id'], + ['protection_domain_name', 'protection_domain_id']] + + required_together_args = [['sds_ip_list', 'sds_ip_state']] + + required_one_of_args = [['sds_name', 'sds_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mut_ex_args, + required_together=required_together_args, + required_one_of=required_one_of_args) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = 
utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def validate_rmcache_size_parameter(self, rmcache_enabled, rmcache_size): + """Validate the input parameters""" + + # RM cache size cannot be set only when RM cache is enabled + if rmcache_size is not None and rmcache_enabled is False: + error_msg = "RM cache size can be set only when RM cache " \ + "is enabled, please enable it along with RM " \ + "cache size." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def validate_ip_parameter(self, sds_ip_list): + """Validate the input parameters""" + + if sds_ip_list is None or len(sds_ip_list) == 0: + error_msg = "Provide valid values for " \ + "sds_ip_list as 'ip' and 'role' for Create/Modify " \ + "operations." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_sds_details(self, sds_name=None, sds_id=None): + """Get SDS details + :param sds_name: Name of the SDS + :type sds_name: str + :param sds_id: ID of the SDS + :type sds_id: str + :return: Details of SDS if it exist + :rtype: dict + """ + + id_or_name = sds_id if sds_id else sds_name + + try: + if sds_name: + sds_details = self.powerflex_conn.sds.get( + filter_fields={'name': sds_name}) + else: + sds_details = self.powerflex_conn.sds.get( + filter_fields={'id': sds_id}) + + if len(sds_details) == 0: + msg = "SDS with identifier '%s' not found" % id_or_name + LOG.info(msg) + return None + + return sds_details[0] + + except Exception as e: + error_msg = "Failed to get the SDS '%s' with error '%s'" \ + % (id_or_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_protection_domain(self, protection_domain_name=None, + protection_domain_id=None): + """Get protection domain details + :param protection_domain_name: Name of the protection domain + :param protection_domain_id: ID of 
the protection domain + :return: Protection domain details + :rtype: dict + """ + name_or_id = protection_domain_id if protection_domain_id \ + else protection_domain_name + try: + pd_details = None + if protection_domain_id: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'id': protection_domain_id}) + + if protection_domain_name: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'name': protection_domain_name}) + + if not pd_details: + error_msg = "Unable to find the protection domain with " \ + "'%s'. Please enter a valid protection domain " \ + "name/id." % name_or_id + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + return pd_details[0] + + except Exception as e: + error_msg = "Failed to get the protection domain '%s' with " \ + "error '%s'" % (name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def restructure_ip_role_dict(self, sds_ip_list): + """Restructure IP role dict + :param sds_ip_list: List of one or more IP addresses and + their roles + :type sds_ip_list: list[dict] + :return: List of one or more IP addresses and their roles + :rtype: list[dict] + """ + new_sds_ip_list = [] + for item in sds_ip_list: + new_sds_ip_list.append({"SdsIp": item}) + return new_sds_ip_list + + def create_sds(self, protection_domain_id, sds_ip_list, sds_ip_state, + sds_name, rmcache_enabled=None, rmcache_size=None): + """Create SDS + :param protection_domain_id: ID of the Protection Domain + :type protection_domain_id: str + :param sds_ip_list: List of one or more IP addresses associated + with the SDS over which the data will be + transferred. 
+ :type sds_ip_list: list[dict] + :param sds_ip_state: SDS IP state + :type sds_ip_state: str + :param sds_name: SDS name + :type sds_name: str + :param rmcache_enabled: Whether to enable the Read RAM cache + :type rmcache_enabled: bool + :param rmcache_size: Read RAM cache size (in MB) + :type rmcache_size: int + :return: Boolean indicating if create operation is successful + """ + try: + if sds_name is None or len(sds_name.strip()) == 0: + error_msg = "Please provide valid sds_name value for " \ + "creation of SDS." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if protection_domain_id is None: + error_msg = "Protection Domain is a mandatory parameter " \ + "for creating a SDS. Please enter a valid value." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if sds_ip_list is None or len(sds_ip_list) == 0: + error_msg = "Please provide valid sds_ip_list values for " \ + "creation of SDS." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if sds_ip_state is not None and sds_ip_state != "present-in-sds": + error_msg = "Incorrect IP state given for creation of SDS." 
+ LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + # Restructure IP-role parameter format + if sds_ip_list and sds_ip_state == "present-in-sds": + sds_ip_list = self.restructure_ip_role_dict(sds_ip_list) + + if rmcache_size is not None: + self.validate_rmcache_size_parameter(rmcache_enabled, + rmcache_size) + # set rmcache size in KB + rmcache_size = rmcache_size * 1024 + + create_params = ("protection_domain_id: %s," + " sds_ip_list: %s," + " sds_name: %s," + " rmcache_enabled: %s, " + " rmcache_size_KB: %s" + % (protection_domain_id, sds_ip_list, + sds_name, rmcache_enabled, rmcache_size)) + LOG.info("Creating SDS with params: %s", create_params) + + self.powerflex_conn.sds.create( + protection_domain_id=protection_domain_id, + sds_ips=sds_ip_list, + name=sds_name, + rmcache_enabled=rmcache_enabled, + rmcache_size_in_kb=rmcache_size) + return True + + except Exception as e: + error_msg = "Create SDS '%s' operation failed with error '%s'" \ + % (sds_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def to_modify(self, sds_details, sds_new_name, rfcache_enabled, + rmcache_enabled, rmcache_size, performance_profile): + """ + :param sds_details: Details of the SDS + :type sds_details: dict + :param sds_new_name: New name of SDS + :type sds_new_name: str + :param rfcache_enabled: Whether to enable the Read Flash cache + :type rfcache_enabled: bool + :param rmcache_enabled: Whether to enable the Read RAM cache + :type rmcache_enabled: bool + :param rmcache_size: Read RAM cache size (in MB) + :type rmcache_size: int + :param performance_profile: Performance profile to apply to the SDS + :type performance_profile: str + :return: Dictionary containing the attributes of SDS which are to be + updated + :rtype: dict + """ + modify_dict = {} + + if sds_new_name is not None: + if len(sds_new_name.strip()) == 0: + error_msg = "Please provide valid SDS name." 
    def modify_sds_attributes(self, sds_id, modify_dict,
                              create_flag=False):
        """Apply the attribute changes collected in modify_dict to an SDS.

        Each supported key triggers one dedicated REST call; the calls are
        issued in a fixed order (name, RF cache, RM cache enable, RM cache
        size, performance profile).

        :param sds_id: SDS ID
        :type sds_id: str
        :param modify_dict: Dictionary containing the attributes of SDS
                            which are to be updated
        :type modify_dict: dict
        :param create_flag: Flag to indicate whether modify operation is
                            followed by create operation or not (only used
                            to pick the error message wording)
        :type create_flag: bool
        :return: True when every requested update succeeded
        """
        try:
            msg = "Dictionary containing attributes which are to be" \
                  " updated is '%s'." % (str(modify_dict))
            LOG.info(msg)

            if 'name' in modify_dict:
                self.powerflex_conn.sds.rename(sds_id, modify_dict['name'])
                msg = "The name of the SDS is updated to '%s' successfully." \
                      % modify_dict['name']
                LOG.info(msg)

            if 'rfcacheEnabled' in modify_dict:
                self.powerflex_conn.sds.set_rfcache_enabled(
                    sds_id, modify_dict['rfcacheEnabled'])
                msg = "The use RFcache is updated to '%s' successfully." \
                      % modify_dict['rfcacheEnabled']
                LOG.info(msg)

            if 'rmcacheEnabled' in modify_dict:
                self.powerflex_conn.sds.set_rmcache_enabled(
                    sds_id, modify_dict['rmcacheEnabled'])
                msg = "The use RMcache is updated to '%s' successfully." \
                      % modify_dict['rmcacheEnabled']
                LOG.info(msg)

            if 'rmcacheSizeInMB' in modify_dict:
                self.powerflex_conn.sds.set_rmcache_size(
                    sds_id, modify_dict['rmcacheSizeInMB'])
                msg = "The size of RMcache is updated to '%s' successfully." \
                      % modify_dict['rmcacheSizeInMB']
                LOG.info(msg)

            if 'perfProfile' in modify_dict:
                self.powerflex_conn.sds.set_performance_parameters(
                    sds_id, modify_dict['perfProfile'])
                msg = "The performance profile is updated to '%s'" \
                      % modify_dict['perfProfile']
                LOG.info(msg)

            return True
        except Exception as e:
            # A partial update is possible: earlier calls in the sequence
            # may already have succeeded when a later one raises.
            if create_flag:
                error_msg = "Create SDS is successful, but failed to update" \
                            " the SDS '%s' with error '%s'"\
                            % (sds_id, str(e))
            else:
                error_msg = "Failed to update the SDS '%s' with error '%s'" \
                            % (sds_id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def identify_ip_role(self, sds_ip_list, sds_details, sds_ip_state):
        """Classify the requested IPs before addition/removal.

        NOTE(review): the return shape is inconsistent across branches —
        "present-in-sds" returns a 2-tuple (ips_to_add, update_role), while
        "absent-in-sds" returns either a bare list of IPs to remove or the
        tuple (False, None). Callers must handle both shapes; verify against
        perform_module_operation before changing this.

        NOTE(review): the "present-in-sds" branch mutates the caller's
        sds_ip_list in place (entries to be added are removed from it) —
        looks intentional, but confirm no caller reuses the original list.

        :param sds_ip_list: List of one or more IP addresses and
                            their roles
        :type sds_ip_list: list[dict]
        :param sds_details: SDS details
        :type sds_details: dict
        :param sds_ip_state: State of IP in SDS
        :type sds_ip_state: str
        :return: See NOTE above (branch-dependent shape)
        """
        existing_ip_role_list = sds_details['ipList']

        # identify IPs to add or roles to update
        if sds_ip_state == "present-in-sds":
            update_role = []
            ips_to_add = []

            # identify IPs to add: any requested IP not already on the SDS
            existing_ip_list = []
            if existing_ip_role_list:
                for ip in existing_ip_role_list:
                    existing_ip_list.append(ip['ip'])
            for given_ip in sds_ip_list:
                ip = given_ip['ip']
                if ip not in existing_ip_list:
                    ips_to_add.append(given_ip)
            LOG.info("IP(s) to be added: %s", ips_to_add)

            # drop the to-be-added entries so only role changes remain
            if len(ips_to_add) != 0:
                for ip in ips_to_add:
                    sds_ip_list.remove(ip)

            # identify IPs whose role needs to be updated: the IP exists but
            # its ip/role pair differs from what is currently configured
            update_role = [ip for ip in sds_ip_list
                           if ip not in existing_ip_role_list]
            LOG.info("Role update needed for: %s", update_role)

            return ips_to_add, update_role

        elif sds_ip_state == "absent-in-sds":
            # identify IPs to remove: exact ip/role pairs present on the SDS
            ips_to_remove = [ip for ip in existing_ip_role_list
                             if ip in sds_ip_list]
            if len(ips_to_remove) != 0:
                LOG.info("IP(s) to remove: %s", ips_to_remove)
                return ips_to_remove
            else:
                LOG.info("IP(s) do not exists.")
                return False, None

    def add_ip(self, sds_id, sds_ip_list):
        """Add one or more IPs to an SDS, one API call per entry.

        :param sds_id: SDS ID
        :type sds_id: str
        :param sds_ip_list: List of one or more IP addresses and
                            their roles
        :type sds_ip_list: list[dict]
        :return: True when every IP was added successfully
        """
        try:
            for ip in sds_ip_list:
                LOG.info("IP to add: %s", ip)
                self.powerflex_conn.sds.add_ip(sds_id=sds_id, sds_ip=ip)
            LOG.info("IP added successfully.")
            return True
        except Exception as e:
            error_msg = "Add IP to SDS '%s' operation failed with " \
                        "error '%s'" % (sds_id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)
ip['role']) + msg = "The role '%s' for IP '%s' is updated " \ + "successfully." % (ip['role'], ip['ip']) + LOG.info(msg) + return True + except Exception as e: + error_msg = "Update role of IP for SDS '%s' operation failed " \ + "with error '%s'" % (sds_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def remove_ip(self, sds_id, sds_ip_list): + """Remove IP from SDS + :param sds_id: SDS ID + :type sds_id: str + :param sds_ip_list: List of one or more IP addresses and + their roles. + :type sds_ip_list: list[dict] + :return: Boolean indicating if remove IP operation is successful + """ + try: + for ip in sds_ip_list: + LOG.info("IP to remove: %s", ip) + self.powerflex_conn.sds.remove_ip(sds_id=sds_id, ip=ip['ip']) + LOG.info("IP removed successfully.") + return True + except Exception as e: + error_msg = "Remove IP from SDS '%s' operation failed with " \ + "error '%s'" % (sds_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def delete_sds(self, sds_id): + """Delete SDS + :param sds_id: SDS ID + :type sds_id: str + :return: Boolean indicating if delete operation is successful + """ + try: + self.powerflex_conn.sds.delete(sds_id) + return True + except Exception as e: + error_msg = "Delete SDS '%s' operation failed with error '%s'" \ + % (sds_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def perform_module_operation(self): + """ + Perform different actions on SDS based on parameters passed in + the playbook + """ + sds_name = self.module.params['sds_name'] + sds_id = self.module.params['sds_id'] + sds_new_name = self.module.params['sds_new_name'] + protection_domain_name = self.module.params['protection_domain_name'] + protection_domain_id = self.module.params['protection_domain_id'] + rfcache_enabled = self.module.params['rfcache_enabled'] + rmcache_enabled = self.module.params['rmcache_enabled'] + rmcache_size = self.module.params['rmcache_size'] + sds_ip_list = 
copy.deepcopy(self.module.params['sds_ip_list']) + sds_ip_state = self.module.params['sds_ip_state'] + performance_profile = self.module.params['performance_profile'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and SDS details + changed = False + result = dict( + changed=False, + sds_details={} + ) + + # get SDS details + sds_details = self.get_sds_details(sds_name, sds_id) + if sds_details: + sds_id = sds_details['id'] + msg = "Fetched the SDS details %s" % (str(sds_details)) + LOG.info(msg) + + # get Protection Domain ID from name + if protection_domain_name: + pd_details = self.get_protection_domain(protection_domain_name) + if pd_details: + protection_domain_id = pd_details['id'] + msg = "Fetched the protection domain details with id '%s', " \ + "name '%s'" % (protection_domain_id, protection_domain_name) + LOG.info(msg) + + # create operation + create_changed = False + if state == 'present' and not sds_details: + if sds_id: + error_msg = "Creation of SDS is allowed using sds_name " \ + "only, sds_id given." + LOG.info(error_msg) + self.module.fail_json(msg=error_msg) + + if sds_new_name: + error_msg = "sds_new_name parameter is not supported " \ + "during creation of a SDS. Try renaming the " \ + "SDS after the creation." 
+ LOG.info(error_msg) + self.module.fail_json(msg=error_msg) + + self.validate_ip_parameter(sds_ip_list) + + create_changed = self.create_sds(protection_domain_id, + sds_ip_list, sds_ip_state, + sds_name, rmcache_enabled, + rmcache_size) + if create_changed: + sds_details = self.get_sds_details(sds_name) + sds_id = sds_details['id'] + msg = "SDS created successfully, fetched SDS details %s"\ + % (str(sds_details)) + LOG.info(msg) + + # checking if basic SDS parameters are modified or not + modify_dict = {} + if sds_details and state == 'present': + modify_dict = self.to_modify(sds_details, sds_new_name, + rfcache_enabled, rmcache_enabled, + rmcache_size, performance_profile) + msg = "Parameters to be modified are as follows: %s"\ + % (str(modify_dict)) + LOG.info(msg) + + # modify operation + modify_changed = False + if modify_dict and state == 'present': + LOG.info("Modify SDS params.") + modify_changed = self.modify_sds_attributes(sds_id, modify_dict, + create_changed) + + # get updated SDS details + sds_details = self.get_sds_details(sds_id=sds_id) + + # add IPs to SDS + # update IP's role for an SDS + add_ip_changed = False + update_role_changed = False + if sds_details and state == 'present' \ + and sds_ip_state == "present-in-sds": + self.validate_ip_parameter(sds_ip_list) + ips_to_add, roles_to_update = self.identify_ip_role( + sds_ip_list, sds_details, sds_ip_state) + if ips_to_add: + add_ip_changed = self.add_ip(sds_id, ips_to_add) + if roles_to_update: + update_role_changed = self.update_role(sds_id, + roles_to_update) + + # remove IPs from SDS + remove_ip_changed = False + if sds_details and state == 'present' \ + and sds_ip_state == "absent-in-sds": + self.validate_ip_parameter(sds_ip_list) + ips_to_remove = self.identify_ip_role(sds_ip_list, sds_details, + sds_ip_state) + if ips_to_remove: + remove_ip_changed = self.remove_ip(sds_id, ips_to_remove) + + # delete operation + delete_changed = False + if sds_details and state == 'absent': + delete_changed 
= self.delete_sds(sds_id) + + if create_changed or modify_changed or add_ip_changed \ + or update_role_changed or remove_ip_changed or delete_changed: + changed = True + + # Returning the updated SDS details + if state == 'present': + sds_details = self.show_output(sds_id) + result['sds_details'] = sds_details + result['changed'] = changed + self.module.exit_json(**result) + + def show_output(self, sds_id): + """Show SDS details + :param sds_id: ID of the SDS + :type sds_id: str + :return: Details of SDS + :rtype: dict + """ + + try: + sds_details = self.powerflex_conn.sds.get( + filter_fields={'id': sds_id}) + + if len(sds_details) == 0: + msg = "SDS with identifier '%s' not found" % sds_id + LOG.error(msg) + return None + + # Append protection domain name + if 'protectionDomainId' in sds_details[0] \ + and sds_details[0]['protectionDomainId']: + pd_details = self.get_protection_domain( + protection_domain_id=sds_details[0]['protectionDomainId']) + sds_details[0]['protectionDomainName'] = pd_details['name'] + + # Append rmcache size in MB + if 'rmcacheSizeInKb' in sds_details[0] \ + and sds_details[0]['rmcacheSizeInKb']: + rmcache_size_mb = sds_details[0]['rmcacheSizeInKb'] / 1024 + sds_details[0]['rmcacheSizeInMb'] = int(rmcache_size_mb) + + return sds_details[0] + + except Exception as e: + error_msg = "Failed to get the SDS '%s' with error '%s'"\ + % (sds_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + +def get_powerflex_sds_parameters(): + """This method provide parameter required for the SDS module on + PowerFlex""" + return dict( + sds_name=dict(), + sds_id=dict(), + sds_new_name=dict(), + protection_domain_name=dict(), + protection_domain_id=dict(), + sds_ip_list=dict( + type='list', elements='dict', options=dict( + ip=dict(required=True), + role=dict(required=True, choices=['all', 'sdsOnly', + 'sdcOnly']) + ) + ), + sds_ip_state=dict(choices=['present-in-sds', 'absent-in-sds']), + rfcache_enabled=dict(type='bool'), + 
rmcache_enabled=dict(type='bool'), + rmcache_size=dict(type='int'), + performance_profile=dict(choices=['Compact', 'HighPerformance']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create PowerFlex SDS object and perform actions on it + based on user input from playbook""" + obj = PowerFlexSDS() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py new file mode 100644 index 00000000..69caea07 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py @@ -0,0 +1,1285 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing Snapshots on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: snapshot +version_added: '1.0.0' +short_description: Manage Snapshots on Dell PowerFlex +description: +- Managing snapshots on PowerFlex Storage System includes + creating, getting details, mapping/unmapping to/from SDC, + modifying the attributes and deleting snapshot. + +author: +- Akash Shendge (@shenda1) <ansible.team@dell.com> + +extends_documentation_fragment: + - dellemc.powerflex.powerflex + +options: + snapshot_name: + description: + - The name of the snapshot. + - Mandatory for create operation. + - Specify either I(snapshot_name) or I(snapshot_id) (but not both) for any operation. + type: str + snapshot_id: + description: + - The ID of the Snapshot. + type: str + vol_name: + description: + - The name of the volume for which snapshot will be taken. + - Specify either I(vol_name) or I(vol_id) while creating snapshot. + type: str + vol_id: + description: + - The ID of the volume. 
+ type: str + read_only: + description: + - Specifies whether mapping of the created snapshot volume will have + read-write access or limited to read-only access. + - If C(true), snapshot is created with read-only access. + - If C(false), snapshot is created with read-write access. + type: bool + size: + description: + - The size of the snapshot. + type: int + cap_unit: + description: + - The unit of the volume size. It defaults to C(GB), if not specified. + choices: ['GB' , 'TB'] + type: str + snapshot_new_name: + description: + - New name of the snapshot. Used to rename the snapshot. + type: str + allow_multiple_mappings: + description: + - Specifies whether to allow multiple mappings or not. + type: bool + desired_retention: + description: + - The retention value for the Snapshot. + - If the desired_retention is not mentioned during creation, snapshot + will be created with unlimited retention. + - Maximum supported desired retention is 31 days. + type: int + retention_unit: + description: + - The unit for retention. It defaults to C(hours), if not specified. + choices: [hours, days] + type: str + sdc: + description: + - Specifies SDC parameters. + type: list + elements: dict + suboptions: + sdc_name: + description: + - Name of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_id) and I(sdc_ip). + type: str + sdc_id: + description: + - ID of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_name) and I(sdc_ip). + type: str + sdc_ip: + description: + - IP of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_id) and I(sdc_ip). + type: str + access_mode: + description: + - Define the access mode for all mappings of the snapshot. + choices: ['READ_WRITE', 'READ_ONLY', 'NO_ACCESS'] + type: str + bandwidth_limit: + description: + - Limit of snapshot network bandwidth. + - Need to mention in multiple of 1024 Kbps. 
+ - To set no limit, 0 is to be passed. + type: int + iops_limit: + description: + - Limit of snapshot IOPS. + - Minimum IOPS limit is 11 and specify 0 for unlimited iops. + type: int + sdc_state: + description: + - Mapping state of the SDC. + choices: ['mapped', 'unmapped'] + type: str + remove_mode: + description: + - Removal mode for the snapshot. + - It defaults to C(ONLY_ME), if not specified. + choices: ['ONLY_ME', 'INCLUDING_DESCENDANTS'] + type: str + state: + description: + - State of the snapshot. + choices: ['present', 'absent'] + required: true + type: str +notes: + - The I(check_mode) is not supported. +''' + +EXAMPLES = r''' +- name: Create snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_snapshot" + vol_name: "ansible_volume" + read_only: False + desired_retention: 2 + state: "present" + +- name: Get snapshot details using snapshot id + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + state: "present" + +- name: Map snapshot to SDC + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + sdc: + - sdc_ip: "198.10.xxx.xxx" + - sdc_id: "663ac0d200000001" + allow_multiple_mappings: True + sdc_state: "mapped" + state: "present" + +- name: Modify the attributes of SDC mapped to snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + sdc: + - sdc_ip: "198.10.xxx.xxx" + iops_limit: 11 + bandwidth_limit: 4096 + - sdc_id: "663ac0d200000001" + iops_limit: 20 + bandwidth_limit: 2048 + allow_multiple_mappings: True + sdc_state: 
"mapped" + state: "present" + +- name: Extend the size of snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + size: 16 + state: "present" + +- name: Unmap SDCs from snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + sdc: + - sdc_ip: "198.10.xxx.xxx" + - sdc_id: "663ac0d200000001" + sdc_state: "unmapped" + state: "present" + +- name: Rename snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + snapshot_new_name: "ansible_renamed_snapshot_10" + state: "present" + +- name: Delete snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + remove_mode: "ONLY_ME" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' + +snapshot_details: + description: Details of the snapshot. + returned: When snapshot exists + type: dict + contains: + ancestorVolumeId: + description: The ID of the root of the specified volume's V-Tree. + type: str + ancestorVolumeName: + description: The name of the root of the specified volume's V-Tree. + type: str + creationTime: + description: The creation time of the snapshot. + type: int + id: + description: The ID of the snapshot. + type: str + mappedSdcInfo: + description: The details of the mapped SDC. + type: dict + contains: + sdcId: + description: ID of the SDC. + type: str + sdcName: + description: Name of the SDC. + type: str + sdcIp: + description: IP of the SDC. 
+ type: str + accessMode: + description: Mapping access mode for the specified snapshot. + type: str + limitIops: + description: IOPS limit for the SDC. + type: int + limitBwInMbps: + description: Bandwidth limit for the SDC. + type: int + name: + description: Name of the snapshot. + type: str + secureSnapshotExpTime: + description: Expiry time of the snapshot. + type: int + sizeInKb: + description: Size of the snapshot. + type: int + sizeInGb: + description: Size of the snapshot. + type: int + retentionInHours: + description: Retention of the snapshot in hours. + type: int + storagePoolId: + description: The ID of the Storage pool in which snapshot resides. + type: str + storagePoolName: + description: The name of the Storage pool in which snapshot resides. + type: str + sample: { + "accessModeLimit": "ReadOnly", + "ancestorVolumeId": "cdd883cf00000002", + "ancestorVolumeName": "ansible-volume-1", + "autoSnapshotGroupId": null, + "compressionMethod": "Invalid", + "consistencyGroupId": "22f1e80c00000001", + "creationTime": 1631619229, + "dataLayout": "MediumGranularity", + "id": "cdd883d000000004", + "links": [ + { + "href": "/api/instances/Volume::cdd883d000000004", + "rel": "self" + }, + { + "href": "/api/instances/Volume::cdd883d000000004/relationships + /Statistics", + "rel": "/api/Volume/relationship/Statistics" + }, + { + "href": "/api/instances/Volume::cdd883cf00000002", + "rel": "/api/parent/relationship/ancestorVolumeId" + }, + { + "href": "/api/instances/VTree::6e86255c00000001", + "rel": "/api/parent/relationship/vtreeId" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "lockedAutoSnapshot": false, + "lockedAutoSnapshotMarkedForRemoval": false, + "managedBy": "ScaleIO", + "mappedSdcInfo": null, + "name": "ansible_vol_snap_1", + "notGenuineSnapshot": false, + "originalExpiryTime": 0, + "pairIds": null, + "replicationJournalVolume": false, + "replicationTimeStamp": 0, + 
"retentionInHours": 0, + "retentionLevels": [], + "secureSnapshotExpTime": 0, + "sizeInGb": 16, + "sizeInKb": 16777216, + "snplIdOfAutoSnapshot": null, + "snplIdOfSourceVolume": null, + "storagePoolId": "e0d8f6c900000000", + "storagePoolName": "pool1", + "timeStampIsAccurate": false, + "useRmcache": false, + "volumeReplicationState": "UnmarkedForReplication", + "volumeType": "Snapshot", + "vtreeId": "6e86255c00000001" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ + import utils +from datetime import datetime, timedelta +import time +import copy + +LOG = utils.get_logger('snapshot') + + +class PowerFlexSnapshot(object): + """Class with Snapshot operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_snapshot_parameters()) + + mutually_exclusive = [['snapshot_name', 'snapshot_id'], + ['vol_name', 'vol_id'], + ['snapshot_id', 'vol_name'], + ['snapshot_id', 'vol_id']] + + required_together = [['sdc', 'sdc_state']] + + required_one_of = [['snapshot_name', 'snapshot_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_one_of=required_one_of) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def get_storage_pool(self, storage_pool_id): + """Get storage pool details + :param storage_pool_id: The storage pool id + :return: Storage pool details + """ + + try: + return self.powerflex_conn.storage_pool.get( + 
filter_fields={'id': storage_pool_id}) + + except Exception as e: + errormsg = "Failed to get the storage pool %s with error " \ + "%s" % (storage_pool_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_snapshot(self, snapshot_name=None, snapshot_id=None): + """Get snapshot details + :param snapshot_name: Name of the snapshot + :param snapshot_id: ID of the snapshot + :return: Details of snapshot if exist. + """ + + id_or_name = snapshot_id if snapshot_id else snapshot_name + + try: + if snapshot_name: + snapshot_details = self.powerflex_conn.volume.get( + filter_fields={'name': snapshot_name}) + else: + snapshot_details = self.powerflex_conn.volume.get( + filter_fields={'id': snapshot_id}) + + if len(snapshot_details) == 0: + msg = "Snapshot with identifier %s is not found" % id_or_name + LOG.error(msg) + return None + + if len(snapshot_details) > 1: + errormsg = "Multiple instances of snapshot " \ + "exist with name {0}".format(snapshot_name) + self.module.fail_json(msg=errormsg) + + # Add ancestor volume name + if 'ancestorVolumeId' in snapshot_details[0] and \ + snapshot_details[0]['ancestorVolumeId']: + vol = self.get_volume( + vol_id=snapshot_details[0]['ancestorVolumeId']) + snapshot_details[0]['ancestorVolumeName'] = vol['name'] + + # Add size in GB + if 'sizeInKb' in snapshot_details[0] and \ + snapshot_details[0]['sizeInKb']: + snapshot_details[0]['sizeInGb'] = utils.get_size_in_gb( + snapshot_details[0]['sizeInKb'], 'KB') + + # Add storage pool name + if 'storagePoolId' in snapshot_details[0] and \ + snapshot_details[0]['storagePoolId']: + sp = self.get_storage_pool(snapshot_details[0]['storagePoolId']) + if len(sp) > 0: + snapshot_details[0]['storagePoolName'] = sp[0]['name'] + + # Add retention in hours + if 'secureSnapshotExpTime' in snapshot_details[0] and\ + 'creationTime' in snapshot_details[0]: + if snapshot_details[0]['secureSnapshotExpTime'] != 0: + expiry_obj = datetime.fromtimestamp( + 
snapshot_details[0]['secureSnapshotExpTime']) + creation_obj = datetime.fromtimestamp( + snapshot_details[0]['creationTime']) + + td = utils.dateutil.relativedelta.relativedelta( + expiry_obj, creation_obj) + snapshot_details[0]['retentionInHours'] = td.hours + else: + snapshot_details[0]['retentionInHours'] = 0 + + # Match volume details with snapshot details + if any([self.module.params['vol_name'], + self.module.params['vol_id']]): + self.match_vol_details(snapshot_details[0]) + return snapshot_details[0] + except Exception as e: + errormsg = "Failed to get the snapshot %s with error %s" % ( + id_or_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def match_vol_details(self, snapshot): + """Match the given volume details with the response + :param snapshot: The snapshot details + """ + vol_name = self.module.params['vol_name'] + vol_id = self.module.params['vol_id'] + + try: + if vol_name and vol_name != snapshot['ancestorVolumeName']: + errormsg = "Given volume name do not match with the " \ + "corresponding snapshot details." + self.module.fail_json(msg=errormsg) + + if vol_id and vol_id != snapshot['ancestorVolumeId']: + errormsg = "Given volume ID do not match with the " \ + "corresponding snapshot details." 
+ self.module.fail_json(msg=errormsg) + except Exception as e: + errormsg = "Failed to match volume details with the snapshot " \ + "with error %s" % str(e) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_volume(self, vol_name=None, vol_id=None): + """Get the volume id + :param vol_name: The name of the volume + :param vol_id: The ID of the volume + :return: The volume details + """ + + try: + if vol_name: + vol_details = self.powerflex_conn.volume.get( + filter_fields={'name': vol_name}) + else: + vol_details = self.powerflex_conn.volume.get( + filter_fields={'id': vol_id}) + + if len(vol_details) == 0: + error_msg = "Unable to find volume with name {0}".format( + vol_name) + self.module.fail_json(msg=error_msg) + return vol_details[0] + except Exception as e: + errormsg = "Failed to get the volume %s with error " \ + "%s" % (vol_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_sdc_id(self, sdc_name=None, sdc_ip=None, sdc_id=None): + """Get the SDC ID + :param sdc_name: The name of the SDC + :param sdc_ip: The IP of the SDC + :param sdc_id: The ID of the SDC + :return: The ID of the SDC + """ + + if sdc_name: + id_ip_name = sdc_name + elif sdc_ip: + id_ip_name = sdc_ip + else: + id_ip_name = sdc_id + + try: + if sdc_name: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'name': sdc_name}) + elif sdc_ip: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'sdcIp': sdc_ip}) + else: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'id': sdc_id}) + + if len(sdc_details) == 0: + error_msg = "Unable to find SDC with identifier {0}".format( + id_ip_name) + self.module.fail_json(msg=error_msg) + return sdc_details[0]['id'] + except Exception as e: + errormsg = "Failed to get the SDC %s with error " \ + "%s" % (id_ip_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_system_id(self): + """Get system id""" + + try: + resp = 
self.powerflex_conn.system.get() + + if len(resp) == 0: + self.module.fail_json(msg="No system exist on the given host.") + + if len(resp) > 1: + self.module.fail_json(msg="Multiple systems exist on the " + "given host.") + return resp[0]['id'] + except Exception as e: + msg = "Failed to get system id with error %s" % str(e) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def create_snapshot(self, snapshot_name, vol_id, system_id, + access_mode, retention): + """Create snapshot + :param snapshot_name: The name of the snapshot + :param vol_id: The ID of the source volume + :param system_id: The system id + :param access_mode: Access mode for the snapshot + :param retention: The retention for the snapshot + :return: Boolean indicating if create operation is successful + """ + LOG.debug("Creating Snapshot") + + try: + self.powerflex_conn.system.snapshot_volumes( + system_id=system_id, + snapshot_defs=[utils.SnapshotDef(vol_id, snapshot_name)], + access_mode=access_mode, + retention_period=retention + ) + + return True + except Exception as e: + errormsg = "Create snapshot %s operation failed with " \ + "error %s" % (snapshot_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_retention(self, snapshot_id, new_retention): + """Modify snapshot retention + :param snapshot_id: The snapshot id + :param new_retention: Desired retention of the snapshot + :return: Boolean indicating if modifying retention is successful + """ + + try: + self.powerflex_conn.volume.set_retention_period(snapshot_id, + new_retention) + return True + except Exception as e: + errormsg = "Modify retention of snapshot %s operation failed " \ + "with error %s" % (snapshot_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_size(self, snapshot_id, new_size): + """Modify snapshot size + :param snapshot_id: The snapshot id + :param new_size: Size of the snapshot + :return: Boolean indicating if extend operation is successful + 
""" + + try: + self.powerflex_conn.volume.extend(snapshot_id, new_size) + return True + except Exception as e: + errormsg = "Extend snapshot %s operation failed with " \ + "error %s" % (snapshot_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_snap_access_mode(self, snapshot_id, snap_access_mode): + """Modify access mode of snapshot + :param snapshot_id: The snapshot id + :param snap_access_mode: Access mode of the snapshot + :return: Boolean indicating if modifying access mode of + snapshot is successful + """ + + try: + self.powerflex_conn.volume.set_volume_access_mode_limit( + volume_id=snapshot_id, access_mode_limit=snap_access_mode) + return True + except Exception as e: + errormsg = "Modify access mode of snapshot %s operation " \ + "failed with error %s" % (snapshot_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_access_mode(self, snapshot_id, access_mode_list): + """Modify access mode of SDCs mapped to snapshot + :param snapshot_id: The snapshot id + :param access_mode_list: List containing SDC ID's whose access mode + is to modified + :return: Boolean indicating if modifying access mode is successful + """ + + try: + changed = False + for temp in access_mode_list: + if temp['accessMode']: + self.powerflex_conn.volume.set_access_mode_for_sdc( + volume_id=snapshot_id, sdc_id=temp['sdc_id'], + access_mode=temp['accessMode']) + changed = True + return changed + except Exception as e: + errormsg = "Modify access mode of SDC %s operation failed " \ + "with error %s" % (temp['sdc_id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_limits(self, payload): + """Modify IOPS and bandwidth limits of SDC's mapped to snapshot + :param snapshot_id: The snapshot id + :param limits_dict: Dict containing SDC ID's whose bandwidth and + IOPS is to modified + :return: Boolean indicating if modifying limits is successful + """ + + try: + changed = False + if 
payload['bandwidth_limit'] is not None or \ + payload['iops_limit'] is not None: + self.powerflex_conn.volume.set_mapped_sdc_limits(**payload) + changed = True + return changed + except Exception as e: + errormsg = "Modify bandwidth/iops limits of SDC %s operation " \ + "failed with error %s" % (payload['sdc_id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def rename_snapshot(self, snapshot_id, new_name): + """Rename snapshot + :param snapshot_id: The snapshot id + :param new_name: The new name of the snapshot + :return: Boolean indicating if rename operation is successful + """ + + try: + self.powerflex_conn.volume.rename(snapshot_id, new_name) + return True + except Exception as e: + errormsg = "Rename snapshot %s operation failed with " \ + "error %s" % (snapshot_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def delete_snapshot(self, snapshot_id, remove_mode): + """Delete snapshot + :param snapshot_id: The snapshot id + :param remove_mode: Removal mode for the snapshot + :return: Boolean indicating if delete operation is successful + """ + + try: + self.powerflex_conn.volume.delete(snapshot_id, remove_mode) + return True + except Exception as e: + errormsg = "Delete snapshot %s operation failed with " \ + "error %s" % (snapshot_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_desired_retention(self, desired_retention, retention_unit): + """Validates the specified desired retention. 
+ :param desired_retention: Desired retention of the snapshot + :param retention_unit: Retention unit for snapshot + """ + + if retention_unit == 'hours' and (desired_retention < 1 or + desired_retention > 744): + self.module.fail_json(msg="Please provide a valid integer as the" + " desired retention between 1 and 744.") + elif retention_unit == 'days' and (desired_retention < 1 or + desired_retention > 31): + self.module.fail_json(msg="Please provide a valid integer as the" + " desired retention between 1 and 31.") + + def unmap_snapshot_from_sdc(self, snapshot, sdc): + """Unmap SDC's from snapshot + :param snapshot: Snapshot details + :param sdc: List of SDCs to be unmapped + :return: Boolean indicating if unmap operation is successful + """ + + current_sdcs = snapshot['mappedSdcInfo'] + current_sdc_ids = [] + sdc_id_list = [] + + if current_sdcs: + for temp in current_sdcs: + current_sdc_ids.append(temp['sdcId']) + + for temp in sdc: + if 'sdc_name' in temp and temp['sdc_name']: + sdc_id = self.get_sdc_id(sdc_name=temp['sdc_name']) + elif 'sdc_ip' in temp and temp['sdc_ip']: + sdc_id = self.get_sdc_id(sdc_ip=temp['sdc_ip']) + else: + sdc_id = self.get_sdc_id(sdc_id=temp['sdc_id']) + if sdc_id in current_sdc_ids: + sdc_id_list.append(sdc_id) + + LOG.info("SDC IDs to remove %s", sdc_id_list) + + if len(sdc_id_list) == 0: + return False + + try: + for sdc_id in sdc_id_list: + self.powerflex_conn.volume.remove_mapped_sdc( + snapshot['id'], sdc_id) + return True + except Exception as e: + errormsg = "Unmap SDC %s from snapshot %s failed with error " \ + "%s" % (sdc_id, snapshot['id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def map_snapshot_to_sdc(self, snapshot, sdc): + """Map SDC's to snapshot + :param snapshot: Snapshot details + :param sdc: List of SDCs + :return: Boolean indicating if mapping operation is successful + """ + + current_sdcs = snapshot['mappedSdcInfo'] + current_sdc_ids = [] + sdc_id_list = [] + sdc_map_list = [] + 
sdc_modify_list1 = [] + sdc_modify_list2 = [] + + if current_sdcs: + for temp in current_sdcs: + current_sdc_ids.append(temp['sdcId']) + + for temp in sdc: + if 'sdc_name' in temp and temp['sdc_name']: + sdc_id = self.get_sdc_id(sdc_name=temp['sdc_name']) + elif 'sdc_ip' in temp and temp['sdc_ip']: + sdc_id = self.get_sdc_id(sdc_ip=temp['sdc_ip']) + else: + sdc_id = self.get_sdc_id(sdc_id=temp['sdc_id']) + if sdc_id not in current_sdc_ids: + sdc_id_list.append(sdc_id) + temp['sdc_id'] = sdc_id + if 'access_mode' in temp: + temp['access_mode'] = get_access_mode(temp['access_mode']) + if 'bandwidth_limit' not in temp: + temp['bandwidth_limit'] = None + if 'iops_limit' not in temp: + temp['iops_limit'] = None + sdc_map_list.append(temp) + else: + access_mode_dict, limits_dict = check_for_sdc_modification( + snapshot, sdc_id, temp) + if access_mode_dict: + sdc_modify_list1.append(access_mode_dict) + if limits_dict: + sdc_modify_list2.append(limits_dict) + + LOG.info("SDC to add: %s", sdc_map_list) + + if not sdc_map_list: + return False, sdc_modify_list1, sdc_modify_list2 + + try: + changed = False + for sdc in sdc_map_list: + payload = { + "volume_id": snapshot['id'], + "sdc_id": sdc['sdc_id'], + "access_mode": sdc['access_mode'], + "allow_multiple_mappings": self.module.params['allow_multiple_mappings'] + } + self.powerflex_conn.volume.add_mapped_sdc(**payload) + + if sdc['bandwidth_limit'] or sdc['iops_limit']: + payload = { + "volume_id": snapshot['id'], + "sdc_id": sdc['sdc_id'], + "bandwidth_limit": sdc['bandwidth_limit'], + "iops_limit": sdc['iops_limit'] + } + + self.powerflex_conn.volume.set_mapped_sdc_limits(**payload) + changed = True + return changed, sdc_modify_list1, sdc_modify_list2 + + except Exception as e: + errormsg = "Mapping snapshot %s to SDC %s " \ + "failed with error %s" % (snapshot['name'], + sdc['sdc_id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_parameters(self): + """Validate the input 
parameters""" + + sdc = self.module.params['sdc'] + cap_unit = self.module.params['cap_unit'] + size = self.module.params['size'] + desired_retention = self.module.params['desired_retention'] + retention_unit = self.module.params['retention_unit'] + + param_list = ['snapshot_name', 'snapshot_id', 'vol_name', 'vol_id'] + for param in param_list: + if self.module.params[param] is not None and \ + len(self.module.params[param].strip()) == 0: + error_msg = "Please provide valid %s" % param + self.module.fail_json(msg=error_msg) + + if sdc: + for temp in sdc: + if (all([temp['sdc_id'], temp['sdc_ip']]) or + all([temp['sdc_id'], temp['sdc_name']]) or + all([temp['sdc_ip'], temp['sdc_name']])): + self.module.fail_json(msg="sdc_id, sdc_ip and sdc_name " + "are mutually exclusive") + + if (cap_unit is not None) and not size: + self.module.fail_json(msg="cap_unit can be specified along " + "with size") + + if (retention_unit is not None) and not desired_retention: + self.module.fail_json(msg="retention_unit can be specified along " + "with desired_retention") + + def perform_module_operation(self): + """ + Perform different actions on snapshot based on parameters passed in + the playbook + """ + snapshot_name = self.module.params['snapshot_name'] + snapshot_id = self.module.params['snapshot_id'] + vol_name = self.module.params['vol_name'] + vol_id = self.module.params['vol_id'] + read_only = self.module.params['read_only'] + size = self.module.params['size'] + cap_unit = self.module.params['cap_unit'] + snapshot_new_name = self.module.params['snapshot_new_name'] + sdc = copy.deepcopy(self.module.params['sdc']) + sdc_state = self.module.params['sdc_state'] + desired_retention = self.module.params['desired_retention'] + retention_unit = self.module.params['retention_unit'] + remove_mode = self.module.params['remove_mode'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and snapshot details + changed = False + is_modified = False + result 
= dict( + changed=False, + snapshot_details={} + ) + + self.validate_parameters() + + if size and not cap_unit: + cap_unit = 'GB' + + if desired_retention and not retention_unit: + retention_unit = 'hours' + + if desired_retention is not None: + self.validate_desired_retention(desired_retention, retention_unit) + + snapshot_details = self.get_snapshot(snapshot_name, snapshot_id) + + if snapshot_details: + snap_access_mode = None + if read_only is not None: + if read_only: + snap_access_mode = 'ReadOnly' + else: + snap_access_mode = 'ReadWrite' + is_modified, flag1, flag2, flag3 = check_snapshot_modified( + snapshot_details, desired_retention, retention_unit, size, + cap_unit, snap_access_mode) + + if state == 'present' and not snapshot_details: + if snapshot_id: + self.module.fail_json(msg="Creation of snapshot is allowed " + "using snapshot_name only, " + "snapshot_id given.") + + if snapshot_name is None or len(snapshot_name.strip()) == 0: + self.module.fail_json(msg="Please provide valid snapshot " + "name.") + + if vol_name is None and vol_id is None: + self.module.fail_json(msg="Please provide volume details to " + "create new snapshot") + + if snapshot_new_name is not None: + self.module.fail_json(msg="snapshot_new_name is not required" + " while creating snapshot") + + if remove_mode: + self.module.fail_json(msg="remove_mode is not required while " + "creating snapshot") + + if vol_name: + vol = self.get_volume(vol_name=vol_name) + vol_id = vol['id'] + + retention = 0 + if desired_retention: + retention = calculate_retention(desired_retention, + retention_unit) + + system_id = self.get_system_id() + if read_only: + access_mode = 'ReadOnly' + else: + access_mode = 'ReadWrite' + + changed = self.create_snapshot(snapshot_name, vol_id, system_id, + access_mode, retention) + if changed: + snapshot_details = self.get_snapshot(snapshot_name) + + if size: + if cap_unit == 'GB': + new_size = size * 1024 * 1024 + else: + new_size = size * 1024 * 1024 * 1024 + + if 
new_size != snapshot_details['sizeInKb']: + if cap_unit == 'TB': + size = size * 1024 + changed = self.modify_size(snapshot_details['id'], size) + + if is_modified: + if flag1: + retention = calculate_retention(desired_retention, + retention_unit) + changed = self.modify_retention(snapshot_details['id'], + retention) + + if flag2: + new_size = size + if cap_unit == 'TB': + new_size = size * 1024 + changed = self.modify_size(snapshot_details['id'], new_size) + + if flag3: + changed = self.modify_snap_access_mode( + snapshot_details['id'], snap_access_mode) + + if state == 'present' and snapshot_details and sdc and \ + sdc_state == 'mapped': + + changed_mode = False + changed_limits = False + + changed, access_mode_list, limits_list = \ + self.map_snapshot_to_sdc(snapshot_details, sdc) + + if len(access_mode_list) > 0: + changed_mode = self.modify_access_mode( + snapshot_details['id'], access_mode_list) + + if len(limits_list) > 0: + for temp in limits_list: + payload = { + "volume_id": snapshot_details['id'], + "sdc_id": temp['sdc_id'], + "bandwidth_limit": temp['bandwidth_limit'], + "iops_limit": temp['iops_limit'] + } + changed_limits = self.modify_limits(payload) + + if changed_mode or changed_limits: + changed = True + + if state == 'present' and snapshot_details and sdc and \ + sdc_state == 'unmapped': + changed = self.unmap_snapshot_from_sdc(snapshot_details, sdc) + + if state == 'present' and snapshot_details and \ + snapshot_new_name is not None: + if len(snapshot_new_name.strip()) == 0: + self.module.fail_json(msg="Please provide valid snapshot " + "name.") + changed = self.rename_snapshot(snapshot_details['id'], + snapshot_new_name) + if changed: + snapshot_name = snapshot_new_name + + if state == 'absent' and snapshot_details: + if remove_mode is None: + remove_mode = "ONLY_ME" + changed = self.delete_snapshot(snapshot_details['id'], remove_mode) + + if state == 'present': + snapshot_details = self.get_snapshot(snapshot_name, snapshot_id) + 
result['snapshot_details'] = snapshot_details + result['changed'] = changed + self.module.exit_json(**result) + + +def check_snapshot_modified(snapshot=None, desired_retention=None, + retention_unit=None, size=None, cap_unit=None, + access_mode=None): + """Check if snapshot modification is required + :param snapshot: Snapshot details + :param desired_retention: Desired retention of the snapshot + :param retention_unit: Retention unit for snapshot + :param size: Size of the snapshot + :param cap_unit: Capacity unit for the snapshot + :param access_mode: Access mode of the snapshot + :return: Boolean indicating if modification is needed + """ + + snap_creation_timestamp = None + expiration_timestamp = None + is_timestamp_modified = False + is_size_modified = False + is_access_modified = False + is_modified = False + + if 'creationTime' in snapshot: + snap_creation_timestamp = snapshot['creationTime'] + + if desired_retention: + if retention_unit == 'hours': + expiration_timestamp = \ + datetime.fromtimestamp(snap_creation_timestamp) + \ + timedelta(hours=desired_retention) + expiration_timestamp = time.mktime(expiration_timestamp.timetuple()) + else: + expiration_timestamp = \ + datetime.fromtimestamp(snap_creation_timestamp) + \ + timedelta(days=desired_retention) + expiration_timestamp = time.mktime(expiration_timestamp.timetuple()) + + if 'secureSnapshotExpTime' in snapshot and expiration_timestamp and \ + snapshot['secureSnapshotExpTime'] != expiration_timestamp: + existing_timestamp = snapshot['secureSnapshotExpTime'] + new_timestamp = expiration_timestamp + + info_message = 'The existing timestamp is: %s and the new ' \ + 'timestamp is: %s' % (existing_timestamp, + new_timestamp) + LOG.info(info_message) + + existing_time_obj = datetime.fromtimestamp(existing_timestamp) + new_time_obj = datetime.fromtimestamp(new_timestamp) + + if existing_time_obj > new_time_obj: + td = utils.dateutil.relativedelta.relativedelta( + existing_time_obj, new_time_obj) + else: + td 
= utils.dateutil.relativedelta.relativedelta( + new_time_obj, existing_time_obj) + + LOG.info("Time difference: %s", td.minutes) + + # A delta of two minutes is treated as idempotent + if td.seconds > 120 or td.minutes > 2: + is_timestamp_modified = True + + if size: + if cap_unit == 'GB': + new_size = size * 1024 * 1024 + else: + new_size = size * 1024 * 1024 * 1024 + + if new_size != snapshot['sizeInKb']: + is_size_modified = True + + if access_mode and snapshot['accessModeLimit'] != access_mode: + is_access_modified = True + + if is_timestamp_modified or is_size_modified or is_access_modified: + is_modified = True + return is_modified, is_timestamp_modified, is_size_modified, is_access_modified + + +def calculate_retention(desired_retention=None, retention_unit=None): + """ + :param desired_retention: Desired retention of the snapshot + :param retention_unit: Retention unit for snapshot + :return: Retention in minutes + """ + + retention = 0 + if retention_unit == 'days': + retention = desired_retention * 24 * 60 + else: + retention = desired_retention * 60 + return retention + + +def check_for_sdc_modification(snapshot, sdc_id, sdc_details): + """ + :param snapshot: The snapshot details + :param sdc_id: The ID of the SDC + :param sdc_details: The details of SDC + :return: Dictionary with SDC attributes to be modified + """ + access_mode_dict = dict() + limits_dict = dict() + + for sdc in snapshot['mappedSdcInfo']: + if sdc['sdcId'] == sdc_id: + if sdc['accessMode'] != get_access_mode(sdc_details['access_mode']): + access_mode_dict['sdc_id'] = sdc_id + access_mode_dict['accessMode'] = get_access_mode( + sdc_details['access_mode']) + if sdc['limitIops'] != sdc_details['iops_limit'] or \ + sdc['limitBwInMbps'] != sdc_details['bandwidth_limit']: + limits_dict['sdc_id'] = sdc_id + limits_dict['iops_limit'] = None + limits_dict['bandwidth_limit'] = None + if sdc['limitIops'] != sdc_details['iops_limit']: + limits_dict['iops_limit'] = sdc_details['iops_limit'] + if 
sdc['limitBwInMbps'] != get_limits_in_mb(sdc_details['bandwidth_limit']): + limits_dict['bandwidth_limit'] = \ + sdc_details['bandwidth_limit'] + break + return access_mode_dict, limits_dict + + +def get_limits_in_mb(limits): + """ + :param limits: Limits in KB + :return: Limits in MB + """ + + if limits: + return limits / 1024 + + +def get_access_mode(access_mode): + """ + :param access_mode: Access mode of the SDC + :return: The enum for the access mode + """ + + access_mode_dict = { + "READ_WRITE": "ReadWrite", + "READ_ONLY": "ReadOnly", + "NO_ACCESS": "NoAccess" + } + return access_mode_dict.get(access_mode) + + +def get_powerflex_snapshot_parameters(): + """This method provide parameter required for the Ansible snapshot + module on PowerFlex""" + return dict( + snapshot_name=dict(), snapshot_id=dict(), + vol_name=dict(), vol_id=dict(), + read_only=dict(required=False, type='bool'), + size=dict(required=False, type='int'), + cap_unit=dict(choices=['GB', 'TB']), + snapshot_new_name=dict(), + allow_multiple_mappings=dict(required=False, type='bool'), + sdc=dict( + type='list', elements='dict', options=dict( + sdc_id=dict(), sdc_ip=dict(), + sdc_name=dict(), + access_mode=dict(choices=['READ_WRITE', 'READ_ONLY', + 'NO_ACCESS']), + bandwidth_limit=dict(type='int'), + iops_limit=dict(type='int') + ) + ), + desired_retention=dict(type='int'), + retention_unit=dict(choices=['hours', 'days']), + remove_mode=dict(choices=['ONLY_ME', 'INCLUDING_DESCENDANTS']), + sdc_state=dict(choices=['mapped', 'unmapped']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create PowerFlex Snapshot object and perform actions on it + based on user input from playbook""" + obj = PowerFlexSnapshot() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py b/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py new file mode 
100644 index 00000000..ca343212 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py @@ -0,0 +1,914 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing Dell Technologies (Dell) PowerFlex storage pool""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: storagepool + +version_added: '1.0.0' + +short_description: Managing Dell PowerFlex storage pool + +description: +- Dell PowerFlex storage pool module includes getting the details of + storage pool, creating a new storage pool, and modifying the attribute of + a storage pool. + +extends_documentation_fragment: + - dellemc.powerflex.powerflex + +author: +- Arindam Datta (@dattaarindam) <ansible.team@dell.com> +- P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com> + +options: + storage_pool_name: + description: + - The name of the storage pool. + - If more than one storage pool is found with the same name then + protection domain id/name is required to perform the task. + - Mutually exclusive with I(storage_pool_id). + type: str + storage_pool_id: + description: + - The id of the storage pool. + - It is auto generated, hence should not be provided during + creation of a storage pool. + - Mutually exclusive with I(storage_pool_name). + type: str + protection_domain_name: + description: + - The name of the protection domain. + - During creation of a pool, either protection domain name or id must be + mentioned. + - Mutually exclusive with I(protection_domain_id). + type: str + protection_domain_id: + description: + - The id of the protection domain. + - During creation of a pool, either protection domain name or id must + be mentioned. + - Mutually exclusive with I(protection_domain_name). 
+ type: str + media_type: + description: + - Type of devices in the storage pool. + type: str + choices: ['HDD', 'SSD', 'TRANSITIONAL'] + storage_pool_new_name: + description: + - New name for the storage pool can be provided. + - This parameter is used for renaming the storage pool. + type: str + use_rfcache: + description: + - Enable/Disable RFcache on a specific storage pool. + type: bool + use_rmcache: + description: + - Enable/Disable RMcache on a specific storage pool. + type: bool + state: + description: + - State of the storage pool. + type: str + choices: ["present", "absent"] + required: true +notes: + - TRANSITIONAL media type is supported only during modification. + - The I(check_mode) is not supported. +''' + +EXAMPLES = r''' + +- name: Get the details of storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "sample_pool_name" + protection_domain_name: "sample_protection_domain" + state: "present" + +- name: Get the details of storage pool by id + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_id: "abcd1234ab12r" + state: "present" + +- name: Create a new storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "ansible_test_pool" + protection_domain_id: "1c957da800000000" + media_type: "HDD" + state: "present" + +- name: Modify a storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "ansible_test_pool" + protection_domain_id: "1c957da800000000" + use_rmcache: True + use_rfcache: True + state: "present" + +- name: Rename 
storage pool by id + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_id: "abcd1234ab12r" + storage_pool_new_name: "new_ansible_pool" + state: "present" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +storage_pool_details: + description: Details of the storage pool. + returned: When storage pool exists + type: dict + contains: + mediaType: + description: Type of devices in the storage pool. + type: str + useRfcache: + description: Enable/Disable RFcache on a specific storage pool. + type: bool + useRmcache: + description: Enable/Disable RMcache on a specific storage pool. + type: bool + id: + description: ID of the storage pool under protection domain. + type: str + name: + description: Name of the storage pool under protection domain. + type: str + protectionDomainId: + description: ID of the protection domain in which pool resides. + type: str + protectionDomainName: + description: Name of the protection domain in which pool resides. + type: str + "statistics": + description: Statistics details of the storage pool. + type: dict + contains: + "capacityInUseInKb": + description: Total capacity of the storage pool. + type: str + "unusedCapacityInKb": + description: Unused capacity of the storage pool. + type: str + "deviceIds": + description: Device Ids of the storage pool. 
+ type: list + sample: { + "addressSpaceUsage": "Normal", + "addressSpaceUsageType": "DeviceCapacityLimit", + "backgroundScannerBWLimitKBps": 3072, + "backgroundScannerMode": "DataComparison", + "bgScannerCompareErrorAction": "ReportAndFix", + "bgScannerReadErrorAction": "ReportAndFix", + "capacityAlertCriticalThreshold": 90, + "capacityAlertHighThreshold": 80, + "capacityUsageState": "Normal", + "capacityUsageType": "NetCapacity", + "checksumEnabled": false, + "compressionMethod": "Invalid", + "dataLayout": "MediumGranularity", + "externalAccelerationType": "None", + "fglAccpId": null, + "fglExtraCapacity": null, + "fglMaxCompressionRatio": null, + "fglMetadataSizeXx100": null, + "fglNvdimmMetadataAmortizationX100": null, + "fglNvdimmWriteCacheSizeInMb": null, + "fglOverProvisioningFactor": null, + "fglPerfProfile": null, + "fglWriteAtomicitySize": null, + "fragmentationEnabled": true, + "id": "e0d8f6c900000000", + "links": [ + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "self" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Statistics", + "rel": "/api/StoragePool/relationship/Statistics" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/SpSds", + "rel": "/api/StoragePool/relationship/SpSds" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Volume", + "rel": "/api/StoragePool/relationship/Volume" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Device", + "rel": "/api/StoragePool/relationship/Device" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/VTree", + "rel": "/api/StoragePool/relationship/VTree" + }, + { + "href": "/api/instances/ProtectionDomain::9300c1f900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "statistics": { + "BackgroundScannedInMB": 3466920, + "activeBckRebuildCapacityInKb": 0, + "activeEnterProtectedMaintenanceModeCapacityInKb": 
0, + "aggregateCompressionLevel": "Uncompressed", + "atRestCapacityInKb": 1248256, + "backgroundScanCompareErrorCount": 0, + "backgroundScanFixedCompareErrorCount": 0, + "bckRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "bckRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "capacityAvailableForVolumeAllocationInKb": 369098752, + "capacityInUseInKb": 2496512, + "capacityInUseNoOverheadInKb": 2496512, + "capacityLimitInKb": 845783040, + "compressedDataCompressionRatio": 0.0, + "compressionRatio": 1.0, + "currentFglMigrationSizeInKb": 0, + "deviceIds": [ + ], + "enterProtectedMaintenanceModeCapacityInKb": 0, + "enterProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "enterProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exposedCapacityInKb": 0, + "failedCapacityInKb": 0, + "fwdRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "fwdRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "inMaintenanceCapacityInKb": 0, + "inMaintenanceVacInKb": 0, + "inUseVacInKb": 184549376, + "inaccessibleCapacityInKb": 0, + "logWrittenBlocksInKb": 0, + "maxCapacityInKb": 845783040, + "migratingVolumeIds": [ + ], + "migratingVtreeIds": [ + ], + "movingCapacityInKb": 0, + "netCapacityInUseInKb": 1248256, + "normRebuildCapacityInKb": 0, + "normRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "normRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "numOfDeviceAtFaultRebuilds": 0, + "numOfDevices": 3, + "numOfIncomingVtreeMigrations": 0, + 
"numOfVolumes": 8, + "numOfVolumesInDeletion": 0, + "numOfVtrees": 8, + "overallUsageRatio": 73.92289, + "pendingBckRebuildCapacityInKb": 0, + "pendingEnterProtectedMaintenanceModeCapacityInKb": 0, + "pendingExitProtectedMaintenanceModeCapacityInKb": 0, + "pendingFwdRebuildCapacityInKb": 0, + "pendingMovingCapacityInKb": 0, + "pendingMovingInBckRebuildJobs": 0, + "persistentChecksumBuilderProgress": 100.0, + "persistentChecksumCapacityInKb": 414720, + "primaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryVacInKb": 92274688, + "primaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "protectedCapacityInKb": 2496512, + "protectedVacInKb": 184549376, + "provisionedAddressesInKb": 2496512, + "rebalanceCapacityInKb": 0, + "rebalanceReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rebalanceWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rfacheReadHit": 0, + "rfacheWriteHit": 0, + "rfcacheAvgReadTime": 0, + "rfcacheAvgWriteTime": 0, + "rfcacheIoErrors": 0, + "rfcacheIosOutstanding": 0, + "rfcacheIosSkipped": 0, + "rfcacheReadMiss": 0, + "rmPendingAllocatedInKb": 0, + "rmPendingThickInKb": 0, + "rplJournalCapAllowed": 0, + "rplTotalJournalCap": 0, + "rplUsedJournalCap": 0, + "secondaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryVacInKb": 92274688, + "secondaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "semiProtectedCapacityInKb": 0, + "semiProtectedVacInKb": 0, + 
"snapCapacityInUseInKb": 0, + "snapCapacityInUseOccupiedInKb": 0, + "snapshotCapacityInKb": 0, + "spSdsIds": [ + "abdfe71b00030001", + "abdce71d00040001", + "abdde71e00050001" + ], + "spareCapacityInKb": 84578304, + "targetOtherLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "tempCapacityInKb": 0, + "tempCapacityVacInKb": 0, + "thickCapacityInUseInKb": 0, + "thinAndSnapshotRatio": 73.92289, + "thinCapacityAllocatedInKm": 184549376, + "thinCapacityInUseInKb": 0, + "thinUserDataCapacityInKb": 2496512, + "totalFglMigrationSizeInKb": 0, + "totalReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "totalWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "trimmedUserDataCapacityInKb": 0, + "unreachableUnusedCapacityInKb": 0, + "unusedCapacityInKb": 758708224, + "userDataCapacityInKb": 2496512, + "userDataCapacityNoTrimInKb": 2496512, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volumeAddressSpaceInKb": 922XXXXX, + "volumeAllocationLimitInKb": 3707XXXXX, + "volumeIds": [ + "456afc7900XXXXXXXX" + ], + 
"vtreeAddresSpaceInKb": 92274688, + "vtreeIds": [ + "32b1681bXXXXXXXX", + ] + }, + "mediaType": "HDD", + "name": "pool1", + "numOfParallelRebuildRebalanceJobsPerDevice": 2, + "persistentChecksumBuilderLimitKb": 3072, + "persistentChecksumEnabled": true, + "persistentChecksumState": "Protected", + "persistentChecksumValidateOnRead": false, + "protectedMaintenanceModeIoPriorityAppBwPerDeviceThresholdInKbps": null, + "protectedMaintenanceModeIoPriorityAppIopsPerDeviceThreshold": null, + "protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps": 10240, + "protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice": 1, + "protectedMaintenanceModeIoPriorityPolicy": "limitNumOfConcurrentIos", + "protectedMaintenanceModeIoPriorityQuietPeriodInMsec": null, + "protectionDomainId": "9300c1f900000000", + "protectionDomainName": "domain1", + "rebalanceEnabled": true, + "rebalanceIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebalanceIoPriorityAppIopsPerDeviceThreshold": null, + "rebalanceIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebalanceIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebalanceIoPriorityPolicy": "favorAppIos", + "rebalanceIoPriorityQuietPeriodInMsec": null, + "rebuildEnabled": true, + "rebuildIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebuildIoPriorityAppIopsPerDeviceThreshold": null, + "rebuildIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebuildIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebuildIoPriorityPolicy": "limitNumOfConcurrentIos", + "rebuildIoPriorityQuietPeriodInMsec": null, + "replicationCapacityMaxRatio": 32, + "rmcacheWriteHandlingMode": "Cached", + "sparePercentage": 10, + "useRfcache": false, + "useRmcache": false, + "vtreeMigrationIoPriorityAppBwPerDeviceThresholdInKbps": null, + "vtreeMigrationIoPriorityAppIopsPerDeviceThreshold": null, + "vtreeMigrationIoPriorityBwLimitPerDeviceInKbps": 10240, + "vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice": 1, + "vtreeMigrationIoPriorityPolicy": "favorAppIos", + 
"vtreeMigrationIoPriorityQuietPeriodInMsec": null, + "zeroPaddingEnabled": true + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ + import utils + +LOG = utils.get_logger('storagepool') + + +class PowerFlexStoragePool(object): + """Class with StoragePool operations""" + + def __init__(self): + """ Define all parameters required by this module""" + + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_storagepool_parameters()) + + """ initialize the ansible module """ + mut_ex_args = [['storage_pool_name', 'storage_pool_id'], + ['protection_domain_name', 'protection_domain_id'], + ['storage_pool_id', 'protection_domain_name'], + ['storage_pool_id', 'protection_domain_id']] + + required_one_of_args = [['storage_pool_name', 'storage_pool_id']] + self.module = AnsibleModule(argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mut_ex_args, + required_one_of=required_one_of_args) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info('Got the PowerFlex system connection object instance') + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def get_protection_domain(self, protection_domain_name=None, + protection_domain_id=None): + """Get protection domain details + :param protection_domain_name: Name of the protection domain + :param protection_domain_id: ID of the protection domain + :return: Protection domain details + """ + name_or_id = protection_domain_id if protection_domain_id \ + else protection_domain_name + try: + filter_fields = {} + if protection_domain_id: + filter_fields = {'id': protection_domain_id} + if protection_domain_name: + filter_fields = {'name': protection_domain_name} + + pd_details = self.powerflex_conn.protection_domain.get( + 
filter_fields=filter_fields) + if pd_details: + return pd_details[0] + + if not pd_details: + err_msg = "Unable to find the protection domain with {0}. " \ + "Please enter a valid protection domain" \ + " name/id.".format(name_or_id) + self.module.fail_json(msg=err_msg) + + except Exception as e: + errormsg = "Failed to get the protection domain {0} with" \ + " error {1}".format(name_or_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_storage_pool(self, storage_pool_id=None, storage_pool_name=None, + pd_id=None): + """Get storage pool details + :param pd_id: ID of the protection domain + :param storage_pool_name: The name of the storage pool + :param storage_pool_id: The storage pool id + :return: Storage pool details + """ + name_or_id = storage_pool_id if storage_pool_id \ + else storage_pool_name + try: + filter_fields = {} + if storage_pool_id: + filter_fields = {'id': storage_pool_id} + if storage_pool_name: + filter_fields.update({'name': storage_pool_name}) + if pd_id: + filter_fields.update({'protectionDomainId': pd_id}) + pool_details = self.powerflex_conn.storage_pool.get( + filter_fields=filter_fields) + if pool_details: + if len(pool_details) > 1: + + err_msg = "More than one storage pool found with {0}," \ + " Please provide protection domain Name/Id" \ + " to fetch the unique" \ + " storage pool".format(storage_pool_name) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + elif len(pool_details) == 1: + pool_details = pool_details[0] + statistics = self.powerflex_conn.storage_pool.get_statistics(pool_details['id']) + pool_details['statistics'] = statistics if statistics else {} + pd_id = pool_details['protectionDomainId'] + pd_name = self.get_protection_domain( + protection_domain_id=pd_id)['name'] + # adding protection domain name in the pool details + pool_details['protectionDomainName'] = pd_name + else: + pool_details = None + + return pool_details + + except Exception as e: + errormsg = "Failed to get 
the storage pool {0} with error " \ + "{1}".format(name_or_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def create_storage_pool(self, pool_name, pd_id, media_type, + use_rfcache=None, use_rmcache=None): + """ + Create a storage pool + :param pool_name: Name of the storage pool + :param pd_id: ID of the storage pool + :param media_type: Type of storage device in the pool + :param use_rfcache: Enable/Disable RFcache on pool + :param use_rmcache: Enable/Disable RMcache on pool + :return: True, if the operation is successful + """ + try: + if media_type == "Transitional": + self.module.fail_json(msg="TRANSITIONAL media type is not" + " supported during creation." + " Please enter a valid media type") + + if pd_id is None: + self.module.fail_json( + msg="Please provide protection domain details for " + "creation of a storage pool") + self.powerflex_conn.storage_pool.create( + media_type=media_type, + protection_domain_id=pd_id, name=pool_name, + use_rfcache=use_rfcache, use_rmcache=use_rmcache) + + return True + except Exception as e: + errormsg = "Failed to create the storage pool {0} with error " \ + "{1}".format(pool_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_storage_pool(self, pool_id, modify_dict): + """ + Modify the parameters of the storage pool. + :param modify_dict: Dict containing parameters which are to be + modified + :param pool_id: Id of the pool. + :return: True, if the operation is successful. 
+ """ + + try: + + if 'new_name' in modify_dict: + self.powerflex_conn.storage_pool.rename( + pool_id, modify_dict['new_name']) + if 'use_rmcache' in modify_dict: + self.powerflex_conn.storage_pool.set_use_rmcache( + pool_id, modify_dict['use_rmcache']) + if 'use_rfcache' in modify_dict: + self.powerflex_conn.storage_pool.set_use_rfcache( + pool_id, modify_dict['use_rfcache']) + if 'media_type' in modify_dict: + self.powerflex_conn.storage_pool.set_media_type( + pool_id, modify_dict['media_type']) + return True + + except Exception as e: + err_msg = "Failed to update the storage pool {0} with error " \ + "{1}".format(pool_id, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def verify_params(self, pool_details, pd_name, pd_id): + """ + :param pool_details: Details of the storage pool + :param pd_name: Name of the protection domain + :param pd_id: Id of the protection domain + """ + if pd_id and pd_id != pool_details['protectionDomainId']: + self.module.fail_json(msg="Entered protection domain id does not" + " match with the storage pool's " + "protection domain id. Please enter " + "a correct protection domain id.") + + if pd_name and pd_name != pool_details['protectionDomainName']: + self.module.fail_json(msg="Entered protection domain name does" + " not match with the storage pool's " + "protection domain name. 
Please enter" + " a correct protection domain name.") + + def perform_module_operation(self): + """ Perform different actions on Storage Pool based on user input + in the playbook """ + + pool_name = self.module.params['storage_pool_name'] + pool_id = self.module.params['storage_pool_id'] + pool_new_name = self.module.params['storage_pool_new_name'] + state = self.module.params['state'] + pd_name = self.module.params['protection_domain_name'] + pd_id = self.module.params['protection_domain_id'] + use_rmcache = self.module.params['use_rmcache'] + use_rfcache = self.module.params['use_rfcache'] + media_type = self.module.params['media_type'] + if media_type == "TRANSITIONAL": + media_type = 'Transitional' + + result = dict( + storage_pool_details={} + ) + changed = False + pd_details = None + if pd_name or pd_id: + pd_details = self.get_protection_domain( + protection_domain_id=pd_id, + protection_domain_name=pd_name) + if pd_details: + pd_id = pd_details['id'] + + if pool_name is not None and (len(pool_name.strip()) == 0): + self.module.fail_json( + msg="Empty or white spaced string provided in " + "storage_pool_name. Please provide valid storage" + " pool name.") + + # Get the details of the storage pool. + pool_details = self.get_storage_pool(storage_pool_id=pool_id, + storage_pool_name=pool_name, + pd_id=pd_id) + if pool_name and pool_details: + pool_id = pool_details['id'] + self.verify_params(pool_details, pd_name, pd_id) + + # create a storage pool + if state == 'present' and not pool_details: + LOG.info("Creating new storage pool") + if pool_id: + self.module.fail_json( + msg="storage_pool_name is missing & name required to " + "create a storage pool. Please enter a valid " + "storage_pool_name.") + if pool_new_name is not None: + self.module.fail_json( + msg="storage_pool_new_name is passed during creation. 
" + "storage_pool_new_name is not allowed during " + "creation of a storage pool.") + changed = self.create_storage_pool( + pool_name, pd_id, media_type, use_rfcache, use_rmcache) + if changed: + pool_id = self.get_storage_pool(storage_pool_id=pool_id, + storage_pool_name=pool_name, + pd_id=pd_id)['id'] + + # modify the storage pool parameters + if state == 'present' and pool_details: + # check if the parameters are to be updated or not + if pool_new_name is not None and len(pool_new_name.strip()) == 0: + self.module.fail_json( + msg="Empty/White spaced name is not allowed during " + "renaming of a storage pool. Please enter a valid " + "storage pool new name.") + modify_dict = to_modify(pool_details, use_rmcache, use_rfcache, + pool_new_name, media_type) + if bool(modify_dict): + LOG.info("Modify attributes of storage pool") + changed = self.modify_storage_pool(pool_id, modify_dict) + + # Delete a storage pool + if state == 'absent' and pool_details: + msg = "Deleting storage pool is not supported through" \ + " ansible module." + LOG.error(msg) + self.module.fail_json(msg=msg) + + # Show the updated storage pool details + if state == 'present': + pool_details = self.get_storage_pool(storage_pool_id=pool_id) + # fetching Id from pool details to address a case where + # protection domain is not passed + pd_id = pool_details['protectionDomainId'] + pd_name = self.get_protection_domain( + protection_domain_id=pd_id)['name'] + # adding protection domain name in the pool details + pool_details['protectionDomainName'] = pd_name + result['storage_pool_details'] = pool_details + result['changed'] = changed + + self.module.exit_json(**result) + + +def to_modify(pool_details, use_rmcache, use_rfcache, new_name, media_type): + """ + Check whether a parameter is required to be updated. + + :param media_type: Type of the media supported by the pool. 
+ :param pool_details: Details of the storage pool + :param use_rmcache: Enable/Disable RMcache on pool + :param use_rfcache: Enable/Disable RFcache on pool + :param new_name: New name for the storage pool + :return: dict, containing parameters to be modified + """ + pool_name = pool_details['name'] + pool_use_rfcache = pool_details['useRfcache'] + pool_use_rmcache = pool_details['useRmcache'] + pool_media_type = pool_details['mediaType'] + modify_params = {} + + if new_name is not None and pool_name != new_name: + modify_params['new_name'] = new_name + if use_rfcache is not None and pool_use_rfcache != use_rfcache: + modify_params['use_rfcache'] = use_rfcache + if use_rmcache is not None and pool_use_rmcache != use_rmcache: + modify_params['use_rmcache'] = use_rmcache + if media_type is not None and media_type != pool_media_type: + modify_params['media_type'] = media_type + return modify_params + + +def get_powerflex_storagepool_parameters(): + """This method provides parameters required for the ansible + Storage Pool module on powerflex""" + return dict( + storage_pool_name=dict(required=False, type='str'), + storage_pool_id=dict(required=False, type='str'), + protection_domain_name=dict(required=False, type='str'), + protection_domain_id=dict(required=False, type='str'), + media_type=dict(required=False, type='str', + choices=['HDD', 'SSD', 'TRANSITIONAL']), + use_rfcache=dict(required=False, type='bool'), + use_rmcache=dict(required=False, type='bool'), + storage_pool_new_name=dict(required=False, type='str'), + state=dict(required=True, type='str', choices=['present', 'absent'])) + + +def main(): + """ Create PowerFlex Storage Pool object and perform action on it + based on user input from playbook""" + obj = PowerFlexStoragePool() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/volume.py b/ansible_collections/dellemc/powerflex/plugins/modules/volume.py new file mode 
100644 index 00000000..9c1e1cd2 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/volume.py @@ -0,0 +1,1599 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing volumes on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: volume +version_added: '1.0.0' +short_description: Manage volumes on Dell PowerFlex +description: +- Managing volumes on PowerFlex storage system includes + creating, getting details, modifying attributes and deleting volume. +- It also includes adding/removing snapshot policy, + mapping/unmapping volume to/from SDC and listing + associated snapshots. +author: +- P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com> +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + vol_name: + description: + - The name of the volume. + - Mandatory for create operation. + - It is unique across the PowerFlex array. + - Mutually exclusive with I(vol_id). + type: str + vol_id: + description: + - The ID of the volume. + - Except create operation, all other operations can be performed + using I(vol_id). + - Mutually exclusive with I(vol_name). + type: str + storage_pool_name: + description: + - The name of the storage pool. + - Either name or the id of the storage pool is required for creating a + volume. + - During creation, if storage pool name is provided then either + protection domain name or id must be mentioned along with it. + - Mutually exclusive with I(storage_pool_id). + type: str + storage_pool_id: + description: + - The ID of the storage pool. + - Either name or the id of the storage pool is required for creating + a volume. + - Mutually exclusive with I(storage_pool_name). 
+ type: str + protection_domain_name: + description: + - The name of the protection domain. + - During creation of a volume, if more than one storage pool exists with + the same name then either protection domain name or id must be + mentioned along with it. + - Mutually exclusive with I(protection_domain_id). + type: str + protection_domain_id: + description: + - The ID of the protection domain. + - During creation of a volume, if more than one storage pool exists with + the same name then either protection domain name or id must be + mentioned along with it. + - Mutually exclusive with I(protection_domain_name). + type: str + vol_type: + description: + - Type of volume provisioning. + choices: ["THICK_PROVISIONED", "THIN_PROVISIONED"] + type: str + compression_type: + description: + - Type of the compression method. + choices: ["NORMAL", "NONE"] + type: str + use_rmcache: + description: + - Whether to use RM Cache or not. + type: bool + snapshot_policy_name: + description: + - Name of the snapshot policy. + - To remove/detach snapshot policy, empty + I(snapshot_policy_id)/I(snapshot_policy_name) is to be passed along with + I(auto_snap_remove_type). + type: str + snapshot_policy_id: + description: + - ID of the snapshot policy. + - To remove/detach snapshot policy, empty + I(snapshot_policy_id)/I(snapshot_policy_name) is to be passed along with + I(auto_snap_remove_type). + type: str + auto_snap_remove_type: + description: + - Whether to remove or detach the snapshot policy. + - To remove/detach snapshot policy, empty + I(snapshot_policy_id)/I(snapshot_policy_name) is to be passed along with + I(auto_snap_remove_type). + - If the snapshot policy name/id is passed empty then + I(auto_snap_remove_type) is defaulted to C(detach). + choices: ['remove', 'detach'] + type: str + size: + description: + - The size of the volume. + - Size of the volume will be assigned as higher multiple of 8 GB. + type: int + cap_unit: + description: + - The unit of the volume size. 
It defaults to 'GB'. + choices: ['GB' , 'TB'] + type: str + vol_new_name: + description: + - New name of the volume. Used to rename the volume. + type: str + allow_multiple_mappings: + description: + - Specifies whether to allow or not allow multiple mappings. + - If the volume is mapped to one SDC then for every new mapping + I(allow_multiple_mappings) has to be passed as True. + type: bool + sdc: + description: + - Specifies SDC parameters. + type: list + elements: dict + suboptions: + sdc_name: + description: + - Name of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_id) and I(sdc_ip). + type: str + sdc_id: + description: + - ID of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_name) and I(sdc_ip). + type: str + sdc_ip: + description: + - IP of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_id) and I(sdc_ip). + type: str + access_mode: + description: + - Define the access mode for all mappings of the volume. + choices: ['READ_WRITE', 'READ_ONLY', 'NO_ACCESS'] + type: str + bandwidth_limit: + description: + - Limit of volume network bandwidth. + - Need to mention in multiple of 1024 Kbps. + - To set no limit, 0 is to be passed. + type: int + iops_limit: + description: + - Limit of volume IOPS. + - Minimum IOPS limit is 11 and specify 0 for unlimited iops. + type: int + sdc_state: + description: + - Mapping state of the SDC. + choices: ['mapped', 'unmapped'] + type: str + delete_snapshots: + description: + - If C(True), the volume and all its dependent snapshots will be deleted. + - If C(False), only the volume will be deleted. + - It can be specified only when the I(state) is C(absent). + - It defaults to C(False), if not specified. + type: bool + state: + description: + - State of the volume. + choices: ['present', 'absent'] + required: true + type: str +notes: + - The I(check_mode) is not supported. 
+''' + +EXAMPLES = r''' +- name: Create a volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + storage_pool_name: "pool_1" + protection_domain_name: "pd_1" + vol_type: "THICK_PROVISIONED" + compression_type: "NORMAL" + use_rmcache: True + size: 16 + state: "present" + +- name: Map a SDC to volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + allow_multiple_mappings: True + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + access_mode: "READ_WRITE" + sdc_state: "mapped" + state: "present" + +- name: Unmap a SDC to volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + sdc_state: "unmapped" + state: "present" + +- name: Map multiple SDCs to a volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + protection_domain_name: "pd_1" + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + access_mode: "READ_WRITE" + bandwidth_limit: 2048 + iops_limit: 20 + - sdc_ip: "198.10.xxx.xxx" + access_mode: "READ_ONLY" + allow_multiple_mappings: True + sdc_state: "mapped" + state: "present" + +- name: Get the details of the volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_id: "fe6c8b7100000005" + state: "present" + +- name: Modify the details of the Volume + dellemc.powerflex.volume: + hostname: 
"{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + storage_pool_name: "pool_1" + new_vol_name: "new_sample_volume" + size: 64 + state: "present" + +- name: Delete the Volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + delete_snapshots: False + state: "absent" + +- name: Delete the Volume and all its dependent snapshots + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + delete_snapshots: True + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +volume_details: + description: Details of the volume. + returned: When volume exists + type: dict + contains: + id: + description: The ID of the volume. + type: str + mappedSdcInfo: + description: The details of the mapped SDC. + type: dict + contains: + sdcId: + description: ID of the SDC. + type: str + sdcName: + description: Name of the SDC. + type: str + sdcIp: + description: IP of the SDC. + type: str + accessMode: + description: Mapping access mode for the specified volume. + type: str + limitIops: + description: IOPS limit for the SDC. + type: int + limitBwInMbps: + description: Bandwidth limit for the SDC. + type: int + name: + description: Name of the volume. + type: str + sizeInKb: + description: Size of the volume in Kb. + type: int + sizeInGb: + description: Size of the volume in Gb. + type: int + storagePoolId: + description: ID of the storage pool in which volume resides. + type: str + storagePoolName: + description: Name of the storage pool in which volume resides. 
+ type: str + protectionDomainId: + description: ID of the protection domain in which volume resides. + type: str + protectionDomainName: + description: Name of the protection domain in which volume resides. + type: str + snapshotPolicyId: + description: ID of the snapshot policy associated with volume. + type: str + snapshotPolicyName: + description: Name of the snapshot policy associated with volume. + type: str + snapshotsList: + description: List of snapshots associated with the volume. + type: str + "statistics": + description: Statistics details of the storage pool. + type: dict + contains: + "numOfChildVolumes": + description: Number of child volumes. + type: int + "numOfMappedSdcs": + description: Number of mapped Sdcs of the volume. + type: int + sample: { + "accessModeLimit": "ReadWrite", + "ancestorVolumeId": null, + "autoSnapshotGroupId": null, + "compressionMethod": "Invalid", + "consistencyGroupId": null, + "creationTime": 1631618520, + "dataLayout": "MediumGranularity", + "id": "cdd883cf00000002", + "links": [ + { + "href": "/api/instances/Volume::cdd883cf00000002", + "rel": "self" + }, + { + "href": "/api/instances/Volume::cdd883cf00000002/relationships + /Statistics", + "rel": "/api/Volume/relationship/Statistics" + }, + { + "href": "/api/instances/VTree::6e86255c00000001", + "rel": "/api/parent/relationship/vtreeId" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "lockedAutoSnapshot": false, + "lockedAutoSnapshotMarkedForRemoval": false, + "managedBy": "ScaleIO", + "mappedSdcInfo": null, + "name": "ansible-volume-1", + "notGenuineSnapshot": false, + "originalExpiryTime": 0, + "pairIds": null, + "protectionDomainId": "9300c1f900000000", + "protectionDomainName": "domain1", + "replicationJournalVolume": false, + "replicationTimeStamp": 0, + "retentionLevels": [], + "secureSnapshotExpTime": 0, + "sizeInGB": 16, + "sizeInKb": 16777216, + "snapshotPolicyId": null, + 
"snapshotPolicyName": null, + "snapshotsList": [ + { + "accessModeLimit": "ReadOnly", + "ancestorVolumeId": "cdd883cf00000002", + "autoSnapshotGroupId": null, + "compressionMethod": "Invalid", + "consistencyGroupId": "22f1e80c00000001", + "creationTime": 1631619229, + "dataLayout": "MediumGranularity", + "id": "cdd883d000000004", + "links": [ + { + "href": "/api/instances/Volume::cdd883d000000004", + "rel": "self" + }, + { + "href": "/api/instances/Volume::cdd883d000000004 + /relationships/Statistics", + "rel": "/api/Volume/relationship/Statistics" + }, + { + "href": "/api/instances/Volume::cdd883cf00000002", + "rel": "/api/parent/relationship/ancestorVolumeId" + }, + { + "href": "/api/instances/VTree::6e86255c00000001", + "rel": "/api/parent/relationship/vtreeId" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "lockedAutoSnapshot": false, + "lockedAutoSnapshotMarkedForRemoval": false, + "managedBy": "ScaleIO", + "mappedSdcInfo": null, + "name": "ansible_vol_snap_1", + "notGenuineSnapshot": false, + "originalExpiryTime": 0, + "pairIds": null, + "replicationJournalVolume": false, + "replicationTimeStamp": 0, + "retentionLevels": [], + "secureSnapshotExpTime": 0, + "sizeInKb": 16777216, + "snplIdOfAutoSnapshot": null, + "snplIdOfSourceVolume": null, + "storagePoolId": "e0d8f6c900000000", + "timeStampIsAccurate": false, + "useRmcache": false, + "volumeReplicationState": "UnmarkedForReplication", + "volumeType": "Snapshot", + "vtreeId": "6e86255c00000001" + } + ], + "statistics": { + "childVolumeIds": [ + ], + "descendantVolumeIds": [ + ], + "initiatorSdcId": null, + "mappedSdcIds": [ + "c42425XXXXXX" + ], + "numOfChildVolumes": 0, + "numOfDescendantVolumes": 0, + "numOfMappedSdcs": 1, + "registrationKey": null, + "registrationKeys": [ + ], + "replicationJournalVolume": false, + "replicationState": "UnmarkedForReplication", + "reservationType": "NotReserved", + "rplTotalJournalCap": 0, + 
"rplUsedJournalCap": 0, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + } + }, + "snplIdOfAutoSnapshot": null, + "snplIdOfSourceVolume": null, + "storagePoolId": "e0d8f6c900000000", + "storagePoolName": "pool1", + "timeStampIsAccurate": false, + "useRmcache": false, + "volumeReplicationState": "UnmarkedForReplication", + "volumeType": "ThinProvisioned", + "vtreeId": "6e86255c00000001" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ + import utils +import copy + +LOG = utils.get_logger('volume') + + +class PowerFlexVolume(object): + """Class with volume operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_volume_parameters()) + + mut_ex_args = [['vol_name', 'vol_id'], + ['storage_pool_name', 'storage_pool_id'], + ['protection_domain_name', 'protection_domain_id'], + ['snapshot_policy_name', 'snapshot_policy_id'], + ['vol_id', 'storage_pool_name'], + ['vol_id', 'storage_pool_id'], + ['vol_id', 'protection_domain_name'], + ['vol_id', 'protection_domain_id']] + + required_together_args = [['sdc', 'sdc_state']] + + required_one_of_args = [['vol_name', 'vol_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mut_ex_args, + 
required_together=required_together_args, + required_one_of=required_one_of_args) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def get_protection_domain(self, protection_domain_name=None, + protection_domain_id=None): + """Get protection domain details + :param protection_domain_name: Name of the protection domain + :param protection_domain_id: ID of the protection domain + :return: Protection domain details + """ + name_or_id = protection_domain_id if protection_domain_id \ + else protection_domain_name + try: + pd_details = None + if protection_domain_id: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'id': protection_domain_id}) + + if protection_domain_name: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'name': protection_domain_name}) + + if not pd_details: + err_msg = "Unable to find the protection domain with {0}. 
" \ + "Please enter a valid protection domain" \ + " name/id.".format(name_or_id) + self.module.fail_json(msg=err_msg) + + return pd_details[0] + + except Exception as e: + errormsg = "Failed to get the protection domain {0} with" \ + " error {1}".format(name_or_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_snapshot_policy(self, snap_pol_id=None, snap_pol_name=None): + """Get snapshot policy details + :param snap_pol_name: Name of the snapshot policy + :param snap_pol_id: ID of the snapshot policy + :return: snapshot policy details + """ + name_or_id = snap_pol_id if snap_pol_id else snap_pol_name + try: + snap_pol_details = None + if snap_pol_id: + snap_pol_details = self.powerflex_conn.snapshot_policy.get( + filter_fields={'id': snap_pol_id}) + + if snap_pol_name: + snap_pol_details = self.powerflex_conn.snapshot_policy.get( + filter_fields={'name': snap_pol_name}) + + if not snap_pol_details: + err_msg = "Unable to find the snapshot policy with {0}. 
" \ + "Please enter a valid snapshot policy" \ + " name/id.".format(name_or_id) + self.module.fail_json(msg=err_msg) + + return snap_pol_details[0] + + except Exception as e: + errormsg = "Failed to get the snapshot policy {0} with" \ + " error {1}".format(name_or_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_storage_pool(self, storage_pool_id=None, storage_pool_name=None, + protection_domain_id=None): + """Get storage pool details + :param protection_domain_id: ID of the protection domain + :param storage_pool_name: The name of the storage pool + :param storage_pool_id: The storage pool id + :return: Storage pool details + """ + name_or_id = storage_pool_id if storage_pool_id \ + else storage_pool_name + try: + sp_details = None + if storage_pool_id: + sp_details = self.powerflex_conn.storage_pool.get( + filter_fields={'id': storage_pool_id}) + + if storage_pool_name: + sp_details = self.powerflex_conn.storage_pool.get( + filter_fields={'name': storage_pool_name}) + + if len(sp_details) > 1 and protection_domain_id is None: + err_msg = "More than one storage pool found with {0}," \ + " Please provide protection domain Name/Id" \ + " to fetch the unique" \ + " pool".format(storage_pool_name) + self.module.fail_json(msg=err_msg) + + if len(sp_details) > 1 and protection_domain_id: + sp_details = self.powerflex_conn.storage_pool.get( + filter_fields={'name': storage_pool_name, + 'protectionDomainId': + protection_domain_id}) + if not sp_details: + err_msg = "Unable to find the storage pool with {0}. 
" \ + "Please enter a valid pool " \ + "name/id.".format(name_or_id) + self.module.fail_json(msg=err_msg) + return sp_details[0] + + except Exception as e: + errormsg = "Failed to get the storage pool {0} with error " \ + "{1}".format(name_or_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_volume(self, vol_name=None, vol_id=None): + """Get volume details + :param vol_name: Name of the volume + :param vol_id: ID of the volume + :return: Details of volume if exist. + """ + + id_or_name = vol_id if vol_id else vol_name + + try: + if vol_name: + volume_details = self.powerflex_conn.volume.get( + filter_fields={'name': vol_name}) + else: + volume_details = self.powerflex_conn.volume.get( + filter_fields={'id': vol_id}) + + if len(volume_details) == 0: + msg = "Volume with identifier {0} not found".format( + id_or_name) + LOG.info(msg) + return None + + # Append size in GB in the volume details + if 'sizeInKb' in volume_details[0] and \ + volume_details[0]['sizeInKb']: + volume_details[0]['sizeInGB'] = utils.get_size_in_gb( + volume_details[0]['sizeInKb'], 'KB') + + # Append storage pool name and id. 
+ sp = None + pd_id = None + if 'storagePoolId' in volume_details[0] and \ + volume_details[0]['storagePoolId']: + sp = \ + self.get_storage_pool(volume_details[0]['storagePoolId']) + if len(sp) > 0: + volume_details[0]['storagePoolName'] = sp['name'] + pd_id = sp['protectionDomainId'] + + # Append protection domain name and id + if sp and 'protectionDomainId' in sp and \ + sp['protectionDomainId']: + pd = self.get_protection_domain(protection_domain_id=pd_id) + volume_details[0]['protectionDomainId'] = pd_id + volume_details[0]['protectionDomainName'] = pd['name'] + + # Append snapshot policy name and id + if volume_details[0]['snplIdOfSourceVolume'] is not None: + snap_policy_id = volume_details[0]['snplIdOfSourceVolume'] + volume_details[0]['snapshotPolicyId'] = snap_policy_id + volume_details[0]['snapshotPolicyName'] = \ + self.get_snapshot_policy(snap_policy_id)['name'] + + return volume_details[0] + + except Exception as e: + error_msg = "Failed to get the volume {0} with error {1}" + error_msg = error_msg.format(id_or_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_sdc_id(self, sdc_name=None, sdc_ip=None, sdc_id=None): + """Get the SDC ID + :param sdc_name: The name of the SDC + :param sdc_ip: The IP of the SDC + :param sdc_id: The ID of the SDC + :return: The ID of the SDC + """ + + if sdc_name: + id_ip_name = sdc_name + elif sdc_ip: + id_ip_name = sdc_ip + else: + id_ip_name = sdc_id + + try: + if sdc_name: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'name': sdc_name}) + elif sdc_ip: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'sdcIp': sdc_ip}) + else: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'id': sdc_id}) + + if len(sdc_details) == 0: + error_msg = "Unable to find SDC with identifier {0}".format( + id_ip_name) + self.module.fail_json(msg=error_msg) + return sdc_details[0]['id'] + except Exception as e: + errormsg = "Failed to get the SDC {0} with error " \ + 
"{1}".format(id_ip_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def create_volume(self, vol_name, pool_id, size, vol_type=None, + use_rmcache=None, comp_type=None): + """Create volume + :param use_rmcache: Boolean indicating whether to use RM cache. + :param comp_type: Type of compression method for the volume. + :param vol_type: Type of volume. + :param size: Size of the volume. + :param pool_id: Id of the storage pool. + :param vol_name: The name of the volume. + :return: Boolean indicating if create operation is successful + """ + try: + if vol_name is None or len(vol_name.strip()) == 0: + self.module.fail_json(msg="Please provide valid volume name.") + + if not size: + self.module.fail_json(msg="Size is a mandatory parameter " + "for creating a volume. Please " + "enter a valid size") + pool_data_layout = None + if pool_id: + pool_details = self.get_storage_pool(storage_pool_id=pool_id) + pool_data_layout = pool_details['dataLayout'] + if comp_type and pool_data_layout and \ + pool_data_layout != "FineGranularity": + err_msg = "compression_type for volume can only be " \ + "mentioned when storage pools have Fine " \ + "Granularity layout. Storage Pool found" \ + " with {0}".format(pool_data_layout) + self.module.fail_json(msg=err_msg) + + # Basic volume created. 
+ self.powerflex_conn.volume.create( + storage_pool_id=pool_id, size_in_gb=size, name=vol_name, + volume_type=vol_type, use_rmcache=use_rmcache, + compression_method=comp_type) + return True + + except Exception as e: + errormsg = "Create volume {0} operation failed with " \ + "error {1}".format(vol_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_access_mode(self, vol_id, access_mode_list): + """Modify access mode of SDCs mapped to volume + :param vol_id: The volume id + :param access_mode_list: List containing SDC ID's + whose access mode is to modified + :return: Boolean indicating if modifying access + mode is successful + """ + + try: + changed = False + for temp in access_mode_list: + if temp['accessMode']: + self.powerflex_conn.volume.set_access_mode_for_sdc( + volume_id=vol_id, sdc_id=temp['sdc_id'], + access_mode=temp['accessMode']) + changed = True + return changed + except Exception as e: + errormsg = "Modify access mode of SDC operation failed " \ + "with error {0}".format(str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_limits(self, payload): + """Modify IOPS and bandwidth limits of SDC's mapped to volume + :param payload: Dict containing SDC ID's whose bandwidth and + IOPS is to modified + :return: Boolean indicating if modifying limits is successful + """ + + try: + changed = False + if payload['bandwidth_limit'] is not None or \ + payload['iops_limit'] is not None: + self.powerflex_conn.volume.set_mapped_sdc_limits(**payload) + changed = True + return changed + except Exception as e: + errormsg = "Modify bandwidth/iops limits of SDC %s operation " \ + "failed with error %s" % (payload['sdc_id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def delete_volume(self, vol_id, remove_mode): + """Delete volume + :param vol_id: The volume id + :param remove_mode: Removal mode for the volume + :return: Boolean indicating if delete operation is successful + 
""" + + try: + self.powerflex_conn.volume.delete(vol_id, remove_mode) + return True + except Exception as e: + errormsg = "Delete volume {0} operation failed with " \ + "error {1}".format(vol_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def unmap_volume_from_sdc(self, volume, sdc): + """Unmap SDC's from volume + :param volume: volume details + :param sdc: List of SDCs to be unmapped + :return: Boolean indicating if unmap operation is successful + """ + + current_sdcs = volume['mappedSdcInfo'] + current_sdc_ids = [] + sdc_id_list = [] + sdc_id = None + if current_sdcs: + for temp in current_sdcs: + current_sdc_ids.append(temp['sdcId']) + + for temp in sdc: + if 'sdc_name' in temp and temp['sdc_name']: + sdc_id = self.get_sdc_id(sdc_name=temp['sdc_name']) + elif 'sdc_ip' in temp and temp['sdc_ip']: + sdc_id = self.get_sdc_id(sdc_ip=temp['sdc_ip']) + else: + sdc_id = self.get_sdc_id(sdc_id=temp['sdc_id']) + if sdc_id in current_sdc_ids: + sdc_id_list.append(sdc_id) + + LOG.info("SDC IDs to remove %s", sdc_id_list) + + if len(sdc_id_list) == 0: + return False + + try: + for sdc_id in sdc_id_list: + self.powerflex_conn.volume.remove_mapped_sdc( + volume['id'], sdc_id) + return True + except Exception as e: + errormsg = "Unmap SDC {0} from volume {1} failed with error " \ + "{2}".format(sdc_id, volume['id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def map_volume_to_sdc(self, volume, sdc): + """Map SDC's to volume + :param volume: volume details + :param sdc: List of SDCs + :return: Boolean indicating if mapping operation is successful + """ + + current_sdcs = volume['mappedSdcInfo'] + current_sdc_ids = [] + sdc_id_list = [] + sdc_map_list = [] + sdc_modify_list1 = [] + sdc_modify_list2 = [] + + if current_sdcs: + for temp in current_sdcs: + current_sdc_ids.append(temp['sdcId']) + + for temp in sdc: + if 'sdc_name' in temp and temp['sdc_name']: + sdc_id = self.get_sdc_id(sdc_name=temp['sdc_name']) + elif 
    def map_volume_to_sdc(self, volume, sdc):
        """Map SDC's to volume.

        Requested SDCs not yet mapped are queued for mapping; SDCs that
        are already mapped are diffed against the request and queued as
        access-mode / limit modifications instead.

        :param volume: volume details
        :param sdc: List of SDCs
        :return: Boolean indicating if mapping operation is successful,
                 plus the lists of pending access-mode and limit changes
        """

        current_sdcs = volume['mappedSdcInfo']
        current_sdc_ids = []
        sdc_id_list = []
        sdc_map_list = []
        # list1: access-mode changes, list2: bandwidth/IOPS limit changes
        sdc_modify_list1 = []
        sdc_modify_list2 = []

        if current_sdcs:
            for temp in current_sdcs:
                current_sdc_ids.append(temp['sdcId'])

        for temp in sdc:
            # Resolve the requested SDC by name, IP or ID.
            if 'sdc_name' in temp and temp['sdc_name']:
                sdc_id = self.get_sdc_id(sdc_name=temp['sdc_name'])
            elif 'sdc_ip' in temp and temp['sdc_ip']:
                sdc_id = self.get_sdc_id(sdc_ip=temp['sdc_ip'])
            else:
                sdc_id = self.get_sdc_id(sdc_id=temp['sdc_id'])
            if sdc_id not in current_sdc_ids:
                # NOTE: the request dict 'temp' is mutated in place —
                # resolved id, translated access mode, and defaulted
                # limits — before being queued for mapping.
                sdc_id_list.append(sdc_id)
                temp['sdc_id'] = sdc_id
                if 'access_mode' in temp:
                    temp['access_mode'] = \
                        get_access_mode(temp['access_mode'])
                if 'bandwidth_limit' not in temp:
                    temp['bandwidth_limit'] = None
                if 'iops_limit' not in temp:
                    temp['iops_limit'] = None
                sdc_map_list.append(temp)
            else:
                # Already mapped: compute what (if anything) differs.
                access_mode_dict, limits_dict = check_for_sdc_modification(
                    volume, sdc_id, temp)
                if access_mode_dict:
                    sdc_modify_list1.append(access_mode_dict)
                if limits_dict:
                    sdc_modify_list2.append(limits_dict)

        LOG.info("SDC to add: %s", sdc_map_list)

        if not sdc_map_list:
            return False, sdc_modify_list1, sdc_modify_list2

        try:
            changed = False
            # Note: the loop variable shadows the 'sdc' parameter; the
            # failing entry's id is reused in the error message below.
            for sdc in sdc_map_list:
                payload = {
                    "volume_id": volume['id'],
                    "sdc_id": sdc['sdc_id'],
                    "access_mode": sdc['access_mode'],
                    "allow_multiple_mappings":
                        self.module.params['allow_multiple_mappings']
                }
                self.powerflex_conn.volume.add_mapped_sdc(**payload)

                # Apply limits only when at least one was requested.
                if sdc['bandwidth_limit'] or sdc['iops_limit']:
                    payload = {
                        "volume_id": volume['id'],
                        "sdc_id": sdc['sdc_id'],
                        "bandwidth_limit": sdc['bandwidth_limit'],
                        "iops_limit": sdc['iops_limit']
                    }

                    self.powerflex_conn.volume.set_mapped_sdc_limits(**payload)
                changed = True
            return changed, sdc_modify_list1, sdc_modify_list2
        except Exception as e:
            errormsg = "Mapping volume {0} to SDC {1} " \
                       "failed with error {2}".format(volume['name'],
                                                      sdc['sdc_id'], str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

    def validate_parameters(self, auto_snap_remove_type, snap_pol_id,
                            snap_pol_name, delete_snaps, state):
        """Validate the input parameters.

        Fails the module on mutually-exclusive SDC identifiers, a
        cap_unit without a size, an auto_snap_remove_type without a
        snapshot policy, or delete_snapshots with state=present.
        """

        sdc = self.module.params['sdc']
        cap_unit = self.module.params['cap_unit']
        size = self.module.params['size']

        if sdc:
            for temp in sdc:
                # At most one of id/ip/name may be set per SDC entry.
                if (all([temp['sdc_id'], temp['sdc_ip']]) or
                        all([temp['sdc_id'], temp['sdc_name']]) or
                        all([temp['sdc_ip'], temp['sdc_name']])):
                    self.module.fail_json(msg="sdc_id, sdc_ip and sdc_name "
                                              "are mutually exclusive")

        if (cap_unit is not None) and not size:
            self.module.fail_json(msg="cap_unit can be specified along "
                                      "with size only. Please enter a valid"
                                      " value for size")

        # Detaching/removing a policy requires an (empty) policy name/id
        # so the caller's intent is unambiguous.
        if auto_snap_remove_type and snap_pol_name is None \
                and snap_pol_id is None:
            err_msg = "To remove/detach snapshot policy, please provide" \
                      " empty snapshot policy name/id along with " \
                      "auto_snap_remove_type parameter"
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)

        if state == "present" and delete_snaps is not None:
            self.module.fail_json(
                msg="delete_snapshots can be specified only when the state"
                    " is passed as absent.")
auto_snap_remove_type: {0} and snapshot " \ + "policy id: " \ + "{1}".format(snap_type, modify_dict['snap_pol_id']) + LOG.info(msg) + self.powerflex_conn.snapshot_policy.remove_source_volume( + modify_dict['snap_pol_id'], vol_id, snap_type) + msg = "The snapshot policy has been {0}ed " \ + "successfully".format(snap_type) + LOG.info(msg) + + if 'auto_snap_remove_type' not in modify_dict\ + and 'snap_pol_id' in modify_dict: + self.powerflex_conn.snapshot_policy.add_source_volume( + modify_dict['snap_pol_id'], vol_id) + msg = "Attached the snapshot policy {0} to volume" \ + " successfully.".format(modify_dict['snap_pol_id']) + LOG.info(msg) + + if 'new_name' in modify_dict: + self.powerflex_conn.volume.rename(vol_id, + modify_dict['new_name']) + msg = "The name of the volume is updated" \ + " to {0} sucessfully.".format(modify_dict['new_name']) + LOG.info(msg) + + if 'new_size' in modify_dict: + self.powerflex_conn.volume.extend(vol_id, + modify_dict['new_size']) + msg = "The size of the volume is extended to {0} " \ + "sucessfully.".format(str(modify_dict['new_size'])) + LOG.info(msg) + + if 'use_rmcache' in modify_dict: + self.powerflex_conn.volume.set_use_rmcache( + vol_id, modify_dict['use_rmcache']) + msg = "The use RMcache is updated to {0}" \ + " sucessfully.".format(modify_dict['use_rmcache']) + LOG.info(msg) + + if 'comp_type' in modify_dict: + self.powerflex_conn.volume.set_compression_method( + vol_id, modify_dict['comp_type']) + msg = "The compression method is updated to {0}" \ + " successfully.".format(modify_dict['comp_type']) + LOG.info(msg) + return True + + except Exception as e: + err_msg = "Failed to update the volume {0}" \ + " with error {1}".format(vol_id, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def to_modify(self, vol_details, new_size, use_rmcache, comp_type, + new_name, snap_pol_id, + auto_snap_remove_type): + """ + + :param vol_details: Details of the volume + :param new_size: Size of the volume + :param 
use_rmcache: Bool value of use rm cache + :param comp_type: Type of compression method + :param new_name: The new name of the volume + :param snap_pol_id: Id of the snapshot policy + :param auto_snap_remove_type: Whether to remove or detach the policy + :return: Dictionary containing the attributes of + volume which are to be updated + """ + modify_dict = {} + + if comp_type: + pool_id = vol_details['storagePoolId'] + pool_details = self.get_storage_pool(storage_pool_id=pool_id) + pool_data_layout = pool_details['dataLayout'] + if pool_data_layout != "FineGranularity": + err_msg = "compression_type for volume can only be " \ + "mentioned when storage pools have Fine " \ + "Granularity layout. Storage Pool found" \ + " with {0}".format(pool_data_layout) + self.module.fail_json(msg=err_msg) + + if comp_type != vol_details['compressionMethod']: + modify_dict['comp_type'] = comp_type + + if use_rmcache is not None and \ + vol_details['useRmcache'] != use_rmcache: + modify_dict['use_rmcache'] = use_rmcache + + vol_size_in_gb = utils.get_size_in_gb(vol_details['sizeInKb'], 'KB') + + if new_size is not None and \ + not ((vol_size_in_gb - 8) < new_size <= vol_size_in_gb): + modify_dict['new_size'] = new_size + + if new_name is not None: + if new_name is None or len(new_name.strip()) == 0: + self.module.fail_json(msg="Please provide valid volume " + "name.") + if new_name != vol_details['name']: + modify_dict['new_name'] = new_name + + if snap_pol_id is not None and snap_pol_id == "" and \ + auto_snap_remove_type and vol_details['snplIdOfSourceVolume']: + modify_dict['auto_snap_remove_type'] = auto_snap_remove_type + modify_dict['snap_pol_id'] = \ + vol_details['snplIdOfSourceVolume'] + + if snap_pol_id is not None and snap_pol_id != "": + if auto_snap_remove_type and vol_details['snplIdOfSourceVolume']: + err_msg = "To remove/detach a snapshot policy, provide the" \ + " snapshot policy name/id as empty string" + self.module.fail_json(msg=err_msg) + if auto_snap_remove_type 
is None and \ + vol_details['snplIdOfSourceVolume'] is None: + modify_dict['snap_pol_id'] = snap_pol_id + + return modify_dict + + def verify_params(self, vol_details, snap_pol_name, snap_pol_id, pd_name, + pd_id, pool_name, pool_id): + """ + :param vol_details: Details of the volume + :param snap_pol_name: Name of the snapshot policy + :param snap_pol_id: Id of the snapshot policy + :param pd_name: Name of the protection domain + :param pd_id: Id of the protection domain + :param pool_name: Name of the storage pool + :param pool_id: Id of the storage pool + """ + + if snap_pol_id and 'snapshotPolicyId' in vol_details and \ + snap_pol_id != vol_details['snapshotPolicyId']: + self.module.fail_json(msg="Entered snapshot policy id does not" + " match with the snapshot policy's id" + " attached to the volume. Please enter" + " a correct snapshot policy id.") + + if snap_pol_name and 'snapshotPolicyId' in vol_details and \ + snap_pol_name != vol_details['snapshotPolicyName']: + self.module.fail_json(msg="Entered snapshot policy name does not" + " match with the snapshot policy's " + "name attached to the volume. Please" + " enter a correct snapshot policy" + " name.") + + if pd_id and pd_id != vol_details['protectionDomainId']: + self.module.fail_json(msg="Entered protection domain id does not" + " match with the volume's protection" + " domain id. Please enter a correct" + " protection domain id.") + + if pool_id and pool_id != vol_details['storagePoolId']: + self.module.fail_json(msg="Entered storage pool id does" + " not match with the volume's " + "storage pool id. Please enter" + " a correct storage pool id.") + + if pd_name and pd_name != vol_details['protectionDomainName']: + self.module.fail_json(msg="Entered protection domain name does" + " not match with the volume's " + "protection domain name. 
    def perform_module_operation(self):
        """
        Perform different actions on volume based on parameters passed in
        the playbook.

        Order of operations: normalize inputs -> validate -> resolve
        names to ids -> create -> compute modifications -> map/unmap
        SDCs -> apply modifications -> delete -> report state.
        """
        vol_name = self.module.params['vol_name']
        vol_id = self.module.params['vol_id']
        vol_type = self.module.params['vol_type']
        compression_type = self.module.params['compression_type']
        sp_name = self.module.params['storage_pool_name']
        sp_id = self.module.params['storage_pool_id']
        pd_name = self.module.params['protection_domain_name']
        pd_id = self.module.params['protection_domain_id']
        snap_pol_name = self.module.params['snapshot_policy_name']
        snap_pol_id = self.module.params['snapshot_policy_id']
        auto_snap_remove_type = self.module.params['auto_snap_remove_type']
        use_rmcache = self.module.params['use_rmcache']
        size = self.module.params['size']
        cap_unit = self.module.params['cap_unit']
        vol_new_name = self.module.params['vol_new_name']
        # Deep-copied because map_volume_to_sdc mutates the sdc dicts.
        sdc = copy.deepcopy(self.module.params['sdc'])
        sdc_state = self.module.params['sdc_state']
        delete_snapshots = self.module.params['delete_snapshots']
        state = self.module.params['state']

        # Translate playbook enums to the casing the REST API expects.
        if compression_type:
            compression_type = compression_type.capitalize()
        if vol_type:
            vol_type = get_vol_type(vol_type)
        if auto_snap_remove_type:
            auto_snap_remove_type = auto_snap_remove_type.capitalize()

        # result is a dictionary to contain end state and volume details
        changed = False
        result = dict(
            changed=False,
            volume_details={}
        )
        self.validate_parameters(auto_snap_remove_type, snap_pol_id,
                                 snap_pol_name, delete_snapshots, state)

        # An empty policy name/id with no explicit removal type defaults
        # to detaching the policy.
        if not auto_snap_remove_type and\
                (snap_pol_name == "" or snap_pol_id == ""):
            auto_snap_remove_type = "Detach"
        if size:
            if not cap_unit:
                cap_unit = 'GB'

            # The API works in GB; convert TB requests.
            if cap_unit == 'TB':
                size = size * 1024

        # Resolve protection domain / storage pool / snapshot policy
        # names to ids before any lookup that needs them.
        if pd_name:
            pd_details = self.get_protection_domain(pd_name)
            if pd_details:
                pd_id = pd_details['id']
            msg = "Fetched the protection domain details with id {0}," \
                  " name {1}".format(pd_id, pd_name)
            LOG.info(msg)

        if sp_name:
            sp_details = self.get_storage_pool(storage_pool_name=sp_name,
                                               protection_domain_id=pd_id)
            if sp_details:
                sp_id = sp_details['id']
            msg = "Fetched the storage pool details id {0}," \
                  " name {1}".format(sp_id, sp_name)
            LOG.info(msg)

        if snap_pol_name is not None:
            snap_pol_details = None
            if snap_pol_name:
                snap_pol_details = \
                    self.get_snapshot_policy(snap_pol_name=snap_pol_name)
                if snap_pol_details:
                    snap_pol_id = snap_pol_details['id']

            # Empty name means "no policy" -> empty id for detach logic.
            if snap_pol_name == "":
                snap_pol_id = ""
            msg = "Fetched the snapshot policy details with id {0}," \
                  " name {1}".format(snap_pol_id, snap_pol_name)
            LOG.info(msg)

        # get volume details
        volume_details = self.get_volume(vol_name, vol_id)
        if volume_details:
            vol_id = volume_details['id']
            msg = "Fetched the volume details {0}".format(str(volume_details))
            LOG.info(msg)

        if vol_name and volume_details:
            self.verify_params(
                volume_details, snap_pol_name, snap_pol_id, pd_name, pd_id,
                sp_name, sp_id)

        # create operation
        create_changed = False
        if state == 'present' and not volume_details:
            if vol_id:
                self.module.fail_json(msg="Creation of volume is allowed "
                                          "using vol_name only, "
                                          "vol_id given.")

            if vol_new_name:
                self.module.fail_json(
                    msg="vol_new_name parameter is not supported during "
                        "creation of a volume. Try renaming the volume after"
                        " the creation.")
            create_changed = self.create_volume(vol_name, sp_id, size,
                                                vol_type, use_rmcache,
                                                compression_type)
            if create_changed:
                # Re-fetch so downstream steps see the created volume.
                volume_details = self.get_volume(vol_name)
                vol_id = volume_details['id']
                msg = "Volume created successfully, fetched " \
                      "volume details {0}".format(str(volume_details))
                LOG.info(msg)

        # checking if basic volume parameters are modified or not.
        modify_dict = {}
        if volume_details and state == 'present':
            modify_dict = self.to_modify(
                volume_details, size, use_rmcache, compression_type,
                vol_new_name, snap_pol_id, auto_snap_remove_type)
            msg = "Parameters to be modified are as" \
                  " follows: {0}".format(str(modify_dict))
            LOG.info(msg)

        # Mapping the SDCs to a volume
        mode_changed = False
        limits_changed = False
        map_changed = False
        if state == 'present' and volume_details and sdc and \
                sdc_state == 'mapped':
            map_changed, access_mode_list, limits_list = \
                self.map_volume_to_sdc(volume_details, sdc)
            if len(access_mode_list) > 0:
                mode_changed = self.modify_access_mode(vol_id,
                                                       access_mode_list)
            if len(limits_list) > 0:
                for temp in limits_list:
                    payload = {
                        "volume_id": volume_details['id'],
                        "sdc_id": temp['sdc_id'],
                        "bandwidth_limit": temp['bandwidth_limit'],
                        "iops_limit": temp['iops_limit']
                    }
                    limits_changed = self.modify_limits(payload)

        # Unmap the SDCs to a volume
        unmap_changed = False
        if state == 'present' and volume_details and sdc and \
                sdc_state == 'unmapped':
            unmap_changed = self.unmap_volume_from_sdc(volume_details, sdc)

        # Update the basic volume attributes
        modify_changed = False
        if modify_dict and state == 'present':
            modify_changed = self.modify_volume(vol_id, modify_dict)

        # delete operation
        del_changed = False
        if state == 'absent' and volume_details:
            # Map the boolean playbook flag onto the API removal modes.
            if delete_snapshots is True:
                delete_snapshots = 'INCLUDING_DESCENDANTS'
            if delete_snapshots is None or delete_snapshots is False:
                delete_snapshots = 'ONLY_ME'
            del_changed = \
                self.delete_volume(vol_id, delete_snapshots)

        if modify_changed or unmap_changed or map_changed or create_changed\
                or del_changed or mode_changed or limits_changed:
            changed = True

        # Returning the updated volume details
        if state == 'present':
            vol_details = self.show_output(vol_id)
            result['volume_details'] = vol_details
        result['changed'] = changed
        self.module.exit_json(**result)

    def show_output(self, vol_id):
        """Show volume details.

        Fetches the volume by id and enriches it with size in GB,
        storage pool / protection domain / snapshot policy names,
        the list of descendant snapshots, and statistics.

        :param vol_id: ID of the volume
        :return: Details of volume if exist.
        """

        try:
            volume_details = self.powerflex_conn.volume.get(
                filter_fields={'id': vol_id})

            if len(volume_details) == 0:
                msg = "Volume with identifier {0} not found".format(
                    vol_id)
                LOG.error(msg)
                return None

            # Append size in GB in the volume details
            if 'sizeInKb' in volume_details[0] and \
                    volume_details[0]['sizeInKb']:
                volume_details[0]['sizeInGB'] = utils.get_size_in_gb(
                    volume_details[0]['sizeInKb'], 'KB')

            # Append storage pool name and id.
            sp = None
            pd_id = None
            if 'storagePoolId' in volume_details[0] and \
                    volume_details[0]['storagePoolId']:
                sp = \
                    self.get_storage_pool(volume_details[0]['storagePoolId'])
                if len(sp) > 0:
                    volume_details[0]['storagePoolName'] = sp['name']
                    pd_id = sp['protectionDomainId']

            # Append protection domain name and id
            if sp and 'protectionDomainId' in sp and \
                    sp['protectionDomainId']:
                pd = self.get_protection_domain(protection_domain_id=pd_id)
                volume_details[0]['protectionDomainId'] = pd_id
                volume_details[0]['protectionDomainName'] = pd['name']

            # Append snapshot policy name and id
            if volume_details[0]['snplIdOfSourceVolume'] is not None:
                snap_policy_id = volume_details[0]['snplIdOfSourceVolume']
                volume_details[0]['snapshotPolicyId'] = snap_policy_id
                volume_details[0]['snapshotPolicyName'] = \
                    self.get_snapshot_policy(snap_policy_id)['name']
            else:
                volume_details[0]['snapshotPolicyId'] = None
                volume_details[0]['snapshotPolicyName'] = None

            # Append the list of snapshots associated with the volume
            list_of_snaps = self.powerflex_conn.volume.get(
                filter_fields={'ancestorVolumeId': volume_details[0]['id']})
            volume_details[0]['snapshotsList'] = list_of_snaps

            # Append statistics
            statistics = self.powerflex_conn.volume.get_statistics(volume_details[0]['id'])
            volume_details[0]['statistics'] = statistics if statistics else {}

            return volume_details[0]

        except Exception as e:
            error_msg = "Failed to get the volume {0} with error {1}"
            error_msg = error_msg.format(vol_id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)
def check_for_sdc_modification(volume, sdc_id, sdc_details):
    """
    Compare the requested SDC settings against the current mapping.

    :param volume: The volume details
    :param sdc_id: The ID of the SDC
    :param sdc_details: The details of SDC
    :return: Dictionary with SDC attributes to be modified
    """
    access_mode_dict = dict()
    limits_dict = dict()

    for sdc in volume['mappedSdcInfo']:
        if sdc['sdcId'] != sdc_id:
            continue
        requested_mode = get_access_mode(sdc_details['access_mode'])
        if sdc['accessMode'] != requested_mode:
            access_mode_dict['sdc_id'] = sdc_id
            access_mode_dict['accessMode'] = requested_mode
        # limitBwInMbps is reported in MB while bandwidth_limit is taken
        # in KB, so both bandwidth comparisons convert via
        # get_limits_in_mb. (Fix: the outer comparison previously used
        # the raw values, which produced spurious all-None limit dicts
        # and could miss a real bandwidth change when the raw numbers
        # happened to coincide.)
        if sdc['limitIops'] != sdc_details['iops_limit'] or \
                sdc['limitBwInMbps'] != \
                get_limits_in_mb(sdc_details['bandwidth_limit']):
            limits_dict['sdc_id'] = sdc_id
            limits_dict['iops_limit'] = None
            limits_dict['bandwidth_limit'] = None
            if sdc['limitIops'] != sdc_details['iops_limit']:
                limits_dict['iops_limit'] = sdc_details['iops_limit']
            if sdc['limitBwInMbps'] != \
                    get_limits_in_mb(sdc_details['bandwidth_limit']):
                limits_dict['bandwidth_limit'] = \
                    sdc_details['bandwidth_limit']
        break
    return access_mode_dict, limits_dict


def get_limits_in_mb(limits):
    """
    Convert a bandwidth limit from KB to MB.

    :param limits: Limits in KB
    :return: Limits in MB, or None when no limit is set
    """
    if limits:
        return limits / 1024
    return None


def get_access_mode(access_mode):
    """
    Translate the playbook access-mode enum to the API value.

    :param access_mode: Access mode of the SDC
    :return: The enum for the access mode (None for unknown input)
    """
    access_mode_dict = {
        "READ_WRITE": "ReadWrite",
        "READ_ONLY": "ReadOnly",
        "NO_ACCESS": "NoAccess"
    }
    return access_mode_dict.get(access_mode)


def get_vol_type(vol_type):
    """
    Translate the playbook volume-type enum to the API value.

    :param vol_type: Type of the volume
    :return: Corresponding value for the entered vol_type
    """
    vol_type_dict = {
        "THICK_PROVISIONED": "ThickProvisioned",
        "THIN_PROVISIONED": "ThinProvisioned",
    }
    return vol_type_dict.get(vol_type)


def get_powerflex_volume_parameters():
    """This method provide parameter required for the volume
    module on PowerFlex"""
    return dict(
        vol_name=dict(), vol_id=dict(),
        storage_pool_name=dict(), storage_pool_id=dict(),
        protection_domain_name=dict(), protection_domain_id=dict(),
        use_rmcache=dict(type='bool'), snapshot_policy_name=dict(),
        snapshot_policy_id=dict(),
        size=dict(type='int'),
        cap_unit=dict(choices=['GB', 'TB']),
        vol_type=dict(choices=['THICK_PROVISIONED', 'THIN_PROVISIONED']),
        compression_type=dict(choices=['NORMAL', 'NONE']),
        auto_snap_remove_type=dict(choices=['detach', 'remove']),
        vol_new_name=dict(),
        allow_multiple_mappings=dict(type='bool'),
        delete_snapshots=dict(type='bool'),
        sdc=dict(
            type='list', elements='dict', options=dict(
                sdc_id=dict(), sdc_ip=dict(),
                sdc_name=dict(),
                access_mode=dict(choices=['READ_WRITE', 'READ_ONLY',
                                          'NO_ACCESS']),
                bandwidth_limit=dict(type='int'),
                iops_limit=dict(type='int')
            )
        ),
        sdc_state=dict(choices=['mapped', 'unmapped']),
        state=dict(required=True, type='str', choices=['present', 'absent'])
    )


def main():
    """ Create PowerFlex volume object and perform actions on it
        based on user input from playbook"""
    obj = PowerFlexVolume()
    obj.perform_module_operation()


if __name__ == '__main__':
    main()
b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.12.txt @@ -0,0 +1,10 @@ +plugins/modules/device.py validate-modules:missing-gplv3-license +plugins/modules/sdc.py validate-modules:missing-gplv3-license +plugins/modules/sds.py validate-modules:missing-gplv3-license +plugins/modules/snapshot.py validate-modules:missing-gplv3-license +plugins/modules/storagepool.py validate-modules:missing-gplv3-license +plugins/modules/volume.py validate-modules:missing-gplv3-license +plugins/modules/info.py validate-modules:missing-gplv3-license +plugins/modules/protection_domain.py validate-modules:missing-gplv3-license +plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license +plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.13.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.13.txt new file mode 100644 index 00000000..adc32988 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.13.txt @@ -0,0 +1,10 @@ +plugins/modules/device.py validate-modules:missing-gplv3-license +plugins/modules/sdc.py validate-modules:missing-gplv3-license +plugins/modules/sds.py validate-modules:missing-gplv3-license +plugins/modules/snapshot.py validate-modules:missing-gplv3-license +plugins/modules/storagepool.py validate-modules:missing-gplv3-license +plugins/modules/volume.py validate-modules:missing-gplv3-license +plugins/modules/info.py validate-modules:missing-gplv3-license +plugins/modules/protection_domain.py validate-modules:missing-gplv3-license +plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license +plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt new file mode 100644 index 00000000..adc32988 --- /dev/null +++ 
b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt @@ -0,0 +1,10 @@ +plugins/modules/device.py validate-modules:missing-gplv3-license +plugins/modules/sdc.py validate-modules:missing-gplv3-license +plugins/modules/sds.py validate-modules:missing-gplv3-license +plugins/modules/snapshot.py validate-modules:missing-gplv3-license +plugins/modules/storagepool.py validate-modules:missing-gplv3-license +plugins/modules/volume.py validate-modules:missing-gplv3-license +plugins/modules/info.py validate-modules:missing-gplv3-license +plugins/modules/protection_domain.py validate-modules:missing-gplv3-license +plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license +plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license diff --git a/ansible_collections/dellemc/powerflex/tests/unit/__init__.py b/ansible_collections/dellemc/powerflex/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/__init__.py diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/__init__.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/__init__.py diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_api_exception.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_api_exception.py new file mode 100644 index 00000000..5128e54b --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_api_exception.py @@ -0,0 +1,14 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock ApiException for Dell Technologies (Dell) PowerFlex Test modules""" + +from __future__ import (absolute_import, division, 
class MockApiException(Exception):
    """Stand-in for the PyPowerFlex API exception used by the unit tests."""

    # Mirror the attributes the real ApiException exposes.
    body = "PyPowerFlex Error message"
    status = "500"
} + }, + 'sdcSdsNetworkDisconnectionsCounterParameters': { + 'shortWindow': { + 'threshold': 800, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 4000, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 20000, + 'windowSizeInSec': 86400 + } + }, + 'sdcMemoryAllocationFailuresCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdcSocketAllocationFailuresCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdcLongOperationsCounterParameters': { + 'shortWindow': { + 'threshold': 10000, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 100000, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 1000000, + 'windowSizeInSec': 86400 + } + }, + 'cliPasswordAllowed': True, + 'managementClientSecureCommunicationEnabled': True, + 'tlsVersion': 'TLSv1.2', + 'showGuid': True, + 'defragmentationEnabled': True, + 'mdmSecurityPolicy': 'None', + 'mdmCluster': { + 'clusterState': 'ClusteredNormal', + 'clusterMode': 'ThreeNodes', + 'slaves': [ + { + 'managementIPs': [ + DUMMY_IP + ], + 'ips': [ + DUMMY_IP + ], + 'versionInfo': '', + 'virtualInterfaces': [ + '' + ], + 'opensslVersion': 'OpenSSL 26 Jan 2017', + 'role': 'Manager', + 'status': 'Normal', + 'name': 'test_node1_MDM', + 'id': 'test_id_1', + 'port': 0000 + } + ], + 'goodNodesNum': 3, + 'master': { + 'managementIPs': [ + DUMMY_IP + ], + 'ips': [ + DUMMY_IP + ], + 'versionInfo': 'R3_6.0.0', + 'virtualInterfaces': [ + 'ens192' + ], + 'opensslVersion': 'OpenSSL26 Jan 2017', + 'role': 'Manager', + 'status': 'Normal', + 'name': 'test_node_0', + 'id': 'test_id_2', + 'port': 0000 + }, + 'tieBreakers': [ + { + 
'managementIPs': [ + DUMMY_IP + ], + 'ips': [ + DUMMY_IP + ], + 'versionInfo': '', + 'opensslVersion': 'N/A', + 'role': 'TieBreaker', + 'status': 'Normal', + 'id': 'test_id_3', + 'port': 0000 + } + ], + 'goodReplicasNum': 2, + 'id': '' + }, + 'sdcSdsConnectivityInfo': { + 'clientServerConnectivityStatus': 'AllConnected', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'addressSpaceUsage': 'Normal', + 'lastUpgradeTime': 0, + 'sdcSdrConnectivityInfo': { + 'clientServerConnectivityStatus': 'AllConnected', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'sdrSdsConnectivityInfo': { + 'clientServerConnectivityStatus': 'AllConnected', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'isInitialLicense': False, + 'capacityTimeLeftInDays': '253', + 'swid': 'abcdXXX', + 'installId': 'id_111', + 'restrictedSdcModeEnabled': False, + 'restrictedSdcMode': 'None', + 'enterpriseFeaturesEnabled': True, + 'daysInstalled': 112, + 'maxCapacityInGb': '5120', + 'id': 'id_222' + } + ] + + INFO_VOLUME_GET_LIST = MockVolumeApi.VOLUME_GET_LIST + + INFO_VOLUME_STATISTICS = { + 'test_vol_id_1': MockVolumeApi.VOLUME_STATISTICS + } + + INFO_STORAGE_POOL_GET_LIST = MockStoragePoolApi.STORAGE_POOL_GET_LIST + + INFO_STORAGE_POOL_STATISTICS = { + 'test_pool_id_1': MockStoragePoolApi.STORAGE_POOL_STATISTICS + } + + RCG_LIST = MockReplicationConsistencyGroupApi.get_rcg_details() + + @staticmethod + def get_exception_response(response_type): + if response_type == 'volume_get_details': + return "Get volumes list from powerflex array failed with error " + elif response_type == 'sp_get_details': + return "Get storage pool list from powerflex array 
failed with error " + elif response_type == 'rcg_get_details': + return "Get replication consistency group list from powerflex array failed with error " diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_mdm_cluster_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_mdm_cluster_api.py new file mode 100644 index 00000000..e2966fad --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_mdm_cluster_api.py @@ -0,0 +1,403 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of MDM cluster module on PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockMdmClusterApi: + MODULE_PATH = 'ansible_collections.dellemc.powerflex.plugins.modules.mdm_cluster.PowerFlexMdmCluster' + MODULE_UTILS_PATH = 'ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.utils' + + MDM_CLUSTER_COMMON_ARGS = { + "hostname": "**.***.**.***", + "mdm_id": None, + "mdm_name": None, + "mdm_new_name": None, + "performance_profile": None, + "standby_mdm": None, + "is_primary": None, + "cluster_mode": None, + "mdm": None, + "mdm_state": None, + "virtual_ip_interfaces": None, + "clear_interfaces": None, + 'state': None + } + + MDM_NAME = "mdm_node1" + MDM_NAME_STB_MGR = "mdm_node_mgr" + MDM_ID = "5908d328581d1401" + STB_TB_MDM_ID = "5908d328581d1403" + STB_MGR_MDM_ID = "36279b98215e5a04" + IP_1 = "10.x.y.z" + IP_2 = "10.x.x.z" + IP_3 = "10.x.z.z" + IP_4 = "10.x.y.y" + SSL_VERSION = "OpenSSL 1.0.2k-fips 26 Jan 2017" + SYS_VERSION = "DellEMC PowerFlex Version: R3_6.0.354" + + THREE_MDM_CLUSTER_DETAILS = { + "clusterState": "ClusteredNormal", + "clusterMode": "ThreeNodes", + "goodNodesNum": 3, + "master": { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + IP_1 + ], + 
"ips": [ + IP_1 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": "sample_mdm", + "id": "5908d328581d1400", + "port": 9011 + }, + "perfProfile": "HighPerformance", + "slaves": [ + { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + IP_2 + ], + "ips": [ + IP_2 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": "sample_mdm1", + "id": MDM_ID, + "port": 9011 + } + ], + "tieBreakers": [ + { + "managementIPs": [], + "ips": [ + IP_4 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "id": "5908d328581d1402", + "port": 9011 + } + ], + "standbyMDMs": [ + { + "managementIPs": [ + IP_3 + ], + "ips": [ + IP_3 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "name": MDM_NAME, + "id": STB_TB_MDM_ID, + "port": 9011 + }, + { + "virtualInterfaces": [ + "ens12" + ], + "managementIPs": [ + IP_3 + ], + "ips": [ + IP_3 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "Manager", + "status": "Normal", + "name": MDM_NAME_STB_MGR, + "id": STB_MGR_MDM_ID, + "port": 9011 + } + ], + "goodReplicasNum": 2, + "id": "cdd883cf00000002" + } + + THREE_MDM_CLUSTER_DETAILS_2 = { + "clusterState": "ClusteredNormal", + "clusterMode": "ThreeNodes", + "goodNodesNum": 3, + "master": { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + IP_1 + ], + "ips": [ + IP_1 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": "sample_mdm", + "id": "5908d328581d1400", + "port": 9011 + }, + "perfProfile": "HighPerformance", + "slaves": [ + { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + IP_2 + ], + "ips": [ + IP_2 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": 
"sample_mdm1", + "id": MDM_ID, + "port": 9011 + } + ], + "tieBreakers": [ + { + "managementIPs": [], + "ips": [ + IP_4 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "id": "5908d328581d1402", + "port": 9011 + } + ], + "goodReplicasNum": 2, + "id": "cdd883cf00000002" + } + + FIVE_MDM_CLUSTER_DETAILS = { + "clusterState": "ClusteredNormal", + "clusterMode": "FiveNodes", + "goodNodesNum": 5, + "master": { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + IP_1 + ], + "ips": [ + IP_1 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": "sample_mdm", + "id": "5908d328581d1400", + "port": 9011 + }, + "perfProfile": "HighPerformance", + "slaves": [ + { + "virtualInterfaces": [], + "managementIPs": [ + IP_2 + ], + "ips": [ + IP_2 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": "sample_mdm11", + "id": MDM_ID, + "port": 9011 + }, + { + "virtualInterfaces": [ + "ens12" + ], + "managementIPs": [ + IP_3 + ], + "ips": [ + IP_3 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "Manager", + "status": "Normal", + "name": MDM_NAME_STB_MGR, + "id": STB_MGR_MDM_ID, + "port": 9011 + } + ], + "tieBreakers": [ + { + "managementIPs": [ + IP_3 + ], + "ips": [ + IP_3 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "name": MDM_NAME, + "id": STB_TB_MDM_ID, + "port": 9011 + }, + { + "managementIPs": [], + "ips": [ + IP_4 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "id": "5908d328581d1402", + "port": 9011 + } + ], + "standbyMDMs": [ + { + "virtualInterfaces": [ + "ens13" + ], + "managementIPs": [ + IP_1 + ], + "ips": [ + IP_1 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "Manager", + "status": "Normal", + "name": 
"mgr_node_2", + "id": "5120af354fb17305", + "port": 9011 + } + ], + "goodReplicasNum": 2, + "id": "cdd883cf00000002" + } + PARTIAL_SYSTEM_DETAILS = [ + { + "systemVersionName": SYS_VERSION, + "perfProfile": "Compact", + "name": "System:3c567fd2298f020f", + "id": "3c567fd2298f020f" + }, + { + "systemVersionName": SYS_VERSION, + "perfProfile": "Compact", + "name": "System:3c567fd2298f0201", + "id": "3c567fd2298f0201" + } + ] + PARTIAL_SYSTEM_DETAILS_1 = [ + { + "systemVersionName": SYS_VERSION, + "perfProfile": "Compact", + "name": "System:3c567fd2298f020f", + "id": "3c567fd2298f020f" + } + ] + + @staticmethod + def get_failed_response(): + return "Failed to get the MDM cluster with error" + + @staticmethod + def rename_failed_response(): + return "Failed to rename the MDM mdm_node1 with error" + + @staticmethod + def perf_profile_failed_response(): + return "Failed to update performance profile to Compact with error" + + @staticmethod + def virtual_ip_interface_failed_response(): + return "Failed to modify the virtual IP interfaces of MDM 5908d328581d1401 with error" + + @staticmethod + def remove_mdm_failed_response(): + return "Failed to remove the standby MDM 5908d328581d1403 from the MDM cluster with error" + + @staticmethod + def add_mdm_failed_response(): + return "Failed to Add a standby MDM with error" + + @staticmethod + def owner_failed_response(): + return "Failed to update the Owner of MDM cluster to MDM sample_mdm1 with error" + + @staticmethod + def switch_mode_failed_response(): + return "Failed to change the MDM cluster mode with error" + + @staticmethod + def system_failed_response(): + return "Failed to get system id with error" + + @staticmethod + def multiple_system_failed_response(): + return "Multiple systems exist on the given host." + + @staticmethod + def remove_mdm_no_id_name_failed_response(): + return "Either mdm_name or mdm_id is required while removing the standby MDM." 
+ + @staticmethod + def without_standby_failed_response(): + return "No Standby MDMs found. To expand cluster size, first add standby MDMs." + + @staticmethod + def no_cluster_failed_response(): + return "MDM cluster not found" + + @staticmethod + def id_none_interface_failed_response(): + return "Please provide mdm_name/mdm_id to modify virtual IP interfaces the MDM" + + @staticmethod + def id_none_rename_failed_response(): + return "Please provide mdm_name/mdm_id to rename the MDM" + + @staticmethod + def id_none_change_owner_failed_response(): + return "Either mdm_name or mdm_id is required while changing ownership of MDM cluster" + + @staticmethod + def new_name_add_mdm_failed_response(): + return "Parameters mdm_id/mdm_new_name are not allowed while adding a standby MDM" diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py new file mode 100644 index 00000000..60452ecd --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py @@ -0,0 +1,68 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of protection domain module on Dell Technologies (Dell) PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockProtectionDomainApi: + MODULE_PATH = 'ansible_collections.dellemc.powerflex.plugins.modules.protection_domain.PowerFlexProtectionDomain' + MODULE_UTILS_PATH = 'ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.utils' + + PROTECTION_DOMAIN = { + "protectiondomain": [ + { + "id": "7bd6457000000000", + "name": "test_domain", + "protectionDomainState": "Active", + "overallIoNetworkThrottlingInKbps": 20480, + 
"rebalanceNetworkThrottlingInKbps": 10240, + "rebuildNetworkThrottlingInKbps": 10240, + "vtreeMigrationNetworkThrottlingInKbps": 10240, + "rfcacheEnabled": "false", + "rfcacheMaxIoSizeKb": 128, + "rfcacheOpertionalMode": "None", + "rfcachePageSizeKb": 64, + "storagePools": [ + { + "id": "8d1cba1700000000", + "name": "pool1" + } + ] + } + ] + } + STORAGE_POOL = { + "storagepool": [ + { + "protectionDomainId": "7bd6457000000000", + "rebuildEnabled": True, + "mediaType": "HDD", + "name": "pool1", + "id": "8d1cba1700000000" + } + ] + } + + @staticmethod + def modify_pd_with_failed_msg(protection_domain_name): + return "Failed to update the rf cache limits of protection domain " + protection_domain_name + " with error " + + @staticmethod + def delete_pd_failed_msg(protection_domain_id): + return "Delete protection domain '" + protection_domain_id + "' operation failed with error ''" + + @staticmethod + def rename_pd_failed_msg(protection_domain_name): + return "Failed to update the protection domain " + protection_domain_name + " with error " + + @staticmethod + def version_pd_failed_msg(): + return "Getting PyPowerFlex SDK version, failed with Error The 'PyPowerFlex' distribution was " \ + "not found and is required by the application" diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py new file mode 100644 index 00000000..6671fd87 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py @@ -0,0 +1,70 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of volume module on Dell Technologies (Dell) PowerFlex +""" + +from __future__ import (absolute_import, division, 
class MockReplicationConsistencyGroupApi:
    """Canned API payloads and messages for the RCG module unit tests."""

    # Default (all-None) module arguments mirroring the module's argspec.
    RCG_COMMON_ARGS = {
        "hostname": "**.***.**.***",
        "rcg_name": None,
        "rcg_id": None,
        "create_snapshot": None, "new_rcg_name": None,
        "rpo": None, "protection_domain_name": None, "protection_domain_id": None,
        "activity_mode": None, "pause": None, "pause_mode": None, "freeze": None,
        "remote_peer": {"hostname": None, "username": None, "password": None,
                        "verifycert": None, "port": None, "protection_domain_name": None,
                        "protection_domain_id": None},
        "target_volume_access_mode": None, "is_consistent": None,
        "state": None
    }
    RCG_ID = "aadc17d500000000"
    FAIL_MSG = " failed with error"

    @staticmethod
    def get_rcg_details(pause_mode="None", freeze_state="Unfrozen", activity_mode="Active", consistency="Consistent"):
        """Return a one-element RCG listing with the given state fields.

        The four keyword arguments are spliced into the canned record so
        tests can request paused/frozen/inactive/inconsistent variants.
        """
        record = {"protectionDomainId": "b969400500000000",
                  "peerMdmId": "6c3d94f600000000",
                  "remoteId": "2130961a00000000",
                  "remoteMdmId": "0e7a082862fedf0f",
                  "currConsistMode": consistency,
                  "freezeState": freeze_state,
                  "lifetimeState": "Normal",
                  "pauseMode": pause_mode,
                  "snapCreationInProgress": False,
                  "lastSnapGroupId": "e58280b300000001",
                  "lastSnapCreationRc": "SUCCESS",
                  "targetVolumeAccessMode": "NoAccess",
                  "remoteProtectionDomainId": "4eeb304600000000",
                  "remoteProtectionDomainName": "domain1",
                  "failoverType": "None",
                  "failoverState": "None",
                  "activeLocal": True,
                  "activeRemote": True,
                  "abstractState": "Ok",
                  "localActivityState": activity_mode,
                  "remoteActivityState": "Active",
                  "inactiveReason": 11,
                  "rpoInSeconds": 30,
                  "replicationDirection": "LocalToRemote",
                  "disasterRecoveryState": "None",
                  "remoteDisasterRecoveryState": "None",
                  "error": 65,
                  "name": "test_rcg",
                  "type": "User",
                  "id": "aadc17d500000000"}
        return [record]

    @staticmethod
    def get_exception_response(response_type):
        """Message prefix when fetching the RCG fails (arg is unused)."""
        return "Failed to get the replication consistency group "

    @staticmethod
    def create_snapshot_exception_response(response_type, rcg_id):
        """Message when creating an RCG snapshot fails (first arg unused)."""
        return "Create RCG snapshot for RCG with id " + rcg_id + " operation failed"
class MockSDKResponse:
    """Minimal stand-in for an SDK HTTP response object.

    Carries only the two attributes the code under test inspects: the
    decoded payload (``data``) and the HTTP ``status_code``.
    """

    def __init__(self, data=None, status_code=200):
        # Defaults model an empty 200 OK response.
        self.status_code = status_code
        self.data = data
"media_type": None, + 'state': None + } + + STORAGE_POOL_GET_LIST = [ + { + 'protectionDomainId': '4eeb304600000000', + 'rebuildEnabled': True, + 'dataLayout': 'MediumGranularity', + 'persistentChecksumState': 'Protected', + 'addressSpaceUsage': 'Normal', + 'externalAccelerationType': 'None', + 'rebalanceEnabled': True, + 'sparePercentage': 10, + 'rmcacheWriteHandlingMode': 'Cached', + 'checksumEnabled': False, + 'useRfcache': False, + 'compressionMethod': 'Invalid', + 'fragmentationEnabled': True, + 'numOfParallelRebuildRebalanceJobsPerDevice': 2, + 'capacityAlertHighThreshold': 80, + 'capacityAlertCriticalThreshold': 90, + 'capacityUsageState': 'Normal', + 'capacityUsageType': 'NetCapacity', + 'addressSpaceUsageType': 'DeviceCapacityLimit', + 'bgScannerCompareErrorAction': 'ReportAndFix', + 'bgScannerReadErrorAction': 'ReportAndFix', + 'fglExtraCapacity': None, + 'fglOverProvisioningFactor': None, + 'fglWriteAtomicitySize': None, + 'fglMaxCompressionRatio': None, + 'fglPerfProfile': None, + 'replicationCapacityMaxRatio': 0, + 'persistentChecksumEnabled': True, + 'persistentChecksumBuilderLimitKb': 3072, + 'persistentChecksumValidateOnRead': False, + 'useRmcache': False, + 'fglAccpId': None, + 'rebuildIoPriorityPolicy': 'limitNumOfConcurrentIos', + 'rebalanceIoPriorityPolicy': 'favorAppIos', + 'vtreeMigrationIoPriorityPolicy': 'favorAppIos', + 'protectedMaintenanceModeIoPriorityPolicy': 'limitNumOfConcurrentIos', + 'rebuildIoPriorityNumOfConcurrentIosPerDevice': 1, + 'rebalanceIoPriorityNumOfConcurrentIosPerDevice': 1, + 'vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice': 1, + 'protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice': 1, + 'rebuildIoPriorityBwLimitPerDeviceInKbps': 10240, + 'rebalanceIoPriorityBwLimitPerDeviceInKbps': 10240, + 'vtreeMigrationIoPriorityBwLimitPerDeviceInKbps': 10240, + 'protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps': 10240, + 'rebuildIoPriorityAppIopsPerDeviceThreshold': None, + 
'rebalanceIoPriorityAppIopsPerDeviceThreshold': None, + 'vtreeMigrationIoPriorityAppIopsPerDeviceThreshold': None, + 'protectedMaintenanceModeIoPriorityAppIopsPerDeviceThreshold': None, + 'rebuildIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'rebalanceIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'vtreeMigrationIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'protectedMaintenanceModeIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'rebuildIoPriorityQuietPeriodInMsec': None, + 'rebalanceIoPriorityQuietPeriodInMsec': None, + 'vtreeMigrationIoPriorityQuietPeriodInMsec': None, + 'protectedMaintenanceModeIoPriorityQuietPeriodInMsec': None, + 'zeroPaddingEnabled': True, + 'backgroundScannerMode': 'DataComparison', + 'backgroundScannerBWLimitKBps': 3072, + 'fglMetadataSizeXx100': None, + 'fglNvdimmWriteCacheSizeInMb': None, + 'fglNvdimmMetadataAmortizationX100': None, + 'mediaType': 'HDD', + 'name': 'test_pool', + 'id': 'test_pool_id_1' + } + ] + + STORAGE_POOL_STATISTICS = { + 'backgroundScanFixedReadErrorCount': 0, + 'pendingMovingOutBckRebuildJobs': 0, + 'degradedHealthyCapacityInKb': 0, + 'activeMovingOutFwdRebuildJobs': 0, + 'bckRebuildWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netFglUncompressedDataSizeInKb': 0, + 'primaryReadFromDevBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'BackgroundScannedInMB': 3209584, + 'volumeIds': [ + 'test_vol_id_1' + ], + 'maxUserDataCapacityInKb': 761204736, + 'persistentChecksumBuilderProgress': 100.0, + 'rfcacheReadsSkippedAlignedSizeTooLarge': 0, + 'pendingMovingInRebalanceJobs': 0, + 'rfcacheWritesSkippedHeavyLoad': 0, + 'unusedCapacityInKb': 761204736, + 'userDataSdcReadLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'totalReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfDeviceAtFaultRebuilds': 0, + 'totalWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, 
+ 'persistentChecksumCapacityInKb': 414720, + 'rmPendingAllocatedInKb': 0, + 'numOfVolumes': 1, + 'rfcacheIosOutstanding': 0, + 'capacityAvailableForVolumeAllocationInKb': 377487360, + 'numOfMappedToAllVolumes': 0, + 'netThinUserDataCapacityInKb': 0, + 'backgroundScanFixedCompareErrorCount': 0, + 'volMigrationWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'thinAndSnapshotRatio': 'Infinity', + 'fglUserDataCapacityInKb': 0, + 'pendingMovingInEnterProtectedMaintenanceModeJobs': 0, + 'activeMovingInNormRebuildJobs': 0, + 'aggregateCompressionLevel': 'Uncompressed', + 'targetOtherLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netUserDataCapacityInKb': 0, + 'pendingMovingOutExitProtectedMaintenanceModeJobs': 0, + 'overallUsageRatio': 'Infinity', + 'volMigrationReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netCapacityInUseNoOverheadInKb': 0, + 'pendingMovingInBckRebuildJobs': 0, + 'rfcacheReadsSkippedInternalError': 0, + 'activeBckRebuildCapacityInKb': 0, + 'rebalanceCapacityInKb': 0, + 'pendingMovingInExitProtectedMaintenanceModeJobs': 0, + 'rfcacheReadsSkippedLowResources': 0, + 'rplJournalCapAllowed': 0, + 'thinCapacityInUseInKb': 0, + 'userDataSdcTrimLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeMovingInEnterProtectedMaintenanceModeJobs': 0, + 'rfcacheWritesSkippedInternalError': 0, + 'netUserDataCapacityNoTrimInKb': 0, + 'rfcacheWritesSkippedCacheMiss': 0, + 'degradedFailedCapacityInKb': 0, + 'activeNormRebuildCapacityInKb': 0, + 'fglSparesInKb': 0, + 'snapCapacityInUseInKb': 0, + 'numOfMigratingVolumes': 0, + 'compressionRatio': 0.0, + 'rfcacheWriteMiss': 0, + 'primaryReadFromRmcacheBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'migratingVtreeIds': [ + ], + 'numOfVtrees': 1, + 'userDataCapacityNoTrimInKb': 0, + 'rfacheReadHit': 0, + 'compressedDataCompressionRatio': 0.0, + 'rplUsedJournalCap': 
0, + 'pendingMovingCapacityInKb': 0, + 'numOfSnapshots': 0, + 'pendingFwdRebuildCapacityInKb': 0, + 'tempCapacityInKb': 0, + 'totalFglMigrationSizeInKb': 0, + 'normRebuildCapacityInKb': 0, + 'logWrittenBlocksInKb': 0, + 'primaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfThickBaseVolumes': 0, + 'enterProtectedMaintenanceModeReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeRebalanceCapacityInKb': 0, + 'numOfReplicationJournalVolumes': 0, + 'rfcacheReadsSkippedLockIos': 0, + 'unreachableUnusedCapacityInKb': 0, + 'netProvisionedAddressesInKb': 0, + 'trimmedUserDataCapacityInKb': 0, + 'provisionedAddressesInKb': 0, + 'numOfVolumesInDeletion': 0, + 'pendingMovingOutFwdRebuildJobs': 0, + 'maxCapacityInKb': 845783040, + 'rmPendingThickInKb': 0, + 'protectedCapacityInKb': 0, + 'secondaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'normRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'thinCapacityAllocatedInKb': 16777216, + 'netFglUserDataCapacityInKb': 0, + 'metadataOverheadInKb': 0, + 'thinCapacityAllocatedInKm': 16777216, + 'rebalanceWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'primaryVacInKb': 8388608, + 'deviceIds': [ + 'dv_id_1', + 'dv_id_2', + 'dv_id_3' + ], + 'netSnapshotCapacityInKb': 0, + 'secondaryVacInKb': 8388608, + 'numOfDevices': 3, + 'rplTotalJournalCap': 0, + 'failedCapacityInKb': 0, + 'netMetadataOverheadInKb': 0, + 'activeMovingOutBckRebuildJobs': 0, + 'rfcacheReadsFromCache': 0, + 'activeMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'enterProtectedMaintenanceModeCapacityInKb': 0, + 'pendingMovingInNormRebuildJobs': 0, + 'failedVacInKb': 0, + 'primaryReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'fglUncompressedDataSizeInKb': 0, + 'fglCompressedDataSizeInKb': 0, + 'pendingRebalanceCapacityInKb': 0, + 'rfcacheAvgReadTime': 0, + 
'semiProtectedCapacityInKb': 0, + 'pendingMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'mgUserDdataCcapacityInKb': 0, + 'snapshotCapacityInKb': 0, + 'netMgUserDataCapacityInKb': 0, + 'fwdRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheWritesReceived': 0, + 'netUnusedCapacityInKb': 380602368, + 'thinUserDataCapacityInKb': 0, + 'protectedVacInKb': 16777216, + 'activeMovingRebalanceJobs': 0, + 'bckRebuildCapacityInKb': 0, + 'activeMovingInFwdRebuildJobs': 0, + 'netTrimmedUserDataCapacityInKb': 0, + 'pendingMovingRebalanceJobs': 0, + 'numOfMarkedVolumesForReplication': 0, + 'degradedHealthyVacInKb': 0, + 'semiProtectedVacInKb': 0, + 'userDataReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'pendingBckRebuildCapacityInKb': 0, + 'capacityLimitInKb': 845783040, + 'vtreeIds': [ + 'vtree_id_1' + ], + 'activeMovingCapacityInKb': 1, + 'targetWriteLatency': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'pendingExitProtectedMaintenanceModeCapacityInKb': 1, + 'rfcacheIosSkipped': 1, + 'userDataWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'inMaintenanceVacInKb': 1, + 'exitProtectedMaintenanceModeReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'netFglSparesInKb': 1, + 'rfcacheReadsSkipped': 1, + 'activeExitProtectedMaintenanceModeCapacityInKb': 1, + 'activeMovingOutExitProtectedMaintenanceModeJobs': 1, + 'numOfUnmappedVolumes': 2, + 'tempCapacityVacInKb': 1, + 'volumeAddressSpaceInKb': 80000, + 'currentFglMigrationSizeInKb': 1, + 'rfcacheWritesSkippedMaxIoSize': 1, + 'netMaxUserDataCapacityInKb': 380600000, + 'numOfMigratingVtrees': 1, + 'atRestCapacityInKb': 1, + 'rfacheWriteHit': 1, + 'bckRebuildReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheSourceDeviceWrites': 1, + 'spareCapacityInKb': 84578000, + 'enterProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 1, + 
'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheIoErrors': 1, + 'inaccessibleCapacityInKb': 1, + 'normRebuildWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'capacityInUseInKb': 1, + 'rebalanceReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheReadsSkippedMaxIoSize': 1, + 'activeMovingInExitProtectedMaintenanceModeJobs': 1, + 'secondaryReadFromDevBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'secondaryReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheWritesSkippedStuckIo': 1, + 'secondaryReadFromRmcacheBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'inMaintenanceCapacityInKb': 1, + 'exposedCapacityInKb': 1, + 'netFglCompressedDataSizeInKb': 1, + 'userDataSdcWriteLatency': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'inUseVacInKb': 16777000, + 'fwdRebuildCapacityInKb': 1, + 'thickCapacityInUseInKb': 1, + 'backgroundScanReadErrorCount': 1, + 'activeMovingInRebalanceJobs': 1, + 'migratingVolumeIds': [ + '1xxx' + ], + 'rfcacheWritesSkippedLowResources': 1, + 'capacityInUseNoOverheadInKb': 1, + 'exitProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheSkippedUnlinedWrite': 1, + 'netCapacityInUseInKb': 1, + 'numOfOutgoingMigrations': 1, + 'rfcacheAvgWriteTime': 1, + 'pendingNormRebuildCapacityInKb': 1, + 'pendingMovingOutNormrebuildJobs': 1, + 'rfcacheSourceDeviceReads': 1, + 'rfcacheReadsPending': 1, + 'volumeAllocationLimitInKb': 3791650000, + 'rfcacheReadsSkippedHeavyLoad': 1, + 'fwdRebuildWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheReadMiss': 1, + 'targetReadLatency': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'userDataCapacityInKb': 1, + 'activeMovingInBckRebuildJobs': 1, + 'movingCapacityInKb': 1, + 
'activeEnterProtectedMaintenanceModeCapacityInKb': 1, + 'backgroundScanCompareErrorCount': 1, + 'pendingMovingInFwdRebuildJobs': 1, + 'rfcacheReadsReceived': 1, + 'spSdsIds': [ + 'sp_id_1', + 'sp_id_2', + 'sp_id_3' + ], + 'pendingEnterProtectedMaintenanceModeCapacityInKb': 1, + 'vtreeAddresSpaceInKb': 8388000, + 'snapCapacityInUseOccupiedInKb': 1, + 'activeFwdRebuildCapacityInKb': 1, + 'rfcacheReadsSkippedStuckIo': 1, + 'activeMovingOutNormRebuildJobs': 1, + 'rfcacheWritePending': 1, + 'numOfThinBaseVolumes': 2, + 'degradedFailedVacInKb': 1, + 'userDataTrimBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'numOfIncomingVtreeMigrations': 1 + } + + @staticmethod + def get_exception_response(response_type): + if response_type == 'get_details': + return "Failed to get the storage pool test_pool with error " diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py new file mode 100644 index 00000000..b05cc84d --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py @@ -0,0 +1,548 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of volume module on Dell Technologies (Dell) PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api import MockStoragePoolApi + +__metaclass__ = type + + +class MockVolumeApi: + VOLUME_COMMON_ARGS = { + "hostname": "**.***.**.***", + "vol_name": None, + "vol_id": None, + "vol_type": None, + "compression_type": None, + "storage_pool_name": None, + "storage_pool_id": None, + "protection_domain_name": None, + "protection_domain_id": None, + "snapshot_policy_name": None, + 
"snapshot_policy_id": None, + "auto_snap_remove_type": None, + "use_rmcache": None, + "size": None, + "cap_unit": None, + "vol_new_name": None, + "sdc": {}, + "sdc_state": None, + "delete_snapshots": None, + "state": None + } + + VOLUME_GET_LIST = [ + { + 'storagePoolId': 'test_pool_id_1', + 'dataLayout': 'MediumGranularity', + 'vtreeId': 'vtree_id_1', + 'sizeInKb': 8388608, + 'snplIdOfAutoSnapshot': None, + 'volumeType': 'ThinProvisioned', + 'consistencyGroupId': None, + 'ancestorVolumeId': None, + 'notGenuineSnapshot': False, + 'accessModeLimit': 'ReadWrite', + 'secureSnapshotExpTime': 0, + 'useRmcache': False, + 'managedBy': 'ScaleIO', + 'lockedAutoSnapshot': False, + 'lockedAutoSnapshotMarkedForRemoval': False, + 'autoSnapshotGroupId': None, + 'compressionMethod': 'Invalid', + 'pairIds': None, + 'timeStampIsAccurate': False, + 'mappedSdcInfo': None, + 'originalExpiryTime': 0, + 'retentionLevels': [ + ], + 'snplIdOfSourceVolume': None, + 'volumeReplicationState': 'UnmarkedForReplication', + 'replicationJournalVolume': False, + 'replicationTimeStamp': 0, + 'creationTime': 1655878090, + 'name': 'testing', + 'id': 'test_id_1' + } + ] + + VOLUME_STORAGEPOOL_DETAILS = MockStoragePoolApi.STORAGE_POOL_GET_LIST[0] + + VOLUME_PD_DETAILS = { + 'rebalanceNetworkThrottlingEnabled': False, + 'vtreeMigrationNetworkThrottlingEnabled': False, + 'overallIoNetworkThrottlingEnabled': False, + 'rfcacheEnabled': True, + 'rfcacheAccpId': None, + 'rebuildNetworkThrottlingEnabled': False, + 'sdrSdsConnectivityInfo': { + 'clientServerConnStatus': 'CLIENT_SERVER_CONN_STATUS_ALL_CONNECTED', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'protectionDomainState': 'Active', + 'rebuildNetworkThrottlingInKbps': None, + 'rebalanceNetworkThrottlingInKbps': None, + 'overallIoNetworkThrottlingInKbps': None, + 'vtreeMigrationNetworkThrottlingInKbps': None, + 
'sdsDecoupledCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdsConfigurationFailureCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'mdmSdsNetworkDisconnectionsCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdsSdsNetworkDisconnectionsCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'rfcacheOpertionalMode': 'WriteMiss', + 'rfcachePageSizeKb': 64, + 'rfcacheMaxIoSizeKb': 128, + 'sdsReceiveBufferAllocationFailuresCounterParameters': { + 'shortWindow': { + 'threshold': 20000, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 200000, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 2000000, + 'windowSizeInSec': 86400 + } + }, + 'fglDefaultNumConcurrentWrites': 1000, + 'fglMetadataCacheEnabled': False, + 'fglDefaultMetadataCacheSize': 0, + 'protectedMaintenanceModeNetworkThrottlingEnabled': False, + 'protectedMaintenanceModeNetworkThrottlingInKbps': None, + 'rplCapAlertLevel': 'normal', + 'systemId': 'syst_id_1', + 'name': 'domain1', + 'id': '4eeb304600000000', + } + + VOLUME_STATISTICS = { + 'backgroundScanFixedReadErrorCount': 0, + 'pendingMovingOutBckRebuildJobs': 0, + 'degradedHealthyCapacityInKb': 0, + 'activeMovingOutFwdRebuildJobs': 0, + 'bckRebuildWriteBwc': { + 'numSeconds': 0, + 
'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netFglUncompressedDataSizeInKb': 0, + 'primaryReadFromDevBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'BackgroundScannedInMB': 3209584, + 'volumeIds': [ + '456ad22e00000003' + ], + 'maxUserDataCapacityInKb': 761204736, + 'persistentChecksumBuilderProgress': 100.0, + 'rfcacheReadsSkippedAlignedSizeTooLarge': 0, + 'pendingMovingInRebalanceJobs': 0, + 'rfcacheWritesSkippedHeavyLoad': 0, + 'unusedCapacityInKb': 761204736, + 'userDataSdcReadLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'totalReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfDeviceAtFaultRebuilds': 0, + 'totalWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'persistentChecksumCapacityInKb': 414720, + 'rmPendingAllocatedInKb': 0, + 'numOfVolumes': 1, + 'rfcacheIosOutstanding': 0, + 'capacityAvailableForVolumeAllocationInKb': 377487360, + 'numOfMappedToAllVolumes': 0, + 'netThinUserDataCapacityInKb': 0, + 'backgroundScanFixedCompareErrorCount': 0, + 'volMigrationWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'thinAndSnapshotRatio': 'Infinity', + 'fglUserDataCapacityInKb': 0, + 'pendingMovingInEnterProtectedMaintenanceModeJobs': 0, + 'activeMovingInNormRebuildJobs': 0, + 'aggregateCompressionLevel': 'Uncompressed', + 'targetOtherLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netUserDataCapacityInKb': 0, + 'pendingMovingOutExitProtectedMaintenanceModeJobs': 0, + 'overallUsageRatio': 'Infinity', + 'volMigrationReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netCapacityInUseNoOverheadInKb': 0, + 'pendingMovingInBckRebuildJobs': 0, + 'rfcacheReadsSkippedInternalError': 0, + 'activeBckRebuildCapacityInKb': 0, + 'rebalanceCapacityInKb': 0, + 'pendingMovingInExitProtectedMaintenanceModeJobs': 0, + 'rfcacheReadsSkippedLowResources': 0, + 
'rplJournalCapAllowed': 0, + 'thinCapacityInUseInKb': 0, + 'userDataSdcTrimLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeMovingInEnterProtectedMaintenanceModeJobs': 0, + 'rfcacheWritesSkippedInternalError': 0, + 'netUserDataCapacityNoTrimInKb': 0, + 'rfcacheWritesSkippedCacheMiss': 0, + 'degradedFailedCapacityInKb': 0, + 'activeNormRebuildCapacityInKb': 0, + 'fglSparesInKb': 0, + 'snapCapacityInUseInKb': 0, + 'numOfMigratingVolumes': 0, + 'compressionRatio': 0.0, + 'rfcacheWriteMiss': 0, + 'primaryReadFromRmcacheBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'migratingVtreeIds': [ + ], + 'numOfVtrees': 1, + 'userDataCapacityNoTrimInKb': 0, + 'rfacheReadHit': 0, + 'compressedDataCompressionRatio': 0.0, + 'rplUsedJournalCap': 0, + 'pendingMovingCapacityInKb': 0, + 'numOfSnapshots': 0, + 'pendingFwdRebuildCapacityInKb': 0, + 'tempCapacityInKb': 0, + 'totalFglMigrationSizeInKb': 0, + 'normRebuildCapacityInKb': 0, + 'logWrittenBlocksInKb': 0, + 'primaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfThickBaseVolumes': 0, + 'enterProtectedMaintenanceModeReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeRebalanceCapacityInKb': 0, + 'numOfReplicationJournalVolumes': 0, + 'rfcacheReadsSkippedLockIos': 0, + 'unreachableUnusedCapacityInKb': 0, + 'netProvisionedAddressesInKb': 0, + 'trimmedUserDataCapacityInKb': 0, + 'provisionedAddressesInKb': 0, + 'numOfVolumesInDeletion': 0, + 'pendingMovingOutFwdRebuildJobs': 0, + 'maxCapacityInKb': 845783040, + 'rmPendingThickInKb': 0, + 'protectedCapacityInKb': 0, + 'secondaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'normRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'thinCapacityAllocatedInKb': 16777216, + 'netFglUserDataCapacityInKb': 0, + 'metadataOverheadInKb': 0, + 'thinCapacityAllocatedInKm': 16777216, + 
'rebalanceWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'primaryVacInKb': 8388608, + 'deviceIds': [ + 'bbd7580800030001', + 'bbd4580a00040001', + 'bbd5580b00050001' + ], + 'netSnapshotCapacityInKb': 0, + 'secondaryVacInKb': 8388608, + 'numOfDevices': 3, + 'rplTotalJournalCap': 0, + 'failedCapacityInKb': 0, + 'netMetadataOverheadInKb': 0, + 'activeMovingOutBckRebuildJobs': 0, + 'rfcacheReadsFromCache': 0, + 'activeMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'enterProtectedMaintenanceModeCapacityInKb': 0, + 'pendingMovingInNormRebuildJobs': 0, + 'failedVacInKb': 0, + 'primaryReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'fglUncompressedDataSizeInKb': 0, + 'fglCompressedDataSizeInKb': 0, + 'pendingRebalanceCapacityInKb': 0, + 'rfcacheAvgReadTime': 0, + 'semiProtectedCapacityInKb': 0, + 'pendingMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'mgUserDdataCcapacityInKb': 0, + 'snapshotCapacityInKb': 0, + 'netMgUserDataCapacityInKb': 0, + 'fwdRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheWritesReceived': 0, + 'netUnusedCapacityInKb': 380602368, + 'thinUserDataCapacityInKb': 0, + 'protectedVacInKb': 16777216, + 'activeMovingRebalanceJobs': 0, + 'bckRebuildCapacityInKb': 0, + 'activeMovingInFwdRebuildJobs': 0, + 'netTrimmedUserDataCapacityInKb': 0, + 'pendingMovingRebalanceJobs': 0, + 'numOfMarkedVolumesForReplication': 0, + 'degradedHealthyVacInKb': 0, + 'semiProtectedVacInKb': 0, + 'userDataReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'pendingBckRebuildCapacityInKb': 0, + 'capacityLimitInKb': 845783040, + 'vtreeIds': [ + '32b13de900000003' + ], + 'activeMovingCapacityInKb': 0, + 'targetWriteLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'pendingExitProtectedMaintenanceModeCapacityInKb': 0, + 'rfcacheIosSkipped': 0, + 'userDataWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, 
+ 'numOccured': 0 + }, + 'inMaintenanceVacInKb': 0, + 'exitProtectedMaintenanceModeReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netFglSparesInKb': 0, + 'rfcacheReadsSkipped': 0, + 'activeExitProtectedMaintenanceModeCapacityInKb': 0, + 'activeMovingOutExitProtectedMaintenanceModeJobs': 0, + 'numOfUnmappedVolumes': 1, + 'tempCapacityVacInKb': 0, + 'volumeAddressSpaceInKb': 8388608, + 'currentFglMigrationSizeInKb': 0, + 'rfcacheWritesSkippedMaxIoSize': 0, + 'netMaxUserDataCapacityInKb': 380602368, + 'numOfMigratingVtrees': 0, + 'atRestCapacityInKb': 0, + 'rfacheWriteHit': 0, + 'bckRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheSourceDeviceWrites': 0, + 'spareCapacityInKb': 84578304, + 'enterProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheIoErrors': 0, + 'inaccessibleCapacityInKb': 0, + 'normRebuildWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'capacityInUseInKb': 0, + 'rebalanceReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheReadsSkippedMaxIoSize': 0, + 'activeMovingInExitProtectedMaintenanceModeJobs': 0, + 'secondaryReadFromDevBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'secondaryReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheWritesSkippedStuckIo': 0, + 'secondaryReadFromRmcacheBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'inMaintenanceCapacityInKb': 0, + 'exposedCapacityInKb': 0, + 'netFglCompressedDataSizeInKb': 0, + 'userDataSdcWriteLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'inUseVacInKb': 16777216, + 'fwdRebuildCapacityInKb': 0, + 'thickCapacityInUseInKb': 0, + 'backgroundScanReadErrorCount': 0, + 'activeMovingInRebalanceJobs': 0, + 'migratingVolumeIds': [ + ], + 'rfcacheWritesSkippedLowResources': 0, + 
'capacityInUseNoOverheadInKb': 0, + 'exitProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheSkippedUnlinedWrite': 0, + 'netCapacityInUseInKb': 0, + 'numOfOutgoingMigrations': 0, + 'rfcacheAvgWriteTime': 0, + 'pendingNormRebuildCapacityInKb': 0, + 'pendingMovingOutNormrebuildJobs': 0, + 'rfcacheSourceDeviceReads': 0, + 'rfcacheReadsPending': 0, + 'volumeAllocationLimitInKb': 3791650816, + 'rfcacheReadsSkippedHeavyLoad': 0, + 'fwdRebuildWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheReadMiss': 0, + 'targetReadLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'userDataCapacityInKb': 0, + 'activeMovingInBckRebuildJobs': 0, + 'movingCapacityInKb': 0, + 'activeEnterProtectedMaintenanceModeCapacityInKb': 0, + 'backgroundScanCompareErrorCount': 0, + 'pendingMovingInFwdRebuildJobs': 0, + 'rfcacheReadsReceived': 0, + 'spSdsIds': [ + 'abdfe71b00030001', + 'abdce71d00040001', + 'abdde71e00050001' + ], + 'pendingEnterProtectedMaintenanceModeCapacityInKb': 0, + 'vtreeAddresSpaceInKb': 8388608, + 'snapCapacityInUseOccupiedInKb': 0, + 'activeFwdRebuildCapacityInKb': 0, + 'rfcacheReadsSkippedStuckIo': 0, + 'activeMovingOutNormRebuildJobs': 0, + 'rfcacheWritePending': 0, + 'numOfThinBaseVolumes': 1, + 'degradedFailedVacInKb': 0, + 'userDataTrimBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfIncomingVtreeMigrations': 0 + } + + @staticmethod + def get_exception_response(response_type): + if response_type == 'get_details': + return "Failed to get the volume test_id_1 with error " diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/__init__.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/__init__.py diff --git 
a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py new file mode 100644 index 00000000..1af574da --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py @@ -0,0 +1,130 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for info module on PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_info_api import MockInfoApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKResponse +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() + +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.info import PowerFlexInfo + + +class TestPowerflexInfo(): + + get_module_args = MockInfoApi.INFO_COMMON_ARGS + + @pytest.fixture + def info_module_mock(self, mocker): + info_module_mock = PowerFlexInfo() + info_module_mock.module.check_mode = False + info_module_mock.powerflex_conn.system.api_version = MagicMock( + return_value=3.5 + ) + info_module_mock.powerflex_conn.system.get = MagicMock( + return_value=MockInfoApi.INFO_ARRAY_DETAILS + ) + return info_module_mock + + def test_get_volume_details(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['vol'] + }) + 
info_module_mock.module.params = self.get_module_args + volume_resp = MockInfoApi.INFO_VOLUME_GET_LIST + info_module_mock.powerflex_conn.volume.get = MagicMock( + return_value=volume_resp + ) + volume_stat_resp = MockInfoApi.INFO_VOLUME_STATISTICS + info_module_mock.powerflex_conn.utility.get_statistics_for_all_volumes = MagicMock( + return_value=volume_stat_resp + ) + info_module_mock.perform_module_operation() + info_module_mock.powerflex_conn.volume.get.assert_called() + info_module_mock.powerflex_conn.utility.get_statistics_for_all_volumes.assert_called() + + def test_get_volume_details_with_exception(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['vol'] + }) + info_module_mock.module.params = self.get_module_args + volume_resp = MockInfoApi.INFO_VOLUME_GET_LIST + info_module_mock.powerflex_conn.volume.get = MagicMock( + return_value=volume_resp + ) + info_module_mock.powerflex_conn.utility.get_statistics_for_all_volumes = MagicMock( + side_effect=MockApiException + ) + info_module_mock.perform_module_operation() + assert MockInfoApi.get_exception_response('volume_get_details') in info_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_sp_details(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['storage_pool'] + }) + info_module_mock.module.params = self.get_module_args + sp_resp = MockInfoApi.INFO_STORAGE_POOL_GET_LIST + info_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=sp_resp + ) + sp_stat_resp = MockInfoApi.INFO_STORAGE_POOL_STATISTICS + info_module_mock.powerflex_conn.utility.get_statistics_for_all_storagepools = MagicMock( + return_value=sp_stat_resp + ) + info_module_mock.perform_module_operation() + info_module_mock.powerflex_conn.storage_pool.get.assert_called() + info_module_mock.powerflex_conn.utility.get_statistics_for_all_storagepools.assert_called() + + def test_get_sp_details_with_exception(self, info_module_mock): + self.get_module_args.update({ + 
"gather_subset": ['storage_pool'] + }) + info_module_mock.module.params = self.get_module_args + sp_resp = MockInfoApi.INFO_STORAGE_POOL_GET_LIST + info_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=sp_resp + ) + info_module_mock.powerflex_conn.utility.get_statistics_for_all_storagepools = MagicMock( + side_effect=MockApiException + ) + info_module_mock.perform_module_operation() + assert MockInfoApi.get_exception_response('sp_get_details') in info_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_rcg_details(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['rcg'] + }) + info_module_mock.module.params = self.get_module_args + rcg_resp = MockInfoApi.RCG_LIST + info_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=rcg_resp) + info_module_mock.perform_module_operation() + info_module_mock.powerflex_conn.replication_consistency_group.get.assert_called() + + def test_get_rcg_details_throws_exception(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['rcg'] + }) + info_module_mock.module.params = self.get_module_args + info_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + side_effect=MockApiException + ) + info_module_mock.perform_module_operation() + assert MockInfoApi.get_exception_response('rcg_get_details') in info_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_mdm_cluster.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_mdm_cluster.py new file mode 100644 index 00000000..f8f3cdc2 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_mdm_cluster.py @@ -0,0 +1,636 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for MDM cluster module on PowerFlex""" 
+ +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_mdm_cluster_api import MockMdmClusterApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKResponse +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.mdm_cluster import PowerFlexMdmCluster + + +class TestPowerflexMDMCluster(): + + get_module_args = MockMdmClusterApi.MDM_CLUSTER_COMMON_ARGS + add_mdm_ip = "xx.3x.xx.xx" + + @pytest.fixture + def mdm_cluster_module_mock(self, mocker): + mocker.patch(MockMdmClusterApi.MODULE_UTILS_PATH + '.PowerFlexClient', new=MockApiException) + mdm_cluster_module_mock = PowerFlexMdmCluster() + mdm_cluster_module_mock.module.check_mode = False + return mdm_cluster_module_mock + + def test_get_mdm_cluster(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details.assert_called() + + def test_get_mdm_cluster_with_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "state": 
"present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.get_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details.assert_called() + + def test_rename_mdm(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_new_name": "mdm_node_renamed", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.rename_mdm = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.rename_mdm.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_rename_mdm_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_new_name": "mdm_node_renamed", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.rename_mdm = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.rename_mdm.assert_called() + assert MockMdmClusterApi.rename_failed_response() in 
mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_set_performance_profile_mdm_cluster(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "performance_profile": "Compact", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.set_cluster_mdm_performance_profile = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.set_cluster_mdm_performance_profile.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_set_performance_profile_mdm_cluster_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "performance_profile": "Compact", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.set_cluster_mdm_performance_profile = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.set_cluster_mdm_performance_profile.assert_called() + assert MockMdmClusterApi.perf_profile_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_set_virtual_ip_interface_mdm(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.MDM_ID, + "virtual_ip_interfaces": ["ens11"], + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp 
= MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_set_virtual_ip_interface_mdm_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.MDM_ID, + "virtual_ip_interfaces": ["ens11"], + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface.assert_called() + assert MockMdmClusterApi.virtual_ip_interface_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_set_virtual_ip_interface_mdm_idempotency(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.MDM_ID, + "virtual_ip_interfaces": ["ens1"], + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert 
mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_remove_standby_mdm(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.STB_TB_MDM_ID, + "state": "absent" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.remove_standby_mdm = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.remove_standby_mdm.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_remove_standby_mdm_idempotency(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "non_existing_node", + "state": "absent" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_remove_standby_mdm_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.STB_TB_MDM_ID, + "state": "absent" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.remove_standby_mdm = MagicMock( + side_effect=utils.PowerFlexClient + ) + 
mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.remove_standby_mdm.assert_called() + assert MockMdmClusterApi.remove_mdm_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_add_standby_mdm(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "standby_node", + "standby_mdm": { + "mdm_ips": [self.add_mdm_ip], + "role": "Manager", + "port": 9011, + "management_ips": [self.add_mdm_ip], + "virtual_interfaces": ["ens1"], + "allow_multiple_ips": True + }, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS + ) + mdm_cluster_module_mock.powerflex_conn.system.add_standby_mdm = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.add_standby_mdm.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_add_standby_mdm_idempotency(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": MockMdmClusterApi.MDM_NAME, + "standby_mdm": { + "mdm_ips": ["10.x.z.z"], + "role": "TieBreaker", + "port": 9011 + }, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_add_standby_mdm_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "standby_node", + "standby_mdm": { + "mdm_ips": [self.add_mdm_ip], + "role": "Manager", + "port": 9011, + 
"management_ips": [self.add_mdm_ip], + "virtual_interfaces": ["ens1"], + "allow_multiple_ips": True + }, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS + ) + mdm_cluster_module_mock.powerflex_conn.system.add_standby_mdm = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.add_standby_mdm.assert_called() + assert MockMdmClusterApi.add_mdm_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_change_mdm_cluster_owner(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "sample_mdm1", + "is_primary": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.change_mdm_ownership = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.change_mdm_ownership.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_change_mdm_cluster_owner_idempotency(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": "5908d328581d1400", + "is_primary": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse( + MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert 
mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_change_mdm_cluster_owner_execption(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "sample_mdm1", + "is_primary": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.change_mdm_ownership = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.change_mdm_ownership.assert_called() + assert MockMdmClusterApi.owner_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_expand_mdm_cluster_mode(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "FiveNodes", + "mdm": [ + { + "mdm_name": MockMdmClusterApi.MDM_NAME_STB_MGR, + "mdm_id": None, + "mdm_type": "Secondary" + }, + { + "mdm_id": MockMdmClusterApi.STB_TB_MDM_ID, + "mdm_name": None, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "present-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_reduce_mdm_cluster_mode_idempotency(self, mdm_cluster_module_mock): + 
self.get_module_args.update({ + "cluster_mode": "ThreeNodes", + "mdm": [ + { + "mdm_name": None, + "mdm_id": MockMdmClusterApi.STB_MGR_MDM_ID, + "mdm_type": "Secondary" + }, + { + "mdm_id": None, + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "absent-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_expand_mdm_cluster_mode_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "FiveNodes", + "mdm": [ + { + "mdm_name": MockMdmClusterApi.MDM_NAME_STB_MGR, + "mdm_id": None, + "mdm_type": "Secondary" + }, + { + "mdm_id": MockMdmClusterApi.STB_TB_MDM_ID, + "mdm_name": None, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "present-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode.assert_called() + assert MockMdmClusterApi.switch_mode_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_reduce_mdm_cluster_mode(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "ThreeNodes", + "mdm": [ + { + "mdm_name": None, + "mdm_id": 
MockMdmClusterApi.STB_MGR_MDM_ID, + "mdm_type": "Secondary" + }, + { + "mdm_id": None, + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "absent-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.FIVE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_clear_virtual_ip_interface_mdm(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.STB_MGR_MDM_ID, + "clear_interfaces": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_clear_virtual_ip_interface_mdm_idempotency(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "sample_mdm11", + "clear_interfaces": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.FIVE_MDM_CLUSTER_DETAILS) + 
mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_get_system_id_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_module_mock.powerflex_conn.system.get = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.system_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_remove_mdm_cluster_owner_none(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "state": "absent" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.remove_mdm_no_id_name_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_expand_cluster_without_standby(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "FiveNodes", + "mdm": [ + { + "mdm_name": None, + "mdm_id": None, + "mdm_type": "Secondary" + }, + { + "mdm_id": None, + "mdm_name": None, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "present-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = 
MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS_2) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.without_standby_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_system_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + system_resp = MockSDKResponse(MockMdmClusterApi.PARTIAL_SYSTEM_DETAILS_1) + mdm_cluster_module_mock.powerflex_conn.system.get = MagicMock( + return_value=system_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value={} + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.no_cluster_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_clear_virtual_ip_interface_mdm_id_none(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": None, + "clear_interfaces": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.is_mdm_name_id_exists = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS['master'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.id_none_interface_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_rename_mdm_id_none(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": None, + "mdm_new_name": "new_node", + "state": "present" + }) + mdm_cluster_module_mock.module.params = 
self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.is_mdm_name_id_exists = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS['master'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.id_none_rename_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_change_owner_id_none(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": None, + "is_primary": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.is_mdm_name_id_exists = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS['master'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.id_none_change_owner_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_multiple_system_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + system_resp = MockSDKResponse(MockMdmClusterApi.PARTIAL_SYSTEM_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get = MagicMock( + return_value=system_resp.__dict__['data'] + ) + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.multiple_system_failed_response() in 
mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_add_standby_mdm_new_name_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "standby_node", + "standby_mdm": { + "mdm_ips": [self.add_mdm_ip], + "role": "Manager", + "port": 9011, + "management_ips": [self.add_mdm_ip], + "virtual_interfaces": ["ens1"], + "allow_multiple_ips": True + }, + "mdm_new_name": "new_node", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS + ) + mdm_cluster_module_mock.is_mdm_name_id_exists = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS['master'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.new_name_add_mdm_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py new file mode 100644 index 00000000..ced9fc7f --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py @@ -0,0 +1,236 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for Protection Domain module on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_protection_domain_api import MockProtectionDomainApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKResponse +from 
ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.protection_domain import PowerFlexProtectionDomain + + +class TestPowerflexProtectionDomain(): + + get_module_args = { + 'hostname': '**.***.**.***', + 'protection_domain_id': '7bd6457000000000', + 'protection_domain_name': None, + 'protection_domain_new_name': None, + 'is_active': True, + 'network_limits': { + 'rebuild_limit': 10240, + 'rebalance_limit': 10240, + 'vtree_migration_limit': 10240, + 'overall_limit': 20480, + 'bandwidth_unit': 'KBps', + }, + 'rf_cache_limits': { + 'is_enabled': None, + 'page_size': 4, + 'max_io_limit': 16, + 'pass_through_mode': 'None' + }, + 'state': 'present' + } + + @pytest.fixture + def protection_domain_module_mock(self, mocker): + mocker.patch(MockProtectionDomainApi.MODULE_UTILS_PATH + '.PowerFlexClient', new=MockApiException) + protection_domain_module_mock = PowerFlexProtectionDomain() + return protection_domain_module_mock + + def test_get_protection_domain_response(self, protection_domain_module_mock): + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.perform_module_operation() + protection_domain_module_mock.powerflex_conn.protection_domain.get.assert_called() + + def test_create_protection_domain(self, protection_domain_module_mock): + self.get_module_args.update({ + 
"protection_domain_name": "test_domain", + "state": "present" + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.get_protection_domain = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'][0] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.create = MagicMock(return_values=None) + protection_domain_module_mock.perform_module_operation() + assert (self.get_module_args['protection_domain_name'] == + protection_domain_module_mock.module.exit_json.call_args[1]["protection_domain_details"]['name']) + assert protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_modify_protection_domain(self, protection_domain_module_mock): + self.get_module_args.update({ + 'network_limits': { + 'rebuild_limit': 10, + 'rebalance_limit': 10, + 'vtree_migration_limit': 11, + 'overall_limit': 21, + 'bandwidth_unit': 'GBps', + } + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + sp_resp = MockSDKResponse(MockProtectionDomainApi.STORAGE_POOL) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.get_storage_pools = MagicMock( + return_value=sp_resp.__dict__['data']['storagepool'] + ) + protection_domain_module_mock.perform_module_operation() + protection_domain_module_mock.powerflex_conn.protection_domain.network_limits.assert_called() + + def test_rename_protection_domain(self, protection_domain_module_mock): + self.get_module_args.update({ + 'protection_domain_new_name': 'new_test_domain' + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + 
protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.perform_module_operation() + protection_domain_module_mock.powerflex_conn.protection_domain.rename.assert_called() + + def test_inactivate_protection_domain(self, protection_domain_module_mock): + self.get_module_args.update({ + 'is_active': False + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.perform_module_operation() + protection_domain_module_mock.powerflex_conn.protection_domain. \ + inactivate.assert_called() + + def test_activate_protection_domain(self, protection_domain_module_mock): + self.get_module_args.update({ + 'is_active': True + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.activate = MagicMock(return_value=None) + protection_domain_module_mock.perform_module_operation() + assert protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_delete_protection_domain(self, protection_domain_module_mock): + self.get_module_args.update({ + 'protection_domain_name': 'new_test_domain', + 'state': 'absent' + }) + protection_domain_module_mock.module.params = self.get_module_args + protection_domain_module_mock.get_protection_domain = MagicMock(return_values=None) + protection_domain_module_mock.perform_module_operation() + assert 
protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_delete_protection_domain_throws_exception(self, protection_domain_module_mock): + self.get_module_args.update({ + 'protection_domain_id': '7bd6457000000000', + 'state': 'absent' + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.delete = MagicMock( + side_effect=utils.PowerFlexClient) + protection_domain_module_mock.perform_module_operation() + assert MockProtectionDomainApi.delete_pd_failed_msg(self.get_module_args['protection_domain_id']) in \ + protection_domain_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_with_404_exception(self, protection_domain_module_mock): + MockProtectionDomainApi.status = 404 + self.get_module_args.update({ + "protection_domain_name": "test_domain1" + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.create = MagicMock( + side_effect=utils.PowerFlexClient) + protection_domain_module_mock.perform_module_operation() + assert protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_modify_protection_domain_throws_exception(self, protection_domain_module_mock): + self.get_module_args.update({ + "protection_domain_id": "7bd6457000000000", + 'rf_cache_limits': { + 'is_enabled': True, + 'page_size': 64, + 'max_io_limit': 128, + 'pass_through_mode': 'invalid_Read' + } + }) + 
protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.set_rfcache_enabled = MagicMock( + side_effect=utils.PowerFlexClient) + protection_domain_module_mock.perform_module_operation() + assert MockProtectionDomainApi.modify_pd_with_failed_msg(self.get_module_args['protection_domain_id']) in \ + protection_domain_module_mock.module.fail_json.call_args[1]['msg'] + + def test_rename_protection_domain_invalid_value(self, protection_domain_module_mock): + self.get_module_args.update({ + "protection_domain_name": "test_domain", + "protection_domain_new_name": " test domain", + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.rename = MagicMock( + side_effect=utils.PowerFlexClient) + protection_domain_module_mock.perform_module_operation() + assert MockProtectionDomainApi.rename_pd_failed_msg(self.get_module_args['protection_domain_id']) in \ + protection_domain_module_mock.module.fail_json.call_args[1]['msg'] + + def test_create_protection_domain_invalid_param(self, protection_domain_module_mock): + self.get_module_args.update({ + "protection_domain_name": "test_domain1", + "protection_domain_new_name": "new_domain", + "state": "present" + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + 
return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.create = MagicMock( + side_effect=utils.PowerFlexClient) + protection_domain_module_mock.perform_module_operation() + assert MockProtectionDomainApi.version_pd_failed_msg() in \ + protection_domain_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py new file mode 100644 index 00000000..b77cfb9c --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py @@ -0,0 +1,344 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for volume module on PowerFlex""" + +from __future__ import (absolute_import, division, print_function) +from unittest.mock import Mock + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_replication_consistency_group_api import MockReplicationConsistencyGroupApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() + +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.replication_consistency_group import PowerFlexReplicationConsistencyGroup + + +class TestPowerflexReplicationConsistencyGroup(): + + get_module_args = MockReplicationConsistencyGroupApi.RCG_COMMON_ARGS + 
+ @pytest.fixture + def replication_consistency_group_module_mock(self): + replication_consistency_group_module_mock = PowerFlexReplicationConsistencyGroup() + replication_consistency_group_module_mock.module.check_mode = False + return replication_consistency_group_module_mock + + def test_get_rcg_details(self, replication_consistency_group_module_mock): + self.get_module_args.update({ + "rcg_name": "test_rcg", + "state": "present" + }) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_resp = MockReplicationConsistencyGroupApi.get_rcg_details() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=replication_consistency_group_resp + ) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get.assert_called() + + def test_get_rcg_details_with_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({ + "rcg_name": "test_rcg", + "state": "present" + }) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + side_effect=MockApiException) + replication_consistency_group_module_mock.validate_create = MagicMock() + replication_consistency_group_module_mock.perform_module_operation() + assert MockReplicationConsistencyGroupApi.get_exception_response('get_details') in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_create_rcg_snapshot_response(self, replication_consistency_group_module_mock): + self.get_module_args.update({ + "rcg_name": "test_rcg", + "create_snapshot": True, + "state": "present" + }) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_resp = 
MockReplicationConsistencyGroupApi.get_rcg_details() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=replication_consistency_group_resp + ) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.create_snapshot.assert_called() + + def test_create_rcg_snapshot_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({ + "rcg_id": "aadc17d500000000", + "create_snapshot": True, + "state": "present" + }) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_resp = MockReplicationConsistencyGroupApi.get_rcg_details() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=replication_consistency_group_resp + ) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.create_snapshot = MagicMock( + side_effect=MockApiException + ) + replication_consistency_group_module_mock.perform_module_operation() + assert MockReplicationConsistencyGroupApi.create_snapshot_exception_response('create_snapshot', self.get_module_args['rcg_id']) \ + in replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_create_rcg(self, replication_consistency_group_module_mock): + self.get_module_args.update({ + "rcg_name": "test_rcg", "rpo": 60, "protection_domain_name": "domain1", + "protection_domain_id": None, "activity_mode": "active", "state": "present", + "remote_peer": {"hostname": "1.1.1.1", "username": "username", "password": "password", + "verifycert": "verifycert", "port": "port", "protection_domain_name": "None", + "protection_domain_id": "123"}}) + replication_consistency_group_module_mock.module.params = self.get_module_args + 
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=None + ) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.create.assert_called() + + def test_modify_rpo(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "rpo": 60, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details() + ) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.modify_rpo.assert_called() + + def test_modify_rpo_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "rpo": 60, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.modify_rpo = MagicMock( + side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Modify rpo for replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID + MockReplicationConsistencyGroupApi.FAIL_MSG in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_modify_target_volume_access_mode(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "target_volume_access_mode": "Readonly", 
"state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details() + ) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.modify_target_volume_access_mode.assert_called() + + def test_modify_target_volume_access_mode_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "target_volume_access_mode": "Readonly", "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.modify_target_volume_access_mode = \ + MagicMock(side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Modify target volume access mode for replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \ + + MockReplicationConsistencyGroupApi.FAIL_MSG in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_modify_activity_mode(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "activity_mode": "Inactive", "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.perform_module_operation() + 
        # Tail of the preceding test (its start is above this view): verify the
        # mocked SDK inactivate call was issued by perform_module_operation().
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.inactivate.assert_called()

    def test_modify_activity_mode_throws_exception(self, replication_consistency_group_module_mock):
        """Activating an Inactive RCG propagates the SDK error through fail_json."""
        # NOTE(review): self.get_module_args is a class-level dict and update()
        # mutates it in place, so keys set in one test leak into later tests in
        # this class -- consider copying per test; TODO confirm intended.
        self.get_module_args.update({"rcg_name": "test_rcg", "activity_mode": "Active", "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details(activity_mode="Inactive"))
        # Force the activate call itself to raise.
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.activate = \
            MagicMock(side_effect=MockApiException)
        replication_consistency_group_module_mock.perform_module_operation()
        assert "Modify activity_mode for replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \
            + MockReplicationConsistencyGroupApi.FAIL_MSG in \
            replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']

    def test_pause_rcg(self, replication_consistency_group_module_mock):
        """pause=True with a pause_mode invokes the SDK pause call."""
        self.get_module_args.update({"rcg_name": "test_rcg", "pause": True,
                                     "pause_mode": "StopDataTransfer", "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details()
        )
        replication_consistency_group_module_mock.perform_module_operation()
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.pause.assert_called()

    def test_pause_rcg_throws_exception(self, replication_consistency_group_module_mock):
        """A failing SDK pause call is reported via fail_json."""
        self.get_module_args.update({"rcg_name": "test_rcg", "pause": True,
                                     "pause_mode": "StopDataTransfer", "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.pause = \
            MagicMock(side_effect=MockApiException)
        replication_consistency_group_module_mock.perform_module_operation()
        assert "Pause replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \
            + MockReplicationConsistencyGroupApi.FAIL_MSG in \
            replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']

    def test_resume_rcg(self, replication_consistency_group_module_mock):
        """pause=False on a paused RCG (pause_mode set) invokes the SDK resume call."""
        self.get_module_args.update({"rcg_name": "test_rcg", "pause": False, "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details(pause_mode="StopDataTransfer"))
        replication_consistency_group_module_mock.perform_module_operation()
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.resume.assert_called()

    def test_resume_rcg_throws_exception(self, replication_consistency_group_module_mock):
        """A failing SDK resume call is reported via fail_json."""
        self.get_module_args.update({"rcg_name": "test_rcg", "pause": False, "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details(pause_mode="StopDataTransfer"))
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.resume = \
            MagicMock(side_effect=MockApiException)
        replication_consistency_group_module_mock.perform_module_operation()
        assert "Resume replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \
            + MockReplicationConsistencyGroupApi.FAIL_MSG in \
            replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']

    def test_freeze_rcg(self, replication_consistency_group_module_mock):
        """freeze=True invokes the SDK freeze call."""
        self.get_module_args.update({"rcg_name": "test_rcg", "freeze": True, "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
        replication_consistency_group_module_mock.perform_module_operation()
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.freeze.assert_called()

    def test_freeze_rcg_throws_exception(self, replication_consistency_group_module_mock):
        """A failing SDK freeze call is reported via fail_json."""
        self.get_module_args.update({"rcg_name": "test_rcg", "freeze": True, "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.freeze = \
            MagicMock(side_effect=MockApiException)
        replication_consistency_group_module_mock.perform_module_operation()
        assert "Freeze replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \
            + MockReplicationConsistencyGroupApi.FAIL_MSG in \
            replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']

    def test_unfreeze_rcg(self, replication_consistency_group_module_mock):
        """freeze=False on a Frozen RCG invokes the SDK unfreeze call."""
        self.get_module_args.update({"rcg_name": "test_rcg", "freeze": False, "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details(freeze_state="Frozen")
        )
        replication_consistency_group_module_mock.perform_module_operation()
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.unfreeze.assert_called()

    def test_unfreeze_rcg_throws_exception(self, replication_consistency_group_module_mock):
        """A failing SDK unfreeze call is reported via fail_json."""
        self.get_module_args.update({"rcg_name": "test_rcg", "freeze": False, "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details(freeze_state="Frozen"))
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.unfreeze = \
            MagicMock(side_effect=MockApiException)
        replication_consistency_group_module_mock.perform_module_operation()
        assert "Unfreeze replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \
            + MockReplicationConsistencyGroupApi.FAIL_MSG in \
            replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']

    def test_rename_rcg(self, replication_consistency_group_module_mock):
        """Supplying new_rcg_name invokes the SDK rename_rcg call."""
        self.get_module_args.update({"rcg_name": "test_rcg", "new_rcg_name": "test_rcg_rename", "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
        replication_consistency_group_module_mock.perform_module_operation()
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.rename_rcg.assert_called()

    def test_rename_rcg_throws_exception(self, replication_consistency_group_module_mock):
        """A failing SDK rename_rcg call is reported via fail_json."""
        self.get_module_args.update({"rcg_name": "test_rcg", "new_rcg_name": "test_rcg_rename", "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.rename_rcg = \
            MagicMock(side_effect=MockApiException)
        replication_consistency_group_module_mock.perform_module_operation()
        assert "Renaming replication consistency group to test_rcg_rename failed with error" in \
            replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']

    def test_delete_rcg(self, replication_consistency_group_module_mock):
        """state=absent on an existing RCG invokes the SDK delete call."""
        self.get_module_args.update({"rcg_name": "test_rcg", "state": "absent"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
        replication_consistency_group_module_mock.perform_module_operation()
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.delete.assert_called()

    def test_delete_rcg_throws_exception(self, replication_consistency_group_module_mock):
        """A failing SDK delete call is reported via fail_json."""
        self.get_module_args.update({"rcg_name": "test_rcg", "state": "absent"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.delete = \
            MagicMock(side_effect=MockApiException)
        replication_consistency_group_module_mock.perform_module_operation()
        assert "Delete replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID + MockReplicationConsistencyGroupApi.FAIL_MSG in \
            replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']

    def test_modify_rcg_as_inconsistent(self, replication_consistency_group_module_mock):
        """is_consistent=False invokes the SDK set_as_inconsistent call."""
        self.get_module_args.update({"rcg_name": "test_rcg", "is_consistent": False, "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
        replication_consistency_group_module_mock.perform_module_operation()
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.set_as_inconsistent.assert_called()

    def test_modify_rcg_as_consistent_throws_exception(self, replication_consistency_group_module_mock):
        """A failing SDK set_as_consistent call is reported via fail_json."""
        self.get_module_args.update({"rcg_name": "test_rcg", "is_consistent": True, "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details(consistency="InConsistent"))
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.set_as_consistent = \
            MagicMock(side_effect=MockApiException)
        replication_consistency_group_module_mock.perform_module_operation()
        assert "Modifying consistency of replication consistency group failed with error" in \
            replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']

    def test_pause_rcg_without_pause_mode(self, replication_consistency_group_module_mock):
        """pause=True without pause_mode fails with a validation message."""
        self.get_module_args.update({"rcg_name": "test_rcg", "pause": True, "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
        replication_consistency_group_module_mock.powerflex_conn.protection_domain.get = MagicMock(return_value=[{"name": "pd_id"}])
        replication_consistency_group_module_mock.perform_module_operation()
        assert "Specify pause_mode to perform pause on replication consistency group." in \
            replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']

    def test_create_rcg_with_invalid_params(self, replication_consistency_group_module_mock):
        """Creating an RCG without a remote protection domain name/id fails validation."""
        self.get_module_args.update({
            "rcg_name": "test_rcg", "activity_mode": "active", "state": "present",
            "remote_peer": {"hostname": "1.1.1.1", "username": "username", "password": "password",
                            "verifycert": "verifycert", "port": "port", "protection_domain_name": None,
                            "protection_domain_id": None}})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        # get() returning None simulates "RCG does not exist yet" -> create path.
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=None)
        replication_consistency_group_module_mock.perform_module_operation()
        assert "Enter remote protection_domain_name or protection_domain_id to create replication consistency group" in \
            replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']

    def test_pause_rcg_without_pause(self, replication_consistency_group_module_mock):
        """pause_mode without pause=True fails with a validation message."""
        self.get_module_args.update({"rcg_name": "test_rcg", "pause_mode": "StopDataTransfer", "state": "present"})
        replication_consistency_group_module_mock.module.params = self.get_module_args
        replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
            return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
        replication_consistency_group_module_mock.powerflex_conn.protection_domain.get = MagicMock(return_value=[{"name": "pd_id"}])
        replication_consistency_group_module_mock.perform_module_operation()
        assert "Specify pause as True to pause replication consistency group" in \
            replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']


# ---------------------------------------------------------------------------
# New file in this diff: tests/unit/plugins/modules/test_storagepool.py
# ---------------------------------------------------------------------------

# Copyright: (c) 2022, Dell Technologies

# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)

"""Unit Tests for storage pool module on PowerFlex"""

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

import pytest
from mock.mock import MagicMock
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api import MockStoragePoolApi
# NOTE(review): MockSDKResponse is imported but not referenced in this file's
# visible tests -- verify it is needed before removing.
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \
    import MockSDKResponse
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \
    import MockApiException
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
    import utils

# Patch the connection helpers on the shared utils module BEFORE importing the
# module under test, so PowerFlexStoragePool() never opens a real connection.
# The ordering of these assignments relative to the imports below is load-bearing.
utils.get_logger = MagicMock()
utils.get_powerflex_gateway_host_connection = MagicMock()
utils.PowerFlexClient = MagicMock()

from ansible.module_utils import basic
# Replace AnsibleModule so module argument parsing / exit_json / fail_json are mocks.
basic.AnsibleModule = MagicMock()
from ansible_collections.dellemc.powerflex.plugins.modules.storagepool import PowerFlexStoragePool


class TestPowerflexStoragePool():
    """Unit tests for the PowerFlexStoragePool module wrapper (all SDK calls mocked)."""

    # Shared, mutable class-level dict of baseline module parameters; each test
    # update()s it in place, so state can leak between tests -- TODO confirm intended.
    get_module_args = MockStoragePoolApi.STORAGE_POOL_COMMON_ARGS

    @pytest.fixture
    def storagepool_module_mock(self, mocker):
        """Return a PowerFlexStoragePool instance with check_mode disabled.

        The 'mocker' fixture argument is unused here (patching is done at
        module import time above).
        """
        storagepool_module_mock = PowerFlexStoragePool()
        storagepool_module_mock.module.check_mode = False
        return storagepool_module_mock

    def test_get_storagepool_details(self, storagepool_module_mock):
        """Fetching an existing pool queries both details and statistics."""
        self.get_module_args.update({
            "storage_pool_name": "test_pool",
            "state": "present"
        })
        storagepool_module_mock.module.params = self.get_module_args
        storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
        storagepool_module_mock.powerflex_conn.storage_pool.get = MagicMock(
            return_value=storagepool_resp
        )
        storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
        storagepool_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
            return_value=storagepool_statistics_resp
        )
        storagepool_module_mock.perform_module_operation()
        storagepool_module_mock.powerflex_conn.storage_pool.get.assert_called()
        storagepool_module_mock.powerflex_conn.storage_pool.get_statistics.assert_called()

    def test_get_storagepool_details_with_exception(self, storagepool_module_mock):
        """A failing get_statistics call is reported via fail_json."""
        self.get_module_args.update({
            "storage_pool_name": "test_pool"
        })
        storagepool_module_mock.module.params = self.get_module_args
        storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
        storagepool_module_mock.powerflex_conn.storage_pool.get = MagicMock(
            return_value=storagepool_resp
        )
        storagepool_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
            side_effect=MockApiException
        )
        # Stub create so the failure path under test is the only side effect.
        storagepool_module_mock.create_storage_pool = MagicMock(return_value=None)
        storagepool_module_mock.perform_module_operation()
        assert MockStoragePoolApi.get_exception_response('get_details') in storagepool_module_mock.module.fail_json.call_args[1]['msg']
# ---------------------------------------------------------------------------
# New file in this diff: tests/unit/plugins/modules/test_volume.py
# ---------------------------------------------------------------------------

# Copyright: (c) 2022, Dell Technologies

# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)

"""Unit Tests for volume module on PowerFlex"""

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

import pytest
from mock.mock import MagicMock
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_volume_api import MockVolumeApi
# NOTE(review): MockSDKResponse is imported but not referenced in this file's
# visible tests -- verify it is needed before removing.
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \
    import MockSDKResponse
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \
    import MockApiException
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
    import utils

# Patch the connection helpers on the shared utils module BEFORE importing the
# module under test, so PowerFlexVolume() never opens a real connection.
# The ordering of these assignments relative to the imports below is load-bearing.
utils.get_logger = MagicMock()
utils.get_powerflex_gateway_host_connection = MagicMock()
utils.PowerFlexClient = MagicMock()

from ansible.module_utils import basic
# Replace AnsibleModule so module argument parsing / exit_json / fail_json are mocks.
basic.AnsibleModule = MagicMock()
from ansible_collections.dellemc.powerflex.plugins.modules.volume import PowerFlexVolume


class TestPowerflexVolume():
    """Unit tests for the PowerFlexVolume module wrapper (all SDK calls mocked)."""

    # Shared, mutable class-level dict of baseline module parameters; each test
    # update()s it in place, so state can leak between tests -- TODO confirm intended.
    get_module_args = MockVolumeApi.VOLUME_COMMON_ARGS

    @pytest.fixture
    def volume_module_mock(self, mocker):
        """Return a PowerFlexVolume instance with check_mode disabled.

        The 'mocker' fixture argument is unused here (patching is done at
        module import time above).
        """
        volume_module_mock = PowerFlexVolume()
        volume_module_mock.module.check_mode = False
        return volume_module_mock

    def test_get_volume_details(self, volume_module_mock):
        """Fetching an existing volume queries details and statistics."""
        self.get_module_args.update({
            "vol_name": "testing",
            "state": "present"
        })
        volume_module_mock.module.params = self.get_module_args
        volume_resp = MockVolumeApi.VOLUME_GET_LIST
        volume_module_mock.powerflex_conn.volume.get = MagicMock(
            return_value=volume_resp
        )
        # Storage pool / protection domain lookups are stubbed at the module
        # method level rather than on the SDK connection.
        volume_sp_resp = MockVolumeApi.VOLUME_STORAGEPOOL_DETAILS
        volume_module_mock.get_storage_pool = MagicMock(
            return_value=volume_sp_resp
        )
        volume_pd_resp = MockVolumeApi.VOLUME_PD_DETAILS
        volume_module_mock.get_protection_domain = MagicMock(
            return_value=volume_pd_resp
        )
        volume_statistics_resp = MockVolumeApi.VOLUME_STATISTICS
        volume_module_mock.powerflex_conn.volume.get_statistics = MagicMock(
            return_value=volume_statistics_resp
        )
        volume_module_mock.perform_module_operation()
        volume_module_mock.powerflex_conn.volume.get.assert_called()
        volume_module_mock.powerflex_conn.volume.get_statistics.assert_called()

    def test_get_volume_details_with_exception(self, volume_module_mock):
        """A failing get_statistics call is reported via fail_json."""
        self.get_module_args.update({
            "vol_name": "testing",
            "state": "present"
        })
        volume_module_mock.module.params = self.get_module_args
        volume_resp = MockVolumeApi.VOLUME_GET_LIST
        volume_module_mock.powerflex_conn.volume.get = MagicMock(
            return_value=volume_resp
        )
        volume_module_mock.powerflex_conn.volume.get_statistics = MagicMock(
            side_effect=MockApiException
        )
        # Stub create so the failure path under test is the only side effect.
        volume_module_mock.create_volume = MagicMock(return_value=None)
        volume_module_mock.perform_module_operation()
        assert MockVolumeApi.get_exception_response('get_details') in volume_module_mock.module.fail_json.call_args[1]['msg']