Diffstat
53 files changed, 2309 insertions, 677 deletions
diff --git a/ansible_collections/dellemc/powerflex/.github/CODEOWNERS b/ansible_collections/dellemc/powerflex/.github/CODEOWNERS index f3093ce14..e4209b68b 100644 --- a/ansible_collections/dellemc/powerflex/.github/CODEOWNERS +++ b/ansible_collections/dellemc/powerflex/.github/CODEOWNERS @@ -15,4 +15,4 @@ # Trisha Datta (trisha-dell) # for all files: -* @kuttattz @Bhavneet-Sharma @Jennifer-John @meenakshidembi691 @Pavan-Mudunuri @trisha-dell @felixs88 @sachin-apa +* @Bhavneet-Sharma @Jennifer-John @meenakshidembi691 @trisha-dell @felixs88 @sachin-apa diff --git a/ansible_collections/dellemc/powerflex/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/powerflex/.github/workflows/ansible-test.yml index 058c434e2..76f953872 100644 --- a/ansible_collections/dellemc/powerflex/.github/workflows/ansible-test.yml +++ b/ansible_collections/dellemc/powerflex/.github/workflows/ansible-test.yml @@ -14,7 +14,7 @@ jobs: strategy: fail-fast: false matrix: - ansible-version: [stable-2.14, stable-2.15, stable-2.16, devel] + ansible-version: [stable-2.15, stable-2.16, stable-2.17, devel] steps: - name: Check out code uses: actions/checkout@v3 @@ -50,13 +50,15 @@ jobs: matrix: python: ['3.9', '3.10', '3.11'] ansible: - - stable-2.14 - stable-2.15 - stable-2.16 + - stable-2.17 - devel exclude: - ansible: stable-2.16 python: '3.9' + - ansible: stable-2.17 + python: '3.9' - ansible: devel python: '3.9' @@ -81,13 +83,15 @@ jobs: matrix: python: ['3.9', '3.10', '3.11'] ansible: - - stable-2.14 - stable-2.15 - stable-2.16 + - stable-2.17 - devel exclude: - ansible: stable-2.16 python: '3.9' + - ansible: stable-2.17 + python: '3.9' - ansible: devel python: '3.9' runs-on: ubuntu-latest @@ -109,12 +113,14 @@ jobs: fail-fast: false matrix: python-version: ["3.9", "3.10", "3.11"] - ansible-version: [stable-2.14, stable-2.15, stable-2.16, devel] + ansible-version: [stable-2.15, stable-2.16, stable-2.17, devel] exclude: # Ansible-core 2.16 is supported only from Python 3.10 onwards - python-version: "3.9" ansible-version: stable-2.16 - python-version: "3.9" + ansible-version: stable-2.17 + - python-version: "3.9" ansible-version: devel steps: diff --git a/ansible_collections/dellemc/powerflex/CHANGELOG.rst b/ansible_collections/dellemc/powerflex/CHANGELOG.rst index 6224280f0..981bbfc43 100644 --- a/ansible_collections/dellemc/powerflex/CHANGELOG.rst +++ b/ansible_collections/dellemc/powerflex/CHANGELOG.rst @@ -4,6 +4,16 @@ Dellemc.PowerFlex Change Logs .. contents:: Topics +v2.5.0 +====== + +Minor Changes +------------- + +- Fixed the roles to support attaching the MDM cluster to the gateway. +- The storage pool module has been enhanced to support more features. +- Added support for PowerFlex Onyx version(4.6.x). 
+ v2.4.0 ====== diff --git a/ansible_collections/dellemc/powerflex/FILES.json b/ansible_collections/dellemc/powerflex/FILES.json index ac7c75c78..ba53dc1d6 100644 --- a/ansible_collections/dellemc/powerflex/FILES.json +++ b/ansible_collections/dellemc/powerflex/FILES.json @@ -32,7 +32,7 @@ "name": ".github/CODEOWNERS", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "64591e526136e05232234eaf058ee461ca6ff91ea32a07ead2da6c0e54dd917f", + "chksum_sha256": "6d71584c96824a971b8a3a52da96dada30159239f0429b35f10e2e0eb1d6a71b", "format": 1 }, { @@ -95,7 +95,7 @@ "name": ".github/workflows/ansible-test.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5804cd1364b07aa6ebe9d2d8b29598dd815c33471f6760da29039c40a6beadba", + "chksum_sha256": "6ee73bce211a732615ebb9e1fa631136c0e643d5f2bce1cb96ba7693de0904e6", "format": 1 }, { @@ -109,7 +109,7 @@ "name": "CHANGELOG.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "453a68b618853e7feccf984745d38fb5566aab5d3de884790f5fa85c28347993", + "chksum_sha256": "42311210c546ae7974fb2278599b4a88072515b26ad366ac30d087b7da5fe1cf", "format": 1 }, { @@ -130,7 +130,7 @@ "name": "README.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7b4e0d601cddc58a5b325e2543789d29ea69f5dd362c080a16c77b0b3239a439", + "chksum_sha256": "dcbd65f6b2fbc4835b2b17f27723477430d200713a818bf7a40ef49ea27b34ee", "format": 1 }, { @@ -151,7 +151,7 @@ "name": "changelogs/changelog.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0a9799578efac17952b5672ebb2d3a4f9541aa524ede37aa2ffe0372c0399fd8", + "chksum_sha256": "406b3d8a1383caa4f2380e97981db04a1b6e524644d14c28c349839a856d0fa4", "format": 1 }, { @@ -200,49 +200,49 @@ "name": "docs/CONTRIBUTING.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f054a45c8a3b7032987d66180a9c5cc852af935e07f633489976b8f3d2b6755f", + "chksum_sha256": "d35d0f6c46fa37f4525387d514a656ec2896a0926953865f42c9422347ee75ed", "format": 1 }, { "name": "docs/INSTALLATION.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "782332edfc5dfac225338eec316fcb80264c8a80d64356b3849fa4d1063f4eb7", + "chksum_sha256": "9e6612ef91ba62f6bf3858f1818a43c2a9ddc93ce5ad2526ffd0f003472f5d68", "format": 1 }, { "name": "docs/ISSUE_TRIAGE.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "db3b92f11a5d38287ead84d7baf84655736fd7c377e88bd3fc29f44ea46ff57e", + "chksum_sha256": "ed45f202bf22a3d68b49e52dc20078bd210b4d4d6e5eccbcca7dac3aeafb92c1", "format": 1 }, { "name": "docs/MAINTAINERS.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0758ca62988538fa1b83403308bfac6637b523dd20e769b889a2c54561ae1512", + "chksum_sha256": "f1338cc336eb09f38805ce990c9205bdcab3fbcf1c4b55637953d92ba0972bb4", "format": 1 }, { "name": "docs/MAINTAINER_GUIDE.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "475e92f2e159eaa82f4666d9fd91907000790bea8417fa720d9f57328e101078", + "chksum_sha256": "777194d83ecf45ef4b13311bfa52a862b6d4d9d3195cecc40f0636cc7ad00356", "format": 1 }, { "name": "docs/Release Notes.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "461b82fb097263724e23d0477036a9fbd7ed46f7a8ad14ff7bfc90e4dc6555a6", + "chksum_sha256": "45572c8e81821c53b3fa7f6f273858dbc954c78dcd87bc90f15270314e6ff62f", "format": 1 }, { "name": "docs/SECURITY.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "94ff66c47cb36e079846fd744ad870f627535e64326691b0421cad93feaffca2", + "chksum_sha256": "1bfb50b1b361321514beda96e3017aba1ddb9f137cddedf9e0acdd165a7af82a", 
"format": 1 }, { @@ -263,98 +263,98 @@ "name": "docs/modules/device.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "724112e62b9e42bf54860d5d6490df28db02f48a1470b222ddb44a7ad830ef8c", + "chksum_sha256": "286cc8f42910b29f45f45d514d18d1c274010c7bf1290ab258363cfa97947981", "format": 1 }, { "name": "docs/modules/fault_set.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8e5cf661716df94032a49f43d5ce8d751dea569def8ac99e26c5cfada44f4f61", + "chksum_sha256": "6983109628b14b6cf780b138d1a24fb4b810d9e038d141cea805b8ee1ddaf6f4", "format": 1 }, { "name": "docs/modules/info.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6d7cbe381aa23de4ce4acb228213a916f7ac5898ccf95105995134abf2496f3a", + "chksum_sha256": "83e21122599db39633103002e6fd4056952ae51b7aec81931dc2c48aa4ef6efd", "format": 1 }, { "name": "docs/modules/mdm_cluster.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9ffc87301f4e67b79451450f964c6559088696c246210342c65678d3b6823eaa", + "chksum_sha256": "af22c975649b7ac3c1dd5c529e23bb9e124cbe1070fd7c4a93352abd3297fa22", "format": 1 }, { "name": "docs/modules/protection_domain.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a31481e55cbcd48e2de17c5f909958a48a641c9407ca97ac81159d5a732b2769", + "chksum_sha256": "e4491d39e1ba456eb36027a550011f75ae8c2bb9c4db8ed6fac3ff4c2a78d7ec", "format": 1 }, { "name": "docs/modules/replication_consistency_group.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a7020f015b38a75b76608685358c0d40f1994e942e23728ba563ba0ad76d92d3", + "chksum_sha256": "01eb098a7f1fc4177ccc1401f1afbea57ee93e02bfd0ca6c00777fe9b9b2bd45", "format": 1 }, { "name": "docs/modules/replication_pair.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1493e8c1d08acd7c51ee0423e0a688b3ee5801c915fdd3ecbf4c40461516fef7", + "chksum_sha256": "68a488ec3acf28312649942c9e5813af0e4b32a37e3b718a8ed0f14665368d91", "format": 1 }, { "name": "docs/modules/resource_group.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d38e031f9d39e1c92241fc635abfa1cae0a8081dd77a794b03f7784d98d9eb05", + "chksum_sha256": "3f44d80f59a115994e3eb8b746cc79c791596fa716c89610204303cf7c71853c", "format": 1 }, { "name": "docs/modules/sdc.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fd486d97fd31a569846b33d38336a705e451f884a2ecd9197a90b01312e48a94", + "chksum_sha256": "01f3325b74a8cd65e3765d0b028d3459be105d9eb119c44abc8efa77106e2ee4", "format": 1 }, { "name": "docs/modules/sds.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "35b848c6fc91ff8af4608d360dc36a1b7a8134712eafd23b6b3c25c1cb4c1d86", + "chksum_sha256": "13c99c52ff4b228d8efb5fe53597b42a8e33eb46ce3d8a21bb1adda204b18985", "format": 1 }, { "name": "docs/modules/snapshot.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "df9b5ac178c0a16ba79a5d57a97e4dd0dfbb4e332af9864d8a1b90aa35227ff0", + "chksum_sha256": "8492bdabc052db257394148318299ee2702f1588042e2afe6a2530c91488f445", "format": 1 }, { "name": "docs/modules/snapshot_policy.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7b71c242c4cad07bd71731058093532976a02f9bc93ac658e65def287971cdf2", + "chksum_sha256": "6be6d8af2027e62e6a25fc72c3fc6d3aa7cee0e4afa310e66598f55bc783c446", "format": 1 }, { "name": "docs/modules/storagepool.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2954cea5c6999667466427d000d016ed770a0c9357dde997449b222b28ee8ea6", + "chksum_sha256": 
"7710d1d6ff490c4c9bf3198af7f77a285266252bc45db801930dbf5cb31b6b8f", "format": 1 }, { "name": "docs/modules/volume.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8515171be935508a35837ac2e8f58c5c3ee6f284a0f822b4d74128d2803d93f2", + "chksum_sha256": "b710062afc775994b4898765cc8434000216be778989ec86ba67ad4a4a0465fd", "format": 1 }, { @@ -368,14 +368,14 @@ "name": "meta/execution-environment.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dca92e407d7f5bb562d233cf4872326fb7d2b60a07530934b5633c75dfa3ef1f", + "chksum_sha256": "4ecb20f07e2aaee8a0797f300826a400eaa575f21f566502a3b13973a154ce72", "format": 1 }, { "name": "meta/runtime.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "272017e3158d619273cbce9badfb648a524232f573aa80e5933541215490d9b3", + "chksum_sha256": "aaef590e35a44c510ce8192aafaf8d8cdfd8c431ea47678a6c6e97061953e616", "format": 1 }, { @@ -480,7 +480,7 @@ "name": "playbooks/modules/storagepool.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7059aed35251235a82b26324e6fab795fc260c72851d72affc54295b8733e94a", + "chksum_sha256": "4d8cf1538c775aaa943a167598461c6c69b17705e09c6667f2a6914e04064c42", "format": 1 }, { @@ -592,7 +592,7 @@ "name": "plugins/doc_fragments/powerflex.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fe81e9901c08fa0086544e591ea7dda990ef30905ce80ed61ffd3f138b8e57ca", + "chksum_sha256": "b3df2bd769b95473c8c3650614cca915b4e8556416d66b66786109564a36233f", "format": 1 }, { @@ -662,7 +662,7 @@ "name": "plugins/module_utils/storage/dell/utils.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f9cdf312c0aea0c6686bcf9d1121049e222050d11a1be6c51fcbe9dab64892e8", + "chksum_sha256": "3e2c156f1c9646ac425c9cfb3c4355bce9e3d266279c1a72bf784da12ceecfcb", "format": 1 }, { @@ -760,7 +760,7 @@ "name": "plugins/modules/storagepool.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "efd6c30ca7e5d8d61c13e3e1c154d28d80c8962e256c7dc4a0114f34e41c678d", + "chksum_sha256": "ad0464fb536badce506c0f8b9d54706dd73add6936d422d9f0c9b95fad4afc77", "format": 1 }, { @@ -1236,7 +1236,7 @@ "name": "roles/powerflex_config/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "10f2358dbee525cf86fc27e1496b394bfaeb6ddcdce7af7accb194315861444b", + "chksum_sha256": "c2aa104693b08805ffad25ea7ad593881cb008947332b23a3841d7772f092f63", "format": 1 }, { @@ -1404,7 +1404,7 @@ "name": "roles/powerflex_gateway/tasks/install_keepalived.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c5012b69295e36ac759ab5fa4aa16cf0b2ac7a336964aa75b9e3f8473c4f0e64", + "chksum_sha256": "e7c9afa1618df5a94af53dbacfaa54c6a171e66b40e0f3bc3aaad172708cfb1e", "format": 1 }, { @@ -1600,7 +1600,7 @@ "name": "roles/powerflex_lia/tasks/install_lia.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2ace8d8c2789df5acea0bc156f0db8730471c2996ee7ca0194211a63708c72eb", + "chksum_sha256": "e5056e59fd0fe45e444c6c675c1598e034f2e7857951659c1a6868444e371ac0", "format": 1 }, { @@ -1761,7 +1761,7 @@ "name": "roles/powerflex_mdm/tasks/install_powerflex4x_mdm.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9c56bea05dbe554fec279bb36958e567bd784461f451e0d1b1e2342f833c8b15", + "chksum_sha256": "46e80b52c93a896554ca448cd07df5847429955684c3ab4b3266f5a405b01fe3", "format": 1 }, { @@ -1859,7 +1859,7 @@ "name": "roles/powerflex_sdc/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"9cee69ae196e0f0abecc93111ffeda653e64c7ee46e3cd7d413ae96bb96879e0", + "chksum_sha256": "b2ddcb9b937bd6067ced4504b8bb7662478ea8eeeb7aed39b68804a1796fadb7", "format": 1 }, { @@ -2062,7 +2062,7 @@ "name": "roles/powerflex_sdc/tasks/install_sdc.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "891ab050e6db8b216367d2075f31356aec6684f686e9a909c50924f70ede0e14", + "chksum_sha256": "b95f888155f80899c127aec413c9955511ee7c39e23f8b95bf87807b7d26fd82", "format": 1 }, { @@ -2076,7 +2076,7 @@ "name": "roles/powerflex_sdc/tasks/register_esxi_sdc.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ed36076c66cd63ccefb2b3deee053948a973e06525e02122da1000536533ea63", + "chksum_sha256": "506e46f2c44f7cf505b3e9099cf22ed808c9a48ed7b17dcc07badcdacc20a3fc", "format": 1 }, { @@ -2286,7 +2286,7 @@ "name": "roles/powerflex_sdr/tasks/add_sdr.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0ef7dde0476382d5348a15a3f59870c4623789c200a4710eb9e7db3ce205c3c3", + "chksum_sha256": "6760d118ec10bf4a9494106859eead19c0794d947551f5cf13d1b6e10a33d006", "format": 1 }, { @@ -2300,7 +2300,7 @@ "name": "roles/powerflex_sdr/tasks/remove_sdr.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2852a831ab356a6d98ffc7c4168ab07555371253333dcf020fe651a06d1aa56c", + "chksum_sha256": "8dde871cd8d0459063a717a090523bc7ea6afe025048275256599f99663cfe62", "format": 1 }, { @@ -2440,7 +2440,7 @@ "name": "roles/powerflex_sds/tasks/install_sds.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "eb7b2291ea143accdb28777ab6bd4b5929ebd0f569891d9b47ce13ad8b0b9b76", + "chksum_sha256": "8156904dc2f87c9ef2d553f6b4c55d93767e9e96fe249f690c11d5a83e577b22", "format": 1 }, { @@ -2619,6 +2619,13 @@ "format": 1 }, { + "name": "roles/powerflex_tb/tasks/connect_mdm_cluster.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f971a96331dbf2ba033ee2c3ef1828cb1440cc69e86301c7648d2c1dc7a9ca6", + "format": 1 + }, + { "name": "roles/powerflex_tb/tasks/install_tb.yml", "ftype": "file", "chksum_type": "sha256", @@ -2636,7 +2643,7 @@ "name": "roles/powerflex_tb/tasks/install_tb4x.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "40b6cf736e196e90e599dc4808970ebd64b9a1f848ad7ea0c29184408ecb6ea8", + "chksum_sha256": "cf82243b080e35c3d04e519d1140559474258722dd8eecd4a555f208364fc69f", "format": 1 }, { @@ -2650,7 +2657,7 @@ "name": "roles/powerflex_tb/tasks/set_tb_ips.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3f6c75067f5575f48ada843abd9456ace5582fdc9f8e0d5483ea46724a0f35f0", + "chksum_sha256": "ae2a84b434ef8c1ed46af20bd506d84e7f3871098bc4e96cb69f8a626da3c013", "format": 1 }, { @@ -2850,24 +2857,17 @@ "format": 1 }, { - "name": "tests/sanity/ignore-2.14.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "71fd11298139d7839c2d0cf9c6933ad687b3a6bf7ca09862ea3b984e43bf72a4", - "format": 1 - }, - { "name": "tests/sanity/ignore-2.15.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "71fd11298139d7839c2d0cf9c6933ad687b3a6bf7ca09862ea3b984e43bf72a4", + "chksum_sha256": "a147975e23ec4159543142fdfd6993febe9b847f213ccf68226d3f14803c9987", "format": 1 }, { "name": "tests/sanity/ignore-2.16.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9ae1170098d5b400a80d561fbaef8149b6707489c5f1424b64d418b80f1cecb9", + "chksum_sha256": "4a88db7e60ce66fe91fb6839b1bce91d4e2a0073e09a59b022b3e71ab5f7330b", "format": 1 }, { @@ -3056,7 +3056,7 @@ "name": "tests/unit/plugins/module_utils/mock_storagepool_api.py", 
"ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5e7e3dfc7f6ac68a53092f5ba3292ec4c7c861f6972ca9c290f223ef10c8afad", + "chksum_sha256": "e4f59aab1f7a3c8a10fa218e5d19bea3584736400767c36b93a9a8967b8b6e16", "format": 1 }, { @@ -3105,7 +3105,7 @@ "name": "tests/unit/plugins/modules/test_mdm_cluster.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4a242b57ed85421cb8823e0814484d077407f00c761e23169542ac34cc9aa0d3", + "chksum_sha256": "e4dc16742a2d276a678746695798c8d08ba7d1309152cc4c70fc8dbe7b3d55bf", "format": 1 }, { @@ -3161,7 +3161,7 @@ "name": "tests/unit/plugins/modules/test_storagepool.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8c6bad9def6e6b32b7358bca2c4494be3c077fe49b47b08fc2e0c7305fcdb685", + "chksum_sha256": "f686969427a273790d3ec2b4376968492b5eb4851396fefd8228f4e82694b8a0", "format": 1 }, { diff --git a/ansible_collections/dellemc/powerflex/MANIFEST.json b/ansible_collections/dellemc/powerflex/MANIFEST.json index 9e4656fef..38b48b7a3 100644 --- a/ansible_collections/dellemc/powerflex/MANIFEST.json +++ b/ansible_collections/dellemc/powerflex/MANIFEST.json @@ -2,7 +2,7 @@ "collection_info": { "namespace": "dellemc", "name": "powerflex", - "version": "2.4.0", + "version": "2.5.0", "authors": [ "Akash Shendge <ansible.team@dell.com>", "Arindam Datta <ansible.team@dell.com>", @@ -24,16 +24,16 @@ ], "license_file": null, "dependencies": {}, - "repository": "https://github.com/dell/ansible-powerflex/tree/2.4.0", - "documentation": "https://github.com/dell/ansible-powerflex/tree/2.4.0/docs", - "homepage": "https://github.com/dell/ansible-powerflex/tree/2.4.0", + "repository": "https://github.com/dell/ansible-powerflex/tree/main", + "documentation": "https://github.com/dell/ansible-powerflex/tree/main/docs", + "homepage": "https://github.com/dell/ansible-powerflex/tree/main", "issues": "https://www.dell.com/community/Automation/bd-p/Automation" }, "file_manifest_file": { "name": "FILES.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1e3524ca4d32c06f7162058bfe2e094e72e1b205ab39316a94dde334b0d59992", + "chksum_sha256": "51f896ec559ff97298cc533c294c99efe947cbfefff775b717b3448199bde426", "format": 1 }, "format": 1 diff --git a/ansible_collections/dellemc/powerflex/README.md b/ansible_collections/dellemc/powerflex/README.md index 6bb8ee215..b4002a0ec 100644 --- a/ansible_collections/dellemc/powerflex/README.md +++ b/ansible_collections/dellemc/powerflex/README.md @@ -1,64 +1,89 @@ # Ansible Modules for Dell Technologies PowerFlex +[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg)](https://github.com/dell/ansible-powerflex/blob/main/docs/CODE_OF_CONDUCT.md) +[![License](https://img.shields.io/github/license/dell/ansible-powerflex)](https://github.com/dell/ansible-powerflex/blob/main/LICENSE) +[![Python version](https://img.shields.io/badge/python-3.9.6+-blue.svg)](https://www.python.org/downloads/) +[![Ansible version](https://img.shields.io/badge/ansible-2.15.6+-blue.svg)](https://pypi.org/project/ansible/) +[![PyPowerFlex](https://img.shields.io/github/v/release/dell/python-powerflex?include_prereleases&label=PyPowerFlex&style=flat-square)](https://github.com/dell/python-powerflex/releases) +[![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/dell/ansible-powerflex?include_prereleases&label=latest&style=flat-square)](https://github.com/dell/ansible-powerflex/releases) 
+[![codecov](https://codecov.io/gh/dell/ansible-powerflex/branch/main/graph/badge.svg)](https://app.codecov.io/gh/dell/ansible-powerflex) + The Ansible Modules for Dell Technologies (Dell) PowerFlex allow Data Center and IT administrators to use RedHat Ansible to automate and orchestrate the provisioning and management of Dell PowerFlex storage systems. The capabilities of the Ansible modules are managing SDCs, volumes, snapshots, snapshot policy, storage pools, replication consistency groups, replication pairs, SDSs, devices, protection domains, MDM cluster, Fault Set and to gather high level facts from the storage system. The options available are list, show, create, modify and delete. These tasks can be executed by running simple playbooks written in yaml syntax. The modules are written so that all the operations are idempotent, so making multiple identical requests has the same effect as making a single request. ## Table of contents -* [Code of conduct](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/CODE_OF_CONDUCT.md) -* [Maintainer guide](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/MAINTAINER_GUIDE.md) -* [Committer guide](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/COMMITTER_GUIDE.md) -* [Contributing guide](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/CONTRIBUTING.md) -* [Branching strategy](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/BRANCHING.md) -* [List of adopters](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/ADOPTERS.md) -* [Maintainers](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/MAINTAINERS.md) -* [Support](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/SUPPORT.md) +* [Code of conduct](https://github.com/dell/ansible-powerflex/blob/main/docs/CODE_OF_CONDUCT.md) +* [Maintainer guide](https://github.com/dell/ansible-powerflex/blob/main/docs/MAINTAINER_GUIDE.md) +* [Committer guide](https://github.com/dell/ansible-powerflex/blob/main/docs/COMMITTER_GUIDE.md) +* [Contributing guide](https://github.com/dell/ansible-powerflex/blob/main/docs/CONTRIBUTING.md) +* [Branching strategy](https://github.com/dell/ansible-powerflex/blob/main/docs/BRANCHING.md) +* [List of adopters](https://github.com/dell/ansible-powerflex/blob/main/docs/ADOPTERS.md) +* [Maintainers](https://github.com/dell/ansible-powerflex/blob/main/docs/MAINTAINERS.md) +* [Support](https://github.com/dell/ansible-powerflex/blob/main/docs/SUPPORT.md) * [License](#license) -* [Security](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/SECURITY.md) +* [Security](https://github.com/dell/ansible-powerflex/blob/main/docs/SECURITY.md) * [Prerequisites](#prerequisites) * [List of Ansible modules for Dell PowerFlex](#list-of-ansible-modules-for-dell-powerflex) * [Installation and execution of Ansible modules for Dell PowerFlex](#installation-and-execution-of-ansible-modules-for-dell-powerflex) * [Releasing, Maintenance and Deprecation](#releasing-maintenance-and-deprecation) -## License -The Ansible collection for PowerFlex is released and licensed under the GPL-3.0 license. See [LICENSE](https://github.com/dell/ansible-powerflex/blob/2.4.0/LICENSE) for the full terms. Ansible modules and modules utilities that are part of the Ansible collection for PowerFlex are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-powerflex/blob/2.4.0/MODULE-LICENSE) for the full terms. 
-## Prerequisites + +## Requirements | **Ansible Modules** | **PowerFlex/VxFlex OS Version** | **SDK version** | **Python version** | **Ansible** | |---------------------|-----------------------|-------|--------------------|--------------------------| -| v2.4.0 |3.6 <br> 4.0 <br> 4.5 | 1.11.0 | 3.9.x <br> 3.10.x <br> 3.11.x | 2.14 <br> 2.15 <br> 2.16 | +| v2.5.0 |3.6 <br> 4.0 <br> 4.5 <br> APEX Block Storage for Microsoft Azure <br> APEX Block Storage for AWS | 1.12.0 | 3.9.x <br> 3.10.x <br> 3.11.x <br> 3.12.x | 2.15 <br> 2.16 <br> 2.17 | * Please follow PyPowerFlex installation instructions on [PyPowerFlex Documentation](https://github.com/dell/python-powerflex) - -## Idempotency -The modules are written in such a way that all requests are idempotent and hence fault-tolerant. It essentially means that the result of a successfully performed request is independent of the number of times it is executed. - -## List of Ansible modules for Dell PowerFlex - * [Info module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/info.rst) - * [Snapshot module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/snapshot.rst) - * [SDC module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/sdc.rst) - * [Storage pool module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/storagepool.rst) - * [Volume module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/volume.rst) - * [SDS module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/sds.rst) - * [Device Module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/device.rst) - * [Protection Domain Module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/protection_domain.rst) - * [MDM Cluster Module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/mdm_cluster.rst) - * [Replication Consistency Group Module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/replication_consistency_group.rst) - * [Replication Pair Module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/replication_pair.rst) - * [Snapshot Policy Module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/snapshot_policy.rst) - * [Fault Sets Module](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/modules/fault_set.rst) ## Installation and execution of Ansible modules for Dell PowerFlex -The installation and execution steps of Ansible modules for Dell PowerFlex can be found [here](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/INSTALLATION.md). +The installation and execution steps of Ansible modules for Dell PowerFlex can be found [here](https://github.com/dell/ansible-powerflex/blob/main/docs/INSTALLATION.md). + +## Use Cases +Refer to the [example playbooks](https://github.com/dell/ansible-powerflex/tree/main/playbooks) for how the collection can be used for [modules](https://github.com/dell/ansible-powerflex/tree/main/playbooks/modules) and [roles](https://github.com/dell/ansible-powerflex/tree/main/playbooks/roles). -## Releasing, Maintenance and Deprecation +## Testing +The following tests are done on the ansible-powerflex collection: +- Unit tests +- Integration tests +## Support +Refer to the [Support](https://github.com/dell/ansible-powerflex/blob/main/docs/SUPPORT.md) documentation for more information on the support from Dell Technologies.
+ + ## Release Notes, Maintenance and Deprecation Ansible Modules for Dell Technologies PowerFlex follows [Semantic Versioning](https://semver.org/). New versions will be released regularly if significant changes (bug fix or new feature) are made in the collection. -Released code versions are located on "release" branches with names of the form "release-x.y.z" where x.y.z corresponds to the version number. More information on branching strategy followed can be found [here](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/BRANCHING.md). +Released code versions are located on "release" branches with names of the form "release-x.y.z" where x.y.z corresponds to the version number. More information on branching strategy followed can be found [here](https://github.com/dell/ansible-powerflex/blob/main/docs/BRANCHING.md). + +Ansible Modules for Dell Technologies PowerFlex deprecation cycle is aligned with that of [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html). -Ansible Modules for Dell Technologies PowerFlex deprecation cycle is aligned with that of [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html).
\ No newline at end of file +See [change logs](https://github.com/dell/ansible-powerflex/blob/main/CHANGELOG.rst) for more information on what is new in the releases. + +## Related Information + +### Idempotency +The modules are written in such a way that all requests are idempotent and hence fault-tolerant. It essentially means that the result of a successfully performed request is independent of the number of times it is executed. + +### List of Ansible modules for Dell PowerFlex + * [Info module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/info.rst) + * [Snapshot module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/snapshot.rst) + * [SDC module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/sdc.rst) + * [Storage pool module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/storagepool.rst) + * [Volume module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/volume.rst) + * [SDS module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/sds.rst) + * [Device Module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/device.rst) + * [Protection Domain Module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/protection_domain.rst) + * [MDM Cluster Module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/mdm_cluster.rst) + * [Replication Consistency Group Module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/replication_consistency_group.rst) + * [Replication Pair Module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/replication_pair.rst) + * [Snapshot Policy Module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/snapshot_policy.rst) + * [Fault Sets Module](https://github.com/dell/ansible-powerflex/blob/main/docs/modules/fault_set.rst) + + +## License +The Ansible collection for PowerFlex is released and licensed under the GPL-3.0 license. See [LICENSE](https://github.com/dell/ansible-powerflex/blob/main/LICENSE) for the full terms. Ansible modules and modules utilities that are part of the Ansible collection for PowerFlex are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-powerflex/blob/main/MODULE-LICENSE) for the full terms.
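To make the idempotency note in the README above concrete, here is a minimal sketch of a task that can be re-run safely; the endpoint, credentials, pool name and protection domain name are placeholders, and only parameters already shown in this collection's own examples are used.

```yaml
# Hypothetical values throughout. The first run creates the pool; subsequent
# identical runs should report no change because the request is idempotent.
- name: Idempotent storage pool request
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Ensure the storage pool exists with the desired settings
      dellemc.powerflex.storagepool:
        hostname: "10.1.1.1"
        username: "admin"
        password: "password"
        validate_certs: false
        storage_pool_name: "demo_pool"
        protection_domain_name: "demo_domain"
        use_rmcache: true
        state: "present"
```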
\ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml b/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml index a4fb3c6c8..15db60f2a 100644 --- a/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml +++ b/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml @@ -163,3 +163,10 @@ releases: minor_changes: - Added support for executing Ansible PowerFlex modules and roles on AWS environment. release_date: '2024-04-30' + 2.5.0: + changes: + minor_changes: + - The storage pool module has been enhanced to support more features. + - Fixed the roles to support attaching the MDM cluster to the gateway. + - Added support for PowerFlex Onyx version(4.6.x). + release_date: '2024-05-31' diff --git a/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md b/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md index 84531753a..12ce41fec 100644 --- a/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md +++ b/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md @@ -10,7 +10,7 @@ You may obtain a copy of the License at # How to contribute -Become one of the contributors to this project! We thrive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/2.4.0/CODE_OF_CONDUCT.md). +Become one of the contributors to this project! We thrive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/main/CODE_OF_CONDUCT.md). ## Table of contents @@ -76,7 +76,7 @@ Triage helps ensure that issues resolve quickly by: If you don't have the knowledge or time to code, consider helping with _issue triage_. The Ansible modules for Dell PowerFlex community will thank you for saving them time by spending some of yours. -Read more about the ways you can [Triage issues](https://github.com/dell/ansible-powerflex/blob/2.4.0/ISSUE_TRIAGE.md). +Read more about the ways you can [Triage issues](https://github.com/dell/ansible-powerflex/blob/main/ISSUE_TRIAGE.md). ## Your first contribution @@ -89,7 +89,7 @@ When you're ready to contribute, it's time to create a pull request. ## Branching -* [Branching Strategy for Ansible modules for Dell PowerFlex](https://github.com/dell/ansible-powerflex/blob/2.4.0/BRANCHING.md) +* [Branching Strategy for Ansible modules for Dell PowerFlex](https://github.com/dell/ansible-powerflex/blob/main/BRANCHING.md) ## Signing your commits @@ -144,7 +144,7 @@ Make sure that the title for your pull request uses the same format as the subje ### Quality gates for pull requests -GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that get checked into the repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. 
If help is needed, please feel free to reach out the maintainers of the project for [support](https://github.com/dell/ansible-powerflex/blob/2.4.0/SUPPORT.md). +GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that get checked into the repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out the maintainers of the project for [support](https://github.com/dell/ansible-powerflex/blob/main/SUPPORT.md). #### Code sanitization diff --git a/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md b/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md index c2b8df3ba..f3e0e1c01 100644 --- a/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md +++ b/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md @@ -41,7 +41,7 @@ You may obtain a copy of the License at * Download the latest tar build from any of the available distribution channel [Ansible Galaxy](https://galaxy.ansible.com/dellemc/powerflex) /[Automation Hub](https://console.redhat.com/ansible/automation-hub/repo/published/dellemc/powerflex) and use this command to install the collection anywhere in your system: - ansible-galaxy collection install dellemc-powerflex-2.4.0.tar.gz -p <install_path> + ansible-galaxy collection install dellemc-powerflex-2.5.0.tar.gz -p <install_path> * Set the environment variable: @@ -68,7 +68,7 @@ You may obtain a copy of the License at ## Ansible modules execution -The Ansible server must be configured with Python library for PowerFlex to run the Ansible playbooks. The [Documents](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/) provide information on different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on various parameters which needs to be configured before running the modules. +The Ansible server must be configured with Python library for PowerFlex to run the Ansible playbooks. The [Documents](https://github.com/dell/ansible-powerflex/blob/main/docs/) provide information on different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on various parameters which needs to be configured before running the modules. ## SSL certificate validation diff --git a/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md b/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md index 50d4665ef..317216157 100644 --- a/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md +++ b/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md @@ -41,10 +41,10 @@ This section describes the various issue templates and the expected content. Should explain what happened, what was expected and how to reproduce it together with any additional information that may help giving a complete picture of what happened such as screenshots, output and any environment related information that's applicable and/or maybe related to the reported problem: - - Ansible Version: [e.g. 2.14] + - Ansible Version: [e.g. 2.15] - Python Version [e.g. 3.11] - - Ansible modules for Dell PowerFlex Version: [e.g. 2.4.0] - - PowerFlex SDK version: [e.g. PyPowerFlex 1.11.0] + - Ansible modules for Dell PowerFlex Version: [e.g. 2.5.0] + - PowerFlex SDK version: [e.g. 
PyPowerFlex 1.12.0] - Any other additional information... #### Feature requests diff --git a/ansible_collections/dellemc/powerflex/docs/MAINTAINERS.md b/ansible_collections/dellemc/powerflex/docs/MAINTAINERS.md index 4679f6d73..6f0c6100e 100644 --- a/ansible_collections/dellemc/powerflex/docs/MAINTAINERS.md +++ b/ansible_collections/dellemc/powerflex/docs/MAINTAINERS.md @@ -9,10 +9,7 @@ You may obtain a copy of the License at --> # Maintainers - -* Ananthu Kuttattu (kuttattz) * Bhavneet Sharma (Bhavneet-Sharma) * Jennifer John (Jennifer-John) * Meenakshi Dembi (meenakshidembi691) -* Pavan Mudunuri (Pavan-Mudunuri) * Trisha Datta (trisha-dell) diff --git a/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md b/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md index 5f982c2f9..2832c9791 100644 --- a/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md +++ b/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md @@ -27,7 +27,7 @@ If a candidate is approved, a Maintainer contacts the candidate to invite them t ## Maintainer policies * Lead by example -* Follow the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/2.4.0/CODE_OF_CONDUCT.md) and the guidelines in the [Contributing](https://github.com/dell/ansible-powerflex/blob/2.4.0/CONTRIBUTING.md) and [Committer](https://github.com/dell/ansible-powerflex/blob/2.4.0/COMMITTER_GUIDE.md) guides +* Follow the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/main/CODE_OF_CONDUCT.md) and the guidelines in the [Contributing](https://github.com/dell/ansible-powerflex/blob/main/CONTRIBUTING.md) and [Committer](https://github.com/dell/ansible-powerflex/blob/main/COMMITTER_GUIDE.md) guides * Promote a friendly and collaborative environment within our community * Be actively engaged in discussions, answering questions, updating defects, and reviewing pull requests * Criticize code, not people. Ideally, tell the contributor a better way to do what they need. diff --git a/ansible_collections/dellemc/powerflex/docs/Release Notes.md b/ansible_collections/dellemc/powerflex/docs/Release Notes.md index 1a2552326..36cf06f97 100644 --- a/ansible_collections/dellemc/powerflex/docs/Release Notes.md +++ b/ansible_collections/dellemc/powerflex/docs/Release Notes.md @@ -1,6 +1,6 @@ **Ansible Modules for Dell Technologies PowerFlex** ========================================= -### Release notes 2.4.0 +### Release notes 2.5.0 > © 2024 Dell Inc. or its subsidiaries. All rights reserved. Dell > and other trademarks are trademarks of Dell Inc. or its @@ -28,7 +28,7 @@ Table 1. Revision history | Revision | Date | Description | |----------|-----------------|-------------------------------------------------------------| -| 01 | April 2024 | Current release of Ansible Modules for Dell PowerFlex 2.4.0 | +| 01 | May 2024 | Current release of Ansible Modules for Dell PowerFlex 2.5.0 | Product description ------------------- @@ -44,10 +44,8 @@ each of the entities. New features and enhancements ----------------------------- Along with the previous release deliverables, this release supports following features - -- Added support for executing Ansible PowerFlex modules and roles on AWS environment. -- Added support for resource group provisioning to validate, deploy, edit, add nodes and delete a resource group. -- The Info module is enhanced to list out all the firmware repository. -- Added support for PowerFlex ansible modules and roles on Azure. 
+ - The storage pool module has been enhanced to support more features. + - Fixed the roles to support attaching the MDM cluster to the gateway. Known issues ------------ @@ -63,11 +61,11 @@ Limitations Distribution ------------ The software package is available for download from the [Ansible Modules -for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/2.4.0) page. +for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/main) page. Documentation ------------- -The documentation is available on [Ansible Modules for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/2.4.0/docs) +The documentation is available on [Ansible Modules for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/main/docs) page. It includes the following: - README diff --git a/ansible_collections/dellemc/powerflex/docs/SECURITY.md b/ansible_collections/dellemc/powerflex/docs/SECURITY.md index a7eab1ba4..b2de265bb 100644 --- a/ansible_collections/dellemc/powerflex/docs/SECURITY.md +++ b/ansible_collections/dellemc/powerflex/docs/SECURITY.md @@ -12,7 +12,7 @@ You may obtain a copy of the License at The Ansible modules for Dell PowerFlex repository are inspected for security vulnerabilities via blackduck scans and static code analysis. -In addition to this, there are various security checks that get executed against a branch when a pull request is created/updated. Please refer to [pull request](https://github.com/dell/ansible-powerflex/blob/2.4.0/docs/CONTRIBUTING.md#Pull-requests) for more information. +In addition to this, there are various security checks that get executed against a branch when a pull request is created/updated. Please refer to [pull request](https://github.com/dell/ansible-powerflex/blob/main/docs/CONTRIBUTING.md#Pull-requests) for more information. ## Reporting a vulnerability diff --git a/ansible_collections/dellemc/powerflex/docs/modules/device.rst b/ansible_collections/dellemc/powerflex/docs/modules/device.rst index cbeb0f813..9eccc2627 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/device.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/device.rst @@ -21,9 +21,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/docs/modules/fault_set.rst b/ansible_collections/dellemc/powerflex/docs/modules/fault_set.rst index 191ab73ca..d1d5fb321 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/fault_set.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/fault_set.rst @@ -21,9 +21,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/docs/modules/info.rst b/ansible_collections/dellemc/powerflex/docs/modules/info.rst index fd674804f..49c1f0f77 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/info.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/info.rst @@ -23,9 +23,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. 
-- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/docs/modules/mdm_cluster.rst b/ansible_collections/dellemc/powerflex/docs/modules/mdm_cluster.rst index fa73ae5d6..4c64170f3 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/mdm_cluster.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/mdm_cluster.rst @@ -23,9 +23,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/docs/modules/protection_domain.rst b/ansible_collections/dellemc/powerflex/docs/modules/protection_domain.rst index 0bd532b9a..10acf4b23 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/protection_domain.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/protection_domain.rst @@ -21,9 +21,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/docs/modules/replication_consistency_group.rst b/ansible_collections/dellemc/powerflex/docs/modules/replication_consistency_group.rst index d8d144077..f60ce5bdb 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/replication_consistency_group.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/replication_consistency_group.rst @@ -21,9 +21,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/docs/modules/replication_pair.rst b/ansible_collections/dellemc/powerflex/docs/modules/replication_pair.rst index 7c883c6fd..9da3699fc 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/replication_pair.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/replication_pair.rst @@ -21,9 +21,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/docs/modules/resource_group.rst b/ansible_collections/dellemc/powerflex/docs/modules/resource_group.rst index a72918d83..3a0d9cd34 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/resource_group.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/resource_group.rst @@ -21,9 +21,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. 
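The requirement blocks above now list only the SDK minimum (PyPowerFlex 1.12.0). As a hedged pre-flight sketch — the pip package name PyPowerFlex is assumed from the linked SDK documentation — the control node can surface its installed SDK version before any module is run:

```yaml
# Minimal sketch: print the installed PyPowerFlex version so it can be compared
# against the 1.12.0 minimum listed above. The pip package name is an assumption.
- name: Check the PowerFlex SDK on the control node
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Query pip for PyPowerFlex
      ansible.builtin.command: python -m pip show PyPowerFlex
      register: sdk_info
      changed_when: false

    - name: Show the reported version line
      ansible.builtin.debug:
        msg: "{{ sdk_info.stdout_lines | select('match', '^Version:') | list }}"
```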
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/sdc.rst b/ansible_collections/dellemc/powerflex/docs/modules/sdc.rst index 7b0871b30..6d4003f69 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/sdc.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/sdc.rst @@ -21,9 +21,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/docs/modules/sds.rst b/ansible_collections/dellemc/powerflex/docs/modules/sds.rst index 188fe9f9a..cd2730ceb 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/sds.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/sds.rst @@ -21,9 +21,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/docs/modules/snapshot.rst b/ansible_collections/dellemc/powerflex/docs/modules/snapshot.rst index e09e80069..f44b3272f 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/snapshot.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/snapshot.rst @@ -21,9 +21,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/docs/modules/snapshot_policy.rst b/ansible_collections/dellemc/powerflex/docs/modules/snapshot_policy.rst index dd683c921..21823e96d 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/snapshot_policy.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/snapshot_policy.rst @@ -21,9 +21,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/docs/modules/storagepool.rst b/ansible_collections/dellemc/powerflex/docs/modules/storagepool.rst index f9f3f271f..1cc9717bc 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/storagepool.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/storagepool.rst @@ -21,9 +21,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. @@ -80,6 +78,163 @@ Parameters Enable/Disable RMcache on a specific storage pool. + enable_zero_padding (optional, bool, None) + Enable/Disable zero padding on a specific storage pool. + + + rep_cap_max_ratio (optional, int, None) + Set replication journal capacity of a storage pool. + + + enable_rebalance (optional, bool, None) + Enable/Disable rebalance on a specific storage pool. + + + spare_percentage (optional, int, None) + Set the spare percentage of a specific storage pool. + + + rmcache_write_handling_mode (optional, str, Cached) + Set RM cache write handling mode of a storage pool. 
+ + *Passthrough* Writes skip the cache and are stored in storage only. + + *Cached* Writes are stored in both cache and storage (the default). + + Caching is only performed for IOs whose size is a multiple of 4k bytes. + + + enable_rebuild (optional, bool, None) + Enable/Disable rebuild of a specific storage pool. + + + enable_fragmentation (optional, bool, None) + Enable/Disable fragmentation of a specific storage pool. + + + parallel_rebuild_rebalance_limit (optional, int, None) + Set rebuild/rebalance parallelism limit of a storage pool. + + + persistent_checksum (optional, dict, None) + Enable/Disable persistent checksum of a specific storage pool. + + + enable (optional, bool, None) + Enable / disable persistent checksum. + + + validate_on_read (optional, bool, None) + Validate checksum upon reading data. + + + builder_limit (optional, int, 3072) + Bandwidth limit in KB/s for the checksum building process. + + Valid range is 1024 to 10240. + + + + protected_maintenance_mode_io_priority_policy (optional, dict, None) + Set protected maintenance mode I/O priority policy of a storage pool. + + + policy (optional, str, limitNumOfConcurrentIos) + The I/O priority policy for protected maintenance mode. + + ``unlimited`` Protected maintenance mode IOPS are not limited. + + ``limitNumOfConcurrentIos`` Limit the number of allowed concurrent protected maintenance mode migration I/Os to the value defined for *concurrent_ios_per_device*. + + ``favorAppIos`` Always limit the number of allowed concurrent protected maintenance mode migration I/Os to the value defined for *concurrent_ios_per_device*. + + If application I/Os are in progress, it also limits the bandwidth of protected maintenance mode migration I/Os to the limit defined for *bw_limit_per_device*. + + + concurrent_ios_per_device (optional, int, None) + The maximum number of concurrent protected maintenance mode migration I/Os per device. + + Valid range is 1 to 20. + + + bw_limit_per_device (optional, int, None) + The maximum bandwidth of protected maintenance mode migration I/Os, in KB per second, per device. + + Valid range is 1024 to 1048576. + + + + vtree_migration_io_priority_policy (optional, dict, None) + Set the I/O priority policy for V-Tree migration for a specific Storage Pool. + + + policy (optional, str, None) + The I/O priority policy for V-Tree migration. + + ``limitNumOfConcurrentIos`` Limit the number of allowed concurrent V-Tree migration I/Os (the default) to the value defined for *concurrent_ios_per_device*. + + ``favorAppIos`` Always limit the number of allowed concurrent V-Tree migration I/Os to the value defined for *concurrent_ios_per_device*. + + If application I/Os are in progress, it also limits the bandwidth of V-Tree migration I/Os to the limit defined for *bw_limit_per_device*. + + + concurrent_ios_per_device (optional, int, None) + The maximum number of concurrent V-Tree migration I/Os per device. + + Valid range is 1 to 20. + + + bw_limit_per_device (optional, int, None) + The maximum bandwidth of V-Tree migration I/Os, in KB per second, per device. + + Valid range is 1024 to 25600. + + + + rebalance_io_priority_policy (optional, dict, None) + Set the rebalance I/O priority policy for a Storage Pool. + + + policy (optional, str, favorAppIos) + Policy to use for rebalance I/O priority. + + ``unlimited`` Rebalance I/Os are not limited. + + ``limitNumOfConcurrentIos`` Limit the number of allowed concurrent rebalance I/Os.
+ + ``favorAppIos`` Limit the number and bandwidth of rebalance I/Os when application I/Os are in progress. + + + concurrent_ios_per_device (optional, int, None) + The maximum number of concurrent rebalance I/Os per device. + + Valid range is 1 to 20. + + + bw_limit_per_device (optional, int, None) + The maximum bandwidth of rebalance I/Os, in KB/s, per device. + + Valid range is 1024 to 1048576. + + + + cap_alert_thresholds (optional, dict, None) + Set the threshold for triggering capacity usage alerts. + + Alerts thresholds are calculated from each Storage Pool capacity after deducting the defined amount of spare capacity. + + + high_threshold (optional, int, None) + Threshold of the non-spare capacity of the Storage Pool that will trigger a high-priority alert, expressed as a percentage. + + This value must be lower than the *critical_threshold*. + + + critical_threshold (optional, int, None) + Threshold of the non-spare capacity of the Storage Pool that will trigger a critical-priority alert, expressed as a percentage. + + + state (True, str, None) State of the storage pool. @@ -122,7 +277,7 @@ Notes .. note:: - TRANSITIONAL media type is supported only during modification. - - The *check_mode* is not supported. + - The *check_mode* is supported. - The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. @@ -153,37 +308,75 @@ Examples storage_pool_id: "abcd1234ab12r" state: "present" - - name: Create a new storage pool by name - dellemc.powerflex.storagepool: - hostname: "{{hostname}}" - username: "{{username}}" - password: "{{password}}" - validate_certs: "{{validate_certs}}" - storage_pool_name: "ansible_test_pool" - protection_domain_id: "1c957da800000000" - media_type: "HDD" - state: "present" - - - name: Modify a storage pool by name + - name: Create a new Storage pool dellemc.powerflex.storagepool: - hostname: "{{hostname}}" - username: "{{username}}" - password: "{{password}}" - validate_certs: "{{validate_certs}}" - storage_pool_name: "ansible_test_pool" - protection_domain_id: "1c957da800000000" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + storage_pool_name: "{{ pool_name }}" + protection_domain_name: "{{ protection_domain_name }}" + cap_alert_thresholds: + high_threshold: 30 + critical_threshold: 50 + media_type: "TRANSITIONAL" + enable_zero_padding: true + rep_cap_max_ratio: 40 + rmcache_write_handling_mode: "Passthrough" + spare_percentage: 80 + enable_rebalance: false + enable_fragmentation: false + enable_rebuild: false use_rmcache: true use_rfcache: true + parallel_rebuild_rebalance_limit: 3 + protected_maintenance_mode_io_priority_policy: + policy: "unlimited" + rebalance_io_priority_policy: + policy: "unlimited" + vtree_migration_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 10 + persistent_checksum: + enable: false state: "present" - - name: Rename storage pool by id + - name: Modify a Storage pool by name dellemc.powerflex.storagepool: - hostname: "{{hostname}}" - username: "{{username}}" - password: "{{password}}" - validate_certs: "{{validate_certs}}" - storage_pool_id: "abcd1234ab12r" - storage_pool_new_name: "new_ansible_pool" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + storage_pool_name: "{{ pool_name }}" + protection_domain_name: "{{ protection_domain_name }}" + storage_pool_new_name: 
"pool_name_new" + cap_alert_thresholds: + high_threshold: 50 + critical_threshold: 70 + enable_zero_padding: false + rep_cap_max_ratio: 60 + rmcache_write_handling_mode: "Passthrough" + spare_percentage: 90 + enable_rebalance: true + enable_fragmentation: true + enable_rebuild: true + use_rmcache: true + use_rfcache: true + parallel_rebuild_rebalance_limit: 6 + protected_maintenance_mode_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 4 + rebalance_io_priority_policy: + policy: "favorAppIos" + concurrent_ios_per_device: 10 + bw_limit_per_device: 4096 + vtree_migration_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 10 + persistent_checksum: + enable: true + validate_on_read: true + builder_limit: 1024 state: "present" @@ -260,4 +453,5 @@ Authors - Arindam Datta (@dattaarindam) <ansible.team@dell.com> - P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com> +- Trisha Datta (@trisha-dell) <ansible.team@dell.com> diff --git a/ansible_collections/dellemc/powerflex/docs/modules/volume.rst b/ansible_collections/dellemc/powerflex/docs/modules/volume.rst index 16dbf2b25..1132bec40 100644 --- a/ansible_collections/dellemc/powerflex/docs/modules/volume.rst +++ b/ansible_collections/dellemc/powerflex/docs/modules/volume.rst @@ -23,9 +23,7 @@ Requirements The below requirements are needed on the host that executes this module. - A Dell PowerFlex storage system version 3.6 or later. -- Ansible-core 2.14 or later. -- PyPowerFlex 1.11.0. -- Python 3.9, 3.10 or 3.11. +- PyPowerFlex 1.12.0. diff --git a/ansible_collections/dellemc/powerflex/meta/execution-environment.yml b/ansible_collections/dellemc/powerflex/meta/execution-environment.yml index 54e87f9c9..2b747b779 100644 --- a/ansible_collections/dellemc/powerflex/meta/execution-environment.yml +++ b/ansible_collections/dellemc/powerflex/meta/execution-environment.yml @@ -1,5 +1,5 @@ --- -version: 1 +version: 3 dependencies: # Absolute/relative path of requirements.yml galaxy: requirements.yml diff --git a/ansible_collections/dellemc/powerflex/meta/runtime.yml b/ansible_collections/dellemc/powerflex/meta/runtime.yml index dd41c3e91..6711a25cc 100644 --- a/ansible_collections/dellemc/powerflex/meta/runtime.yml +++ b/ansible_collections/dellemc/powerflex/meta/runtime.yml @@ -1,5 +1,5 @@ --- -requires_ansible: ">=2.14.0" +requires_ansible: ">=2.15.0" plugin_routing: modules: dellemc_powerflex_gatherfacts: diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/storagepool.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/storagepool.yml index 7e7860a0a..7d407b610 100644 --- a/ansible_collections/dellemc/powerflex/playbooks/modules/storagepool.yml +++ b/ansible_collections/dellemc/powerflex/playbooks/modules/storagepool.yml @@ -20,7 +20,29 @@ validate_certs: "{{ validate_certs }}" storage_pool_name: "{{ pool_name }}" protection_domain_name: "{{ protection_domain_name }}" - media_type: "HDD" + cap_alert_thresholds: + high_threshold: 30 + critical_threshold: 50 + media_type: "TRANSITIONAL" + enable_zero_padding: true + rep_cap_max_ratio: 40 + rmcache_write_handling_mode: "Passthrough" + spare_percentage: 80 + enable_rebalance: false + enable_fragmentation: false + enable_rebuild: false + use_rmcache: true + use_rfcache: true + parallel_rebuild_rebalance_limit: 3 + protected_maintenance_mode_io_priority_policy: + policy: "unlimited" + rebalance_io_priority_policy: + policy: "unlimited" + vtree_migration_io_priority_policy: + policy: "limitNumOfConcurrentIos" + 
concurrent_ios_per_device: 10 + persistent_checksum: + enable: false state: "present" - name: Set pool id @@ -47,15 +69,40 @@ storage_pool_new_name: "{{ pool_name }}" state: "present" - - name: Modify a Storage pool by name dellemc.powerflex.storagepool: hostname: "{{ hostname }}" username: "{{ username }}" password: "{{ password }}" validate_certs: "{{ validate_certs }}" - storage_pool_name: "ansible_test_pool" + storage_pool_name: "{{ pool_name }}" protection_domain_name: "{{ protection_domain_name }}" + storage_pool_new_name: "pool_name_new" + cap_alert_thresholds: + high_threshold: 50 + critical_threshold: 70 + enable_zero_padding: false + rep_cap_max_ratio: 60 + rmcache_write_handling_mode: "Passthrough" + spare_percentage: 90 + enable_rebalance: true + enable_fragmentation: true + enable_rebuild: true use_rmcache: true use_rfcache: true + parallel_rebuild_rebalance_limit: 6 + protected_maintenance_mode_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 4 + rebalance_io_priority_policy: + policy: "favorAppIos" + concurrent_ios_per_device: 10 + bw_limit_per_device: 4096 + vtree_migration_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 10 + persistent_checksum: + enable: true + validate_on_read: true + builder_limit: 1024 state: "present" diff --git a/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py b/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py index 32b17a4d6..b6d8626ae 100644 --- a/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py +++ b/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py @@ -52,9 +52,7 @@ class ModuleDocFragment(object): default: 120 requirements: - A Dell PowerFlex storage system version 3.6 or later. - - Ansible-core 2.14 or later. - - PyPowerFlex 1.10.0. - - Python 3.9, 3.10 or 3.11. + - PyPowerFlex 1.12.0. notes: - The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. 
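The doc-fragment hunk above raises the collection's library requirement to PyPowerFlex 1.12.0, and the utils.py hunk that follows enforces the same minimum when a module starts. As a minimal illustrative sketch (not part of this commit), a playbook could surface that requirement early with a pre-flight task; the pkg_resources lookup mirrors the check used in utils.py, while the task name and the python3 interpreter path are assumptions:

- name: Verify PyPowerFlex meets the 1.12.0 minimum required by the collection  # illustrative pre-check, not from the commit
  ansible.builtin.command: >-
    python3 -c "import pkg_resources; print(pkg_resources.require('PyPowerFlex')[0].version)"
  register: pypowerflex_version
  changed_when: false
  failed_when: pypowerflex_version.stdout | trim is version('1.12.0', '<')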
diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py index 50f41666c..da603e648 100644 --- a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py +++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py @@ -83,10 +83,10 @@ def ensure_required_libs(module): exception=PKG_RSRC_IMP_ERR) if not HAS_POWERFLEX_SDK: - module.fail_json(msg=missing_required_lib("PyPowerFlex V 1.10.0 or above"), + module.fail_json(msg=missing_required_lib("PyPowerFlex V 1.12.0 or above"), exception=POWERFLEX_SDK_IMP_ERR) - min_ver = '1.11.0' + min_ver = '1.12.0' try: curr_version = pkg_resources.require("PyPowerFlex")[0].version supported_version = (parse_version(curr_version) >= parse_version(min_ver)) diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py b/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py index 9c8bb1d4a..5aca87b4e 100644 --- a/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py +++ b/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py @@ -1,6 +1,6 @@ #!/usr/bin/python -# Copyright: (c) 2021, Dell Technologies +# Copyright: (c) 2021-24, Dell Technologies # Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) """Ansible module for managing Dell Technologies (Dell) PowerFlex storage pool""" @@ -28,6 +28,7 @@ extends_documentation_fragment: author: - Arindam Datta (@dattaarindam) <ansible.team@dell.com> - P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com> +- Trisha Datta (@trisha-dell) <ansible.team@dell.com> options: storage_pool_name: @@ -76,6 +77,160 @@ options: description: - Enable/Disable RMcache on a specific storage pool. type: bool + enable_zero_padding: + description: + - Enable/Disable zero padding on a specific storage pool. + type: bool + rep_cap_max_ratio: + description: + - Set replication journal capacity of a storage pool. + type: int + enable_rebalance: + description: + - Enable/Disable rebalance on a specific storage pool. + type: bool + spare_percentage: + description: + - Set the spare percentage of a specific storage pool. + type: int + rmcache_write_handling_mode : + description: + - Set RM cache write handling mode of a storage pool. + - I(Passthrough) Writes skip the cache and are stored in storage only. + - I(Cached) Writes are stored in both cache and storage (the default). + - Caching is only performed for IOs whose size is a multiple of 4k bytes. + type: str + choices: ['Cached', 'Passthrough'] + default: 'Cached' + enable_rebuild: + description: + - Enable/Disable rebuild of a specific storage pool. + type: bool + enable_fragmentation: + description: + - Enable/Disable fragmentation of a specific storage pool. + type: bool + parallel_rebuild_rebalance_limit: + description: + - Set rebuild/rebalance parallelism limit of a storage pool. + type: int + persistent_checksum: + description: + - Enable/Disable persistent checksum of a specific storage pool. + type: dict + suboptions: + enable: + description: + - Enable / disable persistent checksum. + type: bool + validate_on_read: + description: + - Validate checksum upon reading data. + type: bool + builder_limit: + description: + - Bandwidth limit in KB/s for the checksum building process. + - Valid range is 1024 to 10240. 
+ default: 3072 + type: int + protected_maintenance_mode_io_priority_policy: + description: + - Set protected maintenance mode I/O priority policy of a storage pool. + type: dict + suboptions: + policy: + description: + - The I/O priority policy for protected maintenance mode. + - C(unlimited) Protected maintenance mode IOPS are not limited. + - C(limitNumOfConcurrentIos) Limit the number of allowed concurrent protected maintenance mode + migration I/Os to the value defined for I(concurrent_ios_per_device). + - C(favorAppIos) Always limit the number of allowed concurrent protected maintenance mode + migration I/Os to the value defined for I(concurrent_ios_per_device). + - If application I/Os are in progress, the bandwidth of protected maintenance mode + migration I/Os is also limited to the limit defined for I(bw_limit_per_device). + type: str + choices: ['unlimited', 'limitNumOfConcurrentIos', 'favorAppIos'] + default: 'limitNumOfConcurrentIos' + concurrent_ios_per_device: + description: + - The maximum number of concurrent protected maintenance mode migration I/Os per device. + - Valid range is 1 to 20. + type: int + bw_limit_per_device: + description: + - The maximum bandwidth of protected maintenance mode migration I/Os, + in KB per second, per device. + - Valid range is 1024 to 1048576. + type: int + vtree_migration_io_priority_policy: + description: + - Set the I/O priority policy for V-Tree migration for a specific Storage Pool. + type: dict + suboptions: + policy: + description: + - The I/O priority policy for V-Tree migration. + - C(limitNumOfConcurrentIos) Limit the number of allowed concurrent V-Tree + migration I/Os (default) to the I(concurrent_ios_per_device). + - C(favorAppIos) Always limit the number of allowed concurrent + V-Tree migration I/Os to the value defined for I(concurrent_ios_per_device). + - If application I/Os are in progress, the bandwidth of V-Tree migration I/Os + is also limited to the limit defined for I(bw_limit_per_device). + type: str + choices: ['limitNumOfConcurrentIos', 'favorAppIos'] + concurrent_ios_per_device: + description: + - The maximum number of concurrent V-Tree migration I/Os per device. + - Valid range is 1 to 20. + type: int + bw_limit_per_device: + description: + - The maximum bandwidth of V-Tree migration I/Os, + in KB per second, per device. + - Valid range is 1024 to 25600. + type: int + rebalance_io_priority_policy: + description: + - Set the rebalance I/O priority policy for a Storage Pool. + type: dict + suboptions: + policy: + description: + - Policy to use for rebalance I/O priority. + - C(unlimited) Rebalance I/Os are not limited. + - C(limitNumOfConcurrentIos) Limit the number of allowed concurrent rebalance I/Os. + - C(favorAppIos) Limit the number and bandwidth of rebalance I/Os when application I/Os are in progress. + type: str + choices: ['unlimited', 'limitNumOfConcurrentIos', 'favorAppIos'] + default: 'favorAppIos' + concurrent_ios_per_device: + description: + - The maximum number of concurrent rebalance I/Os per device. + - Valid range is 1 to 20. + type: int + bw_limit_per_device: + description: + - The maximum bandwidth of rebalance I/Os, in KB/s, per device. + - Valid range is 1024 to 1048576. + type: int + cap_alert_thresholds: + description: + - Set the threshold for triggering capacity usage alerts. + - Alert thresholds are calculated from each Storage Pool + capacity after deducting the defined amount of spare capacity.
+ type: dict + suboptions: + high_threshold: + description: + - Threshold of the non-spare capacity of the Storage Pool that will trigger a + high-priority alert, expressed as a percentage. + - This value must be lower than the I(critical_threshold). + type: int + critical_threshold: + description: + - Threshold of the non-spare capacity of the Storage Pool that will trigger a + critical-priority alert, expressed as a percentage. + type: int state: description: - State of the storage pool. @@ -84,7 +239,7 @@ options: required: true notes: - TRANSITIONAL media type is supported only during modification. - - The I(check_mode) is not supported. + - The I(check_mode) is supported. ''' EXAMPLES = r''' @@ -107,37 +262,75 @@ EXAMPLES = r''' storage_pool_id: "abcd1234ab12r" state: "present" -- name: Create a new storage pool by name - dellemc.powerflex.storagepool: - hostname: "{{hostname}}" - username: "{{username}}" - password: "{{password}}" - validate_certs: "{{validate_certs}}" - storage_pool_name: "ansible_test_pool" - protection_domain_id: "1c957da800000000" - media_type: "HDD" - state: "present" - -- name: Modify a storage pool by name +- name: Create a new Storage pool dellemc.powerflex.storagepool: - hostname: "{{hostname}}" - username: "{{username}}" - password: "{{password}}" - validate_certs: "{{validate_certs}}" - storage_pool_name: "ansible_test_pool" - protection_domain_id: "1c957da800000000" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + storage_pool_name: "{{ pool_name }}" + protection_domain_name: "{{ protection_domain_name }}" + cap_alert_thresholds: + high_threshold: 30 + critical_threshold: 50 + media_type: "TRANSITIONAL" + enable_zero_padding: true + rep_cap_max_ratio: 40 + rmcache_write_handling_mode: "Passthrough" + spare_percentage: 80 + enable_rebalance: false + enable_fragmentation: false + enable_rebuild: false use_rmcache: true use_rfcache: true + parallel_rebuild_rebalance_limit: 3 + protected_maintenance_mode_io_priority_policy: + policy: "unlimited" + rebalance_io_priority_policy: + policy: "unlimited" + vtree_migration_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 10 + persistent_checksum: + enable: false state: "present" -- name: Rename storage pool by id +- name: Modify a Storage pool by name dellemc.powerflex.storagepool: - hostname: "{{hostname}}" - username: "{{username}}" - password: "{{password}}" - validate_certs: "{{validate_certs}}" - storage_pool_id: "abcd1234ab12r" - storage_pool_new_name: "new_ansible_pool" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + storage_pool_name: "{{ pool_name }}" + protection_domain_name: "{{ protection_domain_name }}" + storage_pool_new_name: "pool_name_new" + cap_alert_thresholds: + high_threshold: 50 + critical_threshold: 70 + enable_zero_padding: false + rep_cap_max_ratio: 60 + rmcache_write_handling_mode: "Passthrough" + spare_percentage: 90 + enable_rebalance: true + enable_fragmentation: true + enable_rebuild: true + use_rmcache: true + use_rfcache: true + parallel_rebuild_rebalance_limit: 6 + protected_maintenance_mode_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 4 + rebalance_io_priority_policy: + policy: "favorAppIos" + concurrent_ios_per_device: 10 + bw_limit_per_device: 4096 + vtree_migration_io_priority_policy: + policy: "limitNumOfConcurrentIos" + 
concurrent_ios_per_device: 10 + persistent_checksum: + enable: true + validate_on_read: true + builder_limit: 1024 state: "present" ''' @@ -558,75 +751,50 @@ storage_pool_details: ''' from ansible.module_utils.basic import AnsibleModule -from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.powerflex_base \ + import PowerFlexBase +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.configuration \ + import Configuration +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ import utils LOG = utils.get_logger('storagepool') -class PowerFlexStoragePool(object): +class PowerFlexStoragePool(PowerFlexBase): """Class with StoragePool operations""" def __init__(self): """ Define all parameters required by this module""" - - self.module_params = utils.get_powerflex_gateway_host_parameters() - self.module_params.update(get_powerflex_storagepool_parameters()) - """ initialize the ansible module """ - mut_ex_args = [['storage_pool_name', 'storage_pool_id'], - ['protection_domain_name', 'protection_domain_id'], - ['storage_pool_id', 'protection_domain_name'], - ['storage_pool_id', 'protection_domain_id']] - - required_one_of_args = [['storage_pool_name', 'storage_pool_id']] - self.module = AnsibleModule(argument_spec=self.module_params, - supports_check_mode=False, - mutually_exclusive=mut_ex_args, - required_one_of=required_one_of_args) + mutually_exclusive = [['storage_pool_name', 'storage_pool_id'], + ['protection_domain_name', 'protection_domain_id'], + ['storage_pool_id', 'protection_domain_name'], + ['storage_pool_id', 'protection_domain_id']] - utils.ensure_required_libs(self.module) - - try: - self.powerflex_conn = utils.get_powerflex_gateway_host_connection( - self.module.params) - LOG.info('Got the PowerFlex system connection object instance') - except Exception as e: - LOG.error(str(e)) - self.module.fail_json(msg=str(e)) - - def get_protection_domain(self, protection_domain_name=None, - protection_domain_id=None): - """Get protection domain details - :param protection_domain_name: Name of the protection domain - :param protection_domain_id: ID of the protection domain - :return: Protection domain details - """ - name_or_id = protection_domain_id if protection_domain_id \ - else protection_domain_name - try: - filter_fields = {} - if protection_domain_id: - filter_fields = {'id': protection_domain_id} - if protection_domain_name: - filter_fields = {'name': protection_domain_name} + required_one_of = [['storage_pool_name', 'storage_pool_id']] - pd_details = self.powerflex_conn.protection_domain.get( - filter_fields=filter_fields) - if pd_details: - return pd_details[0] + ansible_module_params = { + 'argument_spec': get_powerflex_storagepool_parameters(), + 'supports_check_mode': True, + 'mutually_exclusive': mutually_exclusive, + 'required_one_of': required_one_of + } + super().__init__(AnsibleModule, ansible_module_params) - if not pd_details: - err_msg = "Unable to find the protection domain with {0}. 
" \ - "Please enter a valid protection domain" \ - " name/id.".format(name_or_id) - self.module.fail_json(msg=err_msg) + utils.ensure_required_libs(self.module) + self.result = dict( + changed=False, + storage_pool_details={} + ) - except Exception as e: - errormsg = "Failed to get the protection domain {0} with" \ - " error {1}".format(name_or_id, str(e)) - LOG.error(errormsg) - self.module.fail_json(msg=errormsg) + def get_protection_domain( + self, protection_domain_name=None, protection_domain_id=None + ): + """Get the details of a protection domain in a given PowerFlex storage + system""" + return Configuration(self.powerflex_conn, self.module).get_protection_domain( + protection_domain_name=protection_domain_name, protection_domain_id=protection_domain_id) def get_storage_pool(self, storage_pool_id=None, storage_pool_name=None, pd_id=None): @@ -648,7 +816,7 @@ class PowerFlexStoragePool(object): filter_fields.update({'protectionDomainId': pd_id}) pool_details = self.powerflex_conn.storage_pool.get( filter_fields=filter_fields) - if pool_details: + if pool_details != []: if len(pool_details) > 1: err_msg = "More than one storage pool found with {0}," \ @@ -666,10 +834,9 @@ class PowerFlexStoragePool(object): protection_domain_id=pd_id)['name'] # adding protection domain name in the pool details pool_details['protectionDomainName'] = pd_name - else: - pool_details = None + return pool_details - return pool_details + return None except Exception as e: errormsg = "Failed to get the storage pool {0} with error " \ @@ -698,192 +865,206 @@ class PowerFlexStoragePool(object): self.module.fail_json( msg="Please provide protection domain details for " "creation of a storage pool") - self.powerflex_conn.storage_pool.create( - media_type=media_type, - protection_domain_id=pd_id, name=pool_name, - use_rfcache=use_rfcache, use_rmcache=use_rmcache) + if not self.module.check_mode: + pool_id = self.powerflex_conn.storage_pool.create( + media_type=media_type, + protection_domain_id=pd_id, name=pool_name, + use_rfcache=use_rfcache, use_rmcache=use_rmcache)['id'] + + return self.get_storage_pool(storage_pool_id=pool_id, + pd_id=pd_id) - return True except Exception as e: errormsg = "Failed to create the storage pool {0} with error " \ "{1}".format(pool_name, str(e)) LOG.error(errormsg) self.module.fail_json(msg=errormsg) - def modify_storage_pool(self, pool_id, modify_dict): - """ - Modify the parameters of the storage pool. - :param modify_dict: Dict containing parameters which are to be - modified - :param pool_id: Id of the pool. - :return: True, if the operation is successful. 
- """ - - try: - - if 'new_name' in modify_dict: - self.powerflex_conn.storage_pool.rename( - pool_id, modify_dict['new_name']) - if 'use_rmcache' in modify_dict: - self.powerflex_conn.storage_pool.set_use_rmcache( - pool_id, modify_dict['use_rmcache']) - if 'use_rfcache' in modify_dict: - self.powerflex_conn.storage_pool.set_use_rfcache( - pool_id, modify_dict['use_rfcache']) - if 'media_type' in modify_dict: - self.powerflex_conn.storage_pool.set_media_type( - pool_id, modify_dict['media_type']) - return True - - except Exception as e: - err_msg = "Failed to update the storage pool {0} with error " \ - "{1}".format(pool_id, str(e)) - LOG.error(err_msg) - self.module.fail_json(msg=err_msg) - - def verify_params(self, pool_details, pd_name, pd_id): + def verify_protection_domain(self, pool_details): """ :param pool_details: Details of the storage pool :param pd_name: Name of the protection domain :param pd_id: Id of the protection domain """ - if pd_id and pd_id != pool_details['protectionDomainId']: - self.module.fail_json(msg="Entered protection domain id does not" - " match with the storage pool's " - "protection domain id. Please enter " - "a correct protection domain id.") - - if pd_name and pd_name != pool_details['protectionDomainName']: - self.module.fail_json(msg="Entered protection domain name does" - " not match with the storage pool's " - "protection domain name. Please enter" - " a correct protection domain name.") - - def perform_module_operation(self): - """ Perform different actions on Storage Pool based on user input - in the playbook """ - - pool_name = self.module.params['storage_pool_name'] - pool_id = self.module.params['storage_pool_id'] - pool_new_name = self.module.params['storage_pool_new_name'] - state = self.module.params['state'] pd_name = self.module.params['protection_domain_name'] pd_id = self.module.params['protection_domain_id'] - use_rmcache = self.module.params['use_rmcache'] - use_rfcache = self.module.params['use_rfcache'] - media_type = self.module.params['media_type'] - if media_type == "TRANSITIONAL": - media_type = 'Transitional' - - result = dict( - storage_pool_details={} - ) - changed = False - pd_details = None - if pd_name or pd_id: - pd_details = self.get_protection_domain( - protection_domain_id=pd_id, - protection_domain_name=pd_name) - if pd_details: - pd_id = pd_details['id'] - - if pool_name is not None and (len(pool_name.strip()) == 0): + if pool_details is not None: + if pd_id and pd_id != pool_details['protectionDomainId']: + self.module.fail_json(msg="Entered protection domain id does not" + " match with the storage pool's " + "protection domain id. Please enter " + "a correct protection domain id.") + + if pd_name and pd_name != pool_details['protectionDomainName']: + self.module.fail_json(msg="Entered protection domain name does" + " not match with the storage pool's " + "protection domain name. Please enter" + " a correct protection domain name.") + + def verify_storage_pool_name(self): + if (self.module.params['storage_pool_name'] is not None and + (len(self.module.params['storage_pool_name'].strip()) == 0)) or \ + (self.module.params['storage_pool_new_name'] is not None and + (len(self.module.params['storage_pool_new_name'].strip()) == 0)): self.module.fail_json( - msg="Empty or white spaced string provided in " - "storage_pool_name. Please provide valid storage" + msg="Empty or white spaced string provided for " + "storage pool name. Provide valid storage" " pool name.") - # Get the details of the storage pool. 
- pool_details = self.get_storage_pool(storage_pool_id=pool_id, - storage_pool_name=pool_name, - pd_id=pd_id) - if pool_name and pool_details: - pool_id = pool_details['id'] - self.verify_params(pool_details, pd_name, pd_id) - - # create a storage pool - if state == 'present' and not pool_details: - LOG.info("Creating new storage pool") - if pool_id: - self.module.fail_json( - msg="storage_pool_name is missing & name required to " - "create a storage pool. Please enter a valid " - "storage_pool_name.") - if pool_new_name is not None: - self.module.fail_json( - msg="storage_pool_new_name is passed during creation. " - "storage_pool_new_name is not allowed during " - "creation of a storage pool.") - changed = self.create_storage_pool( - pool_name, pd_id, media_type, use_rfcache, use_rmcache) - if changed: - pool_id = self.get_storage_pool(storage_pool_id=pool_id, - storage_pool_name=pool_name, - pd_id=pd_id)['id'] - - # modify the storage pool parameters - if state == 'present' and pool_details: - # check if the parameters are to be updated or not - if pool_new_name is not None and len(pool_new_name.strip()) == 0: - self.module.fail_json( - msg="Empty/White spaced name is not allowed during " - "renaming of a storage pool. Please enter a valid " - "storage pool new name.") - modify_dict = to_modify(pool_details, use_rmcache, use_rfcache, - pool_new_name, media_type) - if bool(modify_dict): - LOG.info("Modify attributes of storage pool") - changed = self.modify_storage_pool(pool_id, modify_dict) - - # Delete a storage pool - if state == 'absent' and pool_details: - msg = "Deleting storage pool is not supported through" \ - " ansible module." - LOG.error(msg) - self.module.fail_json(msg=msg) - - # Show the updated storage pool details - if state == 'present': - pool_details = self.get_storage_pool(storage_pool_id=pool_id) - # fetching Id from pool details to address a case where - # protection domain is not passed - pd_id = pool_details['protectionDomainId'] - pd_name = self.get_protection_domain( - protection_domain_id=pd_id)['name'] - # adding protection domain name in the pool details - pool_details['protectionDomainName'] = pd_name - result['storage_pool_details'] = pool_details - result['changed'] = changed - - self.module.exit_json(**result) - - -def to_modify(pool_details, use_rmcache, use_rfcache, new_name, media_type): - """ - Check whether a parameter is required to be updated. - - :param media_type: Type of the media supported by the pool. 
- :param pool_details: Details of the storage pool - :param use_rmcache: Enable/Disable RMcache on pool - :param use_rfcache: Enable/Disable RFcache on pool - :param new_name: New name for the storage pool - :return: dict, containing parameters to be modified - """ - pool_name = pool_details['name'] - pool_use_rfcache = pool_details['useRfcache'] - pool_use_rmcache = pool_details['useRmcache'] - pool_media_type = pool_details['mediaType'] - modify_params = {} - - if new_name is not None and pool_name != new_name: - modify_params['new_name'] = new_name - if use_rfcache is not None and pool_use_rfcache != use_rfcache: - modify_params['use_rfcache'] = use_rfcache - if use_rmcache is not None and pool_use_rmcache != use_rmcache: - modify_params['use_rmcache'] = use_rmcache - if media_type is not None and media_type != pool_media_type: - modify_params['media_type'] = media_type - return modify_params + def set_persistent_checksum(self, pool_details, pool_params): + try: + if pool_params['persistent_checksum']['enable']: + if pool_details['persistentChecksumEnabled'] is not True: + self.powerflex_conn.storage_pool.set_persistent_checksum( + storage_pool_id=pool_details['id'], + enable=pool_params['persistent_checksum']['enable'], + validate=pool_params['persistent_checksum']['validate_on_read'], + builder_limit=pool_params['persistent_checksum']['builder_limit']) + else: + self.powerflex_conn.storage_pool.modify_persistent_checksum( + storage_pool_id=pool_details['id'], + validate=pool_params['persistent_checksum']['validate_on_read'], + builder_limit=pool_params['persistent_checksum']['builder_limit']) + + pool_details = self.get_storage_pool(storage_pool_id=pool_details['id']) + return pool_details + + except Exception as e: + err_msg = "Failed to set persistent checksum with error " \ + "{0}".format(str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def to_modify_persistent_checksum(self, pool_details, pool_params): + checksum_dict = dict() + if pool_params['persistent_checksum']['enable'] is not None and \ + pool_params['persistent_checksum']['enable'] != pool_details['persistentChecksumEnabled']: + checksum_dict['enable'] = pool_params['persistent_checksum']['enable'] + + if pool_params['persistent_checksum']['validate_on_read'] is not None and \ + pool_params['persistent_checksum']['validate_on_read'] != pool_details['persistentChecksumValidateOnRead'] and \ + pool_params['persistent_checksum']['enable'] is True: + checksum_dict['validate_on_read'] = pool_params['persistent_checksum']['validate_on_read'] + + if pool_params['persistent_checksum']['builder_limit'] is not None and \ + pool_params['persistent_checksum']['builder_limit'] != pool_details['persistentChecksumBuilderLimitKb'] and \ + pool_params['persistent_checksum']['enable'] is True: + checksum_dict['builder_limit'] = pool_params['persistent_checksum']['builder_limit'] + + return checksum_dict + + def to_modify_rebalance_io_priority_policy(self, pool_details, pool_params): + + policy_dict = { + 'policy': None, + 'concurrent_ios': None, + 'bw_limit': None + } + modify = False + if pool_params['rebalance_io_priority_policy']['policy'] is not None and \ + pool_params['rebalance_io_priority_policy']['policy'] != pool_details['rebalanceIoPriorityPolicy']: + policy_dict['policy'] = pool_params['rebalance_io_priority_policy']['policy'] + modify = True + + if pool_params['rebalance_io_priority_policy']['concurrent_ios_per_device'] is not None and \ + 
pool_params['rebalance_io_priority_policy']['concurrent_ios_per_device'] != pool_details['rebalanceIoPriorityNumOfConcurrentIosPerDevice']: + policy_dict['concurrent_ios'] = str(pool_params['rebalance_io_priority_policy']['concurrent_ios_per_device']) + + if pool_params['rebalance_io_priority_policy']['bw_limit_per_device'] is not None and \ + pool_params['rebalance_io_priority_policy']['bw_limit_per_device'] != pool_details['rebalanceIoPriorityBwLimitPerDeviceInKbps']: + policy_dict['bw_limit'] = str(pool_params['rebalance_io_priority_policy']['bw_limit_per_device']) + + if policy_dict['policy'] is None and (policy_dict['concurrent_ios'] is not None or policy_dict['bw_limit'] is not None): + policy_dict['policy'] = pool_details['rebalanceIoPriorityPolicy'] + modify = True + + if modify is True: + return policy_dict + else: + return None + + def to_modify_vtree_migration_io_priority_policy(self, pool_details, pool_params): + policy_dict = { + 'policy': None, + 'concurrent_ios': None, + 'bw_limit': None + } + modify = False + if pool_params['vtree_migration_io_priority_policy']['policy'] is not None and \ + pool_params['vtree_migration_io_priority_policy']['policy'] != pool_details['vtreeMigrationIoPriorityPolicy']: + policy_dict['policy'] = pool_params['vtree_migration_io_priority_policy']['policy'] + modify = True + + if pool_params['vtree_migration_io_priority_policy']['concurrent_ios_per_device'] is not None and \ + pool_params['vtree_migration_io_priority_policy']['concurrent_ios_per_device'] != \ + pool_details['vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice']: + policy_dict['concurrent_ios'] = str(pool_params['vtree_migration_io_priority_policy']['concurrent_ios_per_device']) + + if pool_params['vtree_migration_io_priority_policy']['bw_limit_per_device'] is not None and \ + pool_params['vtree_migration_io_priority_policy']['bw_limit_per_device'] != \ + pool_details['vtreeMigrationIoPriorityBwLimitPerDeviceInKbps']: + policy_dict['bw_limit'] = str(pool_params['vtree_migration_io_priority_policy']['bw_limit_per_device']) + + if policy_dict['policy'] is None and (policy_dict['concurrent_ios'] is not None or policy_dict['bw_limit'] is not None): + policy_dict['policy'] = pool_details['vtreeMigrationIoPriorityPolicy'] + modify = True + + if modify is True: + return policy_dict + else: + return None + + def to_modify_protected_maintenance_mode_io_priority_policy(self, pool_details, pool_params): + + policy_dict = { + 'policy': None, + 'concurrent_ios': None, + 'bw_limit': None + } + modify = False + if pool_params['protected_maintenance_mode_io_priority_policy']['policy'] is not None and \ + pool_params['protected_maintenance_mode_io_priority_policy']['policy'] != pool_details['protectedMaintenanceModeIoPriorityPolicy']: + policy_dict['policy'] = pool_params['protected_maintenance_mode_io_priority_policy']['policy'] + modify = True + + if pool_params['protected_maintenance_mode_io_priority_policy']['concurrent_ios_per_device'] is not None and \ + pool_params['protected_maintenance_mode_io_priority_policy']['concurrent_ios_per_device'] != \ + pool_details['protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice']: + policy_dict['concurrent_ios'] = str(pool_params['protected_maintenance_mode_io_priority_policy']['concurrent_ios_per_device']) + + if pool_params['protected_maintenance_mode_io_priority_policy']['bw_limit_per_device'] is not None and \ + pool_params['protected_maintenance_mode_io_priority_policy']['bw_limit_per_device'] != \ + 
pool_details['protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps']: + policy_dict['bw_limit'] = str(pool_params['protected_maintenance_mode_io_priority_policy']['bw_limit_per_device']) + + if policy_dict['policy'] is None and (policy_dict['concurrent_ios'] is not None or policy_dict['bw_limit'] is not None): + policy_dict['policy'] = pool_details['protectedMaintenanceModeIoPriorityPolicy'] + modify = True + + if modify is True: + return policy_dict + else: + return None + + def to_modify_capacity_alert_thresholds(self, pool_details, pool_params, thresholds): + modify = False + threshold = dict() + if pool_params['cap_alert_thresholds']['high_threshold'] is not None and pool_params['cap_alert_thresholds'][ + 'high_threshold'] != pool_details['capacityAlertHighThreshold']: + threshold['high'] = str(pool_params['cap_alert_thresholds']['high_threshold']) + modify = True + if pool_params['cap_alert_thresholds']['critical_threshold'] is not None and \ + pool_params['cap_alert_thresholds']['critical_threshold'] != pool_details[ + 'capacityAlertCriticalThreshold']: + threshold['critical'] = str(pool_params['cap_alert_thresholds']['critical_threshold']) + modify = True + if modify is True: + if 'high' not in threshold: + threshold['high'] = str(pool_details['capacityAlertHighThreshold']) + if 'critical' not in threshold: + threshold['critical'] = str(pool_details['capacityAlertCriticalThreshold']) + + return threshold def get_powerflex_storagepool_parameters(): @@ -898,15 +1079,464 @@ def get_powerflex_storagepool_parameters(): choices=['HDD', 'SSD', 'TRANSITIONAL']), use_rfcache=dict(required=False, type='bool'), use_rmcache=dict(required=False, type='bool'), + enable_zero_padding=dict(type='bool'), + rep_cap_max_ratio=dict(type='int'), + rmcache_write_handling_mode=dict(choices=['Cached', 'Passthrough'], default='Cached'), + spare_percentage=dict(type='int'), + enable_rebalance=dict(type='bool'), + enable_fragmentation=dict(type='bool'), + enable_rebuild=dict(type='bool'), storage_pool_new_name=dict(required=False, type='str'), + parallel_rebuild_rebalance_limit=dict(type='int'), + cap_alert_thresholds=dict(type='dict', options=dict( + high_threshold=dict(type='int'), + critical_threshold=dict(type='int'))), + protected_maintenance_mode_io_priority_policy=dict(type='dict', options=dict( + policy=dict(choices=['unlimited', 'limitNumOfConcurrentIos', 'favorAppIos'], default='limitNumOfConcurrentIos'), + concurrent_ios_per_device=dict(type='int'), + bw_limit_per_device=dict(type='int'))), + rebalance_io_priority_policy=dict(type='dict', options=dict( + policy=dict(choices=['unlimited', 'limitNumOfConcurrentIos', 'favorAppIos'], default='favorAppIos'), + concurrent_ios_per_device=dict(type='int'), + bw_limit_per_device=dict(type='int'))), + vtree_migration_io_priority_policy=dict(type='dict', options=dict( + policy=dict(choices=['limitNumOfConcurrentIos', 'favorAppIos']), + concurrent_ios_per_device=dict(type='int'), + bw_limit_per_device=dict(type='int'))), + persistent_checksum=dict(type='dict', options=dict( + enable=dict(type='bool'), + validate_on_read=dict(type='bool'), + builder_limit=dict(type='int', default=3072))), state=dict(required=True, type='str', choices=['present', 'absent'])) +class StoragePoolExitHandler(): + def handle(self, pool_obj, pool_details): + if pool_details: + pool_details = pool_obj.get_storage_pool(storage_pool_id=pool_details['id']) + pool_obj.result['storage_pool_details'] = pool_details + + pool_obj.module.exit_json(**pool_obj.result) + + +class 
StoragePoolDeleteHandler(): + def handle(self, pool_obj, pool_params, pool_details): + if pool_params['state'] == 'absent' and pool_details: + msg = "Deleting storage pool is not supported through" \ + " ansible module." + LOG.error(msg) + pool_obj.module.fail_json(msg=msg) + + StoragePoolExitHandler().handle(pool_obj, pool_details) + + +class StoragePoolModifyPersistentChecksumHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['persistent_checksum'] is not None: + checksum_dict = pool_obj.to_modify_persistent_checksum( + pool_details=pool_details, + pool_params=pool_params) + if checksum_dict != {}: + if not pool_obj.module.check_mode: + pool_details = pool_obj.set_persistent_checksum( + pool_details=pool_details, + pool_params=pool_params) + pool_obj.result['changed'] = True + + StoragePoolDeleteHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Persistent Checksum failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifyRebalanceIOPriorityPolicyHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['rebalance_io_priority_policy'] is not None: + policy_dict = pool_obj.to_modify_rebalance_io_priority_policy( + pool_details=pool_details, + pool_params=pool_params + ) + if policy_dict is not None: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.rebalance_io_priority_policy( + storage_pool_id=pool_details['id'], + policy=policy_dict['policy'], + concurrent_ios_per_device=policy_dict['concurrent_ios'], + bw_limit_per_device=policy_dict['bw_limit']) + pool_obj.result['changed'] = True + + StoragePoolModifyPersistentChecksumHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify rebalance IO Priority Policy failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolSetVtreeMigrationIOPriorityPolicyHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['vtree_migration_io_priority_policy'] is not None: + policy_dict = pool_obj.to_modify_vtree_migration_io_priority_policy( + pool_details=pool_details, + pool_params=pool_params + ) + if policy_dict is not None: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_vtree_migration_io_priority_policy( + storage_pool_id=pool_details['id'], + policy=policy_dict['policy'], + concurrent_ios_per_device=policy_dict['concurrent_ios'], + bw_limit_per_device=policy_dict['bw_limit']) + pool_obj.result['changed'] = True + + StoragePoolModifyRebalanceIOPriorityPolicyHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Set Vtree Migration I/O Priority Policy operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolSetProtectedMaintenanceModeIOPriorityPolicyHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['protected_maintenance_mode_io_priority_policy'] is not None: + policy_dict = pool_obj.to_modify_protected_maintenance_mode_io_priority_policy( + 
pool_details=pool_details, + pool_params=pool_params + ) + if policy_dict is not None: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_protected_maintenance_mode_io_priority_policy( + storage_pool_id=pool_details['id'], + policy=policy_dict['policy'], + concurrent_ios_per_device=policy_dict['concurrent_ios'], + bw_limit_per_device=policy_dict['bw_limit']) + pool_obj.result['changed'] = True + + StoragePoolSetVtreeMigrationIOPriorityPolicyHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Set Protected Maintenance Mode IO Priority Policy operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifyCapacityAlertThresholdsHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['cap_alert_thresholds'] is not None: + threshold = pool_obj.to_modify_capacity_alert_thresholds(pool_details=pool_details, + pool_params=pool_params, + thresholds=pool_params[ + 'cap_alert_thresholds']) + if threshold != {}: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_cap_alert_thresholds( + storage_pool_id=pool_details['id'], + cap_alert_high_threshold=threshold['high'], + cap_alert_critical_threshold=threshold['critical']) + pool_obj.result['changed'] = True + + StoragePoolSetProtectedMaintenanceModeIOPriorityPolicyHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Capacity Alert Thresholds operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifyRebuildRebalanceParallelismLimitHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['parallel_rebuild_rebalance_limit'] is not None and \ + pool_params['parallel_rebuild_rebalance_limit'] != pool_details['numOfParallelRebuildRebalanceJobsPerDevice']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_rebuild_rebalance_parallelism_limit( + pool_details['id'], str(pool_params['parallel_rebuild_rebalance_limit'])) + pool_obj.result['changed'] = True + + StoragePoolModifyCapacityAlertThresholdsHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Rebuild/Rebalance Parallelism Limit operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifyRMCacheWriteHandlingModeHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['rmcache_write_handling_mode'] is not None and \ + pool_params['rmcache_write_handling_mode'] != pool_details['rmcacheWriteHandlingMode']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_rmcache_write_handling_mode( + pool_details['id'], pool_params['rmcache_write_handling_mode']) + pool_obj.result['changed'] = True + + StoragePoolModifyRebuildRebalanceParallelismLimitHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify RMCache Write Handling Mode failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class 
StoragePoolModifySparePercentageHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['spare_percentage'] is not None and pool_params['spare_percentage'] != pool_details['sparePercentage']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_spare_percentage( + pool_details['id'], str(pool_params['spare_percentage'])) + pool_obj.result['changed'] = True + + StoragePoolModifyRMCacheWriteHandlingModeHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Spare Percentage operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolEnableFragmentationHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['enable_fragmentation'] is not None and pool_params['enable_fragmentation'] != pool_details['fragmentationEnabled']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_fragmentation_enabled( + pool_details['id'], pool_params['enable_fragmentation']) + pool_obj.result['changed'] = True + + StoragePoolModifySparePercentageHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + + error_msg = (f"Enable/Disable Fragmentation operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolEnableRebuildHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['enable_rebuild'] is not None and pool_params['enable_rebuild'] != pool_details['rebuildEnabled']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_rebuild_enabled( + pool_details['id'], pool_params['enable_rebuild']) + pool_obj.result['changed'] = True + + StoragePoolEnableFragmentationHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Enable/Disable Rebuild operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolEnableRebalanceHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['enable_rebalance'] is not None and pool_params['enable_rebalance'] != pool_details['rebalanceEnabled']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_rebalance_enabled( + pool_details['id'], pool_params['enable_rebalance']) + pool_obj.result['changed'] = True + + StoragePoolEnableRebuildHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Enable/Disable Rebalance failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifyRepCapMaxRatioHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['rep_cap_max_ratio'] is not None and pool_params['rep_cap_max_ratio'] != pool_details['replicationCapacityMaxRatio']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_rep_cap_max_ratio( + pool_details['id'], str(pool_params['rep_cap_max_ratio'])) + 
pool_obj.result['changed'] = True + + StoragePoolEnableRebalanceHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Replication Capacity max ratio operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolEnableZeroPaddingHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['enable_zero_padding'] is not None and pool_params['enable_zero_padding'] != pool_details['zeroPaddingEnabled']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_zero_padding_policy( + pool_details['id'], pool_params['enable_zero_padding']) + pool_obj.result['changed'] = True + + StoragePoolModifyRepCapMaxRatioHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Enable/Disable zero padding operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolUseRFCacheHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['use_rfcache'] is not None and pool_params['use_rfcache'] != pool_details['useRfcache']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_use_rfcache( + pool_details['id'], pool_params['use_rfcache']) + pool_obj.result['changed'] = True + + StoragePoolEnableZeroPaddingHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify RF cache operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolUseRMCacheHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['use_rmcache'] is not None and pool_params['use_rmcache'] != pool_details['useRmcache']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_use_rmcache( + pool_details['id'], pool_params['use_rmcache']) + pool_obj.result['changed'] = True + + StoragePoolUseRFCacheHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify RM cache operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolRenameHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['storage_pool_new_name'] is not None and pool_params['storage_pool_new_name'] != pool_details['name']: + if not pool_obj.module.check_mode: + pool_obj.powerflex_conn.storage_pool.rename(pool_details['id'], pool_params['storage_pool_new_name']) + pool_obj.result['changed'] = True + + StoragePoolUseRMCacheHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify storage pool name failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifyMediaTypeHandler(): + def handle(self, pool_obj, pool_params, pool_details, media_type): + try: + if pool_params['state'] == 'present' and pool_details: + if media_type is not None and media_type != pool_details['mediaType']: + if not pool_obj.module.check_mode: + pool_details = 
pool_obj.powerflex_conn.storage_pool.set_media_type( + pool_details['id'], media_type) + pool_obj.result['changed'] = True + + StoragePoolRenameHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Media Type failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolCreateHandler(): + def handle(self, pool_obj, pool_params, pool_details, pd_id, media_type): + if pool_params['state'] == 'present' and pool_details is None: + if not pool_obj.module.check_mode: + LOG.info("Creating new storage pool") + if pool_params['storage_pool_id']: + pool_obj.module.fail_json( + msg="storage_pool_name is missing & name required to " + "create a storage pool. Please enter a valid " + "storage_pool_name.") + + pool_details = pool_obj.create_storage_pool( + pool_name=pool_params['storage_pool_name'], + pd_id=pd_id, + media_type=media_type, + use_rfcache=pool_params['use_rfcache'], + use_rmcache=pool_params['use_rmcache']) + + pool_obj.result['changed'] = True + + StoragePoolModifyMediaTypeHandler().handle(pool_obj, pool_params, pool_details, media_type) + + +class StoragePoolHandler(): + def handle(self, pool_obj, pool_params): + pool_obj.verify_storage_pool_name() + media_type = pool_params['media_type'] + if media_type == "TRANSITIONAL": + media_type = 'Transitional' + pd_id = None + if pool_params['protection_domain_id'] or pool_params['protection_domain_name']: + pd_id = pool_obj.get_protection_domain( + protection_domain_id=pool_params['protection_domain_id'], + protection_domain_name=pool_params['protection_domain_name'])['id'] + pool_details = pool_obj.get_storage_pool(storage_pool_id=pool_params['storage_pool_id'], + storage_pool_name=pool_params['storage_pool_name'], + pd_id=pd_id) + pool_obj.verify_protection_domain(pool_details=pool_details) + StoragePoolCreateHandler().handle(pool_obj, pool_params, pool_details, pd_id, media_type) + + def main(): - """ Create PowerFlex Storage Pool object and perform action on it + """ Create PowerFlex storage pool object and perform action on it based on user input from playbook""" obj = PowerFlexStoragePool() - obj.perform_module_operation() + StoragePoolHandler().handle(obj, obj.module.params) if __name__ == '__main__': diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_config/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_config/tasks/main.yml index 67bad8013..f1611a6b9 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_config/tasks/main.yml +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_config/tasks/main.yml @@ -46,6 +46,14 @@ delegate_to: "{{ powerflex_config_mdm_primary_hostname }}" when: powerflex_config_array_version == '4' +- name: Add certificate file for PowerFlex version 4.x + ansible.builtin.command: scli --add_certificate --certificate_file /opt/emc/scaleio/mdm/cfg/mgmt_ca.pem + run_once: true + register: powerflex_config_add_certificate + changed_when: powerflex_config_add_certificate.rc == 0 + delegate_to: "{{ powerflex_config_mdm_primary_hostname }}" + when: powerflex_config_array_version == '4' + - name: Login to MDM for PowerFlex version 4.x ansible.builtin.command: scli --login --p12_path /opt/emc/scaleio/mdm/cfg/cli_certificate.p12 --p12_password {{ password }} run_once: true diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_keepalived.yml
b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_keepalived.yml index df6fd9dac..9b106ab1b 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_keepalived.yml +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_keepalived.yml @@ -25,4 +25,4 @@ mode: '0600' owner: root group: root - notify: restart keepalived + notify: Restart keepalived diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/install_lia.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/install_lia.yml index 4b987b80e..f5aca7086 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/install_lia.yml +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/install_lia.yml @@ -20,3 +20,27 @@ - name: Include install_powerflex.yml ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml + +- name: Check if /opt/emc/scaleio/lia/cfg/conf.txt exists + ansible.builtin.stat: + path: /opt/emc/scaleio/lia/cfg/conf.txt + register: powerflex_lia_conf_file_stat + +- name: Update lia_mtls_only + ansible.builtin.lineinfile: + path: /opt/emc/scaleio/lia/cfg/conf.txt + regexp: '^lia_mtls_only=1$' + state: absent + when: powerflex_lia_conf_file_stat.stat.exists + +- name: Replace lia_token value + ansible.builtin.replace: + path: /opt/emc/scaleio/lia/cfg/conf.txt + regexp: '^lia_token=.*$' + replace: 'lia_token={{ powerflex_lia_token }}' + when: powerflex_lia_conf_file_stat.stat.exists + +- name: PKill LIA for config changes effect + ansible.builtin.command: pkill lia + register: powerflex_lia_pkill_ouput + changed_when: powerflex_lia_pkill_ouput.rc == 0 diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex4x_mdm.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex4x_mdm.yml index 67164337d..5a36649bf 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex4x_mdm.yml +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex4x_mdm.yml @@ -51,6 +51,13 @@ run_once: true delegate_to: "{{ powerflex_mdm_primary_hostname }}" +- name: Add certificate file for PowerFlex version 4.x + ansible.builtin.command: scli --add_certificate --certificate_file /opt/emc/scaleio/mdm/cfg/mgmt_ca.pem + run_once: true + register: powerflex_mdm_add_certificate + changed_when: powerflex_mdm_add_certificate.rc == 0 + delegate_to: "{{ powerflex_mdm_primary_hostname }}" + - name: Login to primary MDM node register: powerflex_mdm_secondary_login ansible.builtin.command: > diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/defaults/main.yml index 5801c0ced..fc239219b 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/defaults/main.yml +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/defaults/main.yml @@ -17,5 +17,4 @@ powerflex_sdc_name: sdc_test powerflex_sdc_performance_profile: Compact file_glob_name: sdc i_am_sure: 1 -powerflex_sdc_esxi_guid: "d422ecab-af6f-4e0c-a059-333ac89cfb42" powerflex_role_environment: diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/install_sdc.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/install_sdc.yml index 27c82db27..25c6e8109 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/install_sdc.yml +++ 
b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/install_sdc.yml @@ -70,6 +70,6 @@ mode: "0600" owner: "root" group: "root" - notify: restart scini + notify: Restart scini when: - ansible_distribution not in ['WindowsOS', 'SLES', 'VMkernel'] diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/register_esxi_sdc.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/register_esxi_sdc.yml index 0b06fc8cd..52a8004f5 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/register_esxi_sdc.yml +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/register_esxi_sdc.yml @@ -1,8 +1,14 @@ --- +- name: Generating random GUID + register: powerflex_sdc_esxi_guid + ansible.builtin.shell: > + uuidgen + changed_when: powerflex_sdc_esxi_guid.rc == 0 + - name: Register SDC and Set MDM IP addresses register: powerflex_sdc_register_set_sdc_mdm ansible.builtin.shell: > - esxcli system module parameters set -m scini -p "IoctlIniGuidStr={{ powerflex_sdc_esxi_guid }} + esxcli system module parameters set -m scini -p "IoctlIniGuidStr={{ powerflex_sdc_esxi_guid.stdout }} IoctlMdmIPStr={{ powerflex_sdc_mdm_ips }} bBlkDevIsPdlActive=1 blkDevPdlTimeoutMillis=60000" changed_when: powerflex_sdc_register_set_sdc_mdm.rc == 0 diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/add_sdr.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/add_sdr.yml index f7cbfa378..ba365ce90 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/add_sdr.yml +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/add_sdr.yml @@ -54,7 +54,7 @@ rescue: - name: Generate login certificate using primary_mdm_ip ansible.builtin.command: > - scli --generate_login_certificate --management_system_ip {{ powerflex_sdr_primary_mdm_ip }} --username {{ username }} + scli --generate_login_certificate --management_system_ip {{ powerflex_sdr_mdm_primary_ip }} --username {{ username }} --password {{ password }} --p12_path /opt/emc/scaleio/mdm/cfg/cli_certificate.p12 --p12_password {{ password }} --insecure run_once: true register: powerflex_sdr_generate_login_certificate_mdm_ip @@ -62,6 +62,14 @@ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}" when: powerflex_sdr_array_version != "3" +- name: Add certificate file for PowerFlex version 4.x + ansible.builtin.command: scli --add_certificate --certificate_file /opt/emc/scaleio/mdm/cfg/mgmt_ca.pem + run_once: true + register: powerflex_sdr_add_certificate + changed_when: powerflex_sdr_add_certificate.rc == 0 + delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}" + when: powerflex_sdr_array_version != '3' + - name: Login to MDM for PowerFlex version 4.x ansible.builtin.command: scli --login --p12_path /opt/emc/scaleio/mdm/cfg/cli_certificate.p12 --p12_password {{ password }} run_once: true diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/remove_sdr.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/remove_sdr.yml index 3bf33b6ea..17ec5755a 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/remove_sdr.yml +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/remove_sdr.yml @@ -29,17 +29,42 @@ no_log: true when: powerflex_sdr_array_version == "3" -- name: Login to mdm for PowerFlex version 4.x - ansible.builtin.command: > - scli --login --management_system_ip {{ hostname }} - --username admin - --password "{{ password }}" - --approve_certificate +- 
name: Generate login certificate for PowerFlex version 4.x + block: + - name: Generate login certificate using management_system_ip + ansible.builtin.command: > + scli --generate_login_certificate --management_system_ip {{ hostname }} --username {{ username }} --password {{ password }} + --p12_path /opt/emc/scaleio/mdm/cfg/cli_certificate.p12 --p12_password {{ password }} --insecure + run_once: true + register: powerflex_sdr_generate_login_certificate + changed_when: powerflex_sdr_generate_login_certificate.rc == 0 + delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}" + when: powerflex_sdr_array_version != "3" + rescue: + - name: Generate login certificate using primary_mdm_ip + ansible.builtin.command: > + scli --generate_login_certificate --management_system_ip {{ powerflex_sdr_mdm_primary_ip }} --username {{ username }} + --password {{ password }} --p12_path /opt/emc/scaleio/mdm/cfg/cli_certificate.p12 --p12_password {{ password }} --insecure + run_once: true + register: powerflex_sdr_generate_login_certificate_mdm_ip + changed_when: powerflex_sdr_generate_login_certificate_mdm_ip.rc == 0 + delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}" + when: powerflex_sdr_array_version != "3" + +- name: Add certificate file for PowerFlex version 4.x + ansible.builtin.command: scli --add_certificate --certificate_file /opt/emc/scaleio/mdm/cfg/mgmt_ca.pem run_once: true - register: powerflex_initial_login + register: powerflex_sdr_add_certificate + changed_when: powerflex_sdr_add_certificate.rc == 0 + delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}" + when: powerflex_sdr_array_version != '3' + +- name: Login to MDM for PowerFlex version 4.x + ansible.builtin.command: scli --login --p12_path /opt/emc/scaleio/mdm/cfg/cli_certificate.p12 --p12_password {{ password }} + run_once: true + register: powerflex_sdr_login_output + changed_when: powerflex_sdr_login_output.rc == 0 delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}" - changed_when: powerflex_initial_login.rc == 0 - no_log: true when: powerflex_sdr_array_version != "3" - name: Output msg of previous task login to mdm diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/install_sds.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/install_sds.yml index 010aee075..e6abdbd1e 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/install_sds.yml +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/install_sds.yml @@ -59,6 +59,14 @@ delegate_to: "{{ powerflex_sds_primary_mdm_hostname }}" when: powerflex_sds_array_version != "3" +- name: Add certificate file for PowerFlex version 4.x + ansible.builtin.command: scli --add_certificate --certificate_file /opt/emc/scaleio/mdm/cfg/mgmt_ca.pem + run_once: true + register: powerflex_sds_add_certificate + changed_when: powerflex_sds_add_certificate.rc == 0 + delegate_to: "{{ powerflex_sds_primary_mdm_hostname }}" + when: powerflex_sds_array_version != '3' + - name: Login to MDM for PowerFlex version 4.x ansible.builtin.command: scli --login --p12_path /opt/emc/scaleio/mdm/cfg/cli_certificate.p12 --p12_password {{ password }} run_once: true diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/connect_mdm_cluster.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/connect_mdm_cluster.yml new file mode 100644 index 000000000..e166a9316 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/connect_mdm_cluster.yml @@ -0,0 +1,131 @@ +--- +- name: 
Get login token + ansible.builtin.uri: + url: "https://{{ hostname }}:{{ port }}/api/gatewayLogin" + validate_certs: "{{ validate_certs }}" + user: "{{ username }}" + password: "{{ password }}" + method: GET + force_basic_auth: true + delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}" + run_once: true + no_log: true + register: powerflex_tb_login_token_response + +- name: Get LIA credentials + ansible.builtin.uri: + url: "https://{{ hostname }}:{{ port }}/api/V1/Credential?filter=eq,label,DELL_POWERFLEX_LIA" + method: GET + validate_certs: "{{ validate_certs }}" + headers: + Authorization: "Bearer {{ powerflex_tb_login_token_response.json }}" + delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}" + run_once: true + register: powerflex_tb_lia_credentials + +- name: Set credential ID + ansible.builtin.set_fact: + powerflex_tb_lia_credential_id: "{{ powerflex_tb_lia_credentials.json.credentialList[0].credential.id }}" + when: powerflex_tb_lia_credentials is defined and powerflex_tb_lia_credentials.json.credentialList | length > 0 + +- name: Create LIA credentials if not exists + when: powerflex_tb_lia_credentials.json.credentialList | length == 0 + block: + - name: Create LIA credentials payload + ansible.builtin.set_fact: + powerflex_tb_lia_credential: + credential: + type: "ManagementSystemCredential" + label: "DELL_POWERFLEX_LIA" + password: "{{ powerflex_lia_token }}" + liaPassword: "{{ powerflex_lia_token }}" + + - name: Create LIA credentials + ansible.builtin.uri: + url: "https://{{ hostname }}:{{ port }}/api/V1/Credential" + validate_certs: "{{ validate_certs }}" + method: POST + headers: + Authorization: "Bearer {{ powerflex_tb_login_token_response.json }}" + Content-Type: "application/json" + body: "{{ powerflex_tb_lia_credential | to_json }}" + register: powerflex_tb_lia_create_credentials + delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}" + run_once: true + changed_when: powerflex_tb_lia_create_credentials.status == 200 + + - name: Set credential ID + ansible.builtin.set_fact: + powerflex_tb_lia_credential_id: "{{ powerflex_tb_lia_create_credentials.json.credential.id }}" + when: powerflex_tb_lia_create_credentials is defined + +- name: Set discovery request payload + ansible.builtin.set_fact: + discovery_request_payload: + discoveryRequestList: + DiscoverIPRangeDeviceRequest: + - deviceManagementSystemCredRef: "{{ powerflex_tb_lia_credential_id }}" + deviceManagementSystemIPAddress: "{{ powerflex_tb_device_management_system_ip_address }}" + deviceManagementSystemId: "{{ powerflex_tb_system_id }}" + deviceType: "powerflex_management_system" + hostName: "block-legacy-gateway" + unmanaged: false + reserved: false + serverPoolId: "" + autoConfigureAlerts: true + snmpConfigure: true + when: powerflex_tb_lia_credentials is defined + +- name: Create connect mdm cluster payload + ansible.builtin.uri: + url: "https://{{ hostname }}:{{ port }}/api/V1/DiscoveryRequest" + validate_certs: "{{ validate_certs }}" + method: POST + headers: + Authorization: "Bearer {{ powerflex_tb_login_token_response.json }}" + Content-Type: "application/json" + body: "{{ discovery_request_payload | to_json }}" + status_code: 202 + when: powerflex_tb_lia_credentials is defined + delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}" + run_once: true + register: powerflex_tb_connect_mdm_cluster_output + changed_when: powerflex_tb_connect_mdm_cluster_output.status == 202 + +- name: Extract Job ID 
from connect mdm cluster output + ansible.builtin.set_fact: + powerflex_tb_connect_mdm_cluster_job_id: "{{ powerflex_tb_connect_mdm_cluster_output.link | regex_search('Job-[a-zA-Z0-9-]+') }}" + when: powerflex_tb_connect_mdm_cluster_output is defined + +- name: Track job + register: powerflex_tb_connect_mdm_cluster_job_status + ansible.builtin.uri: + url: "https://{{ hostname }}:{{ port }}/API/V1/JobHistory/{{ powerflex_tb_connect_mdm_cluster_job_id }}/status" + validate_certs: "{{ validate_certs }}" + method: GET + headers: + Authorization: "Bearer {{ powerflex_tb_login_token_response.json }}" + status_code: 200 + run_once: true + retries: 5 + delay: 10 + delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}" + when: powerflex_tb_connect_mdm_cluster_job_id is defined + until: powerflex_tb_connect_mdm_cluster_job_status.json != "IN_PROGRESS" + failed_when: powerflex_tb_connect_mdm_cluster_job_status.json == "FAILED" + changed_when: powerflex_tb_connect_mdm_cluster_job_status.json == "SUCCEESSFUL" + +- name: Wait for API login call to be successful + register: powerflex_tb_api_login + ansible.builtin.uri: + url: "https://{{ hostname }}:{{ port }}/api/login" + user: "{{ username }}" + password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + method: GET + delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}" + run_once: true + ignore_errors: true + retries: 5 + delay: 10 + until: powerflex_tb_api_login.status == 200 diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb4x.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb4x.yml index d34857ba4..9ae3bc1c1 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb4x.yml +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb4x.yml @@ -20,6 +20,10 @@ changed_when: powerflex_tb_login_output.rc == 0 delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}" +- name: Extract System ID + ansible.builtin.set_fact: + powerflex_tb_system_id: "{{ powerflex_tb_login_output.stdout | regex_search('System ID is (\\w+)', '\\1') | first }}" + - name: Add primary TB ansible.builtin.command: > scli --add_standby_mdm @@ -67,3 +71,6 @@ changed_when: powerflex_tb_cluster_to_five_output.rc == 0 delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}" when: powerflex_tb_mdm_cluster_mode[0] != "5_node" and powerflex_tb_cluster_mode == "FiveNodes" + +- name: Connect MDM cluster + ansible.builtin.include_tasks: connect_mdm_cluster.yml diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/set_tb_ips.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/set_tb_ips.yml index 34c0144d5..f2c534f77 100644 --- a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/set_tb_ips.yml +++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/set_tb_ips.yml @@ -11,19 +11,25 @@ powerflex_tb_mdm_secondary_ip: "{{ hostvars[groups['mdm'][1]]['ansible_host'] }}" powerflex_tb_mdm_secondary_hostname: "{{ hostvars[groups['mdm'][1]]['inventory_hostname'] }}" -- name: Set fact - powerflex_tb_mdm_tertiary_ip - ansible.builtin.set_fact: - powerflex_tb_mdm_tertiary_ip: "{{ hostvars[groups['tb'][2]]['ansible_host'] }}" - powerflex_tb_mdm_tertiary_hostname: "{{ hostvars[groups['tb'][2]]['inventory_hostname'] }}" - when: "powerflex_tb_mdm_count | int > 2" - - name: Set fact - powerflex_tb_primary ansible.builtin.set_fact: powerflex_tb_primary_ip: "{{ 
hostvars[groups['tb'][0]]['ansible_host'] }}" powerflex_tb_primary_hostname: "{{ hostvars[groups['tb'][0]]['inventory_hostname'] }}" +- name: Set fact - powerflex_tb_device_management_system_ip_address + ansible.builtin.set_fact: + powerflex_tb_device_management_system_ip_address: "{{ powerflex_tb_mdm_primary_ip }}, {{ powerflex_tb_mdm_secondary_ip }}, {{ powerflex_tb_primary_ip }}" + - name: Set fact - powerflex_tb_primary ansible.builtin.set_fact: powerflex_tb_secondary_ip: "{{ hostvars[groups['tb'][1]]['ansible_host'] }}" powerflex_tb_secondary_hostname: "{{ hostvars[groups['tb'][1]]['inventory_hostname'] }}" + powerflex_tb_device_management_system_ip_address: "{{ powerflex_tb_device_management_system_ip_address }}, {{ powerflex_tb_secondary_ip }}" when: "powerflex_tb_count | int > 1" + +- name: Set fact - powerflex_tb_mdm_tertiary_ip + ansible.builtin.set_fact: + powerflex_tb_mdm_tertiary_ip: "{{ hostvars[groups['tb'][2]]['ansible_host'] }}" + powerflex_tb_mdm_tertiary_hostname: "{{ hostvars[groups['tb'][2]]['inventory_hostname'] }}" + powerflex_tb_device_management_system_ip_address: "{{ powerflex_tb_device_management_system_ip_address }}, {{ powerflex_tb_mdm_tertiary_ip }}" + when: "powerflex_tb_mdm_count | int > 2" diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt deleted file mode 100644 index 571402121..000000000 --- a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt +++ /dev/null @@ -1,45 +0,0 @@ -plugins/modules/sds.py import-2.7 -plugins/modules/sds.py import-3.5 -plugins/modules/sds.py compile-2.7 -plugins/modules/sds.py compile-3.5 -plugins/modules/info.py import-2.7 -plugins/modules/info.py import-3.5 -plugins/modules/info.py compile-2.7 -plugins/modules/fault_set.py import-2.7 -plugins/modules/fault_set.py import-3.5 -plugins/modules/fault_set.py compile-2.7 -plugins/modules/fault_set.py compile-3.5 -plugins/module_utils/storage/dell/libraries/configuration.py import-2.7 -plugins/module_utils/storage/dell/libraries/configuration.py import-3.5 -plugins/module_utils/storage/dell/libraries/configuration.py compile-2.7 -plugins/module_utils/storage/dell/libraries/configuration.py compile-3.5 -plugins/modules/device.py validate-modules:missing-gplv3-license -plugins/modules/sdc.py validate-modules:missing-gplv3-license -plugins/modules/sds.py validate-modules:missing-gplv3-license -plugins/modules/snapshot.py validate-modules:missing-gplv3-license -plugins/modules/storagepool.py validate-modules:missing-gplv3-license -plugins/modules/volume.py validate-modules:missing-gplv3-license -plugins/modules/info.py validate-modules:missing-gplv3-license -plugins/modules/protection_domain.py validate-modules:missing-gplv3-license -plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license -plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license -plugins/modules/replication_pair.py validate-modules:missing-gplv3-license -plugins/modules/snapshot_policy.py validate-modules:missing-gplv3-license -plugins/modules/fault_set.py validate-modules:missing-gplv3-license -plugins/modules/snapshot_policy.py compile-2.7 -plugins/modules/snapshot_policy.py compile-3.5 -plugins/modules/snapshot_policy.py import-2.7 -plugins/modules/snapshot_policy.py import-3.5 -plugins/modules/sdc.py import-2.7 -plugins/modules/sdc.py import-3.5 -plugins/modules/sdc.py compile-2.7 -plugins/modules/sdc.py compile-3.5 
-tests/unit/plugins/module_utils/mock_device_api.py compile-2.7 -tests/unit/plugins/module_utils/mock_device_api.py compile-3.5 -plugins/modules/replication_consistency_group.py import-2.7 -plugins/modules/replication_consistency_group.py import-3.5 -plugins/modules/replication_consistency_group.py compile-2.7 -plugins/modules/replication_consistency_group.py compile-3.5 -plugins/modules/resource_group.py validate-modules:missing-gplv3-license -plugins/modules/resource_group.py compile-2.7 -plugins/modules/resource_group.py import-2.7 diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.15.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.15.txt index 571402121..438f0a7bd 100644 --- a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.15.txt +++ b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.15.txt @@ -43,3 +43,5 @@ plugins/modules/replication_consistency_group.py compile-3.5 plugins/modules/resource_group.py validate-modules:missing-gplv3-license plugins/modules/resource_group.py compile-2.7 plugins/modules/resource_group.py import-2.7 +plugins/modules/storagepool.py compile-2.7 +plugins/modules/storagepool.py import-2.7 diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.16.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.16.txt index 0dbde689c..5f24f9f25 100644 --- a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.16.txt +++ b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.16.txt @@ -29,3 +29,5 @@ plugins/modules/info.py import-2.7 plugins/modules/resource_group.py validate-modules:missing-gplv3-license plugins/modules/resource_group.py compile-2.7 plugins/modules/resource_group.py import-2.7 +plugins/modules/storagepool.py compile-2.7 +plugins/modules/storagepool.py import-2.7 diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py index 87af1d6eb..3f0a89581 100644 --- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py @@ -22,13 +22,44 @@ class MockStoragePoolApi: "use_rmcache": None, "use_rfcache": None, "media_type": None, + "enable_zero_padding": None, + "rep_cap_max_ratio": None, + "rmcache_write_handling_mode": None, + "spare_percentage": None, + "enable_rebalance": None, + "enable_fragmentation": None, + "enable_rebuild": None, + "parallel_rebuild_rebalance_limit": None, + "cap_alert_thresholds": { + "high_threshold": 30, + "critical_threshold": 50 + }, + "protected_maintenance_mode_io_priority_policy": { + "policy": None, + "concurrent_ios_per_device": None, + "bw_limit_per_device": None + }, + "rebalance_io_priority_policy": { + "policy": None, + "concurrent_ios_per_device": None, + "bw_limit_per_device": None + }, + "vtree_migration_io_priority_policy": { + "policy": None, + "concurrent_ios_per_device": None, + "bw_limit_per_device": None}, + "persistent_checksum": { + "enable": None, + "validate_on_read": None, + "builder_limit": None + }, 'state': None } STORAGE_POOL_GET_LIST = [ { - 'protectionDomainId': '4eeb304600000000', - 'protectionDomainName': 'test_pd', + 'protectionDomainId': "7bd6457000000000", + 'protectionDomainName': "test_pd_1", 'rebuildEnabled': True, 'dataLayout': 'MediumGranularity', 'persistentChecksumState': 'Protected', @@ -98,8 +129,8 @@ class 
MockStoragePoolApi: STORAGE_POOL_GET_MULTI_LIST = [ { - 'protectionDomainId': '4eeb304600000000', - 'protectionDomainName': 'test_pd', + 'protectionDomainId': "7bd6457000000000", + 'protectionDomainName': "test_pd_1", 'rebuildEnabled': True, 'dataLayout': 'MediumGranularity', 'persistentChecksumState': 'Protected', @@ -166,7 +197,7 @@ class MockStoragePoolApi: 'id': 'test_pool_id_1' }, { - 'protectionDomainId': '4eeb304600000002', + 'protectionDomainId': "7bd6457000000000", 'protectionDomainName': 'test_pd_1', 'rebuildEnabled': True, 'dataLayout': 'MediumGranularity', @@ -239,6 +270,30 @@ class MockStoragePoolApi: PROTECTION_DETAILS_1 = [{"id": "4eeb304600000001", "name": "test_pd_name"}] + PROTECTION_DOMAIN = { + "protectiondomain": [ + { + "id": "7bd6457000000000", + "name": "test_pd_1", + "protectionDomainState": "Active", + "overallIoNetworkThrottlingInKbps": 20480, + "rebalanceNetworkThrottlingInKbps": 10240, + "rebuildNetworkThrottlingInKbps": 10240, + "vtreeMigrationNetworkThrottlingInKbps": 10240, + "rfcacheEnabled": "false", + "rfcacheMaxIoSizeKb": 128, + "rfcacheOpertionalMode": "None", + "rfcachePageSizeKb": 64, + "storagePools": [ + { + "id": "8d1cba1700000000", + "name": "pool1" + } + ] + } + ] + } + STORAGE_POOL_STATISTICS = { 'backgroundScanFixedReadErrorCount': 0, 'pendingMovingOutBckRebuildJobs': 0, @@ -616,10 +671,26 @@ class MockStoragePoolApi: "get_multi_details": "More than one storage pool found", "create_wo_pd": "Please provide protection domain details", "create_transitional": "TRANSITIONAL media type is not supported during creation.", - "create_pool_name_empty": "Empty or white spaced string provided in storage_pool_name.", + "create_pool_name_empty": "Empty or white spaced string provided for storage pool name. Provide valid storage pool name", "create_pool_new_name": "storage_pool_new_name is passed during creation.", - "rename_storage_pool_empty": "Empty/White spaced name is not allowed during renaming of a storage pool.", - "delete_storage_pool": "Deleting storage pool is not supported through ansible module." + "rename_storage_pool_empty": "Empty or white spaced string provided for storage pool name. 
Provide valid storage pool name", + "delete_storage_pool": "Deleting storage pool is not supported through ansible module.", + "rename_pool": "Modify storage pool name failed", + "modify_pool_rmcache": "Modify RM cache operation failed", + "modify_pool_rfcache": "Modify RF cache operation failed", + "modify_pool_zero_padding_enabled": "Enable/Disable zero padding operation failed", + "modify_pool_rep_cap_max_ratio": "Modify Replication Capacity max ratio operation failed", + "modify_pool_enable_rebalance": "Enable/Disable Rebalance failed", + "modify_pool_enable_rebuild": "Enable/Disable Rebuild operation failed", + "modify_pool_enable_fragmentation": "Enable/Disable Fragmentation operation failed", + "modify_pool_spare_percentage": "Modify Spare Percentage operation failed", + "modify_pool_rmcache_write_handling_mode": "Modify RMCache Write Handling Mode failed", + "modify_pool_rebuild_rebalance_parallelism_limit": "Modify Rebuild/Rebalance Parallelism Limit operation failed", + "modify_pool_capacity_alert_thresholds": "Modify Capacity Alert Thresholds operation failed", + "modify_pool_protected_maintenance_mode_io_priority_policy": "Set Protected Maintenance Mode IO Priority Policy operation failed", + "modify_pool_vtree_migration_io_priority_policy": "Set Vtree Migration I/O Priority Policy operation failed", + "modify_pool_rebalance_io_priority_policy": "Modify rebalance IO Priority Policy failed", + "modify_pool_persistent_checksum": "Modify Persistent Checksum failed" } @staticmethod diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_mdm_cluster.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_mdm_cluster.py index f8f3cdc2f..653fcb298 100644 --- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_mdm_cluster.py +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_mdm_cluster.py @@ -1,4 +1,4 @@ -# Copyright: (c) 2022, Dell Technologies +# Copyright: (c) 2024, Dell Technologies # Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) @@ -634,3 +634,145 @@ class TestPowerflexMDMCluster(): ) mdm_cluster_module_mock.perform_module_operation() assert MockMdmClusterApi.new_name_add_mdm_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_change_cluster_mode(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "FiveNodes", + "mdm": [ + { + "mdm_name": MockMdmClusterApi.MDM_NAME_STB_MGR, + "mdm_id": None, + "mdm_type": "Secondary" + }, + { + "mdm_id": MockMdmClusterApi.STB_TB_MDM_ID, + "mdm_name": None, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "absent-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.validate_parameters = MagicMock(return_value=None) + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_change_cluster_mode_with_name(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "FiveNodes", + "mdm": [ + { + "mdm_name": MockMdmClusterApi.MDM_NAME_STB_MGR, + "mdm_id": 
MockMdmClusterApi.MDM_ID, + "mdm_type": "Secondary" + }, + { + "mdm_id": MockMdmClusterApi.STB_TB_MDM_ID, + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "absent-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.validate_parameters = MagicMock(return_value=None) + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_cluster_reduce_mode_absent(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "FiveNodes", + "mdm": [ + { + "mdm_name": MockMdmClusterApi.MDM_NAME_STB_MGR, + "mdm_id": None, + "mdm_type": "Secondary" + }, + { + "mdm_id": None, + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "absent-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.is_mdm_name_id_exists = MagicMock(return_value=None) + mdm_cluster_module_mock.validate_parameters = MagicMock(return_value=None) + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_cluster_expand_list_tb(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "FiveNodes", + "mdm": [ + { + "mdm_name": MockMdmClusterApi.MDM_NAME_STB_MGR, + "mdm_id": None, + "mdm_type": "Secondary" + }, + { + "mdm_id": None, + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "present-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.validate_parameters = MagicMock(return_value=None) + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_cluster_expand_list_tb_mdm_none(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "FiveNodes", + "mdm": [ + { + "mdm_name": MockMdmClusterApi.MDM_NAME_STB_MGR, + "mdm_id": None, + "mdm_type": "Secondary" + }, + { + "mdm_id": None, + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "present-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + 
return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.is_mdm_name_id_exists = MagicMock(return_value=None) + mdm_cluster_module_mock.validate_parameters = MagicMock(return_value=None) + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py index 6780ed7ad..c60e1fd01 100644 --- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py @@ -4,29 +4,26 @@ """Unit Tests for storage pool module on PowerFlex""" + from __future__ import (absolute_import, division, print_function) __metaclass__ = type - import pytest -from mock.mock import MagicMock # pylint: disable=unused-import from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries import initial_mock -from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api import MockStoragePoolApi +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api \ + import MockStoragePoolApi from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ import MockApiException -from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ - import utils from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries.powerflex_unit_base \ import PowerFlexUnitBase - -utils.get_logger = MagicMock() -utils.get_powerflex_gateway_host_connection = MagicMock() -utils.PowerFlexClient = MagicMock() - -from ansible.module_utils import basic -basic.AnsibleModule = MagicMock() -from ansible_collections.dellemc.powerflex.plugins.modules.storagepool import PowerFlexStoragePool +from ansible_collections.dellemc.powerflex.plugins.modules.storagepool \ + import PowerFlexStoragePool +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils +from ansible_collections.dellemc.powerflex.plugins.modules.storagepool import \ + StoragePoolHandler class TestPowerflexStoragePool(PowerFlexUnitBase): @@ -37,23 +34,22 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): def module_object(self): return PowerFlexStoragePool - def test_get_storagepool_details(self, powerflex_module_mock): - self.get_module_args.update({ - "storage_pool_name": "test_pool", - "state": "present" - }) - powerflex_module_mock.module.params = self.get_module_args - storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + def test_get_storage_pool_response(self, powerflex_module_mock): + self.set_module_params( + powerflex_module_mock, + self.get_module_args, + { + "storage_pool_name": "test_pool", + "state": "present" + }) powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( - return_value=storagepool_resp - ) - storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS - powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( - return_value=storagepool_statistics_resp - ) - powerflex_module_mock.perform_module_operation() + return_value=MockStoragePoolApi.STORAGE_POOL_GET_LIST) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + 
powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + StoragePoolHandler().handle( + powerflex_module_mock, powerflex_module_mock.module.params) powerflex_module_mock.powerflex_conn.storage_pool.get.assert_called() - powerflex_module_mock.powerflex_conn.storage_pool.get_statistics.assert_called() def test_get_storagepool_details_multi(self, powerflex_module_mock): self.get_module_args.update({ @@ -71,7 +67,7 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): ) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('get_multi_details'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_get_storagepool_details_with_exception(self, powerflex_module_mock): self.get_module_args.update({ @@ -88,50 +84,7 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): powerflex_module_mock.create_storage_pool = MagicMock(return_value=None) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('get_details'), - powerflex_module_mock, invoke_perform_module=True) - - @pytest.mark.parametrize("params", [ - {"pd_id": "4eeb304600000000"}, - {"pd_name": "test"}, - ]) - def test_get_protection_domain(self, powerflex_module_mock, params): - pd_id = params.get("pd_id", None) - pd_name = params.get("pd_name", None) - powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( - return_value=MockStoragePoolApi.PROTECTION_DETAILS - ) - pd_details = powerflex_module_mock.get_protection_domain(pd_name, pd_id) - assert MockStoragePoolApi.PROTECTION_DETAILS[0] == pd_details - - def test_get_protection_domain_exception(self, powerflex_module_mock): - self.set_module_params( - powerflex_module_mock, - self.get_module_args, - { - "storage_pool_name": "test_pool", - "protection_domain_id": "4eeb304600000001", - "state": "present" - }) - powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( - side_effect=MockApiException) - self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('get_pd_exception'), - powerflex_module_mock, invoke_perform_module=True) - - def test_get_protection_domain_non_exist(self, powerflex_module_mock): - self.set_module_params( - powerflex_module_mock, - self.get_module_args, - { - "storage_pool_name": "test_pool", - "protection_domain_id": "4eeb304600000001", - "state": "present" - }) - powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( - return_value=None) - self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('get_pd_non_exist'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_get_storagepool_details_with_invalid_pd_id(self, powerflex_module_mock): self.get_module_args.update({ @@ -152,78 +105,33 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): ) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('invalid_pd_id'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_create_storagepool_response(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", - "protection_domain_name": "test_pd_name", + "protection_domain_name": "test_pd_1", "media_type": "HDD", "state": "present" }) powerflex_module_mock.module.params = self.get_module_args + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( - 
return_value=MockStoragePoolApi.PROTECTION_DETAILS_1) + return_value=pd_resp['protectiondomain']) powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( return_value=[] ) powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( return_value=[] ) - powerflex_module_mock.powerflex_conn.storage_pool.create = MagicMock( - return_value=None - ) - resp = powerflex_module_mock.create_storage_pool(pool_name="test_pool", - pd_id=MockStoragePoolApi.PROTECTION_DETAILS_1[0]['id'], - media_type="HDD") - assert resp is True + StoragePoolHandler().handle( + powerflex_module_mock, powerflex_module_mock.module.params) powerflex_module_mock.powerflex_conn.storage_pool.create.assert_called() - def test_create_storagepool_only_pool_id(self, powerflex_module_mock): - self.get_module_args.update({ - "storage_pool_id": "test_pool_id", - "protection_domain_name": "test_pd_name", - "media_type": "HDD", - "state": "present" - }) - powerflex_module_mock.module.params = self.get_module_args - powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( - return_value=MockStoragePoolApi.PROTECTION_DETAILS_1) - powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( - return_value=[] - ) - powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( - return_value=[] - ) - self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('create_pool_id'), - powerflex_module_mock, invoke_perform_module=True) - - def test_create_storagepool_new_name(self, powerflex_module_mock): - self.get_module_args.update({ - "storage_pool_name": "test_pool", - "storage_pool_new_name": "pool_new_name", - "protection_domain_name": "test_pd_name", - "media_type": "HDD", - "state": "present" - }) - powerflex_module_mock.module.params = self.get_module_args - powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( - return_value=MockStoragePoolApi.PROTECTION_DETAILS_1) - powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( - return_value=[] - ) - powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( - return_value=[] - ) - self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('create_pool_new_name'), - powerflex_module_mock, invoke_perform_module=True) - def test_create_storagepool_empty_name(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": " ", - "protection_domain_name": "test_pd_name", + "protection_domain_name": "test_pd_1", "media_type": "HDD", "state": "present" }) @@ -232,7 +140,7 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): return_value=MockStoragePoolApi.PROTECTION_DETAILS_1) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('create_pool_name_empty'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_create_storagepool_wo_pd(self, powerflex_module_mock): self.get_module_args.update({ @@ -251,12 +159,12 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): ) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('create_wo_pd'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_create_storagepool_transitional_exception(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", - "protection_domain_name": "test_pd_name", + "protection_domain_name": "test_pd_1", "media_type": "TRANSITIONAL", "state": "present" }) @@ -274,7 +182,7 @@ class 
TestPowerflexStoragePool(PowerFlexUnitBase): ) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('create_transitional'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_create_storagepool_exception(self, powerflex_module_mock): self.get_module_args.update({ @@ -297,15 +205,43 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): ) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('create_storage_pool'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_modify_storagepool_details(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", "storage_pool_new_name": "new_ansible_pool", "use_rfcache": True, "use_rmcache": True, - "media_type": "TRANSITIONAL", + "cap_alert_thresholds": { + "high_threshold": 30, + "critical_threshold": 50 + }, + "enable_zero_padding": True, + "rep_cap_max_ratio": 40, + "rmcache_write_handling_mode": "Passthrough", + "spare_percentage": 80, + "enable_rebalance": False, + "enable_fragmentation": False, + "enable_rebuild": False, + "parallel_rebuild_rebalance_limit": 3, + "protected_maintenance_mode_io_priority_policy": { + "policy": "unlimited", + "concurrent_ios_per_device": 1, + "bw_limit_per_device": 1024}, + "rebalance_io_priority_policy": { + "policy": "limitNumOfConcurrentIos", + "concurrent_ios_per_device": 10, + "bw_limit_per_device": 1024}, + "vtree_migration_io_priority_policy": { + "policy": "limitNumOfConcurrentIos", + "concurrent_ios_per_device": 10, + "bw_limit_per_device": 1024}, + "persistent_checksum": { + "enable": True, + "validate_on_read": True, + "builder_limit": 1024}, "state": "present" }) powerflex_module_mock.module.params = self.get_module_args @@ -313,20 +249,58 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( return_value=storagepool_resp ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( return_value=storagepool_statistics_resp ) - powerflex_module_mock.perform_module_operation() + StoragePoolHandler().handle( + powerflex_module_mock, powerflex_module_mock.module.params) powerflex_module_mock.powerflex_conn.storage_pool.rename.assert_called() powerflex_module_mock.powerflex_conn.storage_pool.set_use_rmcache.assert_called() powerflex_module_mock.powerflex_conn.storage_pool.set_use_rfcache.assert_called() - powerflex_module_mock.powerflex_conn.storage_pool.set_media_type.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_fragmentation_enabled.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_persistent_checksum.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_rebuild_rebalance_parallelism_limit.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_rmcache_write_handling_mode.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.rebalance_io_priority_policy.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_vtree_migration_io_priority_policy.assert_called() + 
powerflex_module_mock.powerflex_conn.storage_pool.set_protected_maintenance_mode_io_priority_policy.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_cap_alert_thresholds.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_zero_padding_policy.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_spare_percentage.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_rebuild_enabled.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_rebalance_enabled.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_rep_cap_max_ratio.assert_called() - def test_rename_storagepool_exception(self, powerflex_module_mock): + def test_delete_storagepool_exception(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", - "storage_pool_new_name": "new_ansible_pool", + "state": "absent" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('delete_storage_pool'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_name_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "storage_pool_new_name": "test_pool_new", "state": "present" }) powerflex_module_mock.module.params = self.get_module_args @@ -334,6 +308,9 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( return_value=storagepool_resp ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( return_value=storagepool_statistics_resp @@ -342,13 +319,14 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): side_effect=MockApiException ) self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('rename_storage_pool'), - powerflex_module_mock, invoke_perform_module=True) + MockStoragePoolApi.get_exception_response('rename_pool'), + powerflex_module_mock, StoragePoolHandler) - def test_rename_storagepool_empty_exception(self, powerflex_module_mock): + def test_modify_rmcache_exception(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", - "storage_pool_new_name": " ", + "protection_domain_name": "test_pd_1", + "use_rmcahe": True, "state": "present" }) powerflex_module_mock.module.params = self.get_module_args @@ -356,28 +334,395 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( return_value=storagepool_resp ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + 
return_value=pd_resp['protectiondomain']) storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( return_value=storagepool_statistics_resp ) + powerflex_module_mock.powerflex_conn.storage_pool.set_use_rmcache = MagicMock( + side_effect=MockApiException + ) self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('rename_storage_pool_empty'), - powerflex_module_mock, invoke_perform_module=True) + MockStoragePoolApi.get_exception_response('modify_pool_rmcache'), + powerflex_module_mock, StoragePoolHandler) - def test_delete_storagepool_exception(self, powerflex_module_mock): + def test_modify_rfcache_exception(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", - "state": "absent" + "protection_domain_name": "test_pd_1", + "use_rfcahe": True, + "state": "present" }) powerflex_module_mock.module.params = self.get_module_args storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( return_value=storagepool_resp ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( return_value=storagepool_statistics_resp ) + powerflex_module_mock.powerflex_conn.storage_pool.set_use_rfcache = MagicMock( + side_effect=MockApiException + ) self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('delete_storage_pool'), - powerflex_module_mock, invoke_perform_module=True) + MockStoragePoolApi.get_exception_response('modify_pool_rfcache'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_enable_zero_padding_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "enable_zero_padding": False, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_zero_padding_policy = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_enable_zero_padding'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_rep_cap_max_ratio_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "rep_cap_max_ratio": 10, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + 
powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_rep_cap_max_ratio = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_rep_cap_max_ratio'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_enable_rebalance_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "enable_rebalance": False, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_rebalance_enabled = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_enable_rebalance'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_enable_rebuild_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "enable_rebuild": False, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_rebuild_enabled = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_enable_rebuild'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_enable_fragmentation_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "enable_fragmentaion": False, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + 
+        powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+            return_value=storagepool_statistics_resp
+        )
+        powerflex_module_mock.powerflex_conn.storage_pool.set_fragmentation_enabled = MagicMock(
+            side_effect=MockApiException
+        )
+        self.capture_fail_json_call(
+            MockStoragePoolApi.get_exception_response('modify_pool_enable_fragmentation'),
+            powerflex_module_mock, StoragePoolHandler)
+
+    def test_modify_spare_percentage_exception(self, powerflex_module_mock):
+        self.get_module_args.update({
+            "storage_pool_name": "test_pool",
+            "protection_domain_name": "test_pd_1",
+            "spare_percentage": 20,
+            "state": "present"
+        })
+        powerflex_module_mock.module.params = self.get_module_args
+        storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+        powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+            return_value=storagepool_resp
+        )
+        pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN
+        powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+            return_value=pd_resp['protectiondomain'])
+        storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+        powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+            return_value=storagepool_statistics_resp
+        )
+        powerflex_module_mock.powerflex_conn.storage_pool.set_spare_percentage = MagicMock(
+            side_effect=MockApiException
+        )
+        self.capture_fail_json_call(
+            MockStoragePoolApi.get_exception_response('modify_pool_spare_percentage'),
+            powerflex_module_mock, StoragePoolHandler)
+
+    def test_modify_rmcache_write_handling_mode_exception(self, powerflex_module_mock):
+        self.get_module_args.update({
+            "storage_pool_name": "test_pool",
+            "protection_domain_name": "test_pd_1",
+            "rmcache_write_handling_mode": "Cached",
+            "state": "present"
+        })
+        powerflex_module_mock.module.params = self.get_module_args
+        storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+        powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+            return_value=storagepool_resp
+        )
+        pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN
+        powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+            return_value=pd_resp['protectiondomain'])
+        storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+        powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+            return_value=storagepool_statistics_resp
+        )
+        powerflex_module_mock.powerflex_conn.storage_pool.set_rmcache_write_handling_mode = MagicMock(
+            side_effect=MockApiException
+        )
+        self.capture_fail_json_call(
+            MockStoragePoolApi.get_exception_response('modify_pool_rmcache_write_handling_mode'),
+            powerflex_module_mock, StoragePoolHandler)
+
+    def test_modify_rebuild_rebalance_parallelism_limit_exception(self, powerflex_module_mock):
+        self.get_module_args.update({
+            "storage_pool_name": "test_pool",
+            "protection_domain_name": "test_pd_1",
+            "parallel_rebuild_rebalance_limit": 4,
+            "state": "present"
+        })
+        powerflex_module_mock.module.params = self.get_module_args
+        storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+        powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+            return_value=storagepool_resp
+        )
+        pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN
+        powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+            return_value=pd_resp['protectiondomain'])
+        storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+        powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+            return_value=storagepool_statistics_resp
+        )
+        powerflex_module_mock.powerflex_conn.storage_pool.set_rebuild_rebalance_parallelism_limit = MagicMock(
+            side_effect=MockApiException
+        )
+        self.capture_fail_json_call(
+            MockStoragePoolApi.get_exception_response('modify_pool_rebuild_rebalance_parallelism_limit'),
+            powerflex_module_mock, StoragePoolHandler)
+
+    def test_modify_capacity_alert_thresholds_exception(self, powerflex_module_mock):
+        self.get_module_args.update({
+            "storage_pool_name": "test_pool",
+            "protection_domain_name": "test_pd_1",
+            "capacity_alert_thresholds": {
+                "high_threshold": 60,
+                "critical_threshold": 70
+            },
+            "state": "present"
+        })
+        powerflex_module_mock.module.params = self.get_module_args
+        storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+        powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+            return_value=storagepool_resp
+        )
+        pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN
+        powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+            return_value=pd_resp['protectiondomain'])
+        storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+        powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+            return_value=storagepool_statistics_resp
+        )
+        powerflex_module_mock.powerflex_conn.storage_pool.set_capacity_alert_thresholds = MagicMock(
+            side_effect=MockApiException
+        )
+        self.capture_fail_json_call(
+            MockStoragePoolApi.get_exception_response('modify_pool_capacity_alert_thresholds'),
+            powerflex_module_mock, StoragePoolHandler)
+
+    def test_modify_protected_maintenance_mode_io_priority_policy_exception(self, powerflex_module_mock):
+        self.get_module_args.update({
+            "storage_pool_name": "test_pool",
+            "protection_domain_name": "test_pd_1",
+            "protected_maintenance_mode_io_priority_policy": {
+                "policy": "unlimited",
+                "concurrent_ios_per_device": 1,
+                "bw_limit_per_device": 1024},
+            "state": "present"
+        })
+        powerflex_module_mock.module.params = self.get_module_args
+        storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+        powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+            return_value=storagepool_resp
+        )
+        pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN
+        powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+            return_value=pd_resp['protectiondomain'])
+        storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+        powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+            return_value=storagepool_statistics_resp
+        )
+        powerflex_module_mock.powerflex_conn.storage_pool.set_protected_maintenance_mode_io_priority_policy = MagicMock(
+            side_effect=MockApiException
+        )
+        self.capture_fail_json_call(
+            MockStoragePoolApi.get_exception_response('modify_pool_protected_maintenance_mode_io_priority_policy'),
+            powerflex_module_mock, StoragePoolHandler)
+
+    def test_modify_vtree_migration_io_priority_policy_exception(self, powerflex_module_mock):
+        self.get_module_args.update({
+            "storage_pool_name": "test_pool",
+            "protection_domain_name": "test_pd_1",
+            "vtree_migration_io_priority_policy": {
+                "policy": "favorAppIos",
+                "concurrent_ios_per_device": 1,
+                "bw_limit_per_device": 1024},
+            "state": "present"
+        })
+        powerflex_module_mock.module.params = self.get_module_args
+        storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+        powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+            return_value=storagepool_resp
+        )
+        pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN
+        powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+            return_value=pd_resp['protectiondomain'])
+        storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+        powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+            return_value=storagepool_statistics_resp
+        )
+        powerflex_module_mock.powerflex_conn.storage_pool.set_vtree_migration_io_priority_policy = MagicMock(
+            side_effect=MockApiException
+        )
+        self.capture_fail_json_call(
+            MockStoragePoolApi.get_exception_response('modify_pool_vtree_migration_io_priority_policy'),
+            powerflex_module_mock, StoragePoolHandler)
+
+    def test_modify_rebalance_io_priority_policy_exception(self, powerflex_module_mock):
+        self.get_module_args.update({
+            "storage_pool_name": "test_pool",
+            "protection_domain_name": "test_pd_1",
+            "rebalance_io_priority_policy": {
+                "policy": "favorAppIos",
+                "concurrent_ios_per_device": 1,
+                "bw_limit_per_device": 1024},
+            "state": "present"
+        })
+        powerflex_module_mock.module.params = self.get_module_args
+        storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+        powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+            return_value=storagepool_resp
+        )
+        pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN
+        powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+            return_value=pd_resp['protectiondomain'])
+        storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+        powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+            return_value=storagepool_statistics_resp
+        )
+        powerflex_module_mock.powerflex_conn.storage_pool.rebalance_io_priority_policy = MagicMock(
+            side_effect=MockApiException
+        )
+        self.capture_fail_json_call(
+            MockStoragePoolApi.get_exception_response('modify_pool_rebalance_io_priority_policy'),
+            powerflex_module_mock, StoragePoolHandler)
+
+    def test_modify_persistent_checksum_exception(self, powerflex_module_mock):
+        self.get_module_args.update({
+            "storage_pool_name": "test_pool",
+            "protection_domain_name": "test_pd_1",
+            "persistent_checksum": {
+                "enable": True,
+                "validate_on_read": True,
+                "builder_limit": 1024},
+            "state": "present"
+        })
+        powerflex_module_mock.module.params = self.get_module_args
+        storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+        powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+            return_value=storagepool_resp
+        )
+        pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN
+        powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+            return_value=pd_resp['protectiondomain'])
+        storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+        powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+            return_value=storagepool_statistics_resp
+        )
+        powerflex_module_mock.powerflex_conn.storage_pool.set_persistent_checksum = MagicMock(
+            side_effect=MockApiException
+        )
+        self.capture_fail_json_call(
+            MockStoragePoolApi.get_exception_response('modify_pool_persistent_checksum'),
+            powerflex_module_mock, StoragePoolHandler)