Diffstat (limited to 'ansible_collections/purestorage/flasharray')
-rw-r--r--  ansible_collections/purestorage/flasharray/.github/workflows/main.yml | 1
-rw-r--r--  ansible_collections/purestorage/flasharray/CHANGELOG.rst | 21
-rw-r--r--  ansible_collections/purestorage/flasharray/FILES.json | 97
-rw-r--r--  ansible_collections/purestorage/flasharray/MANIFEST.json | 4
-rw-r--r--  ansible_collections/purestorage/flasharray/README.md | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/changelog.yaml | 27
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/210_add_rename_hgroup.yaml (renamed from ansible_collections/purestorage/flasharray/changelogs/210_add_rename_hgroup.yaml) | 0
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/211_fix_clearing_host_inititators.yaml (renamed from ansible_collections/purestorage/flasharray/changelogs/211_fix_clearing_host_inititators.yaml) | 0
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/550_service.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/555_update_pg.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/558_add_perf_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/559_volume_response.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/560_snapshot_epoch.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/561_remote_snap_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/562_host_vol_fix.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/meta/runtime.yml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py | 390
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py | 144
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py | 189
21 files changed, 676 insertions, 221 deletions
diff --git a/ansible_collections/purestorage/flasharray/.github/workflows/main.yml b/ansible_collections/purestorage/flasharray/.github/workflows/main.yml
index 529e1fa88..27d5532d2 100644
--- a/ansible_collections/purestorage/flasharray/.github/workflows/main.yml
+++ b/ansible_collections/purestorage/flasharray/.github/workflows/main.yml
@@ -13,7 +13,6 @@ jobs:
strategy:
matrix:
ansible:
- - stable-2.14
- stable-2.15
- stable-2.16
- devel
diff --git a/ansible_collections/purestorage/flasharray/CHANGELOG.rst b/ansible_collections/purestorage/flasharray/CHANGELOG.rst
index 71991a731..111a76e53 100644
--- a/ansible_collections/purestorage/flasharray/CHANGELOG.rst
+++ b/ansible_collections/purestorage/flasharray/CHANGELOG.rst
@@ -5,6 +5,27 @@ Purestorage.Flasharray Release Notes
.. contents:: Topics
+v1.28.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_hg - Add support to rename existing hostgroup
+- purefa_info - Add ``is_local`` parameter for snapshots
+- purefa_info - Add performance data for some subsets
+- purefa_info - Add service_mode to identify if array is Evergreen//One or standard FlashArray
+- purefa_pg - Enhance ``state absent`` to work on volumes, hosts and hostgroups
+- purefa_snap - Add ``created_epoch`` parameter in response
+
+Bugfixes
+--------
+
+- purefa_host - Allows all current host initiators to be correctly removed
+- purefa_host - Fix idempotency issue with connected volume
+- purefa_volume - Ensure module response for creation of volume and rerun are the same
+- purefa_volume - Fix idempotency issue with delete volume
+
v1.27.0
=======
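
A hypothetical sketch of consuming the new 1.28.0 fields from a registered purefa_info result (the key names come from the changelog entries above; the result variable and the exact layout of the registered value are illustrative only):

    # "result" stands in for a registered purefa_info task result (hypothetical).
    default = result["purefa_info"]["default"]
    print(default.get("service_mode"))  # Evergreen//One vs. standard FlashArray
    for name, snap in result["purefa_info"]["snapshots"].items():
        print(name, snap["created_epoch"], snap["is_local"])
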
diff --git a/ansible_collections/purestorage/flasharray/FILES.json b/ansible_collections/purestorage/flasharray/FILES.json
index 5beee0936..81e70779b 100644
--- a/ansible_collections/purestorage/flasharray/FILES.json
+++ b/ansible_collections/purestorage/flasharray/FILES.json
@@ -15,13 +15,6 @@
"format": 1
},
{
- "name": "changelogs/210_add_rename_hgroup.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8589783011b4145d3eb5099a7d5e025e9fd2cbf50319d426f0b5b6f8e1b637af",
- "format": 1
- },
- {
"name": "changelogs/fragments",
"ftype": "dir",
"chksum_type": null,
@@ -120,6 +113,13 @@
"format": 1
},
{
+ "name": "changelogs/fragments/562_host_vol_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e9b4f4e5e786fbd3d991227b5df5feb83eb7f1b0e72919df98b3e7fba51b8c1",
+ "format": 1
+ },
+ {
"name": "changelogs/fragments/518_nfs_security.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -190,6 +190,13 @@
"format": 1
},
{
+ "name": "changelogs/fragments/210_add_rename_hgroup.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8589783011b4145d3eb5099a7d5e025e9fd2cbf50319d426f0b5b6f8e1b637af",
+ "format": 1
+ },
+ {
"name": "changelogs/fragments/308_add_vm.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -295,6 +302,13 @@
"format": 1
},
{
+ "name": "changelogs/fragments/559_volume_response.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a46bd238e8734aab9aaf4c2829922bd0af48efe0180ecb2836f13c549ef05444",
+ "format": 1
+ },
+ {
"name": "changelogs/fragments/134_ac_pg_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -484,6 +498,13 @@
"format": 1
},
{
+ "name": "changelogs/fragments/550_service.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed29a7af803d809ffe4f0e1a755431cc0c0f2e3b8330c79cfa867ce0e6914350",
+ "format": 1
+ },
+ {
"name": "changelogs/fragments/336_add_servicelist.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -925,6 +946,13 @@
"format": 1
},
{
+ "name": "changelogs/fragments/560_snapshot_epoch.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ee78a33bdaeb6cea928365789ef208d6b5c56db648b3cea894020c9cd203983",
+ "format": 1
+ },
+ {
"name": "changelogs/fragments/496_fix_cert_signing.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -1121,6 +1149,13 @@
"format": 1
},
{
+ "name": "changelogs/fragments/211_fix_clearing_host_inititators.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ce58291d0256cb22a7a8cb015ebfc4775474f594d5c724225875c495213d259",
+ "format": 1
+ },
+ {
"name": "changelogs/fragments/133_purefa_info_v6_replication.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -1177,6 +1212,13 @@
"format": 1
},
{
+ "name": "changelogs/fragments/561_remote_snap_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "179f27b578403177cc3f2f94c141ae33d2e71e95fc2457a71bff010fe344a287",
+ "format": 1
+ },
+ {
"name": "changelogs/fragments/136_add_vol_get_send_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -1254,6 +1296,13 @@
"format": 1
},
{
+ "name": "changelogs/fragments/558_add_perf_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f5d8964b5286a56b053468398ff4d937d4af276e24a91c8f7f41eeea2001d87",
+ "format": 1
+ },
+ {
"name": "changelogs/fragments/315_spf_details.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -1268,6 +1317,13 @@
"format": 1
},
{
+ "name": "changelogs/fragments/555_update_pg.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "71abc3c712ee7e9b71002f31109473aa4b5a34a8ab36dace109e0e22ee540163",
+ "format": 1
+ },
+ {
"name": "changelogs/fragments/375_fix_remote_hosts.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -1509,21 +1565,14 @@
"name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b301bed7223921e76ea8c916463b769bb824f548df463e372920d8424d19dc2f",
- "format": 1
- },
- {
- "name": "changelogs/211_fix_clearing_host_inititators.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8ce58291d0256cb22a7a8cb015ebfc4775474f594d5c724225875c495213d259",
+ "chksum_sha256": "0b587f1c2ab470c01ea25d07a6102c7bd45c511ce57596be9c20f38bb3b5e456",
"format": 1
},
{
"name": "changelogs/.plugin-cache.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dc866613dd4087ce500e2278a17bcd5aa16faf970f135ce9390b3d98605ec035",
+ "chksum_sha256": "2eb4d8b274c69bde5af5d3e8dfb793a902429d99ca679286f7cce43712013dad",
"format": 1
},
{
@@ -1887,7 +1936,7 @@
"name": "plugins/modules/purefa_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "028203768fcd1f50267ca0fcf343ad03f5f4a5e8a80698567f55cf5b0d71e770",
+ "chksum_sha256": "654560c2551fdb85c368d9e296d496174724064102b2b407f428191627a40e7d",
"format": 1
},
{
@@ -1936,7 +1985,7 @@
"name": "plugins/modules/purefa_host.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d913dcc81fa1cde2809c459cd4b59eafd14e41911dd1be71b935f34dc6cfd732",
+ "chksum_sha256": "b7d3d5e058062e9376c53f5b81cf78cb90bfa28840f8c537079fabf7f6444194",
"format": 1
},
{
@@ -2069,7 +2118,7 @@
"name": "plugins/modules/purefa_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "de685054de14726a87c3c9f2ef257770b8e82384e7fe024c97efd72f04b51ef2",
+ "chksum_sha256": "71476398cc0db0ada0a924a210ea483bcad581eb94dd778562f845f510fcfb01",
"format": 1
},
{
@@ -2090,7 +2139,7 @@
"name": "plugins/modules/purefa_pg.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5412fdd646fe83be3147d917bfc6e7b67eca14f96e11aca16696e5f62ceb9a6a",
+ "chksum_sha256": "c33f5788a68e3c24385a1682a9958cff97f2e0b131e468d1711d9d2ccf789e24",
"format": 1
},
{
@@ -2216,7 +2265,7 @@
"name": ".github/workflows/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3fbd0b2490b2c53dcf65cac2ca22b246ba7a447ffbe89366de942686de8e54a3",
+ "chksum_sha256": "de55a03dadff2ec71a72efd44dcbcb3c2950d1a00aea1ffd6223c6ce9a9e815a",
"format": 1
},
{
@@ -2237,7 +2286,7 @@
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "78c320e467ed14bb56976077daad539d0016f60275826f69ea2cce845167aed0",
+ "chksum_sha256": "f7f6650c570a9b0da9f733fa208da1166b49bddf08658d59ef9905648f80060f",
"format": 1
},
{
@@ -2251,7 +2300,7 @@
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a215a07e8eb5923a9ad3ab77c7f18090860e0ce10af48e97c0ba13c2dc3ca354",
+ "chksum_sha256": "6b21b23276abc0ebd3dc7b0f60ee7bde406d270e81fbc44f050c18e6f87c091b",
"format": 1
},
{
@@ -2321,7 +2370,7 @@
"name": "CHANGELOG.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3d37a2cad23124cafaeb9cff76e42675e03e970925ef84ac6623a6ebdcf93536",
+ "chksum_sha256": "ef83ad3fb215623f94ceb80b0dab6978894dd59b90203d2999e9026fa9627b76",
"format": 1
}
],
diff --git a/ansible_collections/purestorage/flasharray/MANIFEST.json b/ansible_collections/purestorage/flasharray/MANIFEST.json
index 7f4348785..5c39c4c14 100644
--- a/ansible_collections/purestorage/flasharray/MANIFEST.json
+++ b/ansible_collections/purestorage/flasharray/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "purestorage",
"name": "flasharray",
- "version": "1.27.0",
+ "version": "1.28.0",
"authors": [
"Pure Storage Ansible Team <pure-ansible-team@purestorage.com>"
],
@@ -29,7 +29,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0b1b2d1f58f2481ee5654c9c884302a3e2c330cabf5a720b81a8e44543082dc7",
+ "chksum_sha256": "ab3ade4ad530c2f9141631d1ac2c2b431cec2651753ec9e7dc3a845eda21dd82",
"format": 1
},
"format": 1
diff --git a/ansible_collections/purestorage/flasharray/README.md b/ansible_collections/purestorage/flasharray/README.md
index e0ec9614f..92ae085d8 100644
--- a/ansible_collections/purestorage/flasharray/README.md
+++ b/ansible_collections/purestorage/flasharray/README.md
@@ -15,7 +15,7 @@ The Pure Storage FlashArray collection consists of the latest versions of the Fl
## Prerequisites
-- Ansible 2.14 or later
+- Ansible 2.15 or later
- Pure Storage FlashArray system running Purity 6.1.0 or later
- some modules require higher versions of Purity
- Some modules require specific Purity versions
diff --git a/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml b/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml
index 57f7e73f4..f99c9ee8a 100644
--- a/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml
+++ b/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml
@@ -322,4 +322,4 @@ plugins:
strategy: {}
test: {}
vars: {}
-version: 1.27.0
+version: 1.28.0
diff --git a/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml b/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml
index 6a0cac383..c73a6b48b 100644
--- a/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml
+++ b/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml
@@ -582,6 +582,33 @@ releases:
- 547_lacp_neighbor_info.yaml
- 548_uptime.yaml
release_date: '2024-03-08'
+ 1.28.0:
+ changes:
+ bugfixes:
+      - purefa_host - Allows all current host initiators to be correctly removed
+ - purefa_host - Fix idempotency issue with connected volume
+ - purefa_volume - Ensure module response for creation of volume and rerun are
+ the same
+ - purefa_volume - Fix idempotency issue with delete volume
+ minor_changes:
+ - purefa_hg - Add support to rename existing hostgroup
+ - purefa_info - Add ``is_local`` parameter for snapshots
+ - purefa_info - Add performance data for some subsets
+ - purefa_info - Add service_mode to identify if array is Evergreen//One or standard
+ FlashArray
+ - purefa_pg - Enhance ``state absent`` to work on volumes, hosts and hostgroups
+ - purefa_snap - Add ``created_epoch`` parameter in response
+ fragments:
+ - 210_add_rename_hgroup.yaml
+ - 211_fix_clearing_host_inititators.yaml
+ - 550_service.yaml
+ - 555_update_pg.yaml
+ - 558_add_perf_info.yaml
+ - 559_volume_response.yaml
+ - 560_snapshot_epoch.yaml
+ - 561_remote_snap_info.yaml
+ - 562_host_vol_fix.yaml
+ release_date: '2024-05-01'
1.4.0:
changes:
bugfixes:
diff --git a/ansible_collections/purestorage/flasharray/changelogs/210_add_rename_hgroup.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/210_add_rename_hgroup.yaml
index 78d1d91f9..78d1d91f9 100644
--- a/ansible_collections/purestorage/flasharray/changelogs/210_add_rename_hgroup.yaml
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/210_add_rename_hgroup.yaml
diff --git a/ansible_collections/purestorage/flasharray/changelogs/211_fix_clearing_host_inititators.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/211_fix_clearing_host_inititators.yaml
index 1425d12b0..1425d12b0 100644
--- a/ansible_collections/purestorage/flasharray/changelogs/211_fix_clearing_host_inititators.yaml
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/211_fix_clearing_host_inititators.yaml
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/550_service.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/550_service.yaml
new file mode 100644
index 000000000..d45c7f47c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/550_service.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add service_mode to identify if array is Evergreen//One or standard FlashArray
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/555_update_pg.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/555_update_pg.yaml
new file mode 100644
index 000000000..854c106f1
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/555_update_pg.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_pg - Enhance ``state absent`` to work on volumes, hosts and hostgroups
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/558_add_perf_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/558_add_perf_info.yaml
new file mode 100644
index 000000000..62ba0ef23
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/558_add_perf_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add performance data for some subsets
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/559_volume_response.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/559_volume_response.yaml
new file mode 100644
index 000000000..1524bb247
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/559_volume_response.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Ensure module response for creation of volume and rerun are the same
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/560_snapshot_epoch.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/560_snapshot_epoch.yaml
new file mode 100644
index 000000000..8574fe26e
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/560_snapshot_epoch.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_snap - Add ``created_epoch`` parameter in response
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/561_remote_snap_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/561_remote_snap_info.yaml
new file mode 100644
index 000000000..1e5532b75
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/561_remote_snap_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add ``is_local`` parameter for snapshots
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/562_host_vol_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/562_host_vol_fix.yaml
new file mode 100644
index 000000000..762d23781
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/562_host_vol_fix.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_volume - Fix idempotency issue with delete volume
+ - purefa_host - Fix idempotency issue with connected volume
diff --git a/ansible_collections/purestorage/flasharray/meta/runtime.yml b/ansible_collections/purestorage/flasharray/meta/runtime.yml
index 15af60575..c784b7e82 100644
--- a/ansible_collections/purestorage/flasharray/meta/runtime.yml
+++ b/ansible_collections/purestorage/flasharray/meta/runtime.yml
@@ -1,5 +1,5 @@
---
-requires_ansible: ">=2.14.0"
+requires_ansible: ">=2.15.0"
plugin_routing:
modules:
purefa_sso:
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
index c396975a2..1bf88ac1e 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
@@ -792,6 +792,7 @@ def update_host(module, array):
"""Modify a host"""
changed = False
renamed = False
+ vol_changed = False
vlan_changed = False
if module.params["state"] == "present":
if module.params["vlan"]:
@@ -816,7 +817,7 @@ def update_host(module, array):
module.params["rename"]
)
)
- init_changed = vol_changed = pers_changed = pref_changed = chap_changed = False
+ init_changed = pers_changed = pref_changed = chap_changed = False
volumes = array.list_host_connections(module.params["name"])
if module.params["iqn"] or module.params["wwns"] or module.params["nqn"]:
init_changed = _update_host_initiators(module, array)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
index 262d227be..cc2c92fdc 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
@@ -176,6 +176,9 @@ def generate_default_dict(module, array):
default_info["encryption_module_version"] = encryption.module_version
eradication = array_data.eradication_config
if SUBS_API_VERSION in api_version:
+ default_info["service_mode"] = list(arrayv6.get_subscriptions().items)[
+ 0
+ ].service
default_info["eradication_disabled_days_timer"] = int(
eradication.disabled_delay / SEC_TO_DAY
)
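
One caveat with the service_mode lookup above, noted as an observation rather than a change: if get_subscriptions() returns an empty item collection, list(...)[0] raises IndexError. A defensive variant might read:

    # Sketch only: guard against arrays that report no subscription records.
    subs = list(arrayv6.get_subscriptions().items)
    if subs:
        default_info["service_mode"] = subs[0].service
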
@@ -1079,9 +1082,15 @@ def generate_snap_dict(module, array):
"size": snaps[snap]["size"],
"source": snaps[snap]["source"],
"created": snaps[snap]["created"],
+ "created_epoch": int(
+ time.mktime(time.strptime(snaps[snap]["created"], "%Y-%m-%dT%H:%M:%SZ"))
+ ),
"tags": [],
+ "is_local": True,
"remote": [],
}
+ if ":" in snapshot and "::" not in snapshot:
+ snap_info[snapshot]["is_local"] = False
if FC_REPL_API_VERSION in api_version:
for snap in range(0, len(snapsv6)):
snapshot = snapsv6[snap].name
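
Two notes on the snapshot hunk above. The is_local heuristic leans on FlashArray naming: a snapshot name containing ":" but not "::" was replicated from a remote array, while "::" is the pod separator. Separately, time.mktime() interprets a struct_time as local time, yet the created string ends in "Z" (UTC), so created_epoch is shifted by the local UTC offset wherever the module runs; if a UTC epoch was intended, calendar.timegm() gives it exactly. A minimal sketch:

    import calendar
    import time

    created = "2024-05-01T12:34:56Z"  # example value in the API's format
    fmt = "%Y-%m-%dT%H:%M:%SZ"
    local_epoch = int(time.mktime(time.strptime(created, fmt)))  # as in the diff
    utc_epoch = calendar.timegm(time.strptime(created, fmt))     # UTC-correct
    # The two differ by the local UTC offset in effect on that date.
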
@@ -1324,7 +1333,7 @@ def generate_del_vol_dict(module, array):
return volume_info
-def generate_vol_dict(module, array):
+def generate_vol_dict(module, array, performance):
volume_info = {}
vols_space = array.list_volumes(space=True)
vols = array.list_volumes()
@@ -1347,6 +1356,7 @@ def generate_vol_dict(module, array):
"data_reduction": vols_space[vol]["data_reduction"],
"thin_provisioning": vols_space[vol]["thin_provisioning"],
"total_reduction": vols_space[vol]["total_reduction"],
+ "performance": [],
}
api_version = array._list_available_rest_versions()
if V6_MINIMUM_API_VERSION in api_version:
@@ -1425,27 +1435,78 @@ def generate_vol_dict(module, array):
].priority_adjustment.priority_adjustment_operator + str(
volumes[vol].priority_adjustment.priority_adjustment_value
)
- cvols = array.list_volumes(connect=True)
- for cvol in range(0, len(cvols)):
- volume = cvols[cvol]["name"]
- voldict = {"host": cvols[cvol]["host"], "lun": cvols[cvol]["lun"]}
- volume_info[volume]["hosts"].append(voldict)
- if ACTIVE_DR_API in api_version:
- voltags = array.list_volumes(tags=True)
- for voltag in range(0, len(voltags)):
- if voltags[voltag]["namespace"] != "vasa-integration.purestorage.com":
- volume = voltags[voltag]["name"]
- tagdict = {
- "key": voltags[voltag]["key"],
- "value": voltags[voltag]["value"],
- "copyable": voltags[voltag]["copyable"],
- "namespace": voltags[voltag]["namespace"],
+ if performance:
+ vols_performance = list(arrayv6.get_volumes_performance().items)
+ for performance in range(0, len(vols_performance)):
+ volume_info[vols_performance[performance].name]["performance"] = {
+ "bytes_per_mirrored_write": vols_performance[
+ performance
+ ].bytes_per_mirrored_write,
+ "bytes_per_op": vols_performance[performance].bytes_per_op,
+ "bytes_per_read": vols_performance[performance].bytes_per_read,
+ "bytes_per_write": vols_performance[performance].bytes_per_write,
+ "mirrored_write_bytes_per_sec": vols_performance[
+ performance
+ ].mirrored_write_bytes_per_sec,
+ "mirrored_writes_per_sec": vols_performance[
+ performance
+ ].mirrored_writes_per_sec,
+ "qos_rate_limit_usec_per_mirrored_write_op": vols_performance[
+ performance
+ ].qos_rate_limit_usec_per_mirrored_write_op,
+                "qos_rate_limit_usec_per_read_op": vols_performance[
+                    performance
+                ].qos_rate_limit_usec_per_read_op,
+                "qos_rate_limit_usec_per_write_op": vols_performance[
+                    performance
+                ].qos_rate_limit_usec_per_write_op,
+ "queue_usec_per_mirrored_write_op": vols_performance[
+ performance
+ ].queue_usec_per_mirrored_write_op,
+ "queue_usec_per_read_op": vols_performance[
+ performance
+ ].queue_usec_per_read_op,
+ "queue_usec_per_write_op": vols_performance[
+ performance
+ ].queue_usec_per_write_op,
+ "read_bytes_per_sec": vols_performance[
+ performance
+ ].read_bytes_per_sec,
+ "reads_per_sec": vols_performance[performance].reads_per_sec,
+ "san_usec_per_mirrored_write_op": vols_performance[
+ performance
+ ].san_usec_per_mirrored_write_op,
+ "san_usec_per_read_op": vols_performance[
+ performance
+ ].san_usec_per_read_op,
+ "san_usec_per_write_op": vols_performance[
+ performance
+ ].san_usec_per_write_op,
+ "service_usec_per_mirrored_write_op": vols_performance[
+ performance
+ ].service_usec_per_mirrored_write_op,
+ "service_usec_per_read_op": vols_performance[
+ performance
+ ].service_usec_per_read_op,
+ "service_usec_per_write_op": vols_performance[
+ performance
+ ].service_usec_per_write_op,
+ "usec_per_mirrored_write_op": vols_performance[
+ performance
+ ].usec_per_mirrored_write_op,
+ "usec_per_read_op": vols_performance[performance].usec_per_read_op,
+ "usec_per_write_op": vols_performance[
+ performance
+ ].usec_per_write_op,
+ "write_bytes_per_sec": vols_performance[
+ performance
+ ].write_bytes_per_sec,
+ "writes_per_sec": vols_performance[performance].writes_per_sec,
}
- volume_info[volume]["tags"].append(tagdict)
return volume_info
-def generate_host_dict(module, array):
+def generate_host_dict(module, array, performance):
api_version = array._list_available_rest_versions()
host_info = {}
if FC_REPL_API_VERSION in api_version:
@@ -1474,6 +1535,7 @@ def generate_host_dict(module, array):
"personality": array.get_host(hostname, personality=True)["personality"],
"target_port": all_tports,
"volumes": [],
+ "performance": [],
"performance_balance": [],
}
if FC_REPL_API_VERSION in api_version:
@@ -1546,6 +1608,70 @@ def generate_host_dict(module, array):
if hosts[host].is_local:
hostname = hosts[host].name
host_info[hostname]["vlan"] = getattr(hosts[host], "vlan", None)
+ if FC_REPL_API_VERSION in api_version and performance:
+ hosts_performance = list(arrayv6.get_hosts_performance().items)
+ for performance in range(0, len(hosts_performance)):
+ host_info[hosts_performance[performance].name]["performance"] = {
+ "bytes_per_mirrored_write": hosts_performance[
+ performance
+ ].bytes_per_mirrored_write,
+ "bytes_per_op": hosts_performance[performance].bytes_per_op,
+ "bytes_per_read": hosts_performance[performance].bytes_per_read,
+ "bytes_per_write": hosts_performance[performance].bytes_per_write,
+ "mirrored_write_bytes_per_sec": hosts_performance[
+ performance
+ ].mirrored_write_bytes_per_sec,
+ "mirrored_writes_per_sec": hosts_performance[
+ performance
+ ].mirrored_writes_per_sec,
+ "qos_rate_limit_usec_per_mirrored_write_op": hosts_performance[
+ performance
+ ].qos_rate_limit_usec_per_mirrored_write_op,
+            "qos_rate_limit_usec_per_read_op": hosts_performance[
+                performance
+            ].qos_rate_limit_usec_per_read_op,
+            "qos_rate_limit_usec_per_write_op": hosts_performance[
+                performance
+            ].qos_rate_limit_usec_per_write_op,
+ "queue_usec_per_mirrored_write_op": hosts_performance[
+ performance
+ ].queue_usec_per_mirrored_write_op,
+ "queue_usec_per_read_op": hosts_performance[
+ performance
+ ].queue_usec_per_read_op,
+ "queue_usec_per_write_op": hosts_performance[
+ performance
+ ].queue_usec_per_write_op,
+ "read_bytes_per_sec": hosts_performance[performance].read_bytes_per_sec,
+ "reads_per_sec": hosts_performance[performance].reads_per_sec,
+ "san_usec_per_mirrored_write_op": hosts_performance[
+ performance
+ ].san_usec_per_mirrored_write_op,
+ "san_usec_per_read_op": hosts_performance[
+ performance
+ ].san_usec_per_read_op,
+ "san_usec_per_write_op": hosts_performance[
+ performance
+ ].san_usec_per_write_op,
+ "service_usec_per_mirrored_write_op": hosts_performance[
+ performance
+ ].service_usec_per_mirrored_write_op,
+ "service_usec_per_read_op": hosts_performance[
+ performance
+ ].service_usec_per_read_op,
+ "service_usec_per_write_op": hosts_performance[
+ performance
+ ].service_usec_per_write_op,
+ "usec_per_mirrored_write_op": hosts_performance[
+ performance
+ ].usec_per_mirrored_write_op,
+ "usec_per_read_op": hosts_performance[performance].usec_per_read_op,
+ "usec_per_write_op": hosts_performance[performance].usec_per_write_op,
+ "write_bytes_per_sec": hosts_performance[
+ performance
+ ].write_bytes_per_sec,
+ "writes_per_sec": hosts_performance[performance].writes_per_sec,
+ }
return host_info
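
A style observation that applies to all five performance loops added in this release (volumes, hosts, pods, volume groups, host groups): the integer loop variable reuses the name of the boolean performance parameter, which works but shadows it and forces repeated positional indexing. A sketch of the tighter form, with most counters elided:

    # Sketch only; iterates items directly instead of indexing by position.
    for perf in hosts_performance:
        host_info[perf.name]["performance"] = {
            "bytes_per_op": perf.bytes_per_op,
            "reads_per_sec": perf.reads_per_sec,
            "writes_per_sec": perf.writes_per_sec,
            # ... remaining counters as in the diff ...
        }
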
@@ -1674,7 +1800,7 @@ def generate_del_pgroups_dict(module, array):
return pgroups_info
-def generate_pgroups_dict(module, array):
+def generate_pgroups_dict(module, array, performance):
pgroups_info = {}
api_version = array._list_available_rest_versions()
pgroups = array.list_pgroups()
@@ -1901,7 +2027,7 @@ def generate_del_pods_dict(module, array):
return pods_info
-def generate_pods_dict(module, array):
+def generate_pods_dict(module, array, performance):
pods_info = {}
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
@@ -1913,6 +2039,7 @@ def generate_pods_dict(module, array):
"arrays": pods[pod]["arrays"],
"mediator": pods[pod]["mediator"],
"mediator_version": pods[pod]["mediator_version"],
+ "performance": [],
}
if ACTIVE_DR_API in api_version:
if pods_info[acpod]["arrays"][0]["frozen_at"]:
@@ -1968,6 +2095,79 @@ def generate_pods_dict(module, array):
)
if SUBS_API_VERSION in api_version:
pods_info[name]["total_used"] = pods[pod].space.total_used
+ if performance:
+ pods_performance = list(arrayv6.get_pods_performance().items)
+ for performance in range(0, len(pods_performance)):
+ pods_info[pods_performance[performance].name]["performance"] = {
+ "bytes_per_mirrored_write": pods_performance[
+ performance
+ ].bytes_per_mirrored_write,
+ "bytes_per_op": pods_performance[performance].bytes_per_op,
+ "bytes_per_read": pods_performance[performance].bytes_per_read,
+ "bytes_per_write": pods_performance[
+ performance
+ ].bytes_per_write,
+ "mirrored_write_bytes_per_sec": pods_performance[
+ performance
+ ].mirrored_write_bytes_per_sec,
+ "mirrored_writes_per_sec": pods_performance[
+ performance
+ ].mirrored_writes_per_sec,
+ "others_per_sec": pods_performance[performance].others_per_sec,
+ "qos_rate_limit_usec_per_mirrored_write_op": pods_performance[
+ performance
+ ].qos_rate_limit_usec_per_mirrored_write_op,
+                "qos_rate_limit_usec_per_read_op": pods_performance[
+                    performance
+                ].qos_rate_limit_usec_per_read_op,
+                "qos_rate_limit_usec_per_write_op": pods_performance[
+                    performance
+                ].qos_rate_limit_usec_per_write_op,
+ "queue_usec_per_mirrored_write_op": pods_performance[
+ performance
+ ].queue_usec_per_mirrored_write_op,
+ "queue_usec_per_read_op": pods_performance[
+ performance
+ ].queue_usec_per_read_op,
+ "queue_usec_per_write_op": pods_performance[
+ performance
+ ].queue_usec_per_write_op,
+ "read_bytes_per_sec": pods_performance[
+ performance
+ ].read_bytes_per_sec,
+ "reads_per_sec": pods_performance[performance].reads_per_sec,
+ "san_usec_per_mirrored_write_op": pods_performance[
+ performance
+ ].san_usec_per_mirrored_write_op,
+ "san_usec_per_read_op": pods_performance[
+ performance
+ ].san_usec_per_read_op,
+ "san_usec_per_write_op": pods_performance[
+ performance
+ ].san_usec_per_write_op,
+ "service_usec_per_mirrored_write_op": pods_performance[
+ performance
+ ].service_usec_per_mirrored_write_op,
+ "service_usec_per_read_op": pods_performance[
+ performance
+ ].service_usec_per_read_op,
+ "service_usec_per_write_op": pods_performance[
+ performance
+ ].service_usec_per_write_op,
+ "usec_per_mirrored_write_op": pods_performance[
+ performance
+ ].usec_per_mirrored_write_op,
+ "usec_per_read_op": pods_performance[
+ performance
+ ].usec_per_read_op,
+ "usec_per_write_op": pods_performance[
+ performance
+ ].usec_per_write_op,
+ "write_bytes_per_sec": pods_performance[
+ performance
+ ].write_bytes_per_sec,
+ "writes_per_sec": pods_performance[performance].writes_per_sec,
+ }
return pods_info
@@ -2064,7 +2264,7 @@ def generate_apps_dict(array):
return apps_info
-def generate_vgroups_dict(module, array):
+def generate_vgroups_dict(module, array, performance):
vgroups_info = {}
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
@@ -2073,6 +2273,7 @@ def generate_vgroups_dict(module, array):
virtgroup = vgroups[vgroup]["name"]
vgroups_info[virtgroup] = {
"volumes": vgroups[vgroup]["volumes"],
+ "performance": [],
}
if V6_MINIMUM_API_VERSION in api_version:
arrayv6 = get_array(module)
@@ -2115,6 +2316,72 @@ def generate_vgroups_dict(module, array):
].priority_adjustment.priority_adjustment_operator + str(
vgroups[vgroup].priority_adjustment.priority_adjustment_value
)
+ if performance:
+ vgs_performance = list(arrayv6.get_volume_groups_performance().items)
+ for performance in range(0, len(vgs_performance)):
+ vgroups_info[vgs_performance[performance].name]["performance"] = {
+ "bytes_per_mirrored_write": vgs_performance[
+ performance
+ ].bytes_per_mirrored_write,
+ "bytes_per_op": vgs_performance[performance].bytes_per_op,
+ "bytes_per_read": vgs_performance[performance].bytes_per_read,
+ "bytes_per_write": vgs_performance[performance].bytes_per_write,
+ "mirrored_write_bytes_per_sec": vgs_performance[
+ performance
+ ].mirrored_write_bytes_per_sec,
+ "mirrored_writes_per_sec": vgs_performance[
+ performance
+ ].mirrored_writes_per_sec,
+ "qos_rate_limit_usec_per_mirrored_write_op": vgs_performance[
+ performance
+ ].qos_rate_limit_usec_per_mirrored_write_op,
+            "qos_rate_limit_usec_per_read_op": vgs_performance[
+                performance
+            ].qos_rate_limit_usec_per_read_op,
+            "qos_rate_limit_usec_per_write_op": vgs_performance[
+                performance
+            ].qos_rate_limit_usec_per_write_op,
+ "queue_usec_per_mirrored_write_op": vgs_performance[
+ performance
+ ].queue_usec_per_mirrored_write_op,
+ "queue_usec_per_read_op": vgs_performance[
+ performance
+ ].queue_usec_per_read_op,
+ "queue_usec_per_write_op": vgs_performance[
+ performance
+ ].queue_usec_per_write_op,
+ "read_bytes_per_sec": vgs_performance[
+ performance
+ ].read_bytes_per_sec,
+ "reads_per_sec": vgs_performance[performance].reads_per_sec,
+ "san_usec_per_mirrored_write_op": vgs_performance[
+ performance
+ ].san_usec_per_mirrored_write_op,
+ "san_usec_per_read_op": vgs_performance[
+ performance
+ ].san_usec_per_read_op,
+ "san_usec_per_write_op": vgs_performance[
+ performance
+ ].san_usec_per_write_op,
+ "service_usec_per_mirrored_write_op": vgs_performance[
+ performance
+ ].service_usec_per_mirrored_write_op,
+ "service_usec_per_read_op": vgs_performance[
+ performance
+ ].service_usec_per_read_op,
+ "service_usec_per_write_op": vgs_performance[
+ performance
+ ].service_usec_per_write_op,
+ "usec_per_mirrored_write_op": vgs_performance[
+ performance
+ ].usec_per_mirrored_write_op,
+ "usec_per_read_op": vgs_performance[performance].usec_per_read_op,
+ "usec_per_write_op": vgs_performance[performance].usec_per_write_op,
+ "write_bytes_per_sec": vgs_performance[
+ performance
+ ].write_bytes_per_sec,
+ "writes_per_sec": vgs_performance[performance].writes_per_sec,
+ }
return vgroups_info
@@ -2425,7 +2692,7 @@ def generate_google_offload_dict(array):
return offload_info
-def generate_hgroups_dict(module, array):
+def generate_hgroups_dict(module, array, performance):
hgroups_info = {}
api_version = array._list_available_rest_versions()
hgroups = array.list_hgroups()
@@ -2480,6 +2747,72 @@ def generate_hgroups_dict(module, array):
)
if SUBS_API_VERSION in api_version:
hgroups_info[name]["total_used"] = hgroups[hgroup].space.total_used
+ if performance:
+ hgs_performance = list(arrayv6.get_host_groups_performance().items)
+ for performance in range(0, len(hgs_performance)):
+ hgroups_info[hgs_performance[performance].name]["performance"] = {
+ "bytes_per_mirrored_write": hgs_performance[
+ performance
+ ].bytes_per_mirrored_write,
+ "bytes_per_op": hgs_performance[performance].bytes_per_op,
+ "bytes_per_read": hgs_performance[performance].bytes_per_read,
+ "bytes_per_write": hgs_performance[performance].bytes_per_write,
+ "mirrored_write_bytes_per_sec": hgs_performance[
+ performance
+ ].mirrored_write_bytes_per_sec,
+ "mirrored_writes_per_sec": hgs_performance[
+ performance
+ ].mirrored_writes_per_sec,
+ "qos_rate_limit_usec_per_mirrored_write_op": hgs_performance[
+ performance
+ ].qos_rate_limit_usec_per_mirrored_write_op,
+            "qos_rate_limit_usec_per_read_op": hgs_performance[
+                performance
+            ].qos_rate_limit_usec_per_read_op,
+            "qos_rate_limit_usec_per_write_op": hgs_performance[
+                performance
+            ].qos_rate_limit_usec_per_write_op,
+ "queue_usec_per_mirrored_write_op": hgs_performance[
+ performance
+ ].queue_usec_per_mirrored_write_op,
+ "queue_usec_per_read_op": hgs_performance[
+ performance
+ ].queue_usec_per_read_op,
+ "queue_usec_per_write_op": hgs_performance[
+ performance
+ ].queue_usec_per_write_op,
+ "read_bytes_per_sec": hgs_performance[
+ performance
+ ].read_bytes_per_sec,
+ "reads_per_sec": hgs_performance[performance].reads_per_sec,
+ "san_usec_per_mirrored_write_op": hgs_performance[
+ performance
+ ].san_usec_per_mirrored_write_op,
+ "san_usec_per_read_op": hgs_performance[
+ performance
+ ].san_usec_per_read_op,
+ "san_usec_per_write_op": hgs_performance[
+ performance
+ ].san_usec_per_write_op,
+ "service_usec_per_mirrored_write_op": hgs_performance[
+ performance
+ ].service_usec_per_mirrored_write_op,
+ "service_usec_per_read_op": hgs_performance[
+ performance
+ ].service_usec_per_read_op,
+ "service_usec_per_write_op": hgs_performance[
+ performance
+ ].service_usec_per_write_op,
+ "usec_per_mirrored_write_op": hgs_performance[
+ performance
+ ].usec_per_mirrored_write_op,
+ "usec_per_read_op": hgs_performance[performance].usec_per_read_op,
+ "usec_per_write_op": hgs_performance[performance].usec_per_write_op,
+ "write_bytes_per_sec": hgs_performance[
+ performance
+ ].write_bytes_per_sec,
+ "writes_per_sec": hgs_performance[performance].writes_per_sec,
+ }
return hgroups_info
@@ -2657,10 +2990,11 @@ def main():
)
info = {}
-
+ performance = False
if "minimum" in subset or "all" in subset or "apps" in subset:
info["default"] = generate_default_dict(module, array)
if "performance" in subset or "all" in subset:
+ performance = True
info["performance"] = generate_perf_dict(array)
if "config" in subset or "all" in subset:
info["config"] = generate_config_dict(module, array)
@@ -2673,26 +3007,26 @@ def main():
if "interfaces" in subset or "all" in subset:
info["interfaces"] = generate_interfaces_dict(array)
if "hosts" in subset or "all" in subset:
- info["hosts"] = generate_host_dict(module, array)
+ info["hosts"] = generate_host_dict(module, array, performance)
if "volumes" in subset or "all" in subset:
- info["volumes"] = generate_vol_dict(module, array)
+ info["volumes"] = generate_vol_dict(module, array, performance)
info["deleted_volumes"] = generate_del_vol_dict(module, array)
if "snapshots" in subset or "all" in subset:
info["snapshots"] = generate_snap_dict(module, array)
info["deleted_snapshots"] = generate_del_snap_dict(module, array)
if "hgroups" in subset or "all" in subset:
- info["hgroups"] = generate_hgroups_dict(module, array)
+ info["hgroups"] = generate_hgroups_dict(module, array, performance)
if "pgroups" in subset or "all" in subset:
info["pgroups"] = generate_pgroups_dict(module, array)
info["deleted_pgroups"] = generate_del_pgroups_dict(module, array)
if "pods" in subset or "all" in subset or "replication" in subset:
info["replica_links"] = generate_rl_dict(module, array)
- info["pods"] = generate_pods_dict(module, array)
+ info["pods"] = generate_pods_dict(module, array, performance)
info["deleted_pods"] = generate_del_pods_dict(module, array)
if "admins" in subset or "all" in subset:
info["admins"] = generate_admin_dict(array)
if "vgroups" in subset or "all" in subset:
- info["vgroups"] = generate_vgroups_dict(module, array)
+ info["vgroups"] = generate_vgroups_dict(module, array, performance)
info["deleted_vgroups"] = generate_del_vgroups_dict(module, array)
if "offload" in subset or "all" in subset:
info["azure_offload"] = generate_azure_offload_dict(module, array)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
index 3344c0895..840b11385 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
@@ -36,6 +36,8 @@ options:
state:
description:
- Define whether the protection group should exist or not.
+    - If specified with I(volume), I(host) or I(hostgroup), the module will
+      act on those items in the protection group only.
type: str
default: present
choices: [ absent, present ]
@@ -186,6 +188,15 @@ EXAMPLES = r"""
target: arrayb
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Remove a volume from protection group
+ purestorage.flasharray.purefa_pg:
+ name: bar
+ volume:
+ - vol1
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
"""
RETURN = r"""
@@ -455,6 +466,7 @@ def rename_exists(module, array):
def update_pgroup(module, array):
"""Update Protection Group"""
changed = renamed = False
+ state = module.params["state"]
api_version = array._list_available_rest_versions()
if module.params["target"]:
connected_targets = []
@@ -546,19 +558,36 @@ def update_pgroup(module, array):
else:
cased_vols = list(module.params["volume"])
cased_pgvols = list(get_pgroup(module, array)["volumes"])
- if not all(x in cased_pgvols for x in cased_vols):
- if not module.check_mode:
- changed = True
- try:
- array.set_pgroup(
- module.params["name"], addvollist=module.params["volume"]
- )
- except Exception:
- module.fail_json(
- msg="Changing volumes in pgroup {0} failed.".format(
- module.params["name"]
+ if state == "present":
+ if not all(x in cased_pgvols for x in cased_vols):
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"],
+ addvollist=module.params["volume"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Adding volumes in pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ if all(x in cased_pgvols for x in cased_vols):
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"],
+ remvollist=module.params["volume"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Removing volumes in pgroup {0} failed.".format(
+ module.params["name"]
+ )
)
- )
if (
module.params["host"]
@@ -581,19 +610,34 @@ def update_pgroup(module, array):
else:
cased_hosts = list(module.params["host"])
cased_pghosts = list(get_pgroup(module, array)["hosts"])
- if not all(x in cased_pghosts for x in cased_hosts):
- if not module.check_mode:
- changed = True
- try:
- array.set_pgroup(
- module.params["name"], addhostlist=module.params["host"]
- )
- except Exception:
- module.fail_json(
- msg="Changing hosts in pgroup {0} failed.".format(
- module.params["name"]
+ if state == "present":
+ if not all(x in cased_pghosts for x in cased_hosts):
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"], addhostlist=module.params["host"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Adding hosts in pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ if all(x in cased_pghosts for x in cased_hosts):
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"], remhostlist=module.params["host"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Removing hosts in pgroup {0} failed.".format(
+ module.params["name"]
+ )
)
- )
if (
module.params["hostgroup"]
@@ -616,20 +660,36 @@ def update_pgroup(module, array):
else:
cased_hostg = list(module.params["hostgroup"])
cased_pghostg = list(get_pgroup(module, array)["hgroups"])
- if not all(x in cased_pghostg for x in cased_hostg):
- if not module.check_mode:
- changed = True
- try:
- array.set_pgroup(
- module.params["name"],
- addhgrouplist=module.params["hostgroup"],
- )
- except Exception:
- module.fail_json(
- msg="Changing hostgroups in pgroup {0} failed.".format(
- module.params["name"]
+ if state == "present":
+ if not all(x in cased_pghostg for x in cased_hostg):
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"],
+ addhgrouplist=module.params["hostgroup"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Adding hostgroups in pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ if all(x in cased_pghostg for x in cased_hostg):
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"],
+ remhgrouplist=module.params["hostgroup"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Removing hostgroups in pgroup {0} failed.".format(
+ module.params["name"]
+ )
)
- )
if module.params["rename"]:
if not rename_exists(module, array):
if ":" in module.params["name"]:
@@ -881,6 +941,16 @@ def main():
if pgroup and state == "present":
update_pgroup(module, array)
+ elif (
+ pgroup
+ and state == "absent"
+ and (
+ module.params["volume"]
+            or module.params["host"]
+            or module.params["hostgroup"]
+ )
+ ):
+ update_pgroup(module, array)
elif pgroup and state == "absent":
delete_pgroup(module, array)
elif xpgroup and state == "absent" and module.params["eradicate"]:
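
Distilling the membership logic this change introduces (hypothetical helper, not module code): with state present the module adds members whenever any requested one is missing, while with state absent it removes them only when every requested member currently belongs to the protection group, so a partially matching request is a no-op:

    def pgroup_change(state, requested, current):
        # Mirrors the all()-based checks in update_pgroup() above.
        if state == "present":
            return "add" if not all(x in current for x in requested) else None
        return "remove" if all(x in current for x in requested) else None

    assert pgroup_change("absent", ["vol1"], ["vol1", "vol2"]) == "remove"
    assert pgroup_change("absent", ["vol1", "vol9"], ["vol1", "vol2"]) is None
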
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
index 877af7f74..77236e3af 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
@@ -331,7 +331,9 @@ from ansible_collections.purestorage.flasharray.plugins.module_utils.common impo
human_to_bytes,
human_to_real,
)
-
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
QOS_API_VERSION = "1.14"
VGROUPS_API_VERSION = "1.13"
@@ -347,6 +349,38 @@ DEFAULT_API_VERSION = "2.16"
VOLUME_PROMOTION_API_VERSION = "2.2"
+def _volfact(module, array):
+ api_version = array.get_rest_version()
+ volume_name = module.params["name"]
+ volume_data = list(array.get_volumes(names=[volume_name]).items)[0]
+ volfact = {
+ volume_name: {
+ "size": volume_data.provisioned,
+ "serial": volume_data.serial,
+ "created": time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(volume_data.created / 1000)
+ ),
+ "page83_naa": PURE_OUI + volume_data.serial.lower(),
+ "nvme_nguid": _create_nguid(volume_data.serial.lower()),
+ "iops_limit": getattr(volume_data.qos, "iops_limit", 0),
+ "bandwidth_limit": getattr(volume_data.qos, "bandwidth_limit", 0),
+ "requested_promotion_state": volume_data.requested_promotion_state,
+ "promotion_status": volume_data.promotion_status,
+ "priority": getattr(volume_data, "priority", 0),
+ "priority_operator": "",
+ "priority_value": "",
+ }
+ }
+ if LooseVersion(PRIORITY_API_VERSION) <= LooseVersion(api_version):
+ volfact[volume_name][
+ "priority_operator"
+ ] = volume_data.priority_adjustment.priority_adjustment_operator
+ volfact[volume_name][
+ "priority_value"
+ ] = volume_data.priority_adjustment.priority_adjustment_value
+ return volfact
+
+
def _create_nguid(serial):
nguid = "eui.00" + serial[0:14] + "24a937" + serial[-10:]
return nguid
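
Worth flagging for playbook authors: _volfact() nests the facts under the volume name, whereas the pre-1.28.0 code returned them flat, so the shape of the module's volume return value changes (illustrative access, assuming a volume named vol1 and a registered result):

    serial = result["volume"]["serial"]          # pre-1.28.0 shape
    serial = result["volume"]["vol1"]["serial"]  # 1.28.0 shape via _volfact()
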
@@ -511,7 +545,6 @@ def check_pod(module, array):
def create_volume(module, array):
"""Create Volume"""
- volfact = []
changed = False
api_version = array._list_available_rest_versions()
if "/" in module.params["name"] and not check_vgroup(module, array):
@@ -549,17 +582,11 @@ def create_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.create_volume(
+ array.create_volume(
module.params["name"],
module.params["size"],
bandwidth_limit=module.params["bw_qos"],
)
- volfact["page83_naa"] = (
- PURE_OUI + volfact["serial"].lower()
- )
- volfact["nvme_nguid"] = _create_nguid(
- volfact["serial"].lower()
- )
except Exception:
module.fail_json(
msg="Volume {0} creation failed.".format(
@@ -581,17 +608,11 @@ def create_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.create_volume(
+ array.create_volume(
module.params["name"],
module.params["size"],
iops_limit=module.params["iops_qos"],
)
- volfact["page83_naa"] = (
- PURE_OUI + volfact["serial"].lower()
- )
- volfact["nvme_nguid"] = _create_nguid(
- volfact["serial"].lower()
- )
except Exception:
module.fail_json(
msg="Volume {0} creation failed.".format(
@@ -612,18 +633,12 @@ def create_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.create_volume(
+ array.create_volume(
module.params["name"],
module.params["size"],
iops_limit=module.params["iops_qos"],
bandwidth_limit=module.params["bw_qos"],
)
- volfact["page83_naa"] = (
- PURE_OUI + volfact["serial"].lower()
- )
- volfact["nvme_nguid"] = _create_nguid(
- volfact["serial"].lower()
- )
except Exception:
module.fail_json(
msg="Volume {0} creation failed.".format(
@@ -642,17 +657,11 @@ def create_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.create_volume(
+ array.create_volume(
module.params["name"],
module.params["size"],
bandwidth_limit=module.params["bw_qos"],
)
- volfact["page83_naa"] = (
- PURE_OUI + volfact["serial"].lower()
- )
- volfact["nvme_nguid"] = _create_nguid(
- volfact["serial"].lower()
- )
except Exception:
module.fail_json(
msg="Volume {0} creation failed.".format(
@@ -669,13 +678,9 @@ def create_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.create_volume(
+ array.create_volume(
module.params["name"], module.params["size"]
)
- volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
- volfact["nvme_nguid"] = _create_nguid(
- volfact["serial"].lower()
- )
except Exception:
module.fail_json(
msg="Volume {0} creation failed.".format(
@@ -686,11 +691,7 @@ def create_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.create_volume(
- module.params["name"], module.params["size"]
- )
- volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
- volfact["nvme_nguid"] = _create_nguid(volfact["serial"].lower())
+ array.create_volume(module.params["name"], module.params["size"])
except Exception:
module.fail_json(
msg="Volume {0} creation failed.".format(module.params["name"])
@@ -714,8 +715,6 @@ def create_volume(module, array):
module.params["name"]
)
)
- else:
- volfact["promotion_state"] = module.params["promotion_state"]
if PRIORITY_API_VERSION in api_version and module.params["priority_operator"]:
arrayv6 = get_array(module)
volume = flasharray.VolumePatch(
@@ -736,9 +735,6 @@ def create_volume(module, array):
module.params["name"], res.errors[0].message
)
)
- else:
- volfact["priority_operator"] = module.params["priority_operator"]
- volfact["priority_value"] = module.params["priority_value"]
if module.params["pgroup"] and DEFAULT_API_VERSION not in api_version:
changed = True
if not module.check_mode:
@@ -753,7 +749,7 @@ def create_volume(module, array):
)
)
- module.exit_json(changed=changed, volume=volfact)
+ module.exit_json(changed=changed, volume=_volfact(module, arrayv6))
def create_multi_volume(module, array, single=False):
@@ -954,7 +950,6 @@ def create_multi_volume(module, array, single=False):
def copy_from_volume(module, array):
"""Create Volume Clone"""
- volfact = []
changed = False
tgt = get_target(module, array)
api_version = array._list_available_rest_versions()
@@ -1000,23 +995,9 @@ def copy_from_volume(module, array):
res.errors[0].message,
)
)
- vol_data = list(res.items)
- volfact = {
- "size": vol_data[0].provisioned,
- "serial": vol_data[0].serial,
- "created": time.strftime(
- "%Y-%m-%d %H:%M:%S", time.localtime(vol_data[0].created / 1000)
- ),
- "page83_naa": PURE_OUI + vol_data[0].serial.lower(),
- "nvme_nguid": _create_nguid(vol_data[0].serial.lower()),
- }
else:
try:
- volfact = array.copy_volume(
- module.params["name"], module.params["target"]
- )
- volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
- volfact["nvme_nguid"] = _create_nguid(volfact["serial"].lower())
+ array.copy_volume(module.params["name"], module.params["target"])
changed = True
except Exception:
module.fail_json(
@@ -1029,13 +1010,11 @@ def copy_from_volume(module, array):
if not module.check_mode:
if DEFAULT_API_VERSION not in api_version:
try:
- volfact = array.copy_volume(
+ array.copy_volume(
module.params["name"],
module.params["target"],
overwrite=module.params["overwrite"],
)
- volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
- volfact["nvme_nguid"] = _create_nguid(volfact["serial"].lower())
changed = True
except Exception:
module.fail_json(
@@ -1059,23 +1038,12 @@ def copy_from_volume(module, array):
res.errors[0].message,
)
)
- vol_data = list(res.items)
- volfact = {
- "size": vol_data[0].provisioned,
- "serial": vol_data[0].serial,
- "created": time.strftime(
- "%Y-%m-%d %H:%M:%S", time.localtime(vol_data[0].created / 1000)
- ),
- "page83_naa": PURE_OUI + vol_data[0].serial.lower(),
- "nvme_nguid": _create_nguid(vol_data[0].serial.lower()),
- }
- module.exit_json(changed=changed, volume=volfact)
+ module.exit_json(changed=changed, volume=_volfact(module, arrayv6))
def update_volume(module, array):
"""Update Volume size and/or QoS"""
- volfact = {}
changed = False
arrayv6 = None
api_version = array._list_available_rest_versions()
@@ -1102,7 +1070,7 @@ def update_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.extend_volume(
+ array.extend_volume(
module.params["name"], module.params["size"]
)
except Exception:
@@ -1119,9 +1087,7 @@ def update_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.set_volume(
- module.params["name"], bandwidth_limit=""
- )
+ array.set_volume(module.params["name"], bandwidth_limit="")
except Exception:
module.fail_json(
msg="Volume {0} Bandwidth QoS removal failed.".format(
@@ -1134,7 +1100,7 @@ def update_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.set_volume(
+ array.set_volume(
module.params["name"],
bandwidth_limit=module.params["bw_qos"],
)
@@ -1156,7 +1122,7 @@ def update_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.set_volume(module.params["name"], iops_limit="")
+ array.set_volume(module.params["name"], iops_limit="")
except Exception:
module.fail_json(
msg="Volume {0} IOPs QoS removal failed.".format(
@@ -1167,7 +1133,7 @@ def update_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.set_volume(
+ array.set_volume(
module.params["name"], iops_limit=module.params["iops_qos"]
)
except Exception:
@@ -1199,10 +1165,6 @@ def update_volume(module, array):
module.params["name"]
)
)
- else:
- if not volfact:
- volfact = array.get_volume(module.params["name"])
- volfact["promotion_status"] = module.params["promotion_state"]
if PRIORITY_API_VERSION in api_version and module.params["priority_operator"]:
volv6 = list(arrayv6.get_volumes(names=[module.params["name"]]).items)[0]
change_prio = False
@@ -1246,33 +1208,13 @@ def update_volume(module, array):
module.params["name"], prio_res.errors[0].message
)
)
- else:
- if not volfact:
- volfact = array.get_volume(module.params["name"])
- volfact["priority_operator"] = module.params["priority_operator"]
- volfact["priority_value"] = module.params["priority_value"]
- if MULTI_VOLUME_VERSION in api_version:
- volume_data = list(arrayv6.get_volumes(names=[module.params["name"]]).items)[0]
- updatefacts = {
- "name": volume_data.name,
- "size": volume_data.provisioned,
- "serial": volume_data.serial,
- "created": time.strftime(
- "%Y-%m-%d %H:%M:%S", time.localtime(volume_data.created / 1000)
- ),
- "page83_naa": PURE_OUI + volume_data.serial.lower(),
- "nvme_nguid": _create_nguid(volume_data.serial.lower()),
- }
- else:
- updatefacts = array.get_volume(module.params["name"])
- vol_fact = {**volfact, **updatefacts}
- module.exit_json(changed=changed, volume=vol_fact)
+ module.exit_json(changed=changed, volume=_volfact(module, arrayv6))
def rename_volume(module, array):
"""Rename volume within a container, ie pod, vgroup or local array"""
- volfact = []
changed = False
+ arrayv6 = get_array(module)
pod_name = ""
vgroup_name = ""
target_name = module.params["rename"]
@@ -1314,9 +1256,7 @@ def rename_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.rename_volume(
- module.params["name"], module.params["rename"]
- )
+ array.rename_volume(module.params["name"], module.params["rename"])
except Exception:
module.fail_json(
msg="Rename volume {0} to {1} failed.".format(
@@ -1326,12 +1266,11 @@ def rename_volume(module, array):
else:
module.fail_json(msg="Target volume {0} already exists.".format(target_name))
- module.exit_json(changed=changed, volume=volfact)
+ module.exit_json(changed=changed, volume=_volfact(module, arrayv6))
def move_volume(module, array):
"""Move volume between pods, vgroups or local array"""
- volfact = []
changed = vgroup_exists = target_exists = pod_exists = False
api_version = array._list_available_rest_versions()
pod_name = ""
@@ -1428,7 +1367,7 @@ def move_volume(module, array):
changed = True
if not module.check_mode:
try:
- volfact = array.move_volume(module.params["name"], target_location)
+ array.move_volume(module.params["name"], target_location)
except Exception:
if target_location == "":
target_location = "[local]"
@@ -1437,28 +1376,30 @@ def move_volume(module, array):
module.params["name"], target_location
)
)
- module.exit_json(changed=changed, volume=volfact)
+ arrayv6 = get_array(module)
+ module.exit_json(changed=changed, volume=_volfact(module, arrayv6))
def delete_volume(module, array):
"""Delete Volume"""
changed = True
- volfact = []
if not module.check_mode:
try:
array.destroy_volume(module.params["name"])
if module.params["eradicate"]:
try:
- volfact = array.eradicate_volume(module.params["name"])
+ array.eradicate_volume(module.params["name"])
except Exception:
module.fail_json(
msg="Eradicate volume {0} failed.".format(module.params["name"])
)
+ module.exit_json(changed=changed, volume=[])
except Exception:
module.fail_json(
msg="Delete volume {0} failed.".format(module.params["name"])
)
- module.exit_json(changed=changed, volume=volfact)
+ arrayv6 = get_array(module)
+ module.exit_json(changed=changed, volume=_volfact(module, arrayv6))
def eradicate_volume(module, array):
@@ -1479,7 +1420,6 @@ def eradicate_volume(module, array):
def recover_volume(module, array):
"""Recover Deleted Volume"""
changed = True
- volfact = []
if not module.check_mode:
try:
array.recover_volume(module.params["name"])
@@ -1487,10 +1427,9 @@ def recover_volume(module, array):
module.fail_json(
msg="Recovery of volume {0} failed".format(module.params["name"])
)
- volfact = array.get_volume(module.params["name"])
- volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
- volfact["nvme_nguid"] = _create_nguid(volfact["serial"].lower())
- module.exit_json(changed=changed, volume=volfact)
+ array.get_volume(module.params["name"])
+ arrayv6 = get_array(module)
+ module.exit_json(changed=changed, volume=_volfact(module, arrayv6))
def main():