summaryrefslogtreecommitdiffstats
path: root/ansible_collections/purestorage/flasharray/plugins
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 12:04:41 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 12:04:41 +0000
commit975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
tree89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/purestorage/flasharray/plugins
parentInitial commit. (diff)
downloadansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.tar.xz
ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.zip
Adding upstream version 7.7.0+dfsg.upstream/7.7.0+dfsg
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/purestorage/flasharray/plugins')
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py46
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py137
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py323
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py180
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py208
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py250
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py103
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py125
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py524
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py238
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py107
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py328
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py234
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py474
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py349
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py609
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py200
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py347
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py117
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py117
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py251
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py367
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py433
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py1085
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py2286
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py368
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py251
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py166
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_maintenance.py133
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py198
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py437
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py151
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py443
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py909
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py527
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py481
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py106
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py664
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py279
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py1606
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py131
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py121
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py340
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py132
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py161
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py640
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py425
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py267
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py119
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py327
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py218
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py171
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py116
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py225
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py278
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py685
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py267
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py161
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py1726
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py295
60 files changed, 22962 insertions, 0 deletions
diff --git a/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py b/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py
new file mode 100644
index 000000000..7c19925e6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley <simon@purestorage.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard Pure Storage documentation fragment
+ DOCUMENTATION = r"""
+options:
+ - See separate platform section for more details
+requirements:
+ - See separate platform section for more details
+notes:
+ - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade
+"""
+
+ # Documentation fragment for FlashArray
+ FA = r"""
+options:
+ fa_url:
+ description:
+ - FlashArray management IPv4 address or Hostname.
+ type: str
+ api_token:
+ description:
+ - FlashArray API token for admin privileged user.
+ type: str
+notes:
+ - This module requires the C(purestorage) and C(py-pure-client) Python libraries
+ - Additional Python libraries may be required for specific modules.
+ - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables
+ if I(fa_url) and I(api_token) arguments are not passed to the module directly
+requirements:
+ - python >= 3.3
+ - purestorage >= 1.19
+ - py-pure-client >= 1.26.0
+ - netaddr
+ - requests
+ - pycountry
+ - packaging
+"""
diff --git a/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py b/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py
new file mode 100644
index 000000000..b85ce0e29
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+HAS_PURESTORAGE = True
+try:
+ from purestorage import purestorage
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+from os import environ
+import platform
+
+VERSION = 1.4
+USER_AGENT_BASE = "Ansible"
+
+
+def get_system(module):
+ """Return System Object or Fail"""
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
+ array_name = module.params["fa_url"]
+ api = module.params["api_token"]
+ if HAS_PURESTORAGE:
+ if array_name and api:
+ system = purestorage.FlashArray(
+ array_name, api_token=api, user_agent=user_agent, verify_https=False
+ )
+ elif environ.get("PUREFA_URL") and environ.get("PUREFA_API"):
+ system = purestorage.FlashArray(
+ environ.get("PUREFA_URL"),
+ api_token=(environ.get("PUREFA_API")),
+ user_agent=user_agent,
+ verify_https=False,
+ )
+ else:
+ module.fail_json(
+ msg="You must set PUREFA_URL and PUREFA_API environment variables "
+ "or the fa_url and api_token module arguments"
+ )
+ try:
+ system.get()
+ except Exception:
+ module.fail_json(
+ msg="Pure Storage FlashArray authentication failed. Check your credentials"
+ )
+ else:
+ module.fail_json(msg="purestorage SDK is not installed.")
+ return system
+
+
+def get_array(module):
+ """Return System Object or Fail"""
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
+ array_name = module.params["fa_url"]
+ api = module.params["api_token"]
+ if HAS_PYPURECLIENT:
+ if array_name and api:
+ system = flasharray.Client(
+ target=array_name,
+ api_token=api,
+ user_agent=user_agent,
+ )
+ elif environ.get("PUREFA_URL") and environ.get("PUREFA_API"):
+ system = flasharray.Client(
+ target=(environ.get("PUREFA_URL")),
+ api_token=(environ.get("PUREFA_API")),
+ user_agent=user_agent,
+ )
+ else:
+ module.fail_json(
+ msg="You must set PUREFA_URL and PUREFA_API environment variables "
+ "or the fa_url and api_token module arguments"
+ )
+ try:
+ system.get_hardware()
+ except Exception:
+ module.fail_json(
+ msg="Pure Storage FlashArray authentication failed. Check your credentials"
+ )
+ else:
+ module.fail_json(msg="py-pure-client and/or requests are not installed.")
+ return system
+
+
+def purefa_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return dict(
+ fa_url=dict(),
+ api_token=dict(no_log=True),
+ )
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py
new file mode 100644
index 000000000..d9eee96ac
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py
@@ -0,0 +1,323 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_ad
+version_added: '1.9.0'
+short_description: Manage FlashArray Active Directory Account
+description:
+- Add or delete FlashArray Active Directory Account
+- FlashArray allows the creation of one AD computer account, or joining of an
+ existing AD computer account.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the AD account
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the AD account is deleted or not
+ default: present
+ choices: [ absent, present ]
+ type: str
+ computer:
+ description:
+ - The common name of the computer account to be created in the Active Directory domain.
+ - If not specified, defaults to the name of the Active Directory configuration.
+ type: str
+ domain:
+ description:
+ - The Active Directory domain to join
+ type: str
+ username:
+ description:
+ - A user capable of creating a computer account within the domain
+ type: str
+ password:
+ description:
+ - Password string for I(username)
+ type: str
+ directory_servers:
+ description:
+ - A list of directory servers that will be used for lookups related to user authorization
+ - Accepted server formats are IP address and DNS name
+ - All specified servers must be registered to the domain appropriately in the array
+ configured DNS and are only communicated with over the secure LDAP (LDAPS) protocol.
+ If not specified, servers are resolved for the domain in DNS
+ - The specified list can have a maximum length of 1, or 3 for Purity 6.1.6 or higher.
+ If more are provided only the first allowed count is used.
+ type: list
+ elements: str
+ kerberos_servers:
+ description:
+ - A list of key distribution servers to use for Kerberos protocol
+ - Accepted server formats are IP address and DNS name
+ - All specified servers must be registered to the domain appropriately in the array
+ configured DNS and are only communicated with over the secure LDAP (LDAPS) protocol.
+ If not specified, servers are resolved for the domain in DNS.
+ - The specified list can have a maximum length of 1, or 3 for Purity 6.1.6 or higher.
+ If more are provided only the first allowed count is used.
+ type: list
+ elements: str
+ local_only:
+ description:
+ - Do a local-only delete of an active directory account
+ type: bool
+ default: false
+ join_ou:
+ description:
+ - Distinguished name of organization unit in which the computer account
+ should be created when joining the domain. e.g. OU=Arrays,OU=Storage.
+ - The B(DC=...) components can be omitted.
+ - If left empty, defaults to B(CN=Computers).
+ - Requires Purity//FA 6.1.8 or higher
+ type: str
+ version_added: '1.10.0'
+ tls:
+ description:
+ - TLS mode for communication with domain controllers.
+ type: str
+ choices: [ required, optional ]
+ default: required
+ version_added: '1.14.0'
+ join_existing:
+ description:
+ - If specified as I(true), the domain is searched for a pre-existing
+ computer account to join to, and no new account will be created within the domain.
+ The C(username) specified when joining a pre-existing account must have
+ permissions to 'read all properties from' and 'reset the password of'
+ the pre-existing account. C(join_ou) will be read from the pre-existing
+ account and cannot be specified when joining to an existing account
+ type: bool
+ default: false
+ version_added: '1.14.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new AD account
+ purestorage.flasharray.purefa_ad:
+ name: ad_account
+ computer: FLASHARRAY
+ domain: acme.com
+ join_ou: "OU=Acme,OU=Dev"
+ username: Administrator
+ password: Password
+ kerberos_servers:
+ - kdc.acme.com
+ directory_servers:
+ - ldap.acme.com
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete AD account locally
+ purestorage.flasharray.purefa_ad:
+ name: ad_account
+ local_only: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Fully delete AD account. Note that correct AD permissions are required
+ purestorage.flasharray.purefa_ad:
+ name: ad_account
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import ActiveDirectoryPost, ActiveDirectoryPatch
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+SERVER_API_VERSION = "2.6"
+MIN_JOIN_OU_API_VERSION = "2.8"
+MIN_TLS_API_VERSION = "2.15"
+
+
+def delete_account(module, array):
+ """Delete Active directory Account"""
+ changed = True
+ if not module.check_mode:
+ res = array.delete_active_directory(
+ names=[module.params["name"]], local_only=module.params["local_only"]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete AD Account {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_account(module, array):
+ """Update existing AD account"""
+ changed = False
+ current_acc = list(array.get_active_directory(names=[module.params["name"]]).items)[
+ 0
+ ]
+ if current_acc.tls != module.params["tls"]:
+ changed = True
+ if not module.check_mode:
+ res = array.patch_active_directory(
+ names=[module.params["name"]],
+ active_directory=ActiveDirectoryPatch(tls=module.params["tls"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update AD Account {0} TLS setting. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_account(module, array, api_version):
+ """Create Active Directory Account"""
+ changed = True
+ if MIN_JOIN_OU_API_VERSION not in api_version:
+ ad_config = ActiveDirectoryPost(
+ computer_name=module.params["computer"],
+ directory_servers=module.params["directory_servers"],
+ kerberos_servers=module.params["kerberos_servers"],
+ domain=module.params["domain"],
+ user=module.params["username"],
+ password=module.params["password"],
+ )
+ elif MIN_TLS_API_VERSION in api_version:
+ ad_config = ActiveDirectoryPost(
+ computer_name=module.params["computer"],
+ directory_servers=module.params["directory_servers"],
+ kerberos_servers=module.params["kerberos_servers"],
+ domain=module.params["domain"],
+ user=module.params["username"],
+ join_ou=module.params["join_ou"],
+ password=module.params["password"],
+ tls=module.params["tls"],
+ )
+ else:
+ ad_config = ActiveDirectoryPost(
+ computer_name=module.params["computer"],
+ directory_servers=module.params["directory_servers"],
+ kerberos_servers=module.params["kerberos_servers"],
+ domain=module.params["domain"],
+ user=module.params["username"],
+ join_ou=module.params["join_ou"],
+ password=module.params["password"],
+ )
+ if not module.check_mode:
+ if MIN_TLS_API_VERSION in api_version:
+ res = array.post_active_directory(
+ names=[module.params["name"]],
+ join_existing_account=module.params["join_existing"],
+ active_directory=ad_config,
+ )
+ else:
+ res = array.post_active_directory(
+ names=[module.params["name"]],
+ active_directory=ad_config,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add Active Directory Account {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ username=dict(type="str"),
+ password=dict(type="str", no_log=True),
+ name=dict(type="str", required=True),
+ computer=dict(type="str"),
+ local_only=dict(type="bool", default=False),
+ domain=dict(type="str"),
+ join_ou=dict(type="str"),
+ directory_servers=dict(type="list", elements="str"),
+ kerberos_servers=dict(type="list", elements="str"),
+ tls=dict(type="str", default="required", choices=["required", "optional"]),
+ join_existing=dict(type="bool", default=False),
+ )
+ )
+
+ required_if = [["state", "present", ["username", "password", "domain"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ state = module.params["state"]
+ array = get_array(module)
+ exists = bool(
+ array.get_active_directory(names=[module.params["name"]]).status_code == 200
+ )
+
+ if not module.params["computer"]:
+ module.params["computer"] = module.params["name"].replace("_", "-")
+ if module.params["kerberos_servers"]:
+ if SERVER_API_VERSION in api_version:
+ module.params["kerberos_servers"] = module.params["kerberos_servers"][0:3]
+ else:
+ module.params["kerberos_servers"] = module.params["kerberos_servers"][0:1]
+ if module.params["directory_servers"]:
+ if SERVER_API_VERSION in api_version:
+ module.params["directory_servers"] = module.params["directory_servers"][0:3]
+ else:
+ module.params["directory_servers"] = module.params["directory_servers"][0:1]
+ if not exists and state == "present":
+ create_account(module, array, api_version)
+ elif exists and state == "present" and MIN_TLS_API_VERSION in api_version:
+ update_account(module, array)
+ elif exists and state == "absent":
+ delete_account(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py
new file mode 100644
index 000000000..becb86893
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_admin
+version_added: '1.12.0'
+short_description: Configure Pure Storage FlashArray Global Admin settings
+description:
+- Set global admin settings for the FlashArray
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ sso:
+ description:
+ - Enable or disable the array Single Sign-On from Pure1 Manage
+ default: false
+ type: bool
+ max_login:
+ description:
+ - Maximum number of failed logins before account is locked
+ type: int
+ min_password:
+ description:
+ - Minimum user password length
+ default: 1
+ type: int
+ lockout:
+ description:
+ - Account lockout duration, in seconds, after max_login exceeded
+ - Range between 1 second and 90 days (7776000 seconds)
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Set global login parameters
+ purestorage.flasharray.purefa_admin:
+ sso: false
+ max_login: 5
+ min_password: 10
+ lockout: 300
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import AdminSettings
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_API_VERSION = "2.2"
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ sso=dict(type="bool", default=False),
+ max_login=dict(type="int"),
+ min_password=dict(type="int", default=1, no_log=False),
+ lockout=dict(type="int"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ if module.params["lockout"] and not 1 <= module.params["lockout"] <= 7776000:
+ module.fail_json(msg="Lockout must be between 1 and 7776000 seconds")
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ changed = False
+ if MIN_API_VERSION in api_version:
+ array = get_array(module)
+ current_settings = list(array.get_admins_settings().items)[0]
+ if (
+ module.params["sso"]
+ and module.params["sso"] != current_settings.single_sign_on_enabled
+ ):
+ changed = True
+ sso = module.params["sso"]
+ else:
+ sso = current_settings.single_sign_on_enabled
+ if (
+ module.params["min_password"]
+ and module.params["min_password"] != current_settings.min_password_length
+ ):
+ changed = True
+ min_password = module.params["min_password"]
+ else:
+ min_password = current_settings.min_password_length
+ lockout = getattr(current_settings, "lockout_duration", None)
+ if (
+ lockout
+ and module.params["lockout"]
+ and lockout != module.params["lockout"] * 1000
+ ):
+ changed = True
+ lockout = module.params["lockout"] * 1000
+ elif not lockout and module.params["lockout"]:
+ changed = True
+ lockout = module.params["lockout"] * 1000
+ max_login = getattr(current_settings, "max_login_attempts", None)
+ if (
+ max_login
+ and module.params["max_login"]
+ and max_login != module.params["max_login"]
+ ):
+ changed = True
+ max_login = module.params["max_login"]
+ elif not max_login and module.params["max_login"]:
+ changed = True
+ max_login = module.params["max_login"]
+ if changed and not module.check_mode:
+ if max_login:
+ admin = AdminSettings(
+ single_sign_on_enabled=sso,
+ min_password_length=min_password,
+ max_login_attempts=max_login,
+ )
+ if lockout:
+ admin = AdminSettings(
+ single_sign_on_enabled=sso,
+ min_password_length=min_password,
+ lockout_duration=lockout,
+ )
+ if lockout and max_login:
+ admin = AdminSettings(
+ single_sign_on_enabled=sso,
+ min_password_length=min_password,
+ lockout_duration=lockout,
+ max_login_attempts=max_login,
+ )
+ if not lockout and not max_login:
+ admin = AdminSettings(
+ single_sign_on_enabled=sso,
+ min_password_length=min_password,
+ )
+ res = array.patch_admins_settings(admin_settings=admin)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change Global Admin settings. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(msg="Purity version does not support Global Admin settings")
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py
new file mode 100644
index 000000000..1220ed560
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_alert
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray alert email settings
+description:
+- Configure alert email configuration for Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ type: str
+ description:
+ - Create or delete alert email
+ default: present
+ choices: [ absent, present ]
+ address:
+ type: str
+ description:
+ - Email address (valid format required)
+ required: true
+ enabled:
+ type: bool
+ default: true
+ description:
+ - Set specified email address to be enabled or disabled
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Add new email recipient and enable, or enable existing email
+ purestorage.flasharray.purefa_alert:
+ address: "user@domain.com"
+ enabled: true
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Delete existing email recipient
+ purestorage.flasharray.purefa_alert:
+ state: absent
+ address: "user@domain.com"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def create_alert(module, array):
+ """Create Alert Email"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ try:
+ array.create_alert_recipient(module.params["address"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to create alert email: {0}".format(module.params["address"])
+ )
+
+ if not module.params["enabled"]:
+ try:
+ array.disable_alert_recipient(module.params["address"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to create alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def enable_alert(module, array):
+ """Enable Alert Email"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ try:
+ array.enable_alert_recipient(module.params["address"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to enable alert email: {0}".format(module.params["address"])
+ )
+
+ module.exit_json(changed=changed)
+
+
+def disable_alert(module, array):
+ """Disable Alert Email"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ try:
+ array.disable_alert_recipient(module.params["address"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to disable alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def delete_alert(module, array):
+ """Delete Alert Email"""
+ changed = True
+ if module.params["address"] == "flasharray-alerts@purestorage.com":
+ module.fail_json(
+ msg="Built-in address {0} cannot be deleted.".format(
+ module.params["address"]
+ )
+ )
+ if not module.check_mode:
+ changed = False
+ try:
+ array.delete_alert_recipient(module.params["address"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete alert email: {0}".format(module.params["address"])
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(type="str", required=True),
+ enabled=dict(type="bool", default=True),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ pattern = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$")
+ if not pattern.match(module.params["address"]):
+ module.fail_json(msg="Valid email address not provided.")
+
+ array = get_system(module)
+
+ exists = False
+ try:
+ emails = array.list_alert_recipients()
+ except Exception:
+ module.fail_json(msg="Failed to get exisitng email list")
+ for email in range(0, len(emails)):
+ if emails[email]["name"] == module.params["address"]:
+ exists = True
+ enabled = emails[email]["enabled"]
+ break
+ if module.params["state"] == "present" and not exists:
+ create_alert(module, array)
+ elif (
+ module.params["state"] == "present"
+ and exists
+ and not enabled
+ and module.params["enabled"]
+ ):
+ enable_alert(module, array)
+ elif (
+ module.params["state"] == "present"
+ and exists
+ and enabled
+ and not module.params["enabled"]
+ ):
+ disable_alert(module, array)
+ elif module.params["state"] == "absent" and exists:
+ delete_alert(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py
new file mode 100644
index 000000000..12970dddb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_apiclient
+version_added: '1.5.0'
+short_description: Manage FlashArray API Clients
+description:
+- Enable or disable FlashArray API Clients
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the API Client
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the API client should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ role:
+ description:
+ - The maximum role allowed for ID Tokens issued by this API client
+ type: str
+ choices: [readonly, ops_admin, storage_admin, array_admin]
+ issuer:
+ description:
+ - The name of the identity provider that will be issuing ID Tokens for this API client
+ - If not specified, defaults to the API client name, I(name).
+ type: str
+ public_key:
+ description:
+ - The API clients PEM formatted (Base64 encoded) RSA public key.
+ - Include the I(—–BEGIN PUBLIC KEY—–) and I(—–END PUBLIC KEY—–) lines
+ type: str
+ token_ttl:
+ description:
+ - Time To Live length in seconds for the exchanged access token
+ - Range is 1 second to 1 day (86400 seconds)
+ type: int
+ default: 86400
+ enabled:
+ description:
+ - State of the API Client Key
+ type: bool
+ default: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create API token ansible-token
+ purestorage.flasharray.purefa_apiclient:
+ name: ansible-token
+ issuer: "Pure Storage"
+ ttl: 3000
+ role: array_admin
+ public_key: "{{lookup('file', 'public_pem_file') }}"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable API CLient
+ purestorage.flasharray.purefa_apiclient:
+ name: ansible-token
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Enable API CLient
+ purestorage.flasharray.purefa_apiclient:
+ name: ansible-token
+ enabled: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete API Client
+ purestorage.flasharray.purefa_apiclient:
+ state: absent
+ name: ansible-token
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.1"
+
+
+def delete_client(module, array):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.delete_api_clients(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete API Client {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_client(module, array, client):
+ """Update API Client"""
+ changed = False
+ if client.enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.patch_api_clients(
+ names=[module.params["name"]],
+ api_clients=flasharray.ApiClientPatch(
+ enabled=module.params["enabled"]
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update API Client {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_client(module, array):
+ """Create API Client"""
+ changed = True
+ if not 1 <= module.params["token_ttl"] <= 86400:
+ module.fail_json(msg="token_ttl parameter is out of range (1 to 86400)")
+ else:
+ token_ttl = module.params["token_ttl"] * 1000
+ if not module.params["issuer"]:
+ module.params["issuer"] = module.params["name"]
+ try:
+ client = flasharray.ApiClientPost(
+ max_role=module.params["role"],
+ issuer=module.params["issuer"],
+ access_token_ttl_in_ms=token_ttl,
+ public_key=module.params["public_key"],
+ )
+ if not module.check_mode:
+ res = array.post_api_clients(
+ names=[module.params["name"]], api_clients=client
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create API CLient {0}. Error message: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if module.params["enabled"]:
+ try:
+ array.patch_api_clients(
+ names=[module.params["name"]],
+ api_clients=flasharray.ApiClientPatch(
+ enabled=module.params["enabled"]
+ ),
+ )
+ except Exception:
+ array.delete_api_clients(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to create API Client {0}".format(
+ module.params["name"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create API Client {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+    """Module entry point: dispatch to create/update/delete of an API client."""
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(
+        dict(
+            state=dict(type="str", default="present", choices=["absent", "present"]),
+            enabled=dict(type="bool", default=True),
+            name=dict(type="str", required=True),
+            role=dict(
+                type="str",
+                choices=["readonly", "ops_admin", "storage_admin", "array_admin"],
+            ),
+            public_key=dict(type="str", no_log=True),
+            token_ttl=dict(type="int", default=86400, no_log=False),
+            issuer=dict(type="str"),
+        )
+    )
+
+    module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    if not HAS_PURESTORAGE:
+        module.fail_json(msg="py-pure-client sdk is required for this module")
+
+    # First connection is only used to verify the REST API version.
+    array = get_system(module)
+    api_version = array._list_available_rest_versions()
+
+    if MIN_REQUIRED_API_VERSION not in api_version:
+        module.fail_json(
+            msg="FlashArray REST version not supported. "
+            "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+        )
+    # Re-acquire the connection via get_array() for all subsequent calls.
+    array = get_array(module)
+    state = module.params["state"]
+
+    # Presence check: a failed lookup means the client does not exist yet.
+    try:
+        client = list(array.get_api_clients(names=[module.params["name"]]).items)[0]
+        exists = True
+    except Exception:
+        exists = False
+
+    if not exists and state == "present":
+        create_client(module, array)
+    elif exists and state == "present":
+        update_client(module, array, client)
+    elif exists and state == "absent":
+        delete_client(module, array)
+
+    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py
new file mode 100644
index 000000000..cf5202c6f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_arrayname
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray array name
+description:
+- Configure name of array for Pure Storage FlashArrays.
+- Ideal for Day 0 initial configuration.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set the array name
+ type: str
+ default: present
+ choices: [ present ]
+ name:
+ description:
+ - Name of the array. Must conform to correct naming schema.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Set new array name
+ purestorage.flasharray.purefa_arrayname:
+ name: new-array-name
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def update_name(module, array):
+    """Change array name"""
+    changed = True
+    if not module.check_mode:
+        try:
+            # Rename the array via the 1.x SDK set() call.
+            array.set(name=module.params["name"])
+        except Exception:
+            module.fail_json(
+                msg="Failed to change array name to {0}".format(module.params["name"])
+            )
+
+    module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ array = get_system(module)
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,54}[a-zA-Z0-9])?$")
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="Array name {0} does not conform to array name rules".format(
+ module.params["name"]
+ )
+ )
+ if module.params["name"] != array.get()["array_name"]:
+ update_name(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py
new file mode 100644
index 000000000..bd7a367a5
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_banner
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray GUI and SSH MOTD message
+description:
+- Configure MOTD for Pure Storage FlashArrays.
+- This will be shown during an SSH or GUI login to the array.
+- Multiple line messages can be achieved using \\n.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set ot delete the MOTD
+ default: present
+ type: str
+ choices: [ present, absent ]
+ banner:
+ description:
+ - Banner text, or MOTD, to use
+ type: str
+ default: "Welcome to the machine..."
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Set new banner text
+ purestorage.flasharray.purefa_banner:
+ banner: "Banner over\ntwo lines"
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete banner text
+ purestorage.flasharray.purefa_banner:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def set_banner(module, array):
+ """Set MOTD banner text"""
+ changed = True
+ if not module.params["banner"]:
+ module.fail_json(msg="Invalid MOTD banner given")
+ if not module.check_mode:
+ try:
+ array.set(banner=module.params["banner"])
+ except Exception:
+ module.fail_json(msg="Failed to set MOTD banner text")
+
+ module.exit_json(changed=changed)
+
+
+def delete_banner(module, array):
+ """Delete MOTD banner text"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set(banner="")
+ except Exception:
+ module.fail_json(msg="Failed to delete current MOTD banner text")
+ module.exit_json(changed=changed)
+
+
+def main():
+    """Module entry point: set or clear the array MOTD banner."""
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(
+        dict(
+            banner=dict(type="str", default="Welcome to the machine..."),
+            state=dict(type="str", default="present", choices=["present", "absent"]),
+        )
+    )
+
+    # A banner value must be supplied (or defaulted) when state=present.
+    required_if = [("state", "present", ["banner"])]
+
+    module = AnsibleModule(
+        argument_spec, required_if=required_if, supports_check_mode=True
+    )
+
+    state = module.params["state"]
+    array = get_system(module)
+    current_banner = array.get(banner=True)["banner"]
+    # set banner if empty value or value differs
+    if state == "present" and (
+        not current_banner or current_banner != module.params["banner"]
+    ):
+        set_banner(module, array)
+    # clear banner if it has a value
+    elif state == "absent" and current_banner:
+        delete_banner(module, array)
+
+    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py
new file mode 100644
index 000000000..33ffb60cc
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_certs
+version_added: '1.8.0'
+short_description: Manage FlashArray SSL Certificates
+description:
+- Create, delete, import and export FlashArray SSL Certificates
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the SSL Certificate
+ type: str
+ default: management
+ state:
+ description:
+ - Action for the module to perform
+ - I(present) will create or re-create an SSL certificate
+ - I(absent) will delete an existing SSL certificate
+ - I(sign) will construct a Certificate Signing request (CSR)
+ - I(export) will export the exisitng SSL certificate
+ - I(import) will import a CA provided certificate.
+ default: present
+ choices: [ absent, present, import, export, sign ]
+ type: str
+ country:
+ type: str
+ description:
+ - The two-letter ISO code for the country where your organization is located
+ province:
+ type: str
+ description:
+ - The full name of the state or province where your organization is located
+ locality:
+ type: str
+ description:
+ - The full name of the city where your organization is located
+ organization:
+ type: str
+ description:
+ - The full and exact legal name of your organization.
+ - The organization name should not be abbreviated and should
+ include suffixes such as Inc, Corp, or LLC.
+ org_unit:
+ type: str
+ description:
+ - The department within your organization that is managing the certificate
+ common_name:
+ type: str
+ description:
+ - The fully qualified domain name (FQDN) of the current array
+ - For example, the common name for https://purearray.example.com is
+ purearray.example.com, or *.example.com for a wildcard certificate
+ - This can also be the management IP address of the array or the
+ shortname of the current array.
+ - Maximum of 64 characters
+ - If not provided this will default to the shortname of the array
+ email:
+ type: str
+ description:
+ - The email address used to contact your organization
+ key_size:
+ type: int
+ description:
+ - The key size in bits if you generate a new private key
+ default: 2048
+ choices: [ 1024, 2048, 4096 ]
+ days:
+ default: 3650
+ type: int
+ description:
+ - The number of valid days for the self-signed certificate being generated
+ - If not specified, the self-signed certificate expires after 3650 days.
+ generate:
+ default: false
+ type: bool
+ description:
+ - Generate a new private key.
+ - If not selected, the certificate will use the existing key
+ certificate:
+ type: str
+ description:
+ - Required for I(import)
+ - A valid signed certicate in PEM format (Base64 encoded)
+ - Includes the "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----" lines
+ - Does not exceed 3000 characters in length
+ intermeadiate_cert:
+ type: str
+ description:
+ - Intermeadiate certificate provided by the CA
+ key:
+ type: str
+ description:
+ - If the Certificate Signed Request (CSR) was not constructed on the array
+ or the private key has changed since construction the CSR, provide
+ a new private key here
+ passphrase:
+ type: str
+ description:
+ - Passphrase if the private key is encrypted
+ export_file:
+ type: str
+ description:
+ - Name of file to contain Certificate Signing Request when `status sign`
+ - Name of file to export the current SSL Certificate when `status export`
+ - File will be overwritten if it already exists
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create SSL certifcate foo
+ purestorage.flasharray.purefa_certs:
+ name: foo
+ key_size: 4096
+ country: US
+ province: FL
+ locality: Miami
+ organization: "Acme Inc"
+ org_unit: "DevOps"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete SSL certificate foo
+ purestorage.flasharray.purefa_certs:
+ name: foo
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Request CSR
+ purestorage.flasharray.purefa_certs:
+ state: sign
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Regenerate key for SSL foo
+ purestorage.flasharray.purefa_certs:
+ generate: true
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Import SSL Cert foo and Private Key
+ purestorage.flasharray.purefa_certs:
+ state: import
+ name: foo
+ certificate: "{{lookup('file', 'example.crt') }}"
+ key: "{{lookup('file', 'example.key') }}"
+ passphrase: password
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PYCOUNTRY = True
+try:
+ import pycountry
+except ImportError:
+ HAS_PYCOUNTRY = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.4"
+
+
+def update_cert(module, array):
+ """Update existing SSL Certificate"""
+ changed = True
+ current_cert = list(array.get_certificates(names=[module.params["name"]]).items)[0]
+ try:
+ if module.params["common_name"] != current_cert.common_name:
+ module.params["common_name"] = current_cert.common_name
+ except AttributeError:
+ pass
+ try:
+ if module.params["country"] != current_cert.country:
+ module.params["country"] = current_cert.country
+ except AttributeError:
+ pass
+ try:
+ if module.params["email"] != current_cert.email:
+ module.params["email"] = current_cert.email
+ except AttributeError:
+ pass
+ try:
+ if module.params["key_size"] != current_cert.key_size:
+ module.params["key_size"] = current_cert.key_size
+ except AttributeError:
+ pass
+ try:
+ if module.params["locality"] != current_cert.locality:
+ module.params["locality"] = current_cert.locality
+ except AttributeError:
+ pass
+ try:
+ if module.params["province"] != current_cert.state:
+ module.params["province"] = current_cert.state
+ except AttributeError:
+ pass
+ try:
+ if module.params["organization"] != current_cert.organization:
+ module.params["organization"] = current_cert.organization
+ except AttributeError:
+ pass
+ try:
+ if module.params["org_unit"] != current_cert.organizational_unit:
+ module.params["org_unit"] = current_cert.organizational_unit
+ except AttributeError:
+ pass
+ certificate = flasharray.CertificatePost(
+ common_name=module.params["common_name"],
+ country=module.params["country"],
+ email=module.params["email"],
+ key_size=module.params["key_size"],
+ locality=module.params["locality"],
+ organization=module.params["organization"],
+ organizational_unit=module.params["org_unit"],
+ state=module.params["province"],
+ days=module.params["days"],
+ )
+ if not module.check_mode:
+ res = array.patch_certificates(
+ names=[module.params["name"]],
+ certificate=certificate,
+ generate_new_key=module.params["generate"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Updating existing SSL certificate {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def create_cert(module, array):
+ changed = True
+ certificate = flasharray.CertificatePost(
+ common_name=module.params["common_name"],
+ country=module.params["country"],
+ email=module.params["email"],
+ key_size=module.params["key_size"],
+ locality=module.params["locality"],
+ organization=module.params["organization"],
+ organizational_unit=module.params["org_unit"],
+ state=module.params["province"],
+ status="self-signed",
+ days=module.params["days"],
+ )
+ if not module.check_mode:
+ res = array.post_certificates(
+ names=[module.params["name"]], certificate=certificate
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Creating SSL certificate {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def delete_cert(module, array):
+ changed = True
+ if module.params["name"] == "management":
+ module.fail_json(msg="management SSL cannot be deleted")
+ if not module.check_mode:
+ res = array.delete_certificates(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete {0} SSL certifcate. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def import_cert(module, array, reimport=False):
+ """Import a CA provided SSL certificate"""
+ changed = True
+ if len(module.params["certificate"]) > 3000:
+ module.fail_json(msg="Imported Certificate exceeds 3000 characters")
+ certificate = flasharray.CertificatePost(
+ certificate=module.params["certificate"],
+ intermediate_certificate=module.params["intermeadiate_cert"],
+ key=module.params["key"],
+ passphrase=module.params["passphrase"],
+ status="imported",
+ )
+ if not module.check_mode:
+ if reimport:
+ res = array.patch_certificates(
+ names=[module.params["name"]], certificate=certificate
+ )
+ else:
+ res = array.post_certificates(
+ names=[module.params["name"]], certificate=certificate
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Importing Certificate failed. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def export_cert(module, array):
+ """Export current SSL certificate"""
+ changed = True
+ if not module.check_mode:
+ ssl = array.get_certificates(names=[module.params["name"]])
+ if ssl.status_code != 200:
+ module.fail_json(
+ msg="Exporting Certificate failed. Error: {0}".format(
+ ssl.errors[0].message
+ )
+ )
+ ssl_file = open(module.params["export_file"], "w")
+ ssl_file.write(list(ssl.items)[0].certificate)
+ ssl_file.close()
+ module.exit_json(changed=changed)
+
+
+def create_csr(module, array):
+ """Construct a Certificate Signing Request
+
+ Output the result to a specified file
+ """
+ changed = True
+ current_attr = list(array.get_certificates(names=[module.params["name"]]).items)[0]
+ try:
+ if module.params["common_name"] != current_attr.common_name:
+ module.params["common_name"] = current_attr.common_name
+ except AttributeError:
+ pass
+ try:
+ if module.params["country"] != current_attr.country:
+ module.params["country"] = current_attr.country
+ except AttributeError:
+ pass
+ try:
+ if module.params["email"] != current_attr.email:
+ module.params["email"] = current_attr.email
+ except AttributeError:
+ pass
+ try:
+ if module.params["locality"] != current_attr.locality:
+ module.params["locality"] = current_attr.locality
+ except AttributeError:
+ pass
+ try:
+ if module.params["province"] != current_attr.state:
+ module.params["province"] = current_attr.state
+ except AttributeError:
+ pass
+ try:
+ if module.params["organization"] != current_attr.organization:
+ module.params["organization"] = current_attr.organization
+ except AttributeError:
+ pass
+ try:
+ if module.params["org_unit"] != current_attr.organization_unit:
+ module.params["org_unit"] = current_attr.organization_unit
+ except AttributeError:
+ pass
+ if not module.check_mode:
+ certificate = flasharray.CertificateSigningRequestPost(
+ certificate={"name": "management"},
+ common_name=module.params["common_name"],
+ country=module.params["country"],
+ email=module.params["email"],
+ locality=module.params["locality"],
+ state=module.params["province"],
+ organization=module.params["organization"],
+ organization_unit=module.params["org_unit"],
+ )
+ csr = list(
+ array.post_certificates_certificate_signing_requests(
+ certificate=certificate
+ ).items
+ )[0].certificate_signing_request
+ csr_file = open(module.params["export_file"], "w")
+ csr_file.write(csr)
+ csr_file.close()
+ module.exit_json(changed=changed)
+
+
+def main():
+    """Module entry point: dispatch on state to the certificate actions."""
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(
+        dict(
+            state=dict(
+                type="str",
+                default="present",
+                choices=["absent", "present", "import", "export", "sign"],
+            ),
+            generate=dict(type="bool", default=False),
+            name=dict(type="str", default="management"),
+            country=dict(type="str"),
+            province=dict(type="str"),
+            locality=dict(type="str"),
+            organization=dict(type="str"),
+            org_unit=dict(type="str"),
+            common_name=dict(type="str"),
+            email=dict(type="str"),
+            key_size=dict(type="int", default=2048, choices=[1024, 2048, 4096]),
+            certificate=dict(type="str", no_log=True),
+            intermeadiate_cert=dict(type="str", no_log=True),
+            key=dict(type="str", no_log=True),
+            export_file=dict(type="str"),
+            passphrase=dict(type="str", no_log=True),
+            days=dict(type="int", default=3650),
+        )
+    )
+
+    mutually_exclusive = [["certificate", "key_size"]]
+    required_if = [
+        ["state", "import", ["certificate"]],
+        ["state", "export", ["export_file"]],
+    ]
+
+    module = AnsibleModule(
+        argument_spec,
+        mutually_exclusive=mutually_exclusive,
+        required_if=required_if,
+        supports_check_mode=True,
+    )
+
+    if not HAS_PURESTORAGE:
+        module.fail_json(msg="py-pure-client sdk is required for this module")
+
+    if not HAS_PYCOUNTRY:
+        module.fail_json(msg="pycountry sdk is required for this module")
+
+    email_pattern = r"^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$"
+    # First connection is only used to verify the REST API version.
+    array = get_system(module)
+    api_version = array._list_available_rest_versions()
+
+    if MIN_REQUIRED_API_VERSION not in api_version:
+        module.fail_json(
+            msg="FlashArray REST version not supported. "
+            "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+        )
+
+    # Re-acquire the connection via get_array() for all subsequent calls.
+    array = get_array(module)
+    if module.params["email"]:
+        if not re.search(email_pattern, module.params["email"]):
+            module.fail_json(
+                msg="Email {0} is not valid".format(module.params["email"])
+            )
+    if module.params["country"]:
+        if len(module.params["country"]) != 2:
+            module.fail_json(msg="Country must be a two-letter country (ISO) code")
+        if not pycountry.countries.get(alpha_2=module.params["country"].upper()):
+            module.fail_json(
+                msg="Country code {0} is not an assigned ISO 3166-1 code".format(
+                    module.params["country"].upper()
+                )
+            )
+    state = module.params["state"]
+    if state in ["present", "sign"]:
+        # Default the common name to the array's own name, truncated to the
+        # 64-character limit documented for this parameter.
+        if not module.params["common_name"]:
+            module.params["common_name"] = list(array.get_arrays().items)[0].name
+        module.params["common_name"] = module.params["common_name"][:64]
+
+    # A 200 from the lookup means the named certificate already exists.
+    exists = bool(
+        array.get_certificates(names=[module.params["name"]]).status_code == 200
+    )
+
+    if not exists and state == "present":
+        create_cert(module, array)
+    elif exists and state == "present":
+        update_cert(module, array)
+    elif state == "sign":
+        create_csr(module, array)
+    elif not exists and state == "import":
+        import_cert(module, array)
+    elif exists and state == "import":
+        import_cert(module, array, reimport=True)
+    elif state == "export":
+        export_cert(module, array)
+    elif exists and state == "absent":
+        delete_cert(module, array)
+
+    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py
new file mode 100644
index 000000000..3148ea482
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_connect
+version_added: '1.0.0'
+short_description: Manage replication connections between two FlashArrays
+description:
+- Manage array connections to specified target array
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete array connection
+ default: present
+ type: str
+ choices: [ absent, present ]
+ target_url:
+ description:
+ - Management IP address of remote array.
+ type: str
+ required: true
+ target_api:
+ description:
+ - API token for target array
+ type: str
+ connection:
+ description:
+ - Type of connection between arrays.
+ type: str
+ choices: [ sync, async ]
+ default: async
+ transport:
+ description:
+ - Type of transport protocol to use for replication
+ type: str
+ choices: [ ip, fc ]
+ default: ip
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create an async connection to remote array
+ purestorage.flasharray.purefa_connect:
+ target_url: 10.10.10.20
+ target_api: 9c0b56bc-f941-f7a6-9f85-dcc3e9a8f7d6
+ connection: async
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Delete connection to remote array
+ purestorage.flasharray.purefa_connect:
+ state: absent
+ target_url: 10.10.10.20
+ target_api: 9c0b56bc-f941-f7a6-9f85-dcc3e9a8f7d6
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from purestorage import FlashArray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+import platform
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
+P53_API_VERSION = "1.17"
+FC_REPL_VERSION = "2.4"
+
+
+def _check_connected(module, array):
+ connected_arrays = array.list_array_connections()
+ api_version = array._list_available_rest_versions()
+ for target in range(0, len(connected_arrays)):
+ if P53_API_VERSION in api_version:
+ if (
+ connected_arrays[target]["management_address"]
+ == module.params["target_url"]
+ and "connected" in connected_arrays[target]["status"]
+ ):
+ return connected_arrays[target]
+ else:
+ if (
+ connected_arrays[target]["management_address"]
+ == module.params["target_url"]
+ and connected_arrays[target]["connected"]
+ ):
+ return connected_arrays[target]
+ return None
+
+
+def break_connection(module, array, target_array):
+ """Break connection between arrays"""
+ changed = True
+ source_array = array.get()["array_name"]
+ if target_array["management_address"] is None:
+ module.fail_json(
+ msg="disconnect can only happen from the array that formed the connection"
+ )
+ if not module.check_mode:
+ try:
+ array.disconnect_array(target_array["array_name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to disconnect {0} from {1}.".format(
+ target_array["array_name"], source_array
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_connection(module, array):
+    """Create connection between arrays"""
+    changed = True
+    remote_array = module.params["target_url"]
+    # Custom user-agent string identifying this Ansible module to the target.
+    user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+        "base": "Ansible",
+        "class": __name__,
+        "version": 1.2,
+        "platform": platform.platform(),
+    }
+    try:
+        # Log in to the remote array to fetch its connection key and
+        # real array name (used in the failure message below).
+        remote_system = FlashArray(
+            module.params["target_url"],
+            api_token=module.params["target_api"],
+            user_agent=user_agent,
+        )
+        connection_key = remote_system.get(connection_key=True)["connection_key"]
+        remote_array = remote_system.get()["array_name"]
+        api_version = array._list_available_rest_versions()
+        # TODO: Refactor when FC async is supported
+        if (
+            FC_REPL_VERSION in api_version
+            and module.params["transport"].lower() == "fc"
+        ):
+            # FC transport only supports synchronous replication.
+            if module.params["connection"].lower() == "async":
+                module.fail_json(
+                    msg="Asynchronous replication not supported using FC transport"
+                )
+            array_connection = flasharray.ArrayConnectionPost(
+                type="sync-replication",
+                management_address=module.params["target_url"],
+                replication_transport="fc",
+                connection_key=connection_key,
+            )
+            # FC connections are made through the pypureclient connection.
+            array = get_array(module)
+            if not module.check_mode:
+                res = array.post_array_connections(array_connection=array_connection)
+                if res.status_code != 200:
+                    module.fail_json(
+                        msg="Array Connection failed. Error: {0}".format(
+                            res.errors[0].message
+                        )
+                    )
+        else:
+            # IP transport (or an API too old for FC): use the 1.x connect call.
+            if not module.check_mode:
+                array.connect_array(
+                    module.params["target_url"],
+                    connection_key,
+                    [module.params["connection"]],
+                )
+    except Exception:
+        module.fail_json(
+            msg="Failed to connect to remote array {0}.".format(remote_array)
+        )
+    module.exit_json(changed=changed)
+
+
+def main():
+    """Module entry point: connect to or disconnect from the remote array."""
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(
+        dict(
+            state=dict(type="str", default="present", choices=["absent", "present"]),
+            connection=dict(type="str", default="async", choices=["async", "sync"]),
+            transport=dict(type="str", default="ip", choices=["ip", "fc"]),
+            target_url=dict(type="str", required=True),
+            target_api=dict(type="str"),
+        )
+    )
+
+    # The target API token is only needed when forming a connection.
+    required_if = [("state", "present", ["target_api"])]
+
+    module = AnsibleModule(
+        argument_spec, required_if=required_if, supports_check_mode=True
+    )
+
+    if not HAS_PURESTORAGE:
+        module.fail_json(msg="purestorage sdk is required for this module")
+
+    # pypureclient is only required for the FC replication transport path.
+    if module.params["transport"] == "fc" and not HAS_PYPURECLIENT:
+        module.fail_json(msg="pypureclient sdk is required for this module")
+
+    state = module.params["state"]
+    array = get_system(module)
+    target_array = _check_connected(module, array)
+
+    if state == "present" and target_array is None:
+        create_connection(module, array)
+    elif state == "absent" and target_array is not None:
+        break_connection(module, array, target_array)
+
+    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py
new file mode 100644
index 000000000..f3c4df429
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_console
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashArray Console Lock
+description:
+- Enable or Disable root lockout from the array at the physical console for a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of console lockout
+ - When set to I(enable) the console port is locked from root login.
+ type: str
+ default: disable
+ choices: [ enable, disable ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable Console Lockout
+ purestorage.flasharray.purefa_console:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable Console Lockout
+ purestorage.flasharray.purefa_console:
+ state: disable
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def enable_console(module, array):
+ """Enable Console Lockout"""
+ changed = False
+ if array.get_console_lock_status()["console_lock"] != "enabled":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.enable_console_lock()
+ except Exception:
+ module.fail_json(msg="Enabling Console Lock failed")
+ module.exit_json(changed=changed)
+
+
+def disable_console(module, array):
+ """Disable Console Lock"""
+ changed = False
+ if array.get_console_lock_status()["console_lock"] == "enabled":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.disable_console_lock()
+ except Exception:
+ module.fail_json(msg="Disabling Console Lock failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="disable", choices=["enable", "disable"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ array = get_system(module)
+
+ if module.params["state"] == "enable":
+ enable_console(module, array)
+ else:
+ disable_console(module, array)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py
new file mode 100644
index 000000000..5038de423
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_default_protection
+version_added: '1.14.0'
+short_description: Manage SafeMode default protection for a Pure Storage FlashArray
+description:
+- Configure automatic protection group membership for new volumes and copied volumes
+ array wide, or at the pod level.
+- Requires a minimum of Purity 6.3.4
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ scope:
+ description:
+ - The scope of the default protection group
+ type: str
+ choices: [ array, pod ]
+ default: array
+ name:
+ description:
+ - The name of the protection group to assign or remove as default for the scope.
+ - If I(scope) is I(pod) only the short-name for the pod protection group is needed.
+ See examples
+ elements: str
+ type: list
+ required: true
+ pod:
+ description:
+ - name of the pod to apply the default protection to.
+ - Only required for I(scope) is I(pod)
+ type: str
+ state:
+ description:
+ - Define whether to add or delete the protection group to the default list
+ default: present
+ choices: [ absent, present ]
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Add protection group foo::bar as default for pod foo
+ purestorage.flasharray.purefa_default_protection:
+ name: bar
+ pod: foo
+ scope: pod
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add protection group foo as default for array
+ purestorage.flasharray.purefa_default_protection:
+ name: foo
+ scope: array
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Remove protection group foo from array default protection
+ purestorage.flasharray.purefa_default_protection:
+ name: foo
+ scope: array
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Clear default protection for the array
+ purestorage.flasharray.purefa_volume_tags:
+ name: ''
+ scope: array
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+
+DEFAULT_API_VERSION = "2.16"
+
+
+def _get_pod(module, array):
+ """Return Pod or None"""
+ try:
+ return array.get_pods(names=[module.params["pod"]])
+ except Exception:
+ return None
+
+
+def _get_pg(array, pod):
+ """Return Protection Group or None"""
+ try:
+ return array.get_protection_groups(names=[pod])
+ except Exception:
+ return None
+
+
+def create_default(module, array):
+ """Create Default Protection"""
+ changed = True
+ pg_list = []
+ if not module.check_mode:
+ for pgroup in range(0, len(module.params["name"])):
+ if module.params["scope"] == "array":
+ pg_list.append(
+ flasharray.DefaultProtectionReference(
+ name=module.params["name"][pgroup], type="protection_group"
+ )
+ )
+ else:
+ pg_list.append(
+ flasharray.DefaultProtectionReference(
+ name=module.params["pod"]
+ + "::"
+ + module.params["name"][pgroup],
+ type="protection_group",
+ )
+ )
+ if module.params["scope"] == "array":
+ protection = flasharray.ContainerDefaultProtection(
+ name="", type="", default_protections=pg_list
+ )
+ res = array.patch_container_default_protections(
+ names=[""], container_default_protection=protection
+ )
+ else:
+ protection = flasharray.ContainerDefaultProtection(
+ name=module.params["pod"], type="pod", default_protections=pg_list
+ )
+ res = array.patch_container_default_protections(
+ names=[module.params["pod"]], container_default_protection=protection
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set default protection. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def update_default(module, array, current_default):
+ """Update Default Protection"""
+ changed = False
+ current = []
+ for default in range(0, len(current_default)):
+ if module.params["scope"] == "array":
+ current.append(current_default[default].name)
+ else:
+ current.append(current_default[default].name.split(":")[-1])
+ pg_list = []
+ if module.params["state"] == "present":
+ if current:
+ new_list = sorted(list(set(module.params["name"] + current)))
+ else:
+ new_list = sorted(list(set(module.params["name"])))
+ elif current:
+ new_list = sorted(list(set(current).difference(module.params["name"])))
+ else:
+ new_list = []
+ if not new_list:
+ delete_default(module, array)
+ elif new_list == current:
+ changed = False
+ else:
+ changed = True
+ if not module.check_mode:
+ for pgroup in range(0, len(new_list)):
+ if module.params["scope"] == "array":
+ pg_list.append(
+ flasharray.DefaultProtectionReference(
+ name=new_list[pgroup], type="protection_group"
+ )
+ )
+ else:
+ pg_list.append(
+ flasharray.DefaultProtectionReference(
+ name=module.params["pod"] + "::" + new_list[pgroup],
+ type="protection_group",
+ )
+ )
+ if module.params["scope"] == "array":
+ protection = flasharray.ContainerDefaultProtection(
+ name="", type="", default_protections=pg_list
+ )
+ res = array.patch_container_default_protections(
+ names=[""], container_default_protection=protection
+ )
+ else:
+ protection = flasharray.ContainerDefaultProtection(
+ name=module.params["pod"],
+ type="pod",
+ default_protections=pg_list,
+ )
+ res = array.patch_container_default_protections(
+ names=[module.params["pod"]],
+ container_default_protection=protection,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update default protection. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_default(module, array):
+ """Delete Default Protection"""
+ changed = True
+ if not module.check_mode:
+ if module.params["scope"] == "array":
+ protection = flasharray.ContainerDefaultProtection(
+ name="", type="", default_protections=[]
+ )
+ res = array.patch_container_default_protections(
+ names=[""], container_default_protection=protection
+ )
+ else:
+ protection = flasharray.ContainerDefaultProtection(
+ name=module.params["pod"], type="pod", default_protections=[]
+ )
+ res = array.patch_container_default_protections(
+ names=[module.params["pod"]], container_default_protection=[]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete default protection. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="list", elements="str", required=True),
+ pod=dict(type="str"),
+ scope=dict(type="str", default="array", choices=["array", "pod"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ required_if = [["scope", "pod", ["pod"]]]
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+ state = module.params["state"]
+ if not HAS_PURESTORAGE:
+ module.fail_json(
+ msg="py-pure-client sdk is required to support 'count' parameter"
+ )
+ arrayv5 = get_system(module)
+ module.params["name"] = sorted(module.params["name"])
+ api_version = arrayv5._list_available_rest_versions()
+ if DEFAULT_API_VERSION not in api_version:
+ module.fail_json(
+ msg="Default Protection is not supported. Purity//FA 6.3.4, or higher, is required."
+ )
+ array = get_array(module)
+ if module.params["scope"] == "pod":
+ if not _get_pod(module, array):
+ module.fail_json(
+ msg="Invalid pod {0} specified.".format(module.params["pod"])
+ )
+ current_default = list(
+ array.get_container_default_protections(names=[module.params["pod"]]).items
+ )[0].default_protections
+ else:
+ current_default = list(array.get_container_default_protections().items)[
+ 0
+ ].default_protections
+ for pgroup in range(0, len(module.params["name"])):
+ if module.params["scope"] == "pod":
+ pod_name = module.params["pod"] + module.params["name"][pgroup]
+ else:
+ pod_name = module.params["name"][pgroup]
+ if not _get_pg(array, pod_name):
+ module.fail_json(msg="Protection Group {0} does not exist".format(pod_name))
+
+ if state == "present" and not current_default:
+ create_default(module, array)
+ elif state == "absent" and not current_default:
+ module.exit_json(changed=False)
+ elif state == "present" and current_default:
+ update_default(module, array, current_default)
+ elif state == "absent" and current_default and module.params["name"] != [""]:
+ update_default(module, array, current_default)
+ elif state == "absent" and current_default and module.params["name"] == [""]:
+ delete_default(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py
new file mode 100644
index 000000000..125b84172
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_directory
+version_added: '1.5.0'
+short_description: Manage FlashArray File System Directories
+description:
+- Create/Delete FlashArray File Systems
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the directory
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the directory should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ filesystem:
+ description:
+ - Name of the filesystem the directory links to.
+ type: str
+ required: true
+ path:
+ description:
+ - Path of the managed directory in the file system
+ - If not provided will default to I(name)
+ type: str
+ rename:
+ description:
+ - Value to rename the specified directory to
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create directory foo in filesystem bar with path zeta
+ purestorage.flasharray.purefa_directory:
+ name: foo
+ filesystem: bar
+ path: zeta
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Rename directory foo to fin in filesystem bar
+ purestorage.flasharray.purefa_directory:
+ name: foo
+ rename: fin
+ filesystem: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete directory foo in filesystem bar
+ purestorage.flasharray.purefa_directory:
+ name: foo
+ filesystem: bar
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+
+
+def delete_dir(module, array):
+ """Delete a file system directory"""
+ changed = True
+ if not module.check_mode:
+ res = array.delete_directories(
+ names=[module.params["filesystem"] + ":" + module.params["name"]]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete file system {0}. {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def rename_dir(module, array):
+ """Rename a file system directory"""
+ changed = False
+ target = array.get_directories(
+ names=[module.params["filesystem"] + ":" + module.params["rename"]]
+ )
+ if target.status_code != 200:
+ if not module.check_mode:
+ changed = True
+ directory = flasharray.DirectoryPatch(
+ name=module.params["filesystem"] + ":" + module.params["rename"]
+ )
+ res = array.patch_directories(
+ names=[module.params["filesystem"] + ":" + module.params["name"]],
+ directory=directory,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete file system {0}".format(module.params["name"])
+ )
+ else:
+ module.fail_json(
+ msg="Target file system {0} already exists".format(module.params["rename"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_dir(module, array):
+ """Create a file system directory"""
+ changed = False
+ if not module.params["path"]:
+ module.params["path"] = module.params["name"]
+ all_fs = list(
+ array.get_directories(file_system_names=[module.params["filesystem"]]).items
+ )
+ for check in range(0, len(all_fs)):
+ if module.params["path"] == all_fs[check].path[1:]:
+ module.fail_json(
+ msg="Path {0} already existis in file system {1}".format(
+ module.params["path"], module.params["filesystem"]
+ )
+ )
+ changed = True
+ if not module.check_mode:
+ directory = flasharray.DirectoryPost(
+ directory_name=module.params["name"], path=module.params["path"]
+ )
+ res = array.post_directories(
+ file_system_names=[module.params["filesystem"]], directory=directory
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create file system {0}. {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ filesystem=dict(type="str", required=True),
+ name=dict(type="str", required=True),
+ rename=dict(type="str"),
+ path=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ array = get_array(module)
+ state = module.params["state"]
+
+ try:
+ filesystem = list(
+ array.get_file_systems(names=[module.params["filesystem"]]).items
+ )[0]
+ except Exception:
+ module.fail_json(
+ msg="Selected file system {0} does not exist".format(
+ module.params["filesystem"]
+ )
+ )
+ res = array.get_directories(
+ names=[module.params["filesystem"] + ":" + module.params["name"]]
+ )
+ exists = bool(res.status_code == 200)
+
+ if state == "present" and not exists:
+ create_dir(module, array)
+ elif (
+ state == "present"
+ and exists
+ and module.params["rename"]
+ and not filesystem.destroyed
+ ):
+ rename_dir(module, array)
+ elif state == "absent" and exists:
+ delete_dir(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py
new file mode 100644
index 000000000..4c090bde8
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py
@@ -0,0 +1,474 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_dirsnap
+version_added: '1.9.0'
+short_description: Manage FlashArray File System Directory Snapshots
+description:
+- Create/Delete FlashArray File System directory snapshots
+- A full snapshot name is constructed in the form of DIR.CLIENT_NAME.SUFFIX
+ where DIR is the managed directory name, CLIENT_NAME is the client name,
+ and SUFFIX is the suffix.
+- The client visible snapshot name is CLIENT_NAME.SUFFIX.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the directory to snapshot
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the directory snapshot should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ filesystem:
+ description:
+ - Name of the filesystem the directory links to.
+ type: str
+ required: true
+ eradicate:
+ description:
+ - Define whether to eradicate the snapshot on delete or leave in trash
+ type: bool
+ default: false
+ client:
+ description:
+ - The client name portion of the client visible snapshot name
+ type: str
+ required: true
+ suffix:
+ description:
+ - Snapshot suffix to use
+ type: str
+ new_client:
+ description:
+ - The new client name when performing a rename
+ type: str
+ version_added: '1.12.0'
+ new_suffix:
+ description:
+ - The new suffix when performing a rename
+ type: str
+ version_added: '1.12.0'
+ rename:
+ description:
+ - Whether to rename a directory snapshot
+ - The snapshot client name and suffix can be changed
+    - Required with I(new_client) and I(new_suffix)
+ type: bool
+ default: false
+ version_added: '1.12.0'
+ keep_for:
+ description:
+ - Retention period, after which snapshots will be eradicated
+ - Specify in seconds. Range 300 - 31536000 (5 minutes to 1 year)
+ - Value of 0 will set no retention period.
+ - If not specified on create will default to 0 (no retention period)
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create a snapshot of directory foo in filesystem bar for client test with suffix test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: test
+ suffix: test
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update retention time for a snapshot foo:bar.client.test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ keep_for: 300 # 5 minutes
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete snapshot foo:bar.client.test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Recover deleted snapshot foo:bar.client.test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete and eradicate snapshot foo:bar.client.test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ state: absent
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Eradicate deleted snapshot foo:bar.client.test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ eradicate: true
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Rename snapshot
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ rename: true
+ new_client: client2
+ new_suffix: test2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import DirectorySnapshotPost, DirectorySnapshotPatch
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+MIN_RENAME_API_VERSION = "2.10"
+
+
+def eradicate_snap(module, array):
+ """Eradicate a filesystem snapshot"""
+ changed = True
+ if not module.check_mode:
+ snapname = (
+ module.params["filesystem"]
+ + ":"
+ + module.params["name"]
+ + "."
+ + module.params["client"]
+ + "."
+ + module.params["suffix"]
+ )
+ res = array.delete_directory_snapshots(names=[snapname])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to eradicate filesystem snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_snap(module, array):
+ """Delete a filesystem snapshot"""
+ changed = True
+ if not module.check_mode:
+ snapname = (
+ module.params["filesystem"]
+ + ":"
+ + module.params["name"]
+ + "."
+ + module.params["client"]
+ + "."
+ + module.params["suffix"]
+ )
+ directory_snapshot = DirectorySnapshotPatch(destroyed=True)
+ res = array.patch_directory_snapshots(
+ names=[snapname], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete filesystem snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ if module.params["eradicate"]:
+ eradicate_snap(module, array)
+ module.exit_json(changed=changed)
+
+
+def update_snap(module, array, snap_detail):
+ """Update a filesystem snapshot retention time"""
+ changed = True
+ snapname = (
+ module.params["filesystem"]
+ + ":"
+ + module.params["name"]
+ + "."
+ + module.params["client"]
+ + "."
+ + module.params["suffix"]
+ )
+ if module.params["rename"]:
+ if not module.params["new_client"]:
+ new_client = module.params["client"]
+ else:
+ new_client = module.params["new_client"]
+ if not module.params["new_suffix"]:
+ new_suffix = module.params["suffix"]
+ else:
+ new_suffix = module.params["new_suffix"]
+ new_snapname = (
+ module.params["filesystem"]
+ + ":"
+ + module.params["name"]
+ + "."
+ + new_client
+ + "."
+ + new_suffix
+ )
+ directory_snapshot = DirectorySnapshotPatch(
+ client_name=new_client, suffix=new_suffix
+ )
+ if not module.check_mode:
+ res = array.patch_directory_snapshots(
+ names=[snapname], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ else:
+ snapname = new_snapname
+ if not module.params["keep_for"] or module.params["keep_for"] == 0:
+ keep_for = 0
+ elif 300 <= module.params["keep_for"] <= 31536000:
+ keep_for = module.params["keep_for"] * 1000
+ else:
+ module.fail_json(msg="keep_for not in range of 300 - 31536000")
+ if not module.check_mode:
+ if snap_detail.destroyed:
+ directory_snapshot = DirectorySnapshotPatch(destroyed=False)
+ res = array.patch_directory_snapshots(
+ names=[snapname], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to recover snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ directory_snapshot = DirectorySnapshotPatch(keep_for=keep_for)
+ if snap_detail.time_remaining == 0 and keep_for != 0:
+ res = array.patch_directory_snapshots(
+ names=[snapname], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to retention time for snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ elif snap_detail.time_remaining > 0:
+ if module.params["rename"] and module.params["keep_for"]:
+ res = array.patch_directory_snapshots(
+ names=[snapname], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to retention time for renamed snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def create_snap(module, array):
+ """Create a filesystem snapshot"""
+ changed = True
+ if not module.check_mode:
+ if not module.params["keep_for"] or module.params["keep_for"] == 0:
+ keep_for = 0
+ elif 300 <= module.params["keep_for"] <= 31536000:
+ keep_for = module.params["keep_for"] * 1000
+ else:
+ module.fail_json(msg="keep_for not in range of 300 - 31536000")
+ directory = module.params["filesystem"] + ":" + module.params["name"]
+ if module.params["suffix"]:
+ directory_snapshot = DirectorySnapshotPost(
+ client_name=module.params["client"],
+ keep_for=keep_for,
+ suffix=module.params["suffix"],
+ )
+ else:
+ directory_snapshot = DirectorySnapshotPost(
+ client_name=module.params["client"], keep_for=keep_for
+ )
+ res = array.post_directory_snapshots(
+ source_names=[directory], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create client {0} snapshot for {1}. Error: {2}".format(
+ module.params["client"], directory, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ filesystem=dict(type="str", required=True),
+ name=dict(type="str", required=True),
+ eradicate=dict(type="bool", default=False),
+ client=dict(type="str", required=True),
+ suffix=dict(type="str"),
+ rename=dict(type="bool", default=False),
+ new_client=dict(type="str"),
+ new_suffix=dict(type="str"),
+ keep_for=dict(type="int"),
+ )
+ )
+
+ required_if = [["state", "absent", ["suffix"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if module.params["rename"]:
+ if not module.params["new_client"] and not module.params["new_suffix"]:
+ module.fail_json(msg="Rename requires one of: new_client, new_suffix")
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ client_pattern = re.compile(
+ "^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,56}[a-zA-Z0-9])?$"
+ )
+ suffix_pattern = re.compile(
+ "^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,63}[a-zA-Z0-9])?$"
+ )
+ if module.params["suffix"]:
+ if not suffix_pattern.match(module.params["suffix"]):
+ module.fail_json(
+ msg="Suffix name {0} does not conform to the suffix name rules.".format(
+ module.params["suffix"]
+ )
+ )
+ if module.params["new_suffix"]:
+ if not suffix_pattern.match(module.params["new_suffix"]):
+ module.fail_json(
+ msg="Suffix rename {0} does not conform to the suffix name rules.".format(
+ module.params["new_suffix"]
+ )
+ )
+ if module.params["client"]:
+ if not client_pattern.match(module.params["client"]):
+ module.fail_json(
+ msg="Client name {0} does not conform to the client name rules.".format(
+ module.params["client"]
+ )
+ )
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ if module.params["rename"] and MIN_RENAME_API_VERSION not in api_version:
+ module.fail_json(
+ msg="Directory snapshot rename not supported. "
+ "Minimum Purity//FA version required: 6.2.1"
+ )
+ array = get_array(module)
+ state = module.params["state"]
+ snapshot_root = module.params["filesystem"] + ":" + module.params["name"]
+ if bool(
+ array.get_directories(
+ filter='name="' + snapshot_root + '"', total_item_count=True
+ ).total_item_count
+ == 0
+ ):
+ module.fail_json(msg="Directory {0} does not exist.".format(snapshot_root))
+ snap_exists = False
+ if module.params["suffix"]:
+ snap_detail = array.get_directory_snapshots(
+ filter="name='"
+ + snapshot_root
+ + "."
+ + module.params["client"]
+ + "."
+ + module.params["suffix"]
+ + "'",
+ total_item_count=True,
+ )
+ if bool(snap_detail.status_code == 200):
+ snap_exists = bool(snap_detail.total_item_count != 0)
+ if snap_exists:
+ snap_facts = list(snap_detail.items)[0]
+ if state == "present" and not snap_exists:
+ create_snap(module, array)
+ elif state == "present" and snap_exists and module.params["suffix"]:
+ update_snap(module, array, snap_facts)
+ elif state == "absent" and snap_exists and not snap_facts.destroyed:
+ delete_snap(module, array)
+ elif (
+ state == "absent"
+ and snap_exists
+ and snap_facts.destroyed
+ and module.params["eradicate"]
+ ):
+ eradicate_snap(module, array)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py
new file mode 100644
index 000000000..746a4ed52
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_dns
+version_added: '1.0.0'
+short_description: Configure FlashArray DNS settings
+description:
+- Set or erase configuration for the DNS settings.
+- Nameservers provided will overwrite any existing nameservers.
+- From Purity//FA 6.3.3 DNS settings for FA-File can be configured separately
+ to the management DNS settings
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the DNS configuration.
+ - Default value only supported for management service
+ default: management
+ type: str
+ version_added: 1.14.0
+ state:
+ description:
+ - Set or delete directory service configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ domain:
+ description:
+    - Domain suffix to be appended when performing DNS lookups.
+ type: str
+ nameservers:
+ description:
+    - List of up to 3 unique DNS server IP addresses. These can be
+      IPv4 or IPv6 addresses; no validation of the addresses is performed.
+ type: list
+ elements: str
+ service:
+ description:
+    - Type of service the DNS will work with
+ type: str
+ version_added: 1.14.0
+ choices: [ management, file ]
+ default: management
+ source:
+ description:
+ - A virtual network interface (vif)
+ type: str
+ version_added: 1.14.0
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing DNS settings
+ purestorage.flasharray.purefa_dns:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set management DNS settings
+ purestorage.flasharray.purefa_dns:
+ domain: purestorage.com
+ nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set file DNS settings
+ purestorage.flasharray.purefa_dns:
+ domain: purestorage.com
+ nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+ name: ad_dns
+ service: file
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MULTIPLE_DNS = "2.15"
+
+
def remove(duplicate):
    """Return the items of *duplicate* with later repeats dropped.

    First-seen order is preserved; the input is not modified.
    """
    unique = []
    for item in duplicate:
        if item in unique:
            continue
        unique.append(item)
    return unique
+
+
+def _get_source(module, array):
+ res = array.get_network_interfaces(names=[module.params["source"]])
+ if res.status_code == 200:
+ return True
+ else:
+ return False
+
+
def delete_dns(module, array):
    """Delete (blank out) the management DNS settings.

    Reports changed=False when the configuration is already empty;
    otherwise clears domain and nameservers (honouring check mode).
    Always exits via module.exit_json.
    """
    changed = False
    current_dns = array.get_dns()
    if current_dns["domain"] == "" and current_dns["nameservers"] == [""]:
        module.exit_json(changed=changed)
    else:
        try:
            changed = True
            if not module.check_mode:
                array.set_dns(domain="", nameservers=[])
        except Exception:
            # Fixed typo in the reported error message ("settigs").
            module.fail_json(msg="Delete DNS settings failed")
    module.exit_json(changed=changed)
+
+
def create_dns(module, array):
    """Set the management DNS domain and (at most 3) nameservers.

    Compares order-insensitively against the current configuration and
    only writes when something differs.  Exits via module.exit_json.
    """
    changed = False
    current = array.get_dns()
    domain_differs = current["domain"] != module.params["domain"]
    servers_differ = sorted(module.params["nameservers"]) != sorted(
        current["nameservers"]
    )
    if domain_differs or servers_differ:
        changed = True
        try:
            if not module.check_mode:
                array.set_dns(
                    domain=module.params["domain"],
                    nameservers=module.params["nameservers"][0:3],
                )
        except Exception:
            module.fail_json(msg="Set DNS settings failed: Check configuration")
    module.exit_json(changed=changed)
+
+
def update_multi_dns(module, array):
    """Update a named DNS configuration (REST 2.x multi-DNS arrays).

    Compares the existing configuration against the module parameters and
    issues a single patch_dns call when anything differs.  Exits via
    module.exit_json with the changed status.
    """
    changed = False
    current_dns = list(array.get_dns(names=[module.params["name"]]).items)[0]
    # NOTE: new_dns aliases current_dns (no copy is made); the later
    # comparisons against current_dns therefore run on the mutated object,
    # but each field is compared before it is assigned, so this is benign.
    new_dns = current_dns
    if module.params["domain"] and current_dns.domain != module.params["domain"]:
        new_dns.domain = module.params["domain"]
        changed = True
    # The service list of an existing configuration is immutable.
    if module.params["service"] and current_dns.services != [module.params["service"]]:
        module.fail_json(msg="Changing service type is not permitted")
    if module.params["nameservers"] and sorted(current_dns.nameservers) != sorted(
        module.params["nameservers"]
    ):
        new_dns.nameservers = module.params["nameservers"]
        changed = True
    # An empty string is an accepted value used to clear the source vif,
    # hence the explicit equality test alongside the truthiness test.
    if (
        module.params["source"] or module.params["source"] == ""
    ) and current_dns.source.name != module.params["source"]:
        new_dns.source.name = module.params["source"]
        changed = True
    if changed and not module.check_mode:
        # NOTE(review): source is taken straight from module.params, so when
        # only domain/nameservers changed and no source was supplied this
        # sends ReferenceNoId(None) -- confirm the API tolerates that.
        res = array.patch_dns(
            names=[module.params["name"]],
            dns=flasharray.Dns(
                domain=new_dns.domain,
                nameservers=new_dns.nameservers,
                source=flasharray.ReferenceNoId(module.params["source"]),
            ),
        )
        if res.status_code != 200:
            module.fail_json(
                msg="Update to DNS service {0} failed. Error: {1}".format(
                    module.params["name"], res.errors[0].message
                )
            )
    module.exit_json(changed=changed)
+
+
def delete_multi_dns(module, array):
    """Delete a named DNS configuration (REST 2.x multi-DNS arrays).

    The built-in "management" configuration cannot be removed, so it is
    blanked via an update instead; any other configuration is deleted
    outright.  Always reports changed=True.
    """
    changed = True
    if module.params["name"] == "management":
        # NOTE(review): unlike the delete branch below, this update is not
        # guarded by module.check_mode -- confirm whether that is intended.
        res = array.update_dns(
            names=[module.params["name"]],
            dns=flasharray.DnsPatch(
                domain=module.params["domain"],
                nameservers=module.params["nameservers"],
            ),
        )
        if res.status_code != 200:
            module.fail_json(
                msg="Management DNS configuration not deleted. Error: {0}".format(
                    res.errors[0].message
                )
            )
    else:
        if not module.check_mode:
            res = array.delete_dns(names=[module.params["name"]])
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to delete DNS configuration {0}. Error: {1}".format(
                        module.params["name"], res.errors[0].message
                    )
                )
    module.exit_json(changed=changed)
+
+
def create_multi_dns(module, array):
    """Create a new DNS configuration (REST 2.x multi-DNS arrays).

    File-service configurations are created via post_dns, optionally with
    a source vif; any other service goes through create_dns.  Always
    reports changed=True and exits via module.exit_json.
    """
    changed = True
    if not module.check_mode:
        if module.params["service"] == "file":
            if module.params["source"]:
                res = array.post_dns(
                    names=[module.params["name"]],
                    dns=flasharray.DnsPost(
                        services=[module.params["service"]],
                        domain=module.params["domain"],
                        nameservers=module.params["nameservers"],
                        # source name is lower-cased before sending --
                        # presumably vif names are stored lower-case on the
                        # array; confirm against the REST API docs.
                        source=flasharray.ReferenceNoId(
                            module.params["source"].lower()
                        ),
                    ),
                )
            else:
                res = array.post_dns(
                    names=[module.params["name"]],
                    dns=flasharray.DnsPost(
                        services=[module.params["service"]],
                        domain=module.params["domain"],
                        nameservers=module.params["nameservers"],
                    ),
                )
        else:
            res = array.create_dns(
                names=[module.params["name"]],
                services=[module.params["service"]],
                domain=module.params["domain"],
                nameservers=module.params["nameservers"],
            )
        if res.status_code != 200:
            module.fail_json(
                msg="Failed to create {0} DNS configuration {1}. Error: {2}".format(
                    module.params["service"],
                    module.params["name"],
                    res.errors[0].message,
                )
            )
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: route to the correct DNS create/update/delete path.

    Arrays supporting REST 2.15+ (MULTIPLE_DNS) get the multi-configuration
    code path; older arrays fall back to the single management DNS API.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            state=dict(type="str", default="present", choices=["absent", "present"]),
            name=dict(type="str", default="management"),
            service=dict(
                type="str", default="management", choices=["management", "file"]
            ),
            domain=dict(type="str"),
            source=dict(type="str"),
            nameservers=dict(type="list", elements="str"),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    state = module.params["state"]
    array = get_system(module)
    api_version = array._list_available_rest_versions()
    if module.params["nameservers"]:
        # Strip duplicate nameservers while preserving order.
        module.params["nameservers"] = remove(module.params["nameservers"])
        if module.params["service"] == "management":
            # The management service accepts at most 3 nameservers.
            module.params["nameservers"] = module.params["nameservers"][0:3]

    if MULTIPLE_DNS in api_version:
        # REST 2.15+: multiple named DNS configurations are supported.
        array = get_array(module)
        configs = list(array.get_dns().items)
        exists = False
        for config in range(0, len(configs)):
            if configs[config].name == module.params["name"]:
                exists = True
        if (
            module.params["service"] == "management"
            and module.params["name"] != "management"
            and not exists
        ):
            # A new management configuration must use the reserved name.
            module.warn("Overriding configuration name to management")
            module.params["name"] = "management"
        if module.params["source"] and not _get_source(module, array):
            module.fail_json(
                msg="Specified VIF {0} does not exist.".format(module.params["source"])
            )
        if state == "present" and exists:
            update_multi_dns(module, array)
        elif state == "present" and not exists:
            if len(configs) == 2:
                module.fail_json(
                    msg="Only 2 DNS configurations are currently "
                    "supported. One for management and one for file services"
                )
            create_multi_dns(module, array)
        elif exists and state == "absent":
            delete_multi_dns(module, array)
        else:
            module.exit_json(changed=False)
    else:
        # Legacy (REST 1.x) single management DNS configuration.
        if state == "absent":
            delete_dns(module, array)
        elif state == "present":
            if not module.params["domain"] or not module.params["nameservers"]:
                module.fail_json(
                    msg="`domain` and `nameservers` are required for DNS configuration"
                )
            create_dns(module, array)
        else:
            module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py
new file mode 100644
index 000000000..195aa2155
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py
@@ -0,0 +1,609 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_ds
+version_added: '1.0.0'
+short_description: Configure FlashArray Directory Service
+description:
+- Set or erase configuration for the directory service. There is no facility
+  to manage SSL certificates at this time. Use the FlashArray GUI for this
+ additional configuration work.
+- To modify an existing directory service configuration you must first delete
+  an existing configuration and then recreate with new settings.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ type: str
+ description:
+ - Create or delete directory service configuration
+ default: present
+ choices: [ absent, present ]
+ enable:
+ description:
+ - Whether to enable or disable directory service support.
+ default: false
+ type: bool
+ dstype:
+ description:
+ - The type of directory service to work on
+ choices: [ management, data ]
+ type: str
+ default: management
+ uri:
+ type: list
+ elements: str
+ description:
+ - A list of up to 30 URIs of the directory servers. Each URI must include
+ the scheme ldap:// or ldaps:// (for LDAP over SSL), a hostname, and a
+ domain name or IP address. For example, ldap://ad.company.com configures
+ the directory service with the hostname "ad" in the domain "company.com"
+ while specifying the unencrypted LDAP protocol.
+ base_dn:
+ type: str
+ description:
+ - Sets the base of the Distinguished Name (DN) of the directory service
+ groups. The base should consist of only Domain Components (DCs). The
+ base_dn will populate with a default value when a URI is entered by
+ parsing domain components from the URI. The base DN should specify DC=
+ for each domain component and multiple DCs should be separated by commas.
+ bind_password:
+ type: str
+ description:
+ - Sets the password of the bind_user user name account.
+ force_bind_password:
+ type: bool
+ default: true
+ description:
+ - Will force the bind password to be reset even if the bind user password
+ is unchanged.
+ - If set to I(false) and I(bind_user) is unchanged the password will not
+ be reset.
+ version_added: 1.14.0
+ bind_user:
+ type: str
+ description:
+ - Sets the user name that can be used to bind to and query the directory.
+ - For Active Directory, enter the username - often referred to as
+ sAMAccountName or User Logon Name - of the account that is used to
+ perform directory lookups.
+ - For OpenLDAP, enter the full DN of the user.
+ group_base:
+ type: str
+ description:
+ - Specifies where the configured groups are located in the directory
+ tree. This field consists of Organizational Units (OUs) that combine
+ with the base DN attribute and the configured group CNs to complete
+ the full Distinguished Name of the groups. The group base should
+ specify OU= for each OU and multiple OUs should be separated by commas.
+ The order of OUs is important and should get larger in scope from left
+ to right. Each OU should not exceed 64 characters in length.
+ - Not Supported from Purity 5.2.0 or higher.
+ Use I(purestorage.flasharray.purefa_dsrole) module.
+ ro_group:
+ type: str
+ description:
+ - Sets the common Name (CN) of the configured directory service group
+ containing users with read-only privileges on the FlashArray. This
+ name should be just the Common Name of the group without the CN=
+ specifier. Common Names should not exceed 64 characters in length.
+ - Not Supported from Purity 5.2.0 or higher.
+ Use I(purestorage.flasharray.purefa_dsrole) module.
+ sa_group:
+ type: str
+ description:
+ - Sets the common Name (CN) of the configured directory service group
+ containing administrators with storage-related privileges on the
+ FlashArray. This name should be just the Common Name of the group
+ without the CN= specifier. Common Names should not exceed 64
+ characters in length.
+ - Not Supported from Purity 5.2.0 or higher.
+ Use I(purestorage.flasharray.purefa_dsrole) module.
+ aa_group:
+ type: str
+ description:
+ - Sets the common Name (CN) of the directory service group containing
+ administrators with full privileges when managing the FlashArray.
+ The name should be just the Common Name of the group without the
+ CN= specifier. Common Names should not exceed 64 characters in length.
+ - Not Supported from Purity 5.2.0 or higher.
+ Use I(purestorage.flasharray.purefa_dsrole) module.
+ user_login:
+ type: str
+ description:
+ - User login attribute in the structure of the configured LDAP servers.
+ Typically the attribute field that holds the users unique login name.
+ Default value is I(sAMAccountName) for Active Directory or I(uid)
+ for all other directory services
+ - Supported from Purity 6.0 or higher.
+ user_object:
+ type: str
+ description:
+ - Value of the object class for a management LDAP user.
+ Defaults to I(User) for Active Directory servers, I(posixAccount) or
+ I(shadowAccount) for OpenLDAP servers dependent on the group type
+ of the server, or person for all other directory servers.
+ - Supported from Purity 6.0 or higher.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing directory service
+ purestorage.flasharray.purefa_ds:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (disabled) - Pre-5.2.0
+ purestorage.flasharray.purefa_ds:
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ group_base: "OU=Pure-Admin"
+ ro_group: PureReadOnly
+ sa_group: PureStorage
+ aa_group: PureAdmin
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (disabled) - 5.2.0 or higher
+ purestorage.flasharray.purefa_ds:
+ dstype: management
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Enable existing directory service
+ purestorage.flasharray.purefa_ds:
+ enable: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable existing directory service
+ purestorage.flasharray.purefa_ds:
+ enable: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (enabled) - Pre-5.2.0
+ purestorage.flasharray.purefa_ds:
+ enable: true
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ group_base: "OU=Pure-Admin"
+ ro_group: PureReadOnly
+ sa_group: PureStorage
+ aa_group: PureAdmin
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (enabled) - 5.2.0 or higher
+ purestorage.flasharray.purefa_ds:
+ enable: true
+ dstype: management
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
+DS_ROLE_REQUIRED_API_VERSION = "1.16"
+FAFILES_API_VERSION = "2.2"
+
+
def disable_ds(module, array):
    """Disable the Directory Service on the array.

    Always reports changed=True; honours check mode.
    """
    if not module.check_mode:
        try:
            array.disable_directory_service()
        except Exception:
            module.fail_json(msg="Disable Directory Service failed")
    module.exit_json(changed=True)
+
+
def enable_ds(module, array):
    """Enable the Directory Service.

    On Purity 5.2.0+ (REST 1.16+) role groups are configured separately,
    so the service is only enabled when at least one directory-service
    role has a group_base set; otherwise the task fails with guidance.
    Exits via module.exit_json.
    """
    changed = False
    api_version = array._list_available_rest_versions()
    if DS_ROLE_REQUIRED_API_VERSION in api_version:
        try:
            roles = array.list_directory_service_roles()
            # At least one role with a group_base is required before enabling.
            enough_roles = False
            for role in range(0, len(roles)):
                if roles[role]["group_base"]:
                    enough_roles = True
            if enough_roles:
                changed = True
                if not module.check_mode:
                    array.enable_directory_service()
            else:
                # NOTE: AnsibleModule.fail_json exits the process, so the
                # broad except below does not swallow this failure.
                module.fail_json(
                    msg="Cannot enable directory service - please create a directory service role"
                )
        except Exception:
            module.fail_json(msg="Enable Directory Service failed: Check Configuration")
    else:
        try:
            changed = True
            if not module.check_mode:
                array.enable_directory_service()
        except Exception:
            module.fail_json(msg="Enable Directory Service failed: Check Configuration")
    module.exit_json(changed=changed)
+
+
def delete_ds(module, array):
    """Delete (blank out) the Directory Service configuration.

    First disables the service, then clears every configurable field.
    Pre-1.16 REST arrays also carry the role groups on the directory
    service itself, so those are cleared on that path too.  Always
    reports changed=True; honours check mode.
    """
    changed = True
    if not module.check_mode:
        try:
            api_version = array._list_available_rest_versions()
            array.set_directory_service(enabled=False)
            if DS_ROLE_REQUIRED_API_VERSION in api_version:
                # 1.16+: group/role fields live in directory service roles.
                array.set_directory_service(
                    uri=[""], base_dn="", bind_user="", bind_password="", certificate=""
                )
            else:
                array.set_directory_service(
                    uri=[""],
                    base_dn="",
                    group_base="",
                    bind_user="",
                    bind_password="",
                    readonly_group="",
                    storage_admin_group="",
                    array_admin_group="",
                    certificate="",
                )
        except Exception:
            module.fail_json(msg="Delete Directory Service failed")
    module.exit_json(changed=changed)
+
+
def delete_ds_v6(module, array):
    """Delete a Directory Service configuration via the v6 REST API.

    Deletion is implemented as a patch that clears every configurable
    field and disables the service.  Always reports changed=True; honours
    check mode.
    """
    changed = True
    if module.params["dstype"] == "management":
        # The management config also carries LDAP user-lookup attributes;
        # clear those as well.
        management = flasharray.DirectoryServiceManagement(
            user_login_attribute="", user_object_class=""
        )
        directory_service = flasharray.DirectoryService(
            uris=[""],
            base_dn="",
            bind_user="",
            bind_password="",
            enabled=False,
            services=module.params["dstype"],
            management=management,
        )
    else:
        directory_service = flasharray.DirectoryService(
            uris=[""],
            base_dn="",
            bind_user="",
            bind_password="",
            enabled=False,
            services=module.params["dstype"],
        )
    if not module.check_mode:
        res = array.patch_directory_services(
            names=[module.params["dstype"]], directory_service=directory_service
        )
        if res.status_code != 200:
            module.fail_json(
                msg="Delete {0} Directory Service failed. Error message: {1}".format(
                    module.params["dstype"], res.errors[0].message
                )
            )
    module.exit_json(changed=changed)
+
+
def create_ds(module, array):
    """Create (and optionally enable) the Directory Service configuration.

    Requires uri, base_dn, bind_user and bind_password.  On REST 1.16+
    the service is only enabled when at least one directory-service role
    has a group_base; on older arrays at least one of ro/sa/aa groups
    must be supplied.  Exits via module.exit_json.
    """
    changed = False
    if None in (
        module.params["bind_password"],
        module.params["bind_user"],
        module.params["base_dn"],
        module.params["uri"],
    ):
        module.fail_json(
            msg="Parameters 'bind_password', 'bind_user', 'base_dn' and 'uri' are all required"
        )
    api_version = array._list_available_rest_versions()
    if DS_ROLE_REQUIRED_API_VERSION in api_version:
        try:
            changed = True
            if not module.check_mode:
                array.set_directory_service(
                    uri=module.params["uri"],
                    base_dn=module.params["base_dn"],
                    bind_user=module.params["bind_user"],
                    bind_password=module.params["bind_password"],
                )
            # NOTE(review): the role check and the enable call below are not
            # guarded by module.check_mode, so check mode may still enable
            # the service on REST 1.16+ arrays -- confirm if intended.
            roles = array.list_directory_service_roles()
            enough_roles = False
            for role in range(0, len(roles)):
                if roles[role]["group_base"]:
                    enough_roles = True
            if enough_roles:
                array.set_directory_service(enabled=module.params["enable"])
            else:
                module.fail_json(
                    msg="Cannot enable directory service - please create a directory service role"
                )
        except Exception:
            module.fail_json(msg="Create Directory Service failed: Check configuration")
    else:
        # Pre-1.16: role groups are configured on the service itself, so at
        # least one group parameter is mandatory.
        groups_rule = [
            not module.params["ro_group"],
            not module.params["sa_group"],
            not module.params["aa_group"],
        ]

        if all(groups_rule):
            module.fail_json(msg="At least one group must be configured")
        try:
            changed = True
            if not module.check_mode:
                array.set_directory_service(
                    uri=module.params["uri"],
                    base_dn=module.params["base_dn"],
                    group_base=module.params["group_base"],
                    bind_user=module.params["bind_user"],
                    bind_password=module.params["bind_password"],
                    readonly_group=module.params["ro_group"],
                    storage_admin_group=module.params["sa_group"],
                    array_admin_group=module.params["aa_group"],
                )
                array.set_directory_service(enabled=module.params["enable"])
        except Exception:
            module.fail_json(msg="Create Directory Service failed: Check configuration")
    module.exit_json(changed=changed)
+
+
def update_ds_v6(module, array):
    """Update a Directory Service configuration via the v6 REST API.

    Builds a DirectoryService object reflecting the requested changes and
    patches it in a single call.  The bind password is only re-sent when
    the bind user changed, force_bind_password is set, or URIs are being
    configured for the first time.  Exits via module.exit_json.
    """
    changed = False
    ds_change = False
    password_required = False
    dirserv = list(
        array.get_directory_services(
            filter="name='" + module.params["dstype"] + "'"
        ).items
    )[0]
    current_ds = dirserv
    # First-time URI configuration implies the bind password must be set too.
    if module.params["uri"] and current_ds.uris is None:
        password_required = True
    if current_ds.uris != module.params["uri"]:
        uris = module.params["uri"]
        ds_change = True
    else:
        uris = current_ds.uris
    # An unconfigured service may lack these attributes entirely.
    try:
        base_dn = current_ds.base_dn
    except AttributeError:
        base_dn = ""
    try:
        bind_user = current_ds.bind_user
    except AttributeError:
        bind_user = ""
    if module.params["base_dn"] != "" and module.params["base_dn"] != base_dn:
        base_dn = module.params["base_dn"]
        ds_change = True
    if module.params["bind_user"] != "":
        bind_user = module.params["bind_user"]
    # NOTE(review): bind_user was just overwritten above when the parameter
    # is non-empty, so this comparison only differs when the parameter is
    # empty/None -- confirm the intended semantics.
    if module.params["bind_user"] != bind_user:
        password_required = True
        ds_change = True
    elif module.params["force_bind_password"]:
        # force_bind_password defaults to true, so the password is re-sent
        # (and therefore required) unless explicitly disabled.
        password_required = True
        ds_change = True
    if module.params["bind_password"] is not None and password_required:
        bind_password = module.params["bind_password"]
        ds_change = True
    if module.params["enable"] != current_ds.enabled:
        ds_change = True
    if password_required and not module.params["bind_password"]:
        module.fail_json(msg="'bind_password' must be provided for this task")
    if module.params["dstype"] == "management":
        # Management services additionally carry LDAP user-lookup attributes.
        try:
            user_login = current_ds.management.user_login_attribute
        except AttributeError:
            user_login = ""
        try:
            user_object = current_ds.management.user_object_class
        except AttributeError:
            user_object = ""
        if (
            module.params["user_object"] is not None
            and user_object != module.params["user_object"]
        ):
            user_object = module.params["user_object"]
            ds_change = True
        if (
            module.params["user_login"] is not None
            and user_login != module.params["user_login"]
        ):
            user_login = module.params["user_login"]
            ds_change = True
        management = flasharray.DirectoryServiceManagement(
            user_login_attribute=user_login, user_object_class=user_object
        )
        if password_required:
            directory_service = flasharray.DirectoryService(
                uris=uris,
                base_dn=base_dn,
                bind_user=bind_user,
                bind_password=bind_password,
                enabled=module.params["enable"],
                services=module.params["dstype"],
                management=management,
            )
        else:
            directory_service = flasharray.DirectoryService(
                uris=uris,
                base_dn=base_dn,
                bind_user=bind_user,
                enabled=module.params["enable"],
                services=module.params["dstype"],
                management=management,
            )
    else:
        if password_required:
            directory_service = flasharray.DirectoryService(
                uris=uris,
                base_dn=base_dn,
                bind_user=bind_user,
                bind_password=bind_password,
                enabled=module.params["enable"],
                services=module.params["dstype"],
            )
        else:
            directory_service = flasharray.DirectoryService(
                uris=uris,
                base_dn=base_dn,
                bind_user=bind_user,
                enabled=module.params["enable"],
                services=module.params["dstype"],
            )
    if ds_change:
        changed = True
        if not module.check_mode:
            res = array.patch_directory_services(
                names=[module.params["dstype"]], directory_service=directory_service
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="{0} Directory Service failed. Error message: {1}".format(
                        module.params["dstype"].capitalize(), res.errors[0].message
                    )
                )
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: route directory-service changes to the v5 or v6 path.

    Arrays with REST 2.2+ (FAFILES_API_VERSION) use the v6 client for both
    management and data services; older arrays use the legacy 1.x flow.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            uri=dict(type="list", elements="str"),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            enable=dict(type="bool", default=False),
            force_bind_password=dict(type="bool", default=True, no_log=True),
            bind_password=dict(type="str", no_log=True),
            bind_user=dict(type="str"),
            base_dn=dict(type="str"),
            group_base=dict(type="str"),
            user_login=dict(type="str"),
            user_object=dict(type="str"),
            ro_group=dict(type="str"),
            sa_group=dict(type="str"),
            aa_group=dict(type="str"),
            dstype=dict(
                type="str", default="management", choices=["management", "data"]
            ),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    array = get_system(module)
    api_version = array._list_available_rest_versions()

    if not HAS_PURESTORAGE:
        # Fixed grammar in the reported error message ("required to for").
        module.fail_json(msg="py-pure-client sdk is required for this module")

    if FAFILES_API_VERSION in api_version:
        arrayv6 = get_array(module)

    if module.params["dstype"] == "data":
        if FAFILES_API_VERSION in api_version:
            # Only the management configuration exists when FA-Files is not
            # enabled, so there is nothing to manage for dstype=data.
            if len(list(arrayv6.get_directory_services().items)) == 1:
                module.warn("FA-Files is not enabled - ignoring")
                module.exit_json(changed=False)
        else:
            module.fail_json(
                msg="'data' directory service requires Purity//FA 6.0.0 or higher"
            )

    state = module.params["state"]
    ds_exists = False
    if FAFILES_API_VERSION in api_version:
        dirserv = list(
            arrayv6.get_directory_services(
                filter="name='" + module.params["dstype"] + "'"
            ).items
        )[0]
        # NOTE(review): state=absent with no URIs configured still routes to
        # update_ds_v6 rather than exiting unchanged -- confirm if intended.
        if state == "absent" and dirserv.uris != []:
            delete_ds_v6(module, arrayv6)
        else:
            update_ds_v6(module, arrayv6)
    else:
        dirserv = array.get_directory_service()
        ds_enabled = dirserv["enabled"]
        if dirserv["base_dn"]:
            ds_exists = True

        if state == "absent" and ds_exists:
            delete_ds(module, array)
        elif ds_exists and module.params["enable"] and ds_enabled:
            module.warn(
                "To update an existing directory service configuration in Purity//FA 5.x, please delete and recreate"
            )
            module.exit_json(changed=False)
        elif ds_exists and not module.params["enable"] and ds_enabled:
            disable_ds(module, array)
        elif ds_exists and module.params["enable"] and not ds_enabled:
            enable_ds(module, array)
        elif not ds_exists and state == "present":
            create_ds(module, array)
        else:
            module.exit_json(changed=False)

    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py
new file mode 100644
index 000000000..ce6e8c0a5
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_dsrole
+version_added: '1.0.0'
+short_description: Configure FlashArray Directory Service Roles
+description:
+- Set or erase directory services role configurations.
+- Only available for FlashArray running Purity 5.2.0 or higher
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete directory service role
+ type: str
+ default: present
+ choices: [ absent, present ]
+ role:
+ description:
+ - The directory service role to work on
+ type: str
+ required: true
+ choices: [ array_admin, ops_admin, readonly, storage_admin ]
+ group_base:
+ type: str
+ description:
+ - Specifies where the configured group is located in the directory
+ tree. This field consists of Organizational Units (OUs) that combine
+ with the base DN attribute and the configured group CNs to complete
+ the full Distinguished Name of the groups. The group base should
+ specify OU= for each OU and multiple OUs should be separated by commas.
+ The order of OUs is important and should get larger in scope from left
+ to right.
+ - Each OU should not exceed 64 characters in length.
+ group:
+ type: str
+ description:
+ - Sets the common Name (CN) of the configured directory service group
+ containing users for the FlashBlade. This name should be just the
+ Common Name of the group without the CN= specifier.
+ - Common Names should not exceed 64 characters in length.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing array_admin directory service role
+ purestorage.flasharray.purefa_dsrole:
+ role: array_admin
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create array_admin directory service role
+ purestorage.flasharray.purefa_dsrole:
+ role: array_admin
+ group_base: "OU=PureGroups,OU=SANManagers"
+ group: pureadmins
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update ops_admin directory service role
+ purestorage.flasharray.purefa_dsrole:
+ role: ops_admin
+ group_base: "OU=PureGroups"
+ group: opsgroup
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
def update_role(module, array):
    """Update an existing Directory Service Role's group settings.

    Only writes when group or group_base differs; exits via
    module.exit_json with the changed status.
    """
    changed = False
    current = array.list_directory_service_roles(names=[module.params["role"]])[0]
    wanted_base = module.params["group_base"]
    wanted_group = module.params["group"]
    if current["group_base"] != wanted_base or current["group"] != wanted_group:
        changed = True
        try:
            if not module.check_mode:
                array.set_directory_service_roles(
                    names=[module.params["role"]],
                    group_base=wanted_base,
                    group=wanted_group,
                )
        except Exception:
            module.fail_json(
                msg="Update Directory Service Role {0} failed".format(
                    module.params["role"]
                )
            )
    module.exit_json(changed=changed)
+
+
def delete_role(module, array):
    """Clear a Directory Service Role (empty group and group_base).

    Always reports changed=True; honours check mode.
    """
    if not module.check_mode:
        try:
            array.set_directory_service_roles(
                names=[module.params["role"]], group_base="", group=""
            )
        except Exception:
            module.fail_json(
                msg="Delete Directory Service Role {0} failed".format(
                    module.params["role"]
                )
            )
    module.exit_json(changed=True)
+
+
def create_role(module, array):
    """Create a Directory Service Role from the supplied group settings.

    No-op (changed=False) when both group and group_base are empty
    strings; honours check mode.
    """
    changed = False
    if module.params["group"] != "" or module.params["group_base"] != "":
        changed = True
        if not module.check_mode:
            try:
                array.set_directory_service_roles(
                    names=[module.params["role"]],
                    group_base=module.params["group_base"],
                    group=module.params["group"],
                )
            except Exception:
                module.fail_json(
                    msg="Create Directory Service Role {0} failed".format(
                        module.params["role"]
                    )
                )
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: create, update or delete one directory-service role."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            role=dict(
                required=True,
                type="str",
                choices=["array_admin", "ops_admin", "readonly", "storage_admin"],
            ),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            group_base=dict(type="str"),
            group=dict(type="str"),
        )
    )

    module = AnsibleModule(
        argument_spec,
        required_together=[["group", "group_base"]],
        supports_check_mode=True,
    )

    state = module.params["state"]
    array = get_system(module)
    role = array.list_directory_service_roles(names=[module.params["role"]])
    # A role counts as configured once it has a group assigned.
    role_configured = role[0]["group"] is not None

    if state == "absent" and role_configured:
        delete_role(module, array)
    elif state == "present" and role_configured:
        update_role(module, array)
    elif state == "present" and not role_configured:
        create_role(module, array)
    else:
        module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py
new file mode 100644
index 000000000..c759be4af
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_endpoint
+short_description: Manage VMware protocol-endpoints on Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+- Create, delete or eradicate an endpoint on a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the endpoint.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the endpoint should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the endpoint on delete or leave in trash.
+ type: bool
+ default: false
+ rename:
+ description:
+ - Value to rename the specified endpoint to.
+ - Rename only applies to the container the current endpoint is in.
+ type: str
+ host:
+ description:
+ - name of host to attach endpoint to
+ type: str
+ hgroup:
+ description:
+ - name of hostgroup to attach endpoint to
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new endpoint named foo
+ purestorage.flasharray.purefa_endpoint:
+ name: test-endpoint
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate endpoint named foo
+ purestorage.flasharray.purefa_endpoint:
+ name: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Rename endpoint foo to bar
+ purestorage.flasharray.purefa_endpoint:
+ name: foo
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+volume:
+ description: A dictionary describing the changed volume. Only some
+ attributes below will be returned with various actions.
+ type: dict
+ returned: success
+ contains:
+ source:
+ description: Volume name of source volume used for volume copy
+ type: str
+ serial:
+ description: Volume serial number
+ type: str
+ sample: '361019ECACE43D83000120A4'
+ created:
+ description: Volume creation time
+ type: str
+ sample: '2019-03-13T22:49:24Z'
+ name:
+ description: Volume name
+ type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+VGROUPS_API_VERSION = "1.13"
+
+
def get_volume(volume, array):
    """Return the named volume (including pending-eradication ones) or None."""
    try:
        vol_info = array.get_volume(volume, pending=True)
    except Exception:
        return None
    return vol_info
+
+
def get_target(volume, array):
    """Look up the rename target volume; None when the lookup fails."""
    result = None
    try:
        result = array.get_volume(volume, pending=True)
    except Exception:
        # Any API error is treated as "does not exist".
        pass
    return result
+
+
def get_endpoint(vol, array):
    """Return the protocol endpoint with the given name, or None."""
    try:
        endpoint = array.get_volume(vol, protocol_endpoint=True)
    except Exception:
        return None
    return endpoint
+
+
def get_destroyed_endpoint(vol, array):
    """Return True if the endpoint exists in the destroyed state, None on error."""
    try:
        pending_vol = array.get_volume(vol, protocol_endpoint=True, pending=True)
    except Exception:
        return None
    # A non-empty time_remaining means the endpoint awaits eradication.
    return pending_vol["time_remaining"] != ""
+
+
def check_vgroup(module, array):
    """Return True when the volume group prefix of the requested name exists.

    The volume group is the text before the first "/" in the endpoint name.
    Fails the module if the volume group list cannot be retrieved.
    """
    vg_name = module.params["name"].split("/")[0]
    try:
        vgroups = array.list_vgroups()
    except Exception:
        module.fail_json(msg="Failed to get volume groups list. Check array.")
    # Membership test replaces the original index-based scan loop.
    return any(vgroup["name"] == vg_name for vgroup in vgroups)
+
+
def create_endpoint(module, array):
    """Create a protocol endpoint and optionally attach it to a host/hostgroup.

    Fails early when the name includes a volume group ("vg/name") that
    does not exist on the array.
    """
    changed = False
    volfact = []
    if "/" in module.params["name"] and not check_vgroup(module, array):
        module.fail_json(
            msg="Failed to create endpoint {0}. Volume Group does not exist.".format(
                module.params["name"]
            )
        )
    try:
        changed = True
        if not module.check_mode:
            # Protocol endpoints are conglomerate volumes on FlashArray.
            volfact = array.create_conglomerate_volume(module.params["name"])
    except Exception:
        module.fail_json(
            msg="Endpoint {0} creation failed.".format(module.params["name"])
        )
    # Optional attachment; host and hgroup are mutually exclusive (see main).
    if module.params["host"]:
        try:
            if not module.check_mode:
                array.connect_host(module.params["host"], module.params["name"])
        except Exception:
            module.fail_json(
                msg="Failed to attach endpoint {0} to host {1}.".format(
                    module.params["name"], module.params["host"]
                )
            )
    if module.params["hgroup"]:
        try:
            if not module.check_mode:
                array.connect_hgroup(module.params["hgroup"], module.params["name"])
        except Exception:
            module.fail_json(
                msg="Failed to attach endpoint {0} to hostgroup {1}.".format(
                    module.params["name"], module.params["hgroup"]
                )
            )

    module.exit_json(changed=changed, volume=volfact)
+
+
def rename_endpoint(module, array):
    """Rename endpoint within a container, ie vgroup or local array.

    The new name may not include a container prefix; when the source lives
    in a volume group the same group prefix is re-applied to the target.
    """
    changed = False
    volfact = []
    target_name = module.params["rename"]
    if "/" in module.params["rename"] or "::" in module.params["rename"]:
        module.fail_json(msg="Target endpoint cannot include a container name")
    if "/" in module.params["name"]:
        vgroup_name = module.params["name"].split("/")[0]
        target_name = vgroup_name + "/" + module.params["rename"]
    # Refuse to rename over an existing endpoint, live or destroyed.
    if get_target(target_name, array) or get_destroyed_endpoint(target_name, array):
        module.fail_json(msg="Target endpoint {0} already exists.".format(target_name))
    else:
        try:
            changed = True
            if not module.check_mode:
                volfact = array.rename_volume(module.params["name"], target_name)
        except Exception:
            module.fail_json(
                msg="Rename endpoint {0} to {1} failed.".format(
                    module.params["name"], module.params["rename"]
                )
            )

    module.exit_json(changed=changed, volume=volfact)
+
+
def delete_endpoint(module, array):
    """Destroy an endpoint and, when requested, eradicate it immediately."""
    changed = True
    volfact = []
    endpoint_name = module.params["name"]
    if not module.check_mode:
        try:
            array.destroy_volume(endpoint_name)
        except Exception:
            module.fail_json(
                msg="Delete endpoint {0} failed.".format(endpoint_name)
            )
        if module.params["eradicate"]:
            try:
                volfact = array.eradicate_volume(endpoint_name)
            except Exception:
                module.fail_json(
                    msg="Eradicate endpoint {0} failed.".format(endpoint_name)
                )
    module.exit_json(changed=changed, volume=volfact)
+
+
def recover_endpoint(module, array):
    """Recover an endpoint from the destroyed (pending eradication) state."""
    changed = True
    volfact = []
    endpoint_name = module.params["name"]
    if not module.check_mode:
        try:
            array.recover_volume(endpoint_name)
        except Exception:
            module.fail_json(
                msg="Recovery of endpoint {0} failed".format(endpoint_name)
            )
    module.exit_json(changed=changed, volume=volfact)
+
+
def eradicate_endpoint(module, array):
    """Eradicate Deleted Endpoint.

    Bug fix: the original unconditionally reported changed=True even when
    the eradicate parameter was false and nothing was (or would be) done.
    changed now reflects whether eradication actually happens.
    """
    changed = False
    volfact = []
    if module.params["eradicate"]:
        changed = True
        if not module.check_mode:
            try:
                array.eradicate_volume(module.params["name"], protocol_endpoint=True)
            except Exception:
                module.fail_json(
                    msg="Eradication of endpoint {0} failed".format(
                        module.params["name"]
                    )
                )
    module.exit_json(changed=changed, volume=volfact)
+
+
def main():
    """Entry point: create/rename/recover/delete/eradicate a protocol endpoint."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            rename=dict(type="str"),
            host=dict(type="str"),
            hgroup=dict(type="str"),
            eradicate=dict(type="bool", default=False),
            state=dict(type="str", default="present", choices=["absent", "present"]),
        )
    )

    mutually_exclusive = [["rename", "eradicate"], ["host", "hgroup"]]

    module = AnsibleModule(
        argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True
    )

    state = module.params["state"]
    destroyed = False
    array = get_system(module)
    api_version = array._list_available_rest_versions()
    if VGROUPS_API_VERSION not in api_version:
        module.fail_json(
            msg="Purity version does not support endpoints. Please contact support"
        )
    volume = get_volume(module.params["name"], array)
    if volume:
        # Endpoints are conglomerate volumes; refuse to manage real volumes.
        # (Message grammar fixed: was "is an true volume".)
        module.fail_json(
            msg="Volume {0} is a real volume. Please use the purefa_volume module".format(
                module.params["name"]
            )
        )
    endpoint = get_endpoint(module.params["name"], array)
    if not endpoint:
        destroyed = get_destroyed_endpoint(module.params["name"], array)

    if state == "present" and not endpoint and not destroyed:
        create_endpoint(module, array)
    elif state == "present" and endpoint and module.params["rename"]:
        rename_endpoint(module, array)
    elif state == "present" and destroyed:
        recover_endpoint(module, array)
    elif state == "absent" and endpoint:
        delete_endpoint(module, array)
    elif state == "absent" and destroyed:
        eradicate_endpoint(module, array)

    # No matching action (including absent + already gone): report no change.
    module.exit_json(changed=False)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py
new file mode 100644
index 000000000..ea7bd48bc
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_eradication
+version_added: '1.9.0'
+short_description: Configure Pure Storage FlashArray Eradication Timer
+description:
+- Configure the eradication timer for destroyed items on a FlashArray.
+- Valid values are integer days from 1 to 30. Default is 1.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ timer:
+ description:
+ - Set the eradication timer for the FlashArray
+ - Allowed values are integers from 1 to 30. Default is 1
+ default: 1
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Set eradication timer to 30 days
+ purestorage.flasharray.purefa_eradication:
+ timer: 30
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set eradication timer to 1 day
+ purestorage.flasharray.purefa_eradication:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import Arrays, EradicationConfig
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+SEC_PER_DAY = 86400000
+ERADICATION_API_VERSION = "2.6"
+
+
def main():
    """Entry point: set the array-wide eradication timer (1-30 days)."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            # Bug fix: default must be an int to match type="int"
            # (was the string "1").
            timer=dict(type="int", default=1),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)
    if not 1 <= module.params["timer"] <= 30:
        module.fail_json(msg="Eradication Timer must be between 1 and 30 days.")
    if not HAS_PURESTORAGE:
        module.fail_json(msg="py-pure-client sdk is required for this module")

    array = get_system(module)
    api_version = array._list_available_rest_versions()
    changed = False
    if ERADICATION_API_VERSION in api_version:
        array = get_array(module)
        # NOTE: SEC_PER_DAY (86400000) is actually milliseconds per day;
        # eradication_delay is reported/set in milliseconds.
        current_timer = (
            list(array.get_arrays().items)[0].eradication_config.eradication_delay
            / SEC_PER_DAY
        )
        if module.params["timer"] != current_timer:
            changed = True
            if not module.check_mode:
                new_timer = SEC_PER_DAY * module.params["timer"]
                eradication_config = EradicationConfig(eradication_delay=new_timer)
                res = array.patch_arrays(
                    array=Arrays(eradication_config=eradication_config)
                )
                if res.status_code != 200:
                    module.fail_json(
                        msg="Failed to change Eradication Timer. Error: {0}".format(
                            res.errors[0].message
                        )
                    )
    else:
        module.fail_json(
            msg="Purity version does not support changing Eradication Timer"
        )
    module.exit_json(changed=changed)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py
new file mode 100644
index 000000000..8d4d9536c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_eula
+version_added: '1.0.0'
+short_description: Sign Pure Storage FlashArray EULA
+description:
+- Sign the FlashArray EULA for Day 0 config, or change signatory.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ company:
+ description:
+ - Full legal name of the entity.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+ name:
+ description:
+ - Full legal name of the individual at the company who has the authority to accept the terms of the agreement.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+ title:
+ description:
+ - Individual's job title at the company.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Sign EULA for FlashArray
+ purestorage.flasharray.purefa_eula:
+ company: "ACME Storage, Inc."
+ name: "Fred Bloggs"
+ title: "Storage Manager"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+EULA_API_VERSION = "1.17"
+
+
def set_eula(module, array):
    """Sign the EULA when the requested signatory details differ from current."""
    changed = False
    try:
        current_eula = array.get_eula()
    except Exception:
        module.fail_json(msg="Failed to get current EULA")
    acceptance = current_eula["acceptance"]
    signature_matches = (
        acceptance["company"] == module.params["company"]
        and acceptance["title"] == module.params["title"]
        and acceptance["name"] == module.params["name"]
    )
    if not signature_matches:
        changed = True
        if not module.check_mode:
            try:
                array.set_eula(
                    company=module.params["company"],
                    title=module.params["title"],
                    name=module.params["name"],
                )
            except Exception:
                module.fail_json(msg="Signing EULA failed")
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: sign (or re-sign) the array EULA."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            company=dict(type="str", required=True),
            name=dict(type="str", required=True),
            title=dict(type="str", required=True),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    array = get_system(module)
    api_version = array._list_available_rest_versions()
    # EULA signing is only available from REST 1.17 onwards; otherwise no-op.
    if EULA_API_VERSION in api_version:
        set_eula(module, array)
    module.exit_json(changed=False)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py
new file mode 100644
index 000000000..5188dbd96
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_export
+version_added: '1.5.0'
+short_description: Manage FlashArray File System Exports
+description:
+- Create/Delete FlashArray File Systems Exports
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the export
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the export should exist or not.
+ - You must specify an NFS or SMB policy, or both on creation and deletion.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ filesystem:
+ description:
+ - Name of the filesystem the export applies to
+ type: str
+ required: true
+ directory:
+ description:
+ - Name of the managed directory in the file system the export applies to
+ type: str
+ required: true
+ nfs_policy:
+ description:
+ - Name of NFS Policy to apply to the export
+ type: str
+ smb_policy:
+ description:
+ - Name of SMB Policy to apply to the export
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create NFS and SMB exports for directory foo in filesystem bar
+ purestorage.flasharray.purefa_export:
+ name: export1
+ filesystem: bar
+ directory: foo
+ nfs_policy: nfs-example
+    smb_policy: smb-example
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete NFS export for directory foo in filesystem bar
+ purestorage.flasharray.purefa_export:
+ name: export1
+ filesystem: bar
+ directory: foo
+ nfs_policy: nfs-example
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.3"
+
+
def delete_export(module, array):
    """Delete a file system export.

    Detaches the named export from each supplied policy (NFS and/or SMB)
    that is currently attached to the managed directory; no change is
    reported when neither policy has a matching export.
    """
    changed = False
    all_policies = []
    # Managed directories are addressed as "<filesystem>:<directory>".
    directory = module.params["filesystem"] + ":" + module.params["directory"]
    if not module.params["nfs_policy"] and not module.params["smb_policy"]:
        module.fail_json(msg="At least one policy must be provided")
    if module.params["nfs_policy"]:
        # A 200 response means an export exists for this policy/directory pair.
        policy_exists = bool(
            array.get_directory_exports(
                export_names=[module.params["name"]],
                policy_names=[module.params["nfs_policy"]],
                directory_names=[directory],
            ).status_code
            == 200
        )
        if policy_exists:
            all_policies.append(module.params["nfs_policy"])
    if module.params["smb_policy"]:
        policy_exists = bool(
            array.get_directory_exports(
                export_names=[module.params["name"]],
                policy_names=[module.params["smb_policy"]],
                directory_names=[directory],
            ).status_code
            == 200
        )
        if policy_exists:
            all_policies.append(module.params["smb_policy"])
    if all_policies:
        changed = True
        if not module.check_mode:
            res = array.delete_directory_exports(
                export_names=[module.params["name"]], policy_names=all_policies
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to delete file system export {0}. {1}".format(
                        module.params["name"], res.errors[0].message
                    )
                )
    module.exit_json(changed=changed)
+
+
def create_export(module, array):
    """Create a file system export.

    Validates that each supplied policy exists, then creates the export
    under every policy not already attached to this export name, which
    keeps repeat runs idempotent.
    """
    changed = False
    if not module.params["nfs_policy"] and not module.params["smb_policy"]:
        module.fail_json(msg="At least one policy must be provided")
    all_policies = []
    if module.params["nfs_policy"]:
        # Non-200 means the policy itself does not exist.
        if bool(
            array.get_policies_nfs(names=[module.params["nfs_policy"]]).status_code
            != 200
        ):
            module.fail_json(
                msg="NFS Policy {0} does not exist.".format(module.params["nfs_policy"])
            )
        # Non-200 here means no export yet exists for this policy.
        if bool(
            array.get_directory_exports(
                export_names=[module.params["name"]],
                policy_names=[module.params["nfs_policy"]],
            ).status_code
            != 200
        ):
            all_policies.append(module.params["nfs_policy"])
    if module.params["smb_policy"]:
        if bool(
            array.get_policies_smb(names=[module.params["smb_policy"]]).status_code
            != 200
        ):
            module.fail_json(
                msg="SMB Policy {0} does not exist.".format(module.params["smb_policy"])
            )
        if bool(
            array.get_directory_exports(
                export_names=[module.params["name"]],
                policy_names=[module.params["smb_policy"]],
            ).status_code
            != 200
        ):
            all_policies.append(module.params["smb_policy"])
    if all_policies:
        export = flasharray.DirectoryExportPost(export_name=module.params["name"])
        changed = True
        if not module.check_mode:
            res = array.post_directory_exports(
                directory_names=[
                    module.params["filesystem"] + ":" + module.params["directory"]
                ],
                exports=export,
                policy_names=all_policies,
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to create file system exports for {0}:{1}. Error: {2}".format(
                        module.params["filesystem"],
                        module.params["directory"],
                        res.errors[0].message,
                    )
                )
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create or delete a managed directory export."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            state=dict(type="str", default="present", choices=["absent", "present"]),
            filesystem=dict(type="str", required=True),
            directory=dict(type="str", required=True),
            name=dict(type="str", required=True),
            nfs_policy=dict(type="str"),
            smb_policy=dict(type="str"),
        )
    )

    required_if = [["state", "present", ["filesystem", "directory"]]]
    module = AnsibleModule(
        argument_spec, required_if=required_if, supports_check_mode=True
    )

    if not HAS_PURESTORAGE:
        module.fail_json(msg="py-pure-client sdk is required for this module")

    array = get_system(module)
    api_version = array._list_available_rest_versions()
    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(
            msg="FlashArray REST version not supported. "
            "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
        )
    # Switch from the 1.x to the 2.x REST client for the export calls.
    array = get_array(module)
    state = module.params["state"]

    # A 200 response means at least one export with this name exists.
    exists = bool(
        array.get_directory_exports(export_names=[module.params["name"]]).status_code
        == 200
    )

    if state == "present":
        create_export(module, array)
    elif state == "absent" and exists:
        delete_export(module, array)

    module.exit_json(changed=False)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py
new file mode 100644
index 000000000..05fbcb29b
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_fs
+version_added: '1.5.0'
+short_description: Manage FlashArray File Systems
+description:
+- Create/Delete FlashArray File Systems
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the file system
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the file system should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the file system on delete or leave in trash.
+ type: bool
+ default: false
+ rename:
+ description:
+ - Value to rename the specified file system to
+ - Rename only applies to the container the current filesystem is in.
+ - There is no requirement to specify the pod name as this is implied.
+ type: str
+ move:
+ description:
+ - Move a filesystem in and out of a pod
+ - Provide the name of pod to move the filesystem to
+ - Pod names must be unique in the array
+ - To move to the local array, specify C(local)
+ - This is not idempotent - use C(ignore_errors) in the play
+ type: str
+ version_added: '1.13.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create file system foo
+ purestorage.flasharray.purefa_fs:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete and eradicate file system foo
+ purestorage.flasharray.purefa_fs:
+ name: foo
+ eradicate: true
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Rename file system foo to bar
+ purestorage.flasharray.purefa_fs:
+ name: foo
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+REPL_SUPPORT_API = "2.13"
+
+
def delete_fs(module, array):
    """Delete a file system (flag destroyed) and optionally eradicate it."""
    changed = True
    if not module.check_mode:
        try:
            # Deletion is a PATCH setting destroyed=True; the file system
            # stays recoverable until eradicated or its timer expires.
            file_system = flasharray.FileSystemPatch(destroyed=True)
            array.patch_file_systems(
                names=[module.params["name"]], file_system=file_system
            )
        except Exception:
            module.fail_json(
                msg="Failed to delete file system {0}".format(module.params["name"])
            )
        if module.params["eradicate"]:
            try:
                array.delete_file_systems(names=[module.params["name"]])
            except Exception:
                module.fail_json(
                    msg="Eradication of file system {0} failed".format(
                        module.params["name"]
                    )
                )
    module.exit_json(changed=changed)
+
+
def recover_fs(module, array):
    """Clear the destroyed flag on a file system pending eradication."""
    changed = True
    fs_name = module.params["name"]
    if not module.check_mode:
        try:
            recovery_patch = flasharray.FileSystemPatch(destroyed=False)
            array.patch_file_systems(names=[fs_name], file_system=recovery_patch)
        except Exception:
            module.fail_json(
                msg="Failed to recover file system {0}".format(fs_name)
            )
    module.exit_json(changed=changed)
+
+
def eradicate_fs(module, array):
    """Permanently remove (eradicate) a destroyed file system."""
    changed = True
    fs_name = module.params["name"]
    if not module.check_mode:
        try:
            array.delete_file_systems(names=[fs_name])
        except Exception:
            module.fail_json(
                msg="Failed to eradicate file system {0}".format(fs_name)
            )
    module.exit_json(changed=changed)
+
+
def rename_fs(module, array):
    """Rename a file system within its current container (pod or local array)."""
    changed = False
    target_name = module.params["rename"]
    if "::" in module.params["name"]:
        # Keep the filesystem in its current pod; rename only the leaf name.
        pod_name = module.params["name"].split("::")[0]
        target_name = pod_name + "::" + module.params["rename"]
    try:
        target = list(array.get_file_systems(names=[target_name]).items)[0]
    except Exception:
        # Lookup failure means the target name is free.
        target = None
    if not target:
        changed = True
        if not module.check_mode:
            try:
                file_system = flasharray.FileSystemPatch(name=target_name)
                array.patch_file_systems(
                    names=[module.params["name"]], file_system=file_system
                )
            except Exception:
                module.fail_json(
                    msg="Failed to rename file system {0}".format(module.params["name"])
                )
    else:
        module.fail_json(
            msg="Target file system {0} already exists".format(module.params["rename"])
        )
    module.exit_json(changed=changed)
+
+
def create_fs(module, array):
    """Create a file system, optionally inside a pod ("pod::name")."""
    changed = True
    if "::" in module.params["name"]:
        pod_name = module.params["name"].split("::")[0]
        try:
            pod = list(array.get_pods(names=[pod_name]).items)[0]
        except Exception:
            module.fail_json(
                msg="Failed to create filesystem. Pod {0} does not exist".format(
                    pod_name
                )
            )
        # Demoted pods are read-only replication targets.
        if pod.promotion_status == "demoted":
            module.fail_json(msg="Filesystem cannot be created in a demoted pod")
    if not module.check_mode:
        try:
            array.post_file_systems(names=[module.params["name"]])
        except Exception:
            module.fail_json(
                msg="Failed to create file system {0}".format(module.params["name"])
            )
    module.exit_json(changed=changed)
+
+
def move_fs(module, array):
    """Move filesystem between pods or to the local array.

    Not idempotent: moving a filesystem to where it already lives fails.
    Bug fixes: the out-of-pod check used the non-existent attribute
    ``linked_target_count`` (raising AttributeError); and the missing-pod
    error reported the source pod name instead of the requested target.
    """
    changed = False
    target_exists = False
    pod_name = ""
    fs_name = module.params["name"]
    if "::" in module.params["name"]:
        fs_name = module.params["name"].split("::")[1]
        pod_name = module.params["name"].split("::")[0]
    if module.params["move"] == "local":
        target_location = ""
        if "::" not in module.params["name"]:
            module.fail_json(msg="Source and destination [local] cannot be the same.")
        try:
            target_exists = list(array.get_file_systems(names=[fs_name]).items)[0]
        except Exception:
            target_exists = False
        if target_exists:
            module.fail_json(msg="Target filesystem {0} already exists".format(fs_name))
    else:
        try:
            pod = list(array.get_pods(names=[module.params["move"]]).items)[0]
            if len(pod.arrays) > 1:
                module.fail_json(msg="Filesystem cannot be moved into a stretched pod")
            if pod.link_target_count != 0:
                module.fail_json(
                    msg="Filesystem cannot be moved into a linked source pod"
                )
            if pod.promotion_status == "demoted":
                module.fail_json(msg="Volume cannot be moved into a demoted pod")
        except Exception:
            # Report the requested target pod (the original wrongly named
            # the source pod, which is empty for non-pod sources).
            module.fail_json(
                msg="Failed to move filesystem. Pod {0} does not exist".format(
                    module.params["move"]
                )
            )
        if "::" in module.params["name"]:
            # NOTE(review): these "moved out of" checks inspect the target
            # pod, not the source pod (pod_name) — confirm intent.
            pod = list(array.get_pods(names=[module.params["move"]]).items)[0]
            if len(pod.arrays) > 1:
                module.fail_json(
                    msg="Filesystem cannot be moved out of a stretched pod"
                )
            # Fixed: attribute is link_target_count, matching the check above.
            if pod.link_target_count != 0:
                module.fail_json(
                    msg="Filesystem cannot be moved out of a linked source pod"
                )
            if pod.promotion_status == "demoted":
                module.fail_json(msg="Volume cannot be moved out of a demoted pod")
        target_location = module.params["move"]
    changed = True
    if not module.check_mode:
        file_system = flasharray.FileSystemPatch(
            pod=flasharray.Reference(name=target_location)
        )
        move_res = array.patch_file_systems(
            names=[module.params["name"]], file_system=file_system
        )
        if move_res.status_code != 200:
            module.fail_json(
                msg="Move of filesystem {0} failed. Error: {1}".format(
                    module.params["name"], move_res.errors[0].message
                )
            )
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create/rename/move/recover/delete/eradicate a file system."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            state=dict(type="str", default="present", choices=["absent", "present"]),
            eradicate=dict(type="bool", default=False),
            name=dict(type="str", required=True),
            move=dict(type="str"),
            rename=dict(type="str"),
        )
    )

    mutually_exclusive = [["move", "rename"]]
    module = AnsibleModule(
        argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True
    )

    if not HAS_PURESTORAGE:
        module.fail_json(msg="py-pure-client sdk is required for this module")

    array = get_system(module)
    api_version = array._list_available_rest_versions()

    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(
            msg="FlashArray REST version not supported. "
            "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
        )
    # Pod-qualified names ("pod::fs") require the filesystem replication API.
    if REPL_SUPPORT_API not in api_version and "::" in module.params["name"]:
        module.fail_json(
            msg="Filesystem Replication is only supported in Purity//FA 6.3.0 or higher"
        )
    # Switch to the 2.x REST client for all subsequent calls.
    array = get_array(module)
    state = module.params["state"]

    try:
        filesystem = list(array.get_file_systems(names=[module.params["name"]]).items)[
            0
        ]
        exists = True
    except Exception:
        exists = False

    # Dispatch on state, existence and the destroyed flag.
    if state == "present" and not exists and not module.params["move"]:
        create_fs(module, array)
    elif (
        state == "present"
        and exists
        and module.params["move"]
        and not filesystem.destroyed
    ):
        move_fs(module, array)
    elif (
        state == "present"
        and exists
        and module.params["rename"]
        and not filesystem.destroyed
    ):
        rename_fs(module, array)
    elif (
        state == "present"
        and exists
        and filesystem.destroyed
        and not module.params["rename"]
        and not module.params["move"]
    ):
        recover_fs(module, array)
    elif (
        state == "present" and exists and filesystem.destroyed and module.params["move"]
    ):
        module.fail_json(
            msg="Filesystem {0} exists, but in destroyed state".format(
                module.params["name"]
            )
        )
    elif state == "absent" and exists and not filesystem.destroyed:
        delete_fs(module, array)
    elif (
        state == "absent"
        and exists
        and module.params["eradicate"]
        and filesystem.destroyed
    ):
        eradicate_fs(module, array)

    module.exit_json(changed=False)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py
new file mode 100644
index 000000000..0467501e2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py
@@ -0,0 +1,433 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_hg
+version_added: '1.0.0'
+short_description: Manage hostgroups on Pure Storage FlashArrays
+description:
- Create, delete or modify hostgroups on Pure Storage FlashArrays.
+author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the hostgroup.
+ type: str
+ required: true
+ aliases: [ hostgroup ]
+ state:
+ description:
+ - Define whether the hostgroup should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ host:
+ type: list
+ elements: str
+ description:
+ - List of existing hosts to add to hostgroup.
+ - Note that hostnames are case-sensitive however FlashArray hostnames are unique
+ and ignore case - you cannot have I(hosta) and I(hostA)
+ volume:
+ type: list
+ elements: str
+ description:
+ - List of existing volumes to add to hostgroup.
+ - Note that volumes are case-sensitive however FlashArray volume names are unique
+ and ignore case - you cannot have I(volumea) and I(volumeA)
+ lun:
+ description:
+ - LUN ID to assign to volume for hostgroup. Must be unique.
+ - Only applicable when only one volume is specified for connection.
+ - If not provided the ID will be automatically assigned.
+ - Range for LUN ID is 1 to 4095.
+ type: int
+ rename:
+ description:
+ - New name of hostgroup
+ type: str
+ version_added: '1.10.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create empty hostgroup
+ purestorage.flasharray.purefa_hg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add hosts and volumes to existing or new hostgroup
+ purestorage.flasharray.purefa_hg:
+ name: foo
+ host:
+ - host1
+ - host2
+ volume:
+ - vol1
+ - vol2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete hosts and volumes from hostgroup
+ purestorage.flasharray.purefa_hg:
+ name: foo
+ host:
+ - host1
+ - host2
+ volume:
+ - vol1
+ - vol2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+# This will disconnect all hosts and volumes in the hostgroup
+- name: Delete hostgroup
+ purestorage.flasharray.purefa_hg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Rename hostgroup
+ purestorage.flasharray.purefa_hg:
+ name: foo
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create host group with hosts and volumes
+ purestorage.flasharray.purefa_hg:
+ name: bar
+ host:
+ - host1
+ - host2
+ volume:
+ - vol1
+ - vol2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
def rename_exists(module, array):
    """Return True when a hostgroup already uses the requested rename target.

    FlashArray hostgroup names are unique and case-insensitive, so both
    sides of the comparison are casefolded.
    """
    target = module.params["rename"].casefold()
    return any(
        hgroup["name"].casefold() == target for hgroup in array.list_hgroups()
    )
+
+
def get_hostgroup(module, array):
    """Return the hostgroup dict matching the module's C(name), or None.

    Matching is case-insensitive because FlashArray names are unique and
    ignore case.
    """
    wanted = module.params["name"].casefold()
    for hgroup in array.list_hgroups():
        if hgroup["name"].casefold() == wanted:
            return hgroup
    return None
+
+
def make_hostgroup(module, array):
    """Create a new hostgroup and optionally attach hosts and volumes.

    Fails immediately if a rename was requested, since there is no existing
    hostgroup to rename. Exits the module with ``changed=True``.
    """
    if module.params["rename"]:
        module.fail_json(
            msg="Hostgroup {0} does not exist - rename failed.".format(
                module.params["name"]
            )
        )
    changed = True
    if not module.check_mode:
        try:
            array.create_hgroup(module.params["name"])
        except Exception:
            module.fail_json(
                msg="Failed to create hostgroup {0}".format(module.params["name"])
            )
        if module.params["host"]:
            # Fix: this call was previously unguarded, so a failure here
            # surfaced as a raw traceback instead of a clean module failure.
            try:
                array.set_hgroup(module.params["name"], hostlist=module.params["host"])
            except Exception:
                module.fail_json(
                    msg="Failed to add host(s) to hostgroup {0}".format(
                        module.params["name"]
                    )
                )
        if module.params["volume"]:
            if len(module.params["volume"]) == 1 and module.params["lun"]:
                # A LUN ID is only honoured for a single-volume connection.
                try:
                    array.connect_hgroup(
                        module.params["name"],
                        module.params["volume"][0],
                        lun=module.params["lun"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to add volume {0} with LUN ID {1}".format(
                            module.params["volume"][0], module.params["lun"]
                        )
                    )
            else:
                for vol in module.params["volume"]:
                    try:
                        array.connect_hgroup(module.params["name"], vol)
                    except Exception:
                        module.fail_json(msg="Failed to add volume to hostgroup")
    module.exit_json(changed=changed)
+
+
def update_hostgroup(module, array):
    """Reconcile an existing hostgroup with the requested state.

    state=present: optionally rename the hostgroup, then add any missing
    hosts and connect any missing volumes (an explicit LUN ID is honoured
    only when exactly one new volume would be connected).
    state=absent: remove the listed hosts and disconnect the listed volumes
    without deleting the hostgroup itself.

    Exits the module with the aggregate ``changed`` status.
    """
    changed = False
    renamed = False
    hgroup = get_hostgroup(module, array)
    # Effective hostgroup name for all subsequent calls; switched to the
    # new name if a rename succeeds below.
    current_hostgroup = module.params["name"]
    volumes = array.list_hgroup_connections(module.params["name"])
    if module.params["state"] == "present":
        if module.params["rename"]:
            if not rename_exists(module, array):
                try:
                    if not module.check_mode:
                        array.rename_hgroup(
                            module.params["name"], module.params["rename"]
                        )
                    current_hostgroup = module.params["rename"]
                    renamed = True
                except Exception:
                    module.fail_json(
                        msg="Rename to {0} failed.".format(module.params["rename"])
                    )
            else:
                # A name clash is only a warning; remaining updates proceed
                # against the original name.
                module.warn(
                    "Rename failed. Hostgroup {0} already exists. Continuing with other changes...".format(
                        module.params["rename"]
                    )
                )
        if module.params["host"]:
            cased_hosts = list(module.params["host"])
            cased_hghosts = list(hgroup["hosts"])
            # Only add hosts that are not already members (exact-case match).
            new_hosts = list(set(cased_hosts).difference(cased_hghosts))
            if new_hosts:
                try:
                    if not module.check_mode:
                        array.set_hgroup(current_hostgroup, addhostlist=new_hosts)
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to add host(s) to hostgroup")
        if module.params["volume"]:
            if volumes:
                current_vols = [vol["vol"] for vol in volumes]
                cased_vols = list(module.params["volume"])
                # Only connect volumes not already connected.
                new_volumes = list(set(cased_vols).difference(set(current_vols)))
                if len(new_volumes) == 1 and module.params["lun"]:
                    try:
                        if not module.check_mode:
                            array.connect_hgroup(
                                current_hostgroup,
                                new_volumes[0],
                                lun=module.params["lun"],
                            )
                        changed = True
                    except Exception:
                        module.fail_json(
                            msg="Failed to add volume {0} with LUN ID {1}".format(
                                new_volumes[0], module.params["lun"]
                            )
                        )
                else:
                    for cvol in new_volumes:
                        try:
                            if not module.check_mode:
                                array.connect_hgroup(current_hostgroup, cvol)
                            changed = True
                        except Exception:
                            module.fail_json(
                                msg="Failed to connect volume {0} to hostgroup {1}.".format(
                                    cvol, current_hostgroup
                                )
                            )
            else:
                # Hostgroup currently has no volume connections at all.
                if len(module.params["volume"]) == 1 and module.params["lun"]:
                    try:
                        if not module.check_mode:
                            array.connect_hgroup(
                                current_hostgroup,
                                module.params["volume"][0],
                                lun=module.params["lun"],
                            )
                        changed = True
                    except Exception:
                        # NOTE(review): message formats the whole volume list
                        # rather than volume[0] — likely meant to mirror the
                        # branch above; confirm before changing.
                        module.fail_json(
                            msg="Failed to add volume {0} with LUN ID {1}".format(
                                module.params["volume"], module.params["lun"]
                            )
                        )
                else:
                    for cvol in module.params["volume"]:
                        try:
                            if not module.check_mode:
                                array.connect_hgroup(current_hostgroup, cvol)
                            changed = True
                        except Exception:
                            module.fail_json(
                                msg="Failed to connect volume {0} to hostgroup {1}.".format(
                                    cvol, current_hostgroup
                                )
                            )
    else:
        # state == "absent": detach the listed hosts/volumes only.
        if module.params["host"]:
            cased_old_hosts = list(module.params["host"])
            cased_hosts = list(hgroup["hosts"])
            # Only remove hosts that are actually members.
            old_hosts = list(set(cased_old_hosts).intersection(cased_hosts))
            if old_hosts:
                try:
                    if not module.check_mode:
                        array.set_hgroup(current_hostgroup, remhostlist=old_hosts)
                    changed = True
                except Exception:
                    module.fail_json(
                        msg="Failed to remove hosts {0} from hostgroup {1}".format(
                            old_hosts, current_hostgroup
                        )
                    )
        if module.params["volume"]:
            cased_old_vols = list(module.params["volume"])
            # Only disconnect volumes that are actually connected.
            old_volumes = list(
                set(cased_old_vols).intersection(set([vol["vol"] for vol in volumes]))
            )
            if old_volumes:
                changed = True
                for cvol in old_volumes:
                    try:
                        if not module.check_mode:
                            array.disconnect_hgroup(current_hostgroup, cvol)
                    except Exception:
                        module.fail_json(
                            msg="Failed to disconnect volume {0} from hostgroup {1}".format(
                                cvol, current_hostgroup
                            )
                        )
    # A successful rename alone counts as a change.
    changed = changed or renamed
    module.exit_json(changed=changed)
+
+
def delete_hostgroup(module, array):
    """Delete a hostgroup after disconnecting its volumes and removing its hosts.

    Exits the module with ``changed=True`` (the caller only invokes this when
    the hostgroup exists).
    """
    changed = True
    try:
        vols = array.list_hgroup_connections(module.params["name"])
    except Exception:
        # Fix: message previously read module.params["hostgroup"] (the alias
        # key); use the canonical "name" key like the rest of the module.
        module.fail_json(
            msg="Failed to get volume connection for hostgroup {0}".format(
                module.params["name"]
            )
        )
    if not module.check_mode:
        for vol in vols:
            try:
                array.disconnect_hgroup(module.params["name"], vol["vol"])
            except Exception:
                module.fail_json(
                    msg="Failed to disconnect volume {0} from hostgroup {1}".format(
                        vol["vol"], module.params["name"]
                    )
                )
    host = array.get_hgroup(module.params["name"])
    if not module.check_mode:
        # Flattened from a nested try/except: remove members first, then
        # delete the (now empty) hostgroup, each with its own error message.
        try:
            array.set_hgroup(module.params["name"], remhostlist=host["hosts"])
        except Exception:
            module.fail_json(
                msg="Failed to remove hosts {0} from hostgroup {1}".format(
                    host["hosts"], module.params["name"]
                )
            )
        try:
            array.delete_hgroup(module.params["name"])
        except Exception:
            module.fail_json(
                msg="Failed to delete hostgroup {0}".format(module.params["name"])
            )
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: validate parameters and dispatch to CRUD helpers."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True, aliases=["hostgroup"]),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            host=dict(type="list", elements="str"),
            lun=dict(type="int"),
            rename=dict(type="str"),
            volume=dict(type="list", elements="str"),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    state = module.params["state"]
    array = get_system(module)
    hostgroup = get_hostgroup(module, array)

    # Every referenced host must already exist on the array.
    if module.params["host"]:
        try:
            for hst in module.params["host"]:
                array.get_host(hst)
        except Exception:
            module.fail_json(msg="Host {0} not found".format(hst))

    if module.params["lun"]:
        # Fix: previously `len(module.params["volume"])` raised TypeError
        # when `lun` was supplied without `volume`; fail cleanly instead.
        if not module.params["volume"]:
            module.fail_json(msg="LUN ID requires a volume to be specified.")
        if len(module.params["volume"]) > 1:
            module.fail_json(msg="LUN ID cannot be specified with multiple volumes.")
        if not 1 <= module.params["lun"] <= 4095:
            module.fail_json(
                msg="LUN ID of {0} is out of range (1 to 4095)".format(
                    module.params["lun"]
                )
            )

    if module.params["volume"]:
        try:
            for vol in module.params["volume"]:
                array.get_volume(vol)
        except Exception:
            # Missing volumes are deliberately treated as a no-op, not an error.
            module.exit_json(changed=False)

    if hostgroup and state == "present":
        update_hostgroup(module, array)
    elif (
        hostgroup
        and state == "absent"
        and (module.params["volume"] or module.params["host"])
    ):
        # Absent with host/volume lists detaches members only.
        update_hostgroup(module, array)
    elif hostgroup and state == "absent":
        delete_hostgroup(module, array)
    elif hostgroup is None and state == "absent":
        module.exit_json(changed=False)
    else:
        make_hostgroup(module, array)

    module.exit_json(changed=False)
+
+
# Standard Ansible module entry point: run main() when executed directly.
if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
new file mode 100644
index 000000000..9054d8f30
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
@@ -0,0 +1,1085 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_host
+version_added: '1.0.0'
+short_description: Manage hosts on Pure Storage FlashArrays
+description:
+- Create, delete or modify hosts on Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
- If specifying the C(lun) option, ensure the host supports the requested value
+options:
+ name:
+ description:
+ - The name of the host.
+ - Note that hostnames are case-sensitive however FlashArray hostnames are unique
+ and ignore case - you cannot have I(hosta) and I(hostA)
+ - Multi-host support available from Purity//FA 6.0.0
+ B(***NOTE***) Manual deletion of individual hosts created
+ using multi-host will cause idempotency to fail
+ - Multi-host support only exists for host creation
+ type: str
+ required: true
+ aliases: [ host ]
+ protocol:
+ description:
+ - Defines the host connection protocol for volumes.
+ - DEPRECATED No longer a necessary parameter
+ type: str
+ choices: [ fc, iscsi, nvme, mixed ]
+ rename:
+ description:
+ - The name to rename to.
+ - Note that hostnames are case-sensitive however FlashArray hostnames are unique
+ and ignore case - you cannot have I(hosta) and I(hostA)
+ type: str
+ state:
+ description:
+ - Define whether the host should exist or not.
+ - When removing host all connected volumes will be disconnected.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ wwns:
+ type: list
+ elements: str
+ description:
+ - List of wwns of the host.
+ iqn:
+ type: list
+ elements: str
+ description:
+ - List of IQNs of the host.
+ nqn:
+ type: list
+ elements: str
+ description:
+ - List of NQNs of the host.
+ volume:
+ type: str
+ description:
+ - Volume name to map to the host.
+ lun:
+ description:
+ - LUN ID to assign to volume for host. Must be unique.
+ - If not provided the ID will be automatically assigned.
+ - Range for LUN ID is 1 to 4095.
+ type: int
+ count:
+ description:
+ - Number of hosts to be created in a multiple host creation
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ start:
+ description:
+ - Number at which to start the multiple host creation index
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ default: 0
+ digits:
+ description:
+ - Number of digits to use for multiple host count. This
+ will pad the index number with zeros where necessary
+ - Only supported from Purity//FA v6.0.0 and higher
+ - Range is between 1 and 10
+ type: int
+ default: 1
+ suffix:
+ description:
+ - Suffix string, if required, for multiple host create
+ - Host names will be formed as I(<name>#<suffix>), where
+ I(#) is a placeholder for the host index
+ See associated descriptions
+ - Suffix string is optional
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: str
+ personality:
+ type: str
+ description:
+ - Define which operating system the host is. Recommended for
+ ActiveCluster integration.
+ default: ''
+ choices: ['hpux', 'vms', 'aix', 'esxi', 'solaris', 'hitachi-vsp', 'oracle-vm-server', 'delete', '']
+ preferred_array:
+ type: list
+ elements: str
+ description:
+ - List of preferred arrays in an ActiveCluster environment.
+ - To remove existing preferred arrays from the host, specify I(delete).
+ target_user:
+ type: str
+ description:
+ - Sets the target user name for CHAP authentication
+ - Required with I(target_password)
+ - To clear the username/password pair use I(clear) as the password
+ target_password:
+ type: str
+ description:
+ - Sets the target password for CHAP authentication
+ - Password length between 12 and 255 characters
+ - To clear the username/password pair use I(clear) as the password
+ - SETTING A PASSWORD IS NON-IDEMPOTENT
+ host_user:
+ type: str
+ description:
+ - Sets the host user name for CHAP authentication
+ - Required with I(host_password)
+ - To clear the username/password pair use I(clear) as the password
+ host_password:
+ type: str
+ description:
+ - Sets the host password for CHAP authentication
+ - Password length between 12 and 255 characters
+ - To clear the username/password pair use I(clear) as the password
+ - SETTING A PASSWORD IS NON-IDEMPOTENT
+ vlan:
+ type: str
+ description:
+ - The VLAN ID that the host is associated with.
+ - If not set or set to I(any), the host can access any VLAN.
+ - If set to I(untagged), the host can only access untagged VLANs.
+ - If set to a number between 1 and 4094, the host can only access the specified VLAN with that number.
+ version_added: '1.16.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new AIX host
+ purestorage.flasharray.purefa_host:
+ name: foo
+ personality: aix
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create 10 hosts with index starting at 10 but padded with 3 digits
+ purestorage.flasharray.purefa_host:
+ name: foo
+ personality: vms
+ suffix: bar
+ count: 10
+ start: 10
+ digits: 3
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Rename host foo to bar
+ purestorage.flasharray.purefa_host:
+ name: foo
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete host
+ purestorage.flasharray.purefa_host:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Make host bar with wwn ports
+ purestorage.flasharray.purefa_host:
+ name: bar
+ wwns:
+ - 00:00:00:00:00:00:00
+ - 11:11:11:11:11:11:11
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Make host bar with iSCSI ports
+ purestorage.flasharray.purefa_host:
+ name: bar
+ iqn:
+ - iqn.1994-05.com.redhat:7d366003913
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Make host bar with NVMe ports
+ purestorage.flasharray.purefa_host:
+ name: bar
+ nqn:
+ - nqn.2014-08.com.vendor:nvme:nvm-subsystem-sn-d78432
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Make mixed protocol host
+ purestorage.flasharray.purefa_host:
+ name: bar
+ nqn:
+ - nqn.2014-08.com.vendor:nvme:nvm-subsystem-sn-d78432
+ iqn:
+ - iqn.1994-05.com.redhat:7d366003914
+ wwns:
+ - 00:00:00:00:00:00:01
+ - 11:11:11:11:11:11:12
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Map host foo to volume bar as LUN ID 12
+ purestorage.flasharray.purefa_host:
+ name: foo
+ volume: bar
+ lun: 12
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disconnect volume bar from host foo
+ purestorage.flasharray.purefa_host:
+ name: foo
+ volume: bar
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add preferred arrays to host foo
+ purestorage.flasharray.purefa_host:
+ name: foo
+ preferred_array:
+ - array1
+ - array2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete preferred arrays from host foo
+ purestorage.flasharray.purefa_host:
+ name: foo
+ preferred_array: delete
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
- name: Delete existing WWNs from host foo (does not delete host object)
+ purestorage.flasharray.purefa_host:
+ name: foo
+ wwns: ""
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set CHAP target and host username/password pairs
+ purestorage.flasharray.purefa_host:
+ name: foo
+ target_user: user1
+ target_password: passwrodpassword
+ host_user: user2
+ host_password: passwrodpassword
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete CHAP target and host username/password pairs
+ purestorage.flasharray.purefa_host:
+ name: foo
+ target_user: user
+ target_password: clear
+ host_user: user
+ host_password: clear
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
+AC_REQUIRED_API_VERSION = "1.14"
+PREFERRED_ARRAY_API_VERSION = "1.15"
+NVME_API_VERSION = "1.16"
+MULTI_HOST_VERSION = "2.2"
+VLAN_VERSION = "2.16"
+
+
+def _is_cbs(array, is_cbs=False):
+ """Is the selected array a Cloud Block Store"""
+ model = array.get(controllers=True)[0]["model"]
+ is_cbs = bool("CBS" in model)
+ return is_cbs
+
+
+def _set_host_initiators(module, array):
+ """Set host initiators."""
+ if module.params["nqn"]:
+ try:
+ array.set_host(module.params["name"], nqnlist=module.params["nqn"])
+ except Exception:
+ module.fail_json(msg="Setting of NVMe NQN failed.")
+ if module.params["iqn"]:
+ try:
+ array.set_host(module.params["name"], iqnlist=module.params["iqn"])
+ except Exception:
+ module.fail_json(msg="Setting of iSCSI IQN failed.")
+ if module.params["wwns"]:
+ try:
+ array.set_host(module.params["name"], wwnlist=module.params["wwns"])
+ except Exception:
+ module.fail_json(msg="Setting of FC WWNs failed.")
+
+
def _update_host_initiators(module, array, answer=False):
    """Reconcile the host's NVMe/iSCSI/FC initiators with the requested lists.

    For each protocol: a non-empty list replaces the current initiators; the
    sentinel [""] removes all existing initiators of that protocol. Returns
    True when any change was made (or would be made in check mode).
    """
    if module.params["nqn"]:
        current_nqn = array.get_host(module.params["name"])["nqn"]
        if module.params["nqn"] != [""]:
            if current_nqn != module.params["nqn"]:
                answer = True
                if not module.check_mode:
                    try:
                        array.set_host(
                            module.params["name"], nqnlist=module.params["nqn"]
                        )
                    except Exception:
                        module.fail_json(msg="Change of NVMe NQN failed.")
        elif current_nqn:
            # Requested [""] — strip all current NQNs.
            answer = True
            if not module.check_mode:
                try:
                    array.set_host(module.params["name"], remnqnlist=current_nqn)
                except Exception:
                    module.fail_json(msg="Removal of NVMe NQN failed.")
    if module.params["iqn"]:
        current_iqn = array.get_host(module.params["name"])["iqn"]
        if module.params["iqn"] != [""]:
            if current_iqn != module.params["iqn"]:
                answer = True
                if not module.check_mode:
                    try:
                        array.set_host(
                            module.params["name"], iqnlist=module.params["iqn"]
                        )
                    except Exception:
                        module.fail_json(msg="Change of iSCSI IQN failed.")
        elif current_iqn:
            # Requested [""] — strip all current IQNs.
            answer = True
            if not module.check_mode:
                try:
                    array.set_host(module.params["name"], remiqnlist=current_iqn)
                except Exception:
                    module.fail_json(msg="Removal of iSCSI IQN failed.")
    if module.params["wwns"]:
        # Normalise WWNs to the array's colon-free upper-case form before
        # comparing; the parameter list is mutated in place.
        module.params["wwns"] = [wwn.replace(":", "") for wwn in module.params["wwns"]]
        module.params["wwns"] = [wwn.upper() for wwn in module.params["wwns"]]
        current_wwn = array.get_host(module.params["name"])["wwn"]
        if module.params["wwns"] != [""]:
            if current_wwn != module.params["wwns"]:
                answer = True
                if not module.check_mode:
                    try:
                        array.set_host(
                            module.params["name"], wwnlist=module.params["wwns"]
                        )
                    except Exception:
                        module.fail_json(msg="FC WWN change failed.")
        elif current_wwn:
            # Requested "" — strip all current WWNs.
            answer = True
            if not module.check_mode:
                try:
                    array.set_host(module.params["name"], remwwnlist=current_wwn)
                except Exception:
                    module.fail_json(msg="Removal of all FC WWNs failed.")
    return answer
+
+
def _connect_new_volume(module, array, answer=False):
    """Connect the requested volume to the host, honouring an explicit LUN ID.

    A LUN ID is only passed through when the array supports the
    AC_REQUIRED_API_VERSION REST version. Always returns True — a connection
    attempt counts as a change.
    """
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version and module.params["lun"]:
        answer = True
        if not module.check_mode:
            try:
                array.connect_host(
                    module.params["name"],
                    module.params["volume"],
                    lun=module.params["lun"],
                )
            except Exception:
                module.fail_json(
                    msg="LUN ID {0} invalid. Check for duplicate LUN IDs.".format(
                        module.params["lun"]
                    )
                )
    else:
        answer = True
        if not module.check_mode:
            # Fix: this call was previously unguarded, so a failure raised a
            # raw traceback instead of a clean module failure like the
            # LUN branch above.
            try:
                array.connect_host(module.params["name"], module.params["volume"])
            except Exception:
                module.fail_json(
                    msg="Failed to connect volume {0} to host {1}.".format(
                        module.params["volume"], module.params["name"]
                    )
                )
    return answer
+
+
+def _disconnect_volume(module, array, answer=False):
+ """Disconnect volume from host"""
+ answer = True
+ if not module.check_mode:
+ try:
+ array.disconnect_host(module.params["name"], module.params["volume"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to disconnect volume {0}".format(module.params["volume"])
+ )
+ return answer
+
+
+def _set_host_personality(module, array):
+ """Set host personality. Only called when supported"""
+ if module.params["personality"] != "delete":
+ array.set_host(module.params["name"], personality=module.params["personality"])
+ else:
+ array.set_host(module.params["name"], personality="")
+
+
+def _set_preferred_array(module, array):
+ """Set preferred array list. Only called when supported"""
+ if module.params["preferred_array"] != ["delete"]:
+ array.set_host(
+ module.params["name"], preferred_array=module.params["preferred_array"]
+ )
+ else:
+ array.set_host(module.params["name"], preferred_array=[])
+
+
+def _set_chap_security(module, array):
+ """Set CHAP usernames and passwords"""
+ pattern = re.compile("[^ ]{12,255}")
+ if module.params["host_user"]:
+ if not pattern.match(module.params["host_password"]):
+ module.fail_json(
+ msg="host_password must contain a minimum of 12 and a maximum of 255 characters"
+ )
+ try:
+ array.set_host(
+ module.params["name"],
+ host_user=module.params["host_user"],
+ host_password=module.params["host_password"],
+ )
+ except Exception:
+ module.params(msg="Failed to set CHAP host username and password")
+ if module.params["target_user"]:
+ if not pattern.match(module.params["target_password"]):
+ module.fail_json(
+ msg="target_password must contain a minimum of 12 and a maximum of 255 characters"
+ )
+ try:
+ array.set_host(
+ module.params["name"],
+ target_user=module.params["target_user"],
+ target_password=module.params["target_password"],
+ )
+ except Exception:
+ module.params(msg="Failed to set CHAP target username and password")
+
+
+def _update_chap_security(module, array, answer=False):
+ """Change CHAP usernames and passwords"""
+ pattern = re.compile("[^ ]{12,255}")
+ chap = array.get_host(module.params["name"], chap=True)
+ if module.params["host_user"]:
+ if module.params["host_password"] == "clear":
+ if chap["host_user"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(module.params["name"], host_user="")
+ except Exception:
+ module.params(
+ msg="Failed to clear CHAP host username and password"
+ )
+ else:
+ if not pattern.match(module.params["host_password"]):
+ module.fail_json(
+ msg="host_password must contain a minimum of 12 and a maximum of 255 characters"
+ )
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"],
+ host_user=module.params["host_user"],
+ host_password=module.params["host_password"],
+ )
+ except Exception:
+ module.params(msg="Failed to set CHAP host username and password")
+ if module.params["target_user"]:
+ if module.params["target_password"] == "clear":
+ if chap["target_user"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(module.params["name"], target_user="")
+ except Exception:
+ module.params(
+ msg="Failed to clear CHAP target username and password"
+ )
+ else:
+ if not pattern.match(module.params["target_password"]):
+ module.fail_json(
+ msg="target_password must contain a minimum of 12 and a maximum of 255 characters"
+ )
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"],
+ target_user=module.params["target_user"],
+ target_password=module.params["target_password"],
+ )
+ except Exception:
+ module.params(msg="Failed to set CHAP target username and password")
+ return answer
+
+
+def _update_host_personality(module, array, answer=False):
+ """Change host personality. Only called when supported"""
+ personality = array.get_host(module.params["name"], personality=True)["personality"]
+ if personality is None and module.params["personality"] != "delete":
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"], personality=module.params["personality"]
+ )
+ except Exception:
+ module.fail_json(msg="Personality setting failed.")
+ if personality is not None:
+ if module.params["personality"] == "delete":
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(module.params["name"], personality="")
+ except Exception:
+ module.fail_json(msg="Personality deletion failed.")
+ elif personality != module.params["personality"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"], personality=module.params["personality"]
+ )
+ except Exception:
+ module.fail_json(msg="Personality change failed.")
+ return answer
+
+
def _update_preferred_array(module, array, answer=False):
    """Reconcile the host's preferred-array list with the requested value.

    The single entry ['delete'] clears an existing list. Returns True when a
    change was made (or would be made in check mode). Only called when the
    array supports preferred arrays.
    """
    preferred_array = array.get_host(module.params["name"], preferred_array=True)[
        "preferred_array"
    ]
    if preferred_array == [] and module.params["preferred_array"] != ["delete"]:
        # No list set yet — create one.
        answer = True
        if not module.check_mode:
            try:
                array.set_host(
                    module.params["name"],
                    preferred_array=module.params["preferred_array"],
                )
            except Exception:
                module.fail_json(
                    msg="Preferred array list creation failed for {0}.".format(
                        module.params["name"]
                    )
                )
    elif preferred_array != []:
        if module.params["preferred_array"] == ["delete"]:
            # Explicit request to clear the existing list.
            answer = True
            if not module.check_mode:
                try:
                    array.set_host(module.params["name"], preferred_array=[])
                except Exception:
                    module.fail_json(
                        msg="Preferred array list deletion failed for {0}.".format(
                            module.params["name"]
                        )
                    )
        elif preferred_array != module.params["preferred_array"]:
            # Replace the list when it differs (order-sensitive comparison).
            answer = True
            if not module.check_mode:
                try:
                    array.set_host(
                        module.params["name"],
                        preferred_array=module.params["preferred_array"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Preferred array list change failed for {0}.".format(
                            module.params["name"]
                        )
                    )
    return answer
+
+
def _set_vlan(module):
    """Best-effort set of the host VLAN ID; warns rather than fails on error."""
    array = get_array(module)
    vlan_patch = flasharray.HostPatch(vlan=module.params["vlan"])
    res = array.patch_hosts(names=[module.params["name"]], host=vlan_patch)
    if res.status_code != 200:
        module.warn(
            "Failed to set host VLAN ID. Error: {0}".format(res.errors[0].message)
        )
+
+
def _update_vlan(module):
    """Reconcile the host VLAN ID with the requested value; True on change."""
    array = get_array(module)
    host_info = list(array.get_hosts(names=[module.params["name"]]).items)[0]
    current_vlan = getattr(host_info, "vlan", None)
    if module.params["vlan"] == current_vlan:
        return False
    if not module.check_mode:
        res = array.patch_hosts(
            names=[module.params["name"]],
            host=flasharray.HostPatch(vlan=module.params["vlan"]),
        )
        if res.status_code != 200:
            module.fail_json(
                msg="Failed to update host VLAN ID. Error: {0}".format(
                    res.errors[0].message
                )
            )
    return True
+
+
def get_multi_hosts(module):
    """Return True when every host in the multi-host range already exists."""
    array = get_array(module)
    start = module.params["start"]
    # Optional suffix is appended after the zero-padded index.
    suffix = module.params["suffix"] or ""
    hosts = [
        module.params["name"] + str(index).zfill(module.params["digits"]) + suffix
        for index in range(start, start + module.params["count"])
    ]
    return bool(array.get_hosts(names=hosts).status_code == 200)
+
+
def get_host(module, array):
    """Return the matching host dict (case-insensitive) or None.

    Side effect: normalises module.params['name'] to the array's exact
    spelling of the host name when a match is found.
    """
    wanted = module.params["name"].casefold()
    for candidate in array.list_hosts():
        if candidate["name"].casefold() == wanted:
            module.params["name"] = candidate["name"]
            return candidate
    return None
+
+
def rename_exists(module, array):
    """Return True when a host already uses the requested rename target.

    FlashArray host names are unique and case-insensitive, so both sides of
    the comparison are casefolded.
    """
    target = module.params["rename"].casefold()
    return any(hst["name"].casefold() == target for hst in array.list_hosts())
+
+
def make_multi_hosts(module):
    """Create a batch of sequentially numbered hosts and exit the module."""
    changed = True
    if not module.check_mode:
        array = get_array(module)
        start = module.params["start"]
        # Optional suffix is appended after the zero-padded index.
        suffix = module.params["suffix"] or ""
        hosts = [
            module.params["name"] + str(index).zfill(module.params["digits"]) + suffix
            for index in range(start, start + module.params["count"])
        ]
        if module.params["personality"]:
            host_body = flasharray.HostPost(personality=module.params["personality"])
        else:
            host_body = flasharray.HostPost()
        res = array.post_hosts(names=hosts, host=host_body)
        if res.status_code != 200:
            module.fail_json(
                msg="Multi-Host {0}#{1} creation failed: {2}".format(
                    module.params["name"],
                    module.params["suffix"],
                    res.errors[0].message,
                )
            )
    module.exit_json(changed=changed)
+
+
+def make_host(module, array):
+    """Create a new host and apply any requested initial configuration.
+
+    Creates the host, then applies VLAN, initiators, personality, preferred
+    arrays, CHAP credentials and an optional volume connection as requested.
+    Always exits the module (changed=True, even in check mode).
+    """
+    changed = True
+    if not module.check_mode:
+        try:
+            array.create_host(module.params["name"])
+        except Exception:
+            module.fail_json(
+                msg="Host {0} creation failed.".format(module.params["name"])
+            )
+        try:
+            if module.params["vlan"]:
+                _set_vlan(module)
+            _set_host_initiators(module, array)
+            # Personality and preferred-array support depend on the REST API
+            # version exposed by the target array.
+            api_version = array._list_available_rest_versions()
+            if AC_REQUIRED_API_VERSION in api_version and module.params["personality"]:
+                _set_host_personality(module, array)
+            if (
+                PREFERRED_ARRAY_API_VERSION in api_version
+                and module.params["preferred_array"]
+            ):
+                _set_preferred_array(module, array)
+            if module.params["host_user"] or module.params["target_user"]:
+                _set_chap_security(module, array)
+            if module.params["volume"]:
+                if module.params["lun"]:
+                    array.connect_host(
+                        module.params["name"],
+                        module.params["volume"],
+                        lun=module.params["lun"],
+                    )
+                else:
+                    array.connect_host(module.params["name"], module.params["volume"])
+        except Exception:
+            module.fail_json(
+                msg="Host {0} configuration failed.".format(module.params["name"])
+            )
+    module.exit_json(changed=changed)
+
+
+def update_host(module, array):
+    """Reconcile an existing host with the requested state.
+
+    state=present: optional rename, then VLAN, initiator, volume, personality,
+    preferred-array and CHAP updates; "changed" is the OR of all sub-updates.
+    state=absent (reached only when a volume is given): disconnect that
+    volume only. Always exits the module.
+    """
+    changed = False
+    renamed = False
+    vlan_changed = False
+    if module.params["state"] == "present":
+        if module.params["vlan"]:
+            vlan_changed = _update_vlan(module)
+        if module.params["rename"]:
+            if not rename_exists(module, array):
+                if not module.check_mode:
+                    try:
+                        array.rename_host(
+                            module.params["name"], module.params["rename"]
+                        )
+                        # Subsequent updates must target the new name
+                        module.params["name"] = module.params["rename"]
+                        renamed = True
+                    except Exception:
+                        module.fail_json(
+                            msg="Rename to {0} failed.".format(module.params["rename"])
+                        )
+            else:
+                # Rename clash is a warning, not fatal; other updates continue
+                module.warn(
+                    "Rename failed. Target hostname {0} already exists. "
+                    "Continuing with any other changes...".format(
+                        module.params["rename"]
+                    )
+                )
+        init_changed = vol_changed = pers_changed = pref_changed = chap_changed = False
+        volumes = array.list_host_connections(module.params["name"])
+        if module.params["iqn"] or module.params["wwns"] or module.params["nqn"]:
+            init_changed = _update_host_initiators(module, array)
+        if module.params["volume"]:
+            current_vols = [vol["vol"] for vol in volumes]
+            if not module.params["volume"] in current_vols:
+                vol_changed = _connect_new_volume(module, array)
+        # Later-API features are gated on the array's REST version
+        api_version = array._list_available_rest_versions()
+        if AC_REQUIRED_API_VERSION in api_version:
+            if module.params["personality"]:
+                pers_changed = _update_host_personality(module, array)
+            if PREFERRED_ARRAY_API_VERSION in api_version:
+                if module.params["preferred_array"]:
+                    pref_changed = _update_preferred_array(module, array)
+            if module.params["target_user"] or module.params["host_user"]:
+                chap_changed = _update_chap_security(module, array)
+        changed = (
+            init_changed
+            or vol_changed
+            or pers_changed
+            or pref_changed
+            or chap_changed
+            or vlan_changed
+            or renamed
+        )
+    else:
+        # state=absent with a volume: disconnect just that volume
+        if module.params["volume"]:
+            volumes = array.list_host_connections(module.params["name"])
+            current_vols = [vol["vol"] for vol in volumes]
+            if module.params["volume"] in current_vols:
+                vol_changed = _disconnect_volume(module, array)
+                changed = vol_changed
+    module.exit_json(changed=changed)
+
+
+def delete_host(module, array):
+ """Delete a host"""
+ changed = True
+ if not module.check_mode:
+ try:
+ hgroup = array.get_host(module.params["name"])["hgroup"]
+ if hgroup is not None:
+ array.set_hgroup(hgroup, remhostlist=[module.params["name"]])
+ for vol in array.list_host_connections(module.params["name"]):
+ array.disconnect_host(module.params["name"], vol["vol"])
+ array.delete_host(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Host {0} deletion failed".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True, aliases=["host"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ protocol=dict(
+ type="str",
+ choices=["fc", "iscsi", "nvme", "mixed"],
+ removed_from_collection="1.13",
+ removed_in_version="2.0.0",
+ ),
+ nqn=dict(type="list", elements="str"),
+ iqn=dict(type="list", elements="str"),
+ wwns=dict(type="list", elements="str"),
+ host_password=dict(type="str", no_log=True),
+ host_user=dict(type="str"),
+ target_password=dict(type="str", no_log=True),
+ target_user=dict(type="str"),
+ volume=dict(type="str"),
+ rename=dict(type="str"),
+ lun=dict(type="int"),
+ count=dict(type="int"),
+ start=dict(type="int", default=0),
+ digits=dict(type="int", default=1),
+ suffix=dict(type="str"),
+ personality=dict(
+ type="str",
+ default="",
+ choices=[
+ "hpux",
+ "vms",
+ "aix",
+ "esxi",
+ "solaris",
+ "hitachi-vsp",
+ "oracle-vm-server",
+ "delete",
+ "",
+ ],
+ ),
+ preferred_array=dict(type="list", elements="str"),
+ vlan=dict(type="str"),
+ )
+ )
+
+ required_together = [
+ ["host_password", "host_user"],
+ ["target_password", "target_user"],
+ ]
+
+ module = AnsibleModule(
+ argument_spec, supports_check_mode=True, required_together=required_together
+ )
+
+ array = get_system(module)
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$")
+ if module.params["rename"]:
+ if not pattern.match(module.params["rename"]):
+ module.fail_json(
+ msg="Rename value {0} does not conform to naming convention".format(
+ module.params["rename"]
+ )
+ )
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="Host name {0} does not conform to naming convention".format(
+ module.params["name"]
+ )
+ )
+ if _is_cbs(array):
+ if module.params["wwns"] or module.params["nqn"]:
+ module.fail_json(msg="Cloud Block Store only supports iSCSI as a protocol")
+ api_version = array._list_available_rest_versions()
+ if module.params["nqn"] is not None and NVME_API_VERSION not in api_version:
+ module.fail_json(msg="NVMe protocol not supported. Please upgrade your array.")
+ state = module.params["state"]
+ if module.params["suffix"]:
+ suffix_len = len(module.params["suffix"])
+ else:
+ suffix_len = 0
+ if module.params["vlan"]:
+ if not HAS_PURESTORAGE:
+ module.fail_json(
+ msg="py-pure-client sdk is required to support 'vlan' parameter"
+ )
+ if VLAN_VERSION not in api_version:
+ module.fail_json(
+ msg="'vlan' parameter is not supported until Purity//FA 6.3.4 or higher"
+ )
+ if not module.params["vlan"] in ["any", "untagged"]:
+ try:
+ vlan = int(module.params["vlan"])
+ if vlan not in range(1, 4094):
+ module.fail_json(
+ msg="VLAN must be set to a number between 1 and 4094"
+ )
+ except Exception:
+ module.fail_json(
+ msg="Invalid string for VLAN. Must be 'any', 'untagged' or a number between 1 and 4094"
+ )
+ if module.params["count"]:
+ if not HAS_PURESTORAGE:
+ module.fail_json(
+ msg="py-pure-client sdk is required to support 'count' parameter"
+ )
+ if MULTI_HOST_VERSION not in api_version:
+ module.fail_json(
+ msg="'count' parameter is not supported until Purity//FA 6.0.0 or higher"
+ )
+ if module.params["digits"] and module.params["digits"] not in range(1, 10):
+ module.fail_json(msg="'digits' must be in the range of 1 to 10")
+ if module.params["start"] < 0:
+ module.fail_json(msg="'start' must be a positive number")
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="Host name pattern {0} does not conform to naming convention".format(
+ module.params["name"]
+ )
+ )
+ elif module.params["suffix"] and not pattern.match(module.params["suffix"]):
+ module.fail_json(
+ msg="Suffix pattern {0} does not conform to naming convention".format(
+ module.params["suffix"]
+ )
+ )
+ elif (
+ len(module.params["name"])
+ + max(
+ len(str(module.params["count"] + module.params["start"])),
+ module.params["digits"],
+ )
+ + suffix_len
+ > 63
+ ):
+ module.fail_json(msg="Host name length exceeds maximum allowed")
+ host = get_multi_hosts(module)
+ if not host and state == "present":
+ make_multi_hosts(module)
+ else:
+ host = get_host(module, array)
+ if module.params["lun"] and not 1 <= module.params["lun"] <= 4095:
+ module.fail_json(
+ msg="LUN ID of {0} is out of range (1 to 4095)".format(
+ module.params["lun"]
+ )
+ )
+ if module.params["volume"]:
+ try:
+ array.get_volume(module.params["volume"])
+ except Exception:
+ module.exit_json(changed=False)
+ if module.params["preferred_array"]:
+ try:
+ if module.params["preferred_array"] != ["delete"]:
+ all_connected_arrays = array.list_array_connections()
+ if not all_connected_arrays:
+ module.fail_json(
+ msg="No target arrays connected to source array - preferred arrays not possible."
+ )
+ else:
+ current_arrays = [array.get()["array_name"]]
+ api_version = array._list_available_rest_versions()
+ if NVME_API_VERSION in api_version:
+ for current_array in range(0, len(all_connected_arrays)):
+ if (
+ all_connected_arrays[current_array]["type"]
+ == "sync-replication"
+ ):
+ current_arrays.append(
+ all_connected_arrays[current_array][
+ "array_name"
+ ]
+ )
+ else:
+ for current_array in range(0, len(all_connected_arrays)):
+ if all_connected_arrays[current_array]["type"] == [
+ "sync-replication"
+ ]:
+ current_arrays.append(
+ all_connected_arrays[current_array][
+ "array_name"
+ ]
+ )
+ for array_to_connect in range(
+ 0, len(module.params["preferred_array"])
+ ):
+ if (
+ module.params["preferred_array"][array_to_connect]
+ not in current_arrays
+ ):
+ module.fail_json(
+ msg="Array {0} is not a synchronously connected array.".format(
+ module.params["preferred_array"][array_to_connect]
+ )
+ )
+ except Exception:
+ module.fail_json(msg="Failed to get existing array connections.")
+
+ if host is None and state == "present" and not module.params["rename"]:
+ make_host(module, array)
+ elif host is None and state == "present" and module.params["rename"]:
+ module.exit_json(changed=False)
+ elif host and state == "present":
+ update_host(module, array)
+ elif host and state == "absent" and module.params["volume"]:
+ update_host(module, array)
+ elif host and state == "absent":
+ delete_host(module, array)
+ elif host is None and state == "absent":
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
new file mode 100644
index 000000000..de7f05002
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
@@ -0,0 +1,2286 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_info
+version_added: '1.0.0'
+short_description: Collect information from Pure Storage FlashArray
+description:
+  - Collect information from a Pure Storage FlashArray running the
+    Purity//FA operating system. By default, the module will collect basic
+    information including hosts, host groups, protection
+    groups and volume counts. Additional information can be collected
+    based on the configured set of arguments.
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the information to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
+ admins, volumes, snapshots, pods, replication, vgroups, offload, apps,
+ arrays, certs, kmip, clients, policies, dir_snaps, filesystems,
+ alerts and virtual_machines.
+ type: list
+ elements: str
+ required: false
+ default: minimum
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: collect default set of information
+ purestorage.flasharray.purefa_info:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: array_info
+- name: show default information
+ debug:
+ msg: "{{ array_info['purefa_info']['default'] }}"
+
+- name: collect configuration and capacity information
+ purestorage.flasharray.purefa_info:
+ gather_subset:
+ - config
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: array_info
+- name: show configuration information
+ debug:
+ msg: "{{ array_info['purefa_info']['config'] }}"
+
+- name: collect all information
+ purestorage.flasharray.purefa_info:
+ gather_subset:
+ - all
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: show all information
+ debug:
+ msg: "{{ array_info['purefa_info'] }}"
+"""
+
+RETURN = r"""
+purefa_info:
+ description: Returns the information collected from the FlashArray
+ returned: always
+ type: dict
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+HAS_PACKAGING = True
+try:
+ from packaging import version
+except ImportError:
+ HAS_PACKAGING = False
+try:
+ from purestorage import purestorage
+except ImportError:
+ purestorage = None
+import time
+
+SEC_TO_DAY = 86400000
+ADMIN_API_VERSION = "1.14"
+S3_REQUIRED_API_VERSION = "1.16"
+LATENCY_REQUIRED_API_VERSION = "1.16"
+AC_REQUIRED_API_VERSION = "1.14"
+CAP_REQUIRED_API_VERSION = "1.6"
+SAN_REQUIRED_API_VERSION = "1.10"
+NVME_API_VERSION = "1.16"
+PREFERRED_API_VERSION = "1.15"
+P53_API_VERSION = "1.17"
+ACTIVE_DR_API = "1.19"
+V6_MINIMUM_API_VERSION = "2.2"
+FILES_API_VERSION = "2.3"
+FC_REPL_API_VERSION = "2.4"
+ENCRYPTION_STATUS_API_VERSION = "2.6"
+DIR_QUOTA_API_VERSION = "2.7"
+SHARED_CAP_API_VERSION = "2.9"
+PURE_OUI = "naa.624a9370"
+SAFE_MODE_VERSION = "2.10"
+PER_PG_VERSION = "2.13"
+SAML2_VERSION = "2.11"
+NFS_USER_MAP_VERSION = "2.15"
+DEFAULT_PROT_API_VERSION = "2.16"
+VM_VERSION = "2.14"
+VLAN_VERSION = "2.17"
+NEIGHBOR_API_VERSION = "2.22"
+POD_QUOTA_VERSION = "2.23"
+
+
+def generate_default_dict(module, array):
+    """Build the default (always collected) info subset.
+
+    Covers object counts, array identity, Purity version and basic feature
+    state. Uses the REST 1.x client for legacy data and a REST 2.x client
+    (get_array) for newer features, gated by API-version checks.
+    """
+    default_info = {}
+    defaults = array.get()
+    api_version = array._list_available_rest_versions()
+    default_info["api_versions"] = api_version
+    if FILES_API_VERSION in api_version:
+        arrayv6 = get_array(module)
+        if VM_VERSION in api_version:
+            default_info["virtual_machines"] = len(
+                arrayv6.get_virtual_machines(vm_type="vvol").items
+            )
+            default_info["virtual_machine_snaps"] = len(
+                arrayv6.get_virtual_machine_snapshots(vm_type="vvol").items
+            )
+        default_info["snapshot_policies"] = len(arrayv6.get_policies_snapshot().items)
+        default_info["nfs_policies"] = len(arrayv6.get_policies_nfs().items)
+        default_info["smb_policies"] = len(arrayv6.get_policies_smb().items)
+        default_info["filesystems"] = len(arrayv6.get_file_systems().items)
+        default_info["directories"] = len(arrayv6.get_directories().items)
+        default_info["exports"] = len(arrayv6.get_directory_exports().items)
+        default_info["directory_snapshots"] = len(
+            arrayv6.get_directory_snapshots().items
+        )
+        if DIR_QUOTA_API_VERSION in api_version:
+            default_info["quota_policies"] = len(arrayv6.get_policies_quota().items)
+        if ENCRYPTION_STATUS_API_VERSION in api_version:
+            array_data = list(arrayv6.get_arrays().items)[0]
+            encryption = array_data.encryption
+            default_info["encryption_enabled"] = encryption.data_at_rest.enabled
+            if default_info["encryption_enabled"]:
+                default_info["encryption_algorithm"] = encryption.data_at_rest.algorithm
+                default_info["encryption_module_version"] = encryption.module_version
+            eradication = array_data.eradication_config
+            # eradication_delay is reported in ms; convert to whole days
+            default_info["eradication_days_timer"] = int(
+                eradication.eradication_delay / SEC_TO_DAY
+            )
+            if SAFE_MODE_VERSION in api_version:
+                # SafeMode is "Enabled" when manual eradication is restricted
+                if eradication.manual_eradication == "all-enabled":
+                    default_info["safe_mode"] = "Disabled"
+                else:
+                    default_info["safe_mode"] = "Enabled"
+    if AC_REQUIRED_API_VERSION in api_version:
+        default_info["volume_groups"] = len(array.list_vgroups())
+        default_info["connected_arrays"] = len(array.list_array_connections())
+        default_info["pods"] = len(array.list_pods())
+        default_info["connection_key"] = array.get(connection_key=True)[
+            "connection_key"
+        ]
+    hosts = array.list_hosts()
+    admins = array.list_admins()
+    snaps = array.list_volumes(snap=True, pending=True)
+    volumes = array.list_volumes(pending=True)
+    pgroups = array.list_pgroups(pending=True)
+    hgroups = array.list_hgroups()
+    default_info["array_model"] = array.get(controllers=True)[0]["model"]
+    default_info["array_name"] = defaults["array_name"]
+    default_info["purity_version"] = defaults["version"]
+    default_info["hosts"] = len(hosts)
+    default_info["snapshots"] = len(snaps)
+    default_info["volumes"] = len(volumes)
+    default_info["protection_groups"] = len(pgroups)
+    default_info["hostgroups"] = len(hgroups)
+    default_info["admins"] = len(admins)
+    default_info["remote_assist"] = array.get_remote_assist_status()["status"]
+    if P53_API_VERSION in api_version:
+        default_info["maintenance_window"] = array.list_maintenance_windows()
+    return default_info
+
+
+def generate_perf_dict(array):
+ perf_info = {}
+ api_version = array._list_available_rest_versions()
+ if LATENCY_REQUIRED_API_VERSION in api_version:
+ latency_info = array.get(action="monitor", latency=True)[0]
+ perf_info = array.get(action="monitor")[0]
+ perf_info["writes_per_sec"] = perf_info["writes_per_sec"]
+ perf_info["reads_per_sec"] = perf_info["reads_per_sec"]
+
+ perf_info["input_per_sec"] = perf_info["input_per_sec"]
+ perf_info["output_per_sec"] = perf_info["output_per_sec"]
+
+ if LATENCY_REQUIRED_API_VERSION in api_version:
+ perf_info["san_usec_per_read_op"] = latency_info["san_usec_per_read_op"]
+ perf_info["san_usec_per_write_op"] = latency_info["san_usec_per_write_op"]
+ perf_info["queue_usec_per_read_op"] = latency_info["queue_usec_per_read_op"]
+ perf_info["queue_usec_per_write_op"] = latency_info["queue_usec_per_write_op"]
+ perf_info["qos_rate_limit_usec_per_read_op"] = latency_info[
+ "qos_rate_limit_usec_per_read_op"
+ ]
+ perf_info["qos_rate_limit_usec_per_write_op"] = latency_info[
+ "qos_rate_limit_usec_per_write_op"
+ ]
+ perf_info["local_queue_usec_per_op"] = perf_info["local_queue_usec_per_op"]
+ perf_info["usec_per_read_op"] = perf_info["usec_per_read_op"]
+ perf_info["usec_per_write_op"] = perf_info["usec_per_write_op"]
+ perf_info["queue_depth"] = perf_info["queue_depth"]
+ return perf_info
+
+
+def generate_config_dict(module, array):
+ config_info = {}
+ api_version = array._list_available_rest_versions()
+ config_info["console_lock"] = array.get_console_lock_status()["console_lock"]
+ if NFS_USER_MAP_VERSION not in api_version:
+ config_info["dns"] = array.get_dns()
+ config_info["smtp"] = array.list_alert_recipients()
+ config_info["snmp"] = array.list_snmp_managers()
+ config_info["snmp_v3_engine_id"] = array.get_snmp_engine_id()["engine_id"]
+ if V6_MINIMUM_API_VERSION in api_version:
+ config_info["directory_service"] = {}
+ arrayv6 = get_array(module)
+ services = list(arrayv6.get_directory_services().items)
+ for service in range(0, len(services)):
+ service_type = services[service].name
+ config_info["directory_service"][service_type] = {
+ "base_dn": getattr(services[service], "base_dn", "None"),
+ "bind_user": getattr(services[service], "bind_user", "None"),
+ "enabled": services[service].enabled,
+ "services": services[service].services,
+ "uris": services[service].uris,
+ }
+ config_info["directory_service_roles"] = {}
+ roles = list(arrayv6.get_directory_services_roles().items)
+ for role in range(0, len(roles)):
+ role_name = roles[role].role.name
+ try:
+ config_info["directory_service_roles"][role_name] = {
+ "group": roles[role].group,
+ "group_base": roles[role].group_base,
+ }
+ except Exception:
+ pass
+ smi_s = list(arrayv6.get_smi_s().items)[0]
+ config_info["smi-s"] = {
+ "slp_enabled": smi_s.slp_enabled,
+ "wbem_https_enabled": smi_s.wbem_https_enabled,
+ }
+ # Add additional SMI-S section to help with formatting
+ # issues caused by `-` in the dict name.
+ config_info["smi_s"] = {
+ "slp_enabled": smi_s.slp_enabled,
+ "wbem_https_enabled": smi_s.wbem_https_enabled,
+ }
+ if NFS_USER_MAP_VERSION in api_version:
+ config_info["dns"] = {}
+ dns_configs = list(arrayv6.get_dns().items)
+ for config in range(0, len(dns_configs)):
+ config_info["dns"][dns_configs[config].services[0]] = {
+ "nameservers": dns_configs[config].nameservers,
+ "domain": dns_configs[config].domain,
+ }
+ try:
+ config_info["dns"][dns_configs[config].services[0]][
+ "source"
+ ] = dns_configs[config].source["name"]
+ except Exception:
+ pass
+ if SAML2_VERSION in api_version:
+ config_info["saml2sso"] = {}
+ saml2 = list(arrayv6.get_sso_saml2_idps().items)
+ if saml2:
+ config_info["saml2sso"] = {
+ "enabled": saml2[0].enabled,
+ "array_url": saml2[0].array_url,
+ "name": saml2[0].name,
+ "idp": {
+ "url": getattr(saml2[0].idp, "url", None),
+ "encrypt_enabled": saml2[0].idp.encrypt_assertion_enabled,
+ "sign_enabled": saml2[0].idp.sign_request_enabled,
+ "metadata_url": saml2[0].idp.metadata_url,
+ },
+ "sp": {
+ "decrypt_cred": getattr(
+ saml2[0].sp.decryption_credential, "name", None
+ ),
+ "sign_cred": getattr(
+ saml2[0].sp.signing_credential, "name", None
+ ),
+ },
+ }
+ if FILES_API_VERSION in api_version:
+ config_info["active_directory"] = {}
+ try:
+ ad_accounts = list(arrayv6.get_active_directory().items)
+ for ad_account in range(0, len(ad_accounts)):
+ ad_name = ad_accounts[ad_account].name
+ config_info["active_directory"][ad_name] = {
+ "computer_name": ad_accounts[ad_account].computer_name,
+ "domain": ad_accounts[ad_account].domain,
+ "directory_servers": getattr(
+ ad_accounts[ad_account], "directory_servers", None
+ ),
+ "kerberos_servers": getattr(
+ ad_accounts[ad_account], "kerberos_servers", None
+ ),
+ "service_principal_names": getattr(
+ ad_accounts[ad_account], "service_principal_names", None
+ ),
+ "tls": getattr(ad_accounts[ad_account], "tls", None),
+ }
+ except Exception:
+ module.warn("FA-Files is not enabled on this array")
+ if DEFAULT_PROT_API_VERSION in api_version:
+ config_info["default_protections"] = {}
+ default_prots = list(arrayv6.get_container_default_protections().items)
+ for prot in range(0, len(default_prots)):
+ container = getattr(default_prots[prot], "name", "-")
+ config_info["default_protections"][container] = {
+ "protections": [],
+ "type": getattr(default_prots[prot], "type", "array"),
+ }
+ for container_prot in range(
+ 0, len(default_prots[prot].default_protections)
+ ):
+ config_info["default_protections"][container]["protections"].append(
+ {
+ "type": default_prots[prot]
+ .default_protections[container_prot]
+ .type,
+ "name": default_prots[prot]
+ .default_protections[container_prot]
+ .name,
+ }
+ )
+
+ else:
+ config_info["directory_service"] = {}
+ config_info["directory_service"]["management"] = array.get_directory_service()
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_info["directory_service_roles"] = {}
+ roles = array.list_directory_service_roles()
+ for role in range(0, len(roles)):
+ role_name = roles[role]["name"]
+ config_info["directory_service_roles"][role_name] = {
+ "group": roles[role]["group"],
+ "group_base": roles[role]["group_base"],
+ }
+ else:
+ config_info["directory_service"].update(
+ array.get_directory_service(groups=True)
+ )
+ config_info["ntp"] = array.get(ntpserver=True)["ntpserver"]
+ config_info["syslog"] = array.get(syslogserver=True)["syslogserver"]
+ config_info["phonehome"] = array.get(phonehome=True)["phonehome"]
+ config_info["proxy"] = array.get(proxy=True)["proxy"]
+ config_info["relayhost"] = array.get(relayhost=True)["relayhost"]
+ config_info["senderdomain"] = array.get(senderdomain=True)["senderdomain"]
+ config_info["syslog"] = array.get(syslogserver=True)["syslogserver"]
+ config_info["idle_timeout"] = array.get(idle_timeout=True)["idle_timeout"]
+ config_info["scsi_timeout"] = array.get(scsi_timeout=True)["scsi_timeout"]
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_info["global_admin"] = array.get_global_admin_attributes()
+ if (
+ config_info["global_admin"]["lockout_duration"]
+ and config_info["global_admin"]["lockout_duration"] > 0
+ ):
+ config_info["global_admin"]["lockout_duration"] = int(
+ config_info["global_admin"]["lockout_duration"] / 1000
+ )
+ return config_info
+
+
+def generate_filesystems_dict(array):
+ files_info = {}
+ filesystems = list(array.get_file_systems().items)
+ for filesystem in range(0, len(filesystems)):
+ fs_name = filesystems[filesystem].name
+ files_info[fs_name] = {
+ "destroyed": filesystems[filesystem].destroyed,
+ "directories": {},
+ }
+ directories = list(array.get_directories(file_system_names=[fs_name]).items)
+ for directory in range(0, len(directories)):
+ d_name = directories[directory].directory_name
+ files_info[fs_name]["directories"][d_name] = {
+ "path": directories[directory].path,
+ "data_reduction": directories[directory].space.data_reduction,
+ "snapshots_space": directories[directory].space.snapshots,
+ "total_physical_space": directories[directory].space.total_physical,
+ "unique_space": directories[directory].space.unique,
+ "virtual_space": directories[directory].space.virtual,
+ "destroyed": directories[directory].destroyed,
+ "full_name": directories[directory].name,
+ "used_provisioned": getattr(
+ directories[directory].space, "used_provisioned", None
+ ),
+ "exports": {},
+ }
+ exports = list(
+ array.get_directory_exports(
+ directory_names=[
+ files_info[fs_name]["directories"][d_name]["full_name"]
+ ]
+ ).items
+ )
+ for export in range(0, len(exports)):
+ e_name = exports[export].export_name
+ files_info[fs_name]["directories"][d_name]["exports"][e_name] = {
+ "enabled": exports[export].enabled,
+ "policy": {
+ "name": exports[export].policy.name,
+ "type": exports[export].policy.resource_type,
+ },
+ }
+ return files_info
+
+
+def generate_pgsnaps_dict(array):
+ pgsnaps_info = {}
+ snapshots = list(array.get_protection_group_snapshots().items)
+ for snapshot in range(0, len(snapshots)):
+ s_name = snapshots[snapshot].name
+ pgsnaps_info[s_name] = {
+ "destroyed": snapshots[snapshot].destroyed,
+ "source": snapshots[snapshot].source.name,
+ "suffix": snapshots[snapshot].suffix,
+ "snapshot_space": snapshots[snapshot].space.snapshots,
+ "used_provisioned": getattr(
+ snapshots[snapshot].space, "used_provisioned", None
+ ),
+ }
+ try:
+ if pgsnaps_info[s_name]["destroyed"]:
+ pgsnaps_info[s_name]["time_remaining"] = snapshots[
+ snapshot
+ ].time_remaining
+ except AttributeError:
+ pass
+ try:
+ pgsnaps_info[s_name]["manual_eradication"] = snapshots[
+ snapshot
+ ].eradication_config.manual_eradication
+ except AttributeError:
+ pass
+ return pgsnaps_info
+
+
+def generate_dir_snaps_dict(array):
+ dir_snaps_info = {}
+ snapshots = list(array.get_directory_snapshots().items)
+ for snapshot in range(0, len(snapshots)):
+ s_name = snapshots[snapshot].name
+ dir_snaps_info[s_name] = {
+ "destroyed": snapshots[snapshot].destroyed,
+ "source": snapshots[snapshot].source.name,
+ "suffix": snapshots[snapshot].suffix,
+ "client_name": snapshots[snapshot].client_name,
+ "snapshot_space": snapshots[snapshot].space.snapshots,
+ "total_physical_space": snapshots[snapshot].space.total_physical,
+ "unique_space": snapshots[snapshot].space.unique,
+ "used_provisioned": getattr(
+ snapshots[snapshot].space, "used_provisioned", None
+ ),
+ }
+ try:
+ dir_snaps_info[s_name]["policy"] = snapshots[snapshot].policy.name
+ except Exception:
+ dir_snaps_info[s_name]["policy"] = ""
+ if dir_snaps_info[s_name]["destroyed"]:
+ dir_snaps_info[s_name]["time_remaining"] = snapshots[
+ snapshot
+ ].time_remaining
+ return dir_snaps_info
+
+
+def generate_policies_dict(array, quota_available, nfs_user_mapping):
+    """Build per-policy details (type, members, rules) for all policies.
+
+    quota_available / nfs_user_mapping flag whether the target array's API
+    version supports quota policies and NFS user mapping respectively.
+    """
+    policy_info = {}
+    policies = list(array.get_policies().items)
+    for policy in range(0, len(policies)):
+        p_name = policies[policy].name
+        policy_info[p_name] = {
+            "type": policies[policy].policy_type,
+            "enabled": policies[policy].enabled,
+            "members": [],
+            "rules": [],
+        }
+        # Directories currently governed by this policy
+        members = list(array.get_directories_policies(policy_names=[p_name]).items)
+        for member in range(0, len(members)):
+            m_name = members[member].member.name
+            policy_info[p_name]["members"].append(m_name)
+        if policies[policy].policy_type == "smb":
+            rules = list(
+                array.get_policies_smb_client_rules(policy_names=[p_name]).items
+            )
+            for rule in range(0, len(rules)):
+                smb_rules_dict = {
+                    "client": rules[rule].client,
+                    "smb_encryption_required": rules[rule].smb_encryption_required,
+                    "anonymous_access_allowed": rules[rule].anonymous_access_allowed,
+                }
+                policy_info[p_name]["rules"].append(smb_rules_dict)
+        if policies[policy].policy_type == "nfs":
+            if nfs_user_mapping:
+                nfs_policy = list(array.get_policies_nfs(names=[p_name]).items)[0]
+                policy_info[p_name][
+                    "user_mapping_enabled"
+                ] = nfs_policy.user_mapping_enabled
+            rules = list(
+                array.get_policies_nfs_client_rules(policy_names=[p_name]).items
+            )
+            for rule in range(0, len(rules)):
+                nfs_rules_dict = {
+                    "access": rules[rule].access,
+                    "permission": rules[rule].permission,
+                    "client": rules[rule].client,
+                }
+                policy_info[p_name]["rules"].append(nfs_rules_dict)
+        if policies[policy].policy_type == "snapshot":
+            # Snapshot-rule suffixes only exist from SHARED_CAP_API_VERSION on;
+            # "packaging" is needed to compare REST version strings
+            if HAS_PACKAGING:
+                suffix_enabled = version.parse(
+                    array.get_rest_version()
+                ) >= version.parse(SHARED_CAP_API_VERSION)
+            else:
+                suffix_enabled = False
+            rules = list(array.get_policies_snapshot_rules(policy_names=[p_name]).items)
+            for rule in range(0, len(rules)):
+                # "at" is ms since midnight; "every"/"keep_for" are ms intervals
+                try:
+                    snap_rules_dict = {
+                        "at": str(int(rules[rule].at / 3600000)).zfill(2) + ":00",
+                        "client_name": rules[rule].client_name,
+                        "every": str(int(rules[rule].every / 60000)) + " mins",
+                        "keep_for": str(int(rules[rule].keep_for / 60000)) + " mins",
+                    }
+                except AttributeError:
+                    # Rule has no fixed "at" time
+                    snap_rules_dict = {
+                        "at": None,
+                        "client_name": rules[rule].client_name,
+                        "every": str(int(rules[rule].every / 60000)) + " mins",
+                        "keep_for": str(int(rules[rule].keep_for / 60000)) + " mins",
+                    }
+                if suffix_enabled:
+                    try:
+                        snap_rules_dict["suffix"] = rules[rule].suffix
+                    except AttributeError:
+                        snap_rules_dict["suffix"] = ""
+                policy_info[p_name]["rules"].append(snap_rules_dict)
+        if policies[policy].policy_type == "quota" and quota_available:
+            rules = list(array.get_policies_quota_rules(policy_names=[p_name]).items)
+            for rule in range(0, len(rules)):
+                quota_rules_dict = {
+                    "enforced": rules[rule].enforced,
+                    "quota_limit": rules[rule].quota_limit,
+                    "notifications": rules[rule].notifications,
+                }
+                policy_info[p_name]["rules"].append(quota_rules_dict)
+    return policy_info
+
+
def generate_clients_dict(array):
    """Return API client details from the v6 REST client, keyed by client name."""
    clients_info = {}
    for client in list(array.get_api_clients().items):
        clients_info[client.name] = {
            "enabled": client.enabled,
            # TTL is reported by the API in milliseconds
            "TTL(seconds)": client.access_token_ttl_in_ms / 1000,
            "key_id": client.key_id,
            "client_id": client.id,
            "max_role": client.max_role,
            "public_key": client.public_key,
        }
    return clients_info
+
+
def generate_admin_dict(array):
    """Return administrator accounts (type and role), keyed by admin name.

    Empty when the connected array's REST API predates admin support.
    """
    admin_info = {}
    api_version = array._list_available_rest_versions()
    if ADMIN_API_VERSION in api_version:
        for admin in array.list_admins():
            admin_info[admin["name"]] = {
                "type": admin["type"],
                "role": admin["role"],
            }
    return admin_info
+
+
def generate_subnet_dict(array):
    """Return details for every *enabled* subnet, keyed by subnet name."""
    sub_info = {}
    for subnet in array.list_subnets():
        if not subnet["enabled"]:
            # Disabled subnets are deliberately omitted from the facts
            continue
        sub_info[subnet["name"]] = {
            "gateway": subnet["gateway"],
            "mtu": subnet["mtu"],
            "vlan": subnet["vlan"],
            "prefix": subnet["prefix"],
            "interfaces": subnet["interfaces"],
            "services": subnet["services"],
        }
    return sub_info
+
+
def _lldp_capability(chassis, capability):
    """Return the enabled/supported flag pair for one LLDP chassis capability.

    A missing capability object, or missing flags on it, default to False
    instead of raising AttributeError.
    """
    cap = getattr(chassis, capability, None)
    return {
        "enabled": getattr(cap, "enabled", False),
        "supported": getattr(cap, "supported", False),
    }


def generate_network_dict(module, array):
    """Return per-interface network details, keyed by interface name.

    Each interface gets its hardware/IP settings, the enabled subnet it
    belongs to (if any), and - where the REST API supports it - LLDP
    neighbor information gathered through the v6 client.
    """
    net_info = {}
    api_version = array._list_available_rest_versions()
    ports = array.list_network_interfaces()
    for port in range(0, len(ports)):
        int_name = ports[port]["name"]
        net_info[int_name] = {
            "hwaddr": ports[port]["hwaddr"],
            "mtu": ports[port]["mtu"],
            "enabled": ports[port]["enabled"],
            "speed": ports[port]["speed"],
            "address": ports[port]["address"],
            "slaves": ports[port]["slaves"],
            "services": ports[port]["services"],
            "gateway": ports[port]["gateway"],
            "netmask": ports[port]["netmask"],
        }
        if ports[port]["subnet"]:
            subnets = array.get_subnet(ports[port]["subnet"])
            # Only attach subnet details when the subnet is enabled
            if subnets["enabled"]:
                net_info[int_name]["subnet"] = {
                    "name": subnets["name"],
                    "prefix": subnets["prefix"],
                    "vlan": subnets["vlan"],
                }
    if NEIGHBOR_API_VERSION in api_version:
        arrayv6 = get_array(module)
        neighbors = list(arrayv6.get_network_interfaces_neighbors().items)
        for neighbor in range(0, len(neighbors)):
            neighbor_info = neighbors[neighbor]
            int_name = neighbor_info.local_port.name
            chassis = neighbor_info.neighbor_chassis
            neighbor_port = neighbor_info.neighbor_port
            net_info[int_name].update(
                {
                    "neighbor": {
                        "initial_ttl_in_sec": neighbor_info.initial_ttl_in_sec,
                        "neighbor_port": {
                            "description": getattr(
                                neighbor_port, "description", None
                            ),
                            # BUGFIX: this previously read the *chassis* name
                            # due to a copy-paste error
                            "name": getattr(neighbor_port, "name", None),
                            "id": getattr(
                                getattr(neighbor_port, "id", None), "value", None
                            ),
                        },
                        "neighbor_chassis": {
                            "addresses": getattr(chassis, "addresses", None),
                            "description": getattr(chassis, "description", None),
                            "name": getattr(chassis, "name", None),
                            "bridge": _lldp_capability(chassis, "bridge"),
                            "repeater": _lldp_capability(chassis, "repeater"),
                            "router": _lldp_capability(chassis, "router"),
                            "station_only": _lldp_capability(
                                chassis, "station_only"
                            ),
                            "telephone": _lldp_capability(chassis, "telephone"),
                            "wlan_access_point": _lldp_capability(
                                chassis, "wlan_access_point"
                            ),
                            "docsis_cable_device": _lldp_capability(
                                chassis, "docsis_cable_device"
                            ),
                            "id": {
                                "type": getattr(
                                    getattr(chassis, "id", None), "type", None
                                ),
                                "value": getattr(
                                    getattr(chassis, "id", None), "value", None
                                ),
                            },
                        },
                    }
                }
            )
    return net_info
+
+
def generate_capacity_dict(module, array):
    """Return array capacity and space-usage metrics.

    Uses the v6 REST client where available (with extended "effective"
    metrics on newer Purity releases) and falls back to the v1 client for
    older arrays.
    """
    capacity_info = {}
    api_version = array._list_available_rest_versions()
    if V6_MINIMUM_API_VERSION in api_version:
        new_version = bool(SHARED_CAP_API_VERSION in api_version)
        arrayv6 = get_array(module)
        total_capacity = list(arrayv6.get_arrays().items)[0].capacity
        capacity = list(arrayv6.get_arrays_space().items)[0]
        capacity_info["total_capacity"] = total_capacity
        if new_version:
            capacity_info["provisioned_space"] = getattr(
                capacity.space, "total_provisioned", 0
            )
            capacity_info["free_space"] = total_capacity - getattr(
                capacity.space, "total_physical", 0
            )
            capacity_info["data_reduction"] = getattr(
                capacity.space, "data_reduction", 0
            )
            capacity_info["system_space"] = getattr(capacity.space, "system", 0)
            capacity_info["volume_space"] = getattr(capacity.space, "unique", 0)
            capacity_info["shared_space"] = getattr(capacity.space, "shared", 0)
            capacity_info["snapshot_space"] = getattr(capacity.space, "snapshots", 0)
            capacity_info["thin_provisioning"] = getattr(
                capacity.space, "thin_provisioning", 0
            )
            capacity_info["total_reduction"] = getattr(
                capacity.space, "total_reduction", 0
            )
            capacity_info["replication"] = getattr(capacity.space, "replication", 0)
            capacity_info["shared_effective"] = getattr(
                capacity.space, "shared_effective", 0
            )
            capacity_info["snapshots_effective"] = getattr(
                capacity.space, "snapshots_effective", 0
            )
            # BUGFIX: this previously read "total_effective", duplicating the
            # value below instead of reporting the unique effective space
            capacity_info["unique_effective"] = getattr(
                capacity.space, "unique_effective", 0
            )
            capacity_info["total_effective"] = getattr(
                capacity.space, "total_effective", 0
            )
            capacity_info["used_provisioned"] = getattr(
                capacity.space, "used_provisioned", 0
            )
        else:
            capacity_info["provisioned_space"] = capacity.space["total_provisioned"]
            capacity_info["free_space"] = (
                total_capacity - capacity.space["total_physical"]
            )
            capacity_info["data_reduction"] = capacity.space["data_reduction"]
            capacity_info["system_space"] = capacity.space["system"]
            capacity_info["volume_space"] = capacity.space["unique"]
            capacity_info["shared_space"] = capacity.space["shared"]
            capacity_info["snapshot_space"] = capacity.space["snapshots"]
            capacity_info["thin_provisioning"] = capacity.space["thin_provisioning"]
            capacity_info["total_reduction"] = capacity.space["total_reduction"]
            capacity_info["replication"] = capacity.space["replication"]
    elif CAP_REQUIRED_API_VERSION in api_version:
        # Legacy (v1 REST) path: derive provisioned space from the volume list
        volumes = array.list_volumes(pending=True)
        capacity_info["provisioned_space"] = sum(item["size"] for item in volumes)
        capacity = array.get(space=True)
        total_capacity = capacity[0]["capacity"]
        used_space = capacity[0]["total"]
        capacity_info["free_space"] = total_capacity - used_space
        capacity_info["total_capacity"] = total_capacity
        capacity_info["data_reduction"] = capacity[0]["data_reduction"]
        capacity_info["system_space"] = capacity[0]["system"]
        capacity_info["volume_space"] = capacity[0]["volumes"]
        capacity_info["shared_space"] = capacity[0]["shared_space"]
        capacity_info["snapshot_space"] = capacity[0]["snapshots"]
        capacity_info["thin_provisioning"] = capacity[0]["thin_provisioning"]
        capacity_info["total_reduction"] = capacity[0]["total_reduction"]
    return capacity_info
+
+
def generate_snap_dict(module, array):
    """Return details of all live volume snapshots, keyed by snapshot name.

    v6 space metrics and offload (remote) copies are merged in when the REST
    API supports them; ActiveDR tag data is appended last.
    """
    snap_info = {}
    api_version = array._list_available_rest_versions()
    if FC_REPL_API_VERSION in api_version:
        arrayv6 = get_array(module)
        snapsv6 = list(arrayv6.get_volume_snapshots(destroyed=False).items)
    snaps = array.list_volumes(snap=True)
    for snap in range(0, len(snaps)):
        snapshot = snaps[snap]["name"]
        snap_info[snapshot] = {
            "size": snaps[snap]["size"],
            "source": snaps[snap]["source"],
            "created": snaps[snap]["created"],
            "tags": [],
            "remote": [],
        }
    if FC_REPL_API_VERSION in api_version:
        for snap in range(0, len(snapsv6)):
            snapshot = snapsv6[snap].name
            snap_info[snapshot]["snapshot_space"] = snapsv6[snap].space.snapshots
            # BUGFIX: a trailing comma previously wrapped this value in a
            # single-element tuple; assign the scalar directly
            snap_info[snapshot]["used_provisioned"] = getattr(
                snapsv6[snap].space, "used_provisioned", None
            )
            snap_info[snapshot]["total_physical"] = snapsv6[snap].space.total_physical
            snap_info[snapshot]["total_provisioned"] = snapsv6[
                snap
            ].space.total_provisioned
            snap_info[snapshot]["unique_space"] = snapsv6[snap].space.unique
            if SHARED_CAP_API_VERSION in api_version:
                snap_info[snapshot]["snapshots_effective"] = snapsv6[
                    snap
                ].space.snapshots_effective
        offloads = list(arrayv6.get_offloads().items)
        for offload in range(0, len(offloads)):
            offload_name = offloads[offload].name
            # Skip offload targets that cannot report remote snapshots
            check_offload = arrayv6.get_remote_volume_snapshots(on=offload_name)
            if check_offload.status_code == 200:
                remote_snaps = list(
                    arrayv6.get_remote_volume_snapshots(
                        on=offload_name, destroyed=False
                    ).items
                )
                for remote_snap in range(0, len(remote_snaps)):
                    remote_snap_name = remote_snaps[remote_snap].name.split(":")[1]
                    remote_transfer = list(
                        arrayv6.get_remote_volume_snapshots_transfer(
                            on=offload_name, names=[remote_snaps[remote_snap].name]
                        ).items
                    )[0]
                    remote_dict = {
                        "source": remote_snaps[remote_snap].source.name,
                        "suffix": remote_snaps[remote_snap].suffix,
                        "size": remote_snaps[remote_snap].provisioned,
                        "data_transferred": remote_transfer.data_transferred,
                        # Timestamps arrive as epoch-ms; render as UTC strings
                        "completed": time.strftime(
                            "%Y-%m-%d %H:%M:%S",
                            time.gmtime(remote_transfer.completed / 1000),
                        )
                        + " UTC",
                        "physical_bytes_written": remote_transfer.physical_bytes_written,
                        "progress": remote_transfer.progress,
                        "created": time.strftime(
                            "%Y-%m-%d %H:%M:%S",
                            time.gmtime(remote_snaps[remote_snap].created / 1000),
                        )
                        + " UTC",
                    }
                    try:
                        snap_info[remote_snap_name]["remote"].append(remote_dict)
                    except KeyError:
                        # Remote-only snapshot: create a minimal entry for it
                        snap_info[remote_snap_name] = {"remote": []}
                        snap_info[remote_snap_name]["remote"].append(remote_dict)
    if ACTIVE_DR_API in api_version:
        snaptags = array.list_volumes(snap=True, tags=True, namespace="*")
        for snaptag in range(0, len(snaptags)):
            # VASA-internal tags are deliberately excluded from the facts
            if snaptags[snaptag]["namespace"] != "vasa-integration.purestorage.com":
                snapname = snaptags[snaptag]["name"]
                tagdict = {
                    "key": snaptags[snaptag]["key"],
                    "value": snaptags[snaptag]["value"],
                    "namespace": snaptags[snaptag]["namespace"],
                }
                snap_info[snapname]["tags"].append(tagdict)
    return snap_info
+
+
def generate_del_snap_dict(module, array):
    """Return details of destroyed (pending eradication) volume snapshots.

    Mirrors generate_snap_dict but for snapshots awaiting eradication,
    adding their time_remaining.
    """
    snap_info = {}
    api_version = array._list_available_rest_versions()
    if FC_REPL_API_VERSION in api_version:
        arrayv6 = get_array(module)
        snapsv6 = list(arrayv6.get_volume_snapshots(destroyed=True).items)
    snaps = array.list_volumes(snap=True, pending_only=True)
    for snap in range(0, len(snaps)):
        snapshot = snaps[snap]["name"]
        snap_info[snapshot] = {
            "size": snaps[snap]["size"],
            "source": snaps[snap]["source"],
            "created": snaps[snap]["created"],
            "time_remaining": snaps[snap]["time_remaining"],
            "tags": [],
            "remote": [],
        }
    if FC_REPL_API_VERSION in api_version:
        for snap in range(0, len(snapsv6)):
            snapshot = snapsv6[snap].name
            snap_info[snapshot]["snapshot_space"] = snapsv6[snap].space.snapshots
            # BUGFIX: a trailing comma previously wrapped this value in a
            # single-element tuple; assign the scalar directly
            snap_info[snapshot]["used_provisioned"] = getattr(
                snapsv6[snap].space, "used_provisioned", None
            )
            snap_info[snapshot]["total_physical"] = snapsv6[snap].space.total_physical
            snap_info[snapshot]["total_provisioned"] = snapsv6[
                snap
            ].space.total_provisioned
            snap_info[snapshot]["unique_space"] = snapsv6[snap].space.unique
        offloads = list(arrayv6.get_offloads().items)
        for offload in range(0, len(offloads)):
            offload_name = offloads[offload].name
            # Skip offload targets that cannot report remote snapshots
            check_offload = arrayv6.get_remote_volume_snapshots(on=offload_name)
            if check_offload.status_code == 200:
                remote_snaps = list(
                    arrayv6.get_remote_volume_snapshots(
                        on=offload_name, destroyed=True
                    ).items
                )
                for remote_snap in range(0, len(remote_snaps)):
                    remote_snap_name = remote_snaps[remote_snap].name.split(":")[1]
                    remote_transfer = list(
                        arrayv6.get_remote_volume_snapshots_transfer(
                            on=offload_name, names=[remote_snaps[remote_snap].name]
                        ).items
                    )[0]
                    remote_dict = {
                        "source": remote_snaps[remote_snap].source.name,
                        "suffix": remote_snaps[remote_snap].suffix,
                        "size": remote_snaps[remote_snap].provisioned,
                        "data_transferred": remote_transfer.data_transferred,
                        # Timestamps arrive as epoch-ms; render as UTC strings
                        "completed": time.strftime(
                            "%Y-%m-%d %H:%M:%S",
                            time.gmtime(remote_transfer.completed / 1000),
                        )
                        + " UTC",
                        "physical_bytes_written": remote_transfer.physical_bytes_written,
                        "progress": remote_transfer.progress,
                        "created": time.strftime(
                            "%Y-%m-%d %H:%M:%S",
                            time.gmtime(remote_snaps[remote_snap].created / 1000),
                        )
                        + " UTC",
                    }
                    try:
                        snap_info[remote_snap_name]["remote"].append(remote_dict)
                    except KeyError:
                        # Remote-only snapshot: create a minimal entry for it
                        snap_info[remote_snap_name] = {"remote": []}
                        snap_info[remote_snap_name]["remote"].append(remote_dict)
    if ACTIVE_DR_API in api_version:
        snaptags = array.list_volumes(
            snap=True, tags=True, pending_only=True, namespace="*"
        )
        for snaptag in range(0, len(snaptags)):
            # VASA-internal tags are deliberately excluded from the facts
            if snaptags[snaptag]["namespace"] != "vasa-integration.purestorage.com":
                snapname = snaptags[snaptag]["name"]
                tagdict = {
                    "key": snaptags[snaptag]["key"],
                    "value": snaptags[snaptag]["value"],
                    "namespace": snaptags[snaptag]["namespace"],
                }
                snap_info[snapname]["tags"].append(tagdict)
    return snap_info
+
+
def generate_del_vol_dict(module, array):
    """Return details of destroyed (pending eradication) volumes.

    Mirrors generate_vol_dict but for volumes awaiting eradication,
    adding their time_remaining.
    """
    volume_info = {}
    api_version = array._list_available_rest_versions()
    vols = array.list_volumes(pending_only=True)
    for vol in range(0, len(vols)):
        volume = vols[vol]["name"]
        volume_info[volume] = {
            "size": vols[vol]["size"],
            "source": vols[vol]["source"],
            "created": vols[vol]["created"],
            "serial": vols[vol]["serial"],
            # SCSI NAA identifier is the Pure OUI plus the volume serial
            "page83_naa": PURE_OUI + vols[vol]["serial"],
            # NVMe NGUID built from the serial split around the vendor ID
            "nvme_nguid": "eui.00"
            + vols[vol]["serial"][0:14].lower()
            + "24a937"
            + vols[vol]["serial"][-10:].lower(),
            "time_remaining": vols[vol]["time_remaining"],
            "tags": [],
        }
    if V6_MINIMUM_API_VERSION in api_version:
        arrayv6 = get_array(module)
        vols_space = list(arrayv6.get_volumes_space(destroyed=True).items)
        for vol in range(0, len(vols_space)):
            name = vols_space[vol].name
            volume_info[name]["snapshots_space"] = vols_space[vol].space.snapshots
            # Provide system as this matches the old naming convention
            volume_info[name]["system"] = vols_space[vol].space.unique
            volume_info[name]["unique_space"] = vols_space[vol].space.unique
            volume_info[name]["virtual_space"] = vols_space[vol].space.virtual
            volume_info[name]["total_physical_space"] = vols_space[
                vol
            ].space.total_physical
            volume_info[name]["data_reduction"] = vols_space[vol].space.data_reduction
            volume_info[name]["total_reduction"] = vols_space[vol].space.total_reduction
            volume_info[name]["total_provisioned"] = vols_space[
                vol
            ].space.total_provisioned
            volume_info[name]["thin_provisioning"] = vols_space[
                vol
            ].space.thin_provisioning
            if SHARED_CAP_API_VERSION in api_version:
                volume_info[name]["snapshots_effective"] = vols_space[
                    vol
                ].space.snapshots_effective
                volume_info[name]["unique_effective"] = vols_space[
                    vol
                ].space.unique_effective
                # BUGFIX: a trailing comma previously wrapped this value in a
                # single-element tuple; assign the scalar directly
                volume_info[name]["used_provisioned"] = getattr(
                    vols_space[vol].space, "used_provisioned", None
                )
    if ACTIVE_DR_API in api_version:
        voltags = array.list_volumes(tags=True, pending_only=True)
        for voltag in range(0, len(voltags)):
            # VASA-internal tags are deliberately excluded from the facts
            if voltags[voltag]["namespace"] != "vasa-integration.purestorage.com":
                volume = voltags[voltag]["name"]
                tagdict = {
                    "key": voltags[voltag]["key"],
                    "value": voltags[voltag]["value"],
                    "copyable": voltags[voltag]["copyable"],
                    "namespace": voltags[voltag]["namespace"],
                }
                volume_info[volume]["tags"].append(tagdict)
    if SAFE_MODE_VERSION in api_version:
        # NOTE(review): assumes SAFE_MODE_VERSION implies V6_MINIMUM_API_VERSION,
        # since arrayv6 is only bound in that branch - confirm
        volumes = list(arrayv6.get_volumes(destroyed=True).items)
        for vol in range(0, len(volumes)):
            name = volumes[vol].name
            volume_info[name]["priority"] = volumes[vol].priority
            volume_info[name]["priority_adjustment"] = volumes[
                vol
            ].priority_adjustment.priority_adjustment_operator + str(
                volumes[vol].priority_adjustment.priority_adjustment_value
            )
    return volume_info
+
+
def generate_vol_dict(module, array):
    """Return details of all live volumes, keyed by volume name.

    Combines v1 list_volumes data with v6 space metrics, QoS limits,
    protocol endpoints, host connections, SafeMode priority and tags,
    depending on the REST API versions the array supports.
    """
    volume_info = {}
    vols_space = array.list_volumes(space=True)
    vols = array.list_volumes()
    for vol in range(0, len(vols)):
        volume = vols[vol]["name"]
        volume_info[volume] = {
            "protocol_endpoint": False,
            "source": vols[vol]["source"],
            "size": vols[vol]["size"],
            "serial": vols[vol]["serial"],
            # SCSI NAA identifier is the Pure OUI plus the volume serial
            "page83_naa": PURE_OUI + vols[vol]["serial"],
            # NVMe NGUID built from the serial split around the vendor ID
            "nvme_nguid": "eui.00"
            + vols[vol]["serial"][0:14].lower()
            + "24a937"
            + vols[vol]["serial"][-10:].lower(),
            "tags": [],
            "hosts": [],
            "bandwidth": "",
            "iops_limit": "",
            "data_reduction": vols_space[vol]["data_reduction"],
            "thin_provisioning": vols_space[vol]["thin_provisioning"],
            "total_reduction": vols_space[vol]["total_reduction"],
        }
    api_version = array._list_available_rest_versions()
    if V6_MINIMUM_API_VERSION in api_version:
        arrayv6 = get_array(module)
        # Rebind vols_space to the v6 per-volume space objects
        vols_space = list(arrayv6.get_volumes_space(destroyed=False).items)
        for vol in range(0, len(vols_space)):
            name = vols_space[vol].name
            volume_info[name]["snapshots_space"] = vols_space[vol].space.snapshots
            # Provide system as this matches the old naming convention
            volume_info[name]["system"] = vols_space[vol].space.unique
            volume_info[name]["unique_space"] = vols_space[vol].space.unique
            volume_info[name]["virtual_space"] = vols_space[vol].space.virtual
            volume_info[name]["total_physical_space"] = vols_space[
                vol
            ].space.total_physical
            if SHARED_CAP_API_VERSION in api_version:
                volume_info[name]["snapshots_effective"] = vols_space[
                    vol
                ].space.snapshots_effective
                volume_info[name]["unique_effective"] = vols_space[
                    vol
                ].space.unique_effective
                volume_info[name]["total_effective"] = vols_space[
                    vol
                ].space.total_effective
                # BUGFIX: a trailing comma previously wrapped this value in a
                # single-element tuple; assign the scalar directly
                volume_info[name]["used_provisioned"] = getattr(
                    vols_space[vol].space, "used_provisioned", None
                )
    if AC_REQUIRED_API_VERSION in api_version:
        qvols = array.list_volumes(qos=True)
        for qvol in range(0, len(qvols)):
            volume = qvols[qvol]["name"]
            qos = qvols[qvol]["bandwidth_limit"]
            volume_info[volume]["bandwidth"] = qos
            if P53_API_VERSION in api_version:
                iops = qvols[qvol]["iops_limit"]
                volume_info[volume]["iops_limit"] = iops
        vvols = array.list_volumes(protocol_endpoint=True)
        for vvol in range(0, len(vvols)):
            volume = vvols[vvol]["name"]
            volume_info[volume] = {
                "protocol_endpoint": True,
                "host_encryption_key_status": None,
                "source": vvols[vvol]["source"],
                "serial": vvols[vvol]["serial"],
                # BUGFIX: the NGUID was previously built from the *last plain
                # volume's* serial (stale loop variable), not this endpoint's
                "nvme_nguid": "eui.00"
                + vvols[vvol]["serial"][0:14].lower()
                + "24a937"
                + vvols[vvol]["serial"][-10:].lower(),
                "page83_naa": PURE_OUI + vvols[vvol]["serial"],
                "tags": [],
                "hosts": [],
            }
        if P53_API_VERSION in array._list_available_rest_versions():
            e2ees = array.list_volumes(host_encryption_key=True)
            for e2ee in range(0, len(e2ees)):
                volume = e2ees[e2ee]["name"]
                volume_info[volume]["host_encryption_key_status"] = e2ees[e2ee][
                    "host_encryption_key_status"
                ]
        if SAFE_MODE_VERSION in api_version:
            volumes = list(arrayv6.get_volumes(destroyed=False).items)
            for vol in range(0, len(volumes)):
                name = volumes[vol].name
                volume_info[name]["priority"] = volumes[vol].priority
                volume_info[name]["priority_adjustment"] = volumes[
                    vol
                ].priority_adjustment.priority_adjustment_operator + str(
                    volumes[vol].priority_adjustment.priority_adjustment_value
                )
    cvols = array.list_volumes(connect=True)
    for cvol in range(0, len(cvols)):
        volume = cvols[cvol]["name"]
        voldict = {"host": cvols[cvol]["host"], "lun": cvols[cvol]["lun"]}
        volume_info[volume]["hosts"].append(voldict)
    if ACTIVE_DR_API in api_version:
        voltags = array.list_volumes(tags=True)
        for voltag in range(0, len(voltags)):
            # VASA-internal tags are deliberately excluded from the facts
            if voltags[voltag]["namespace"] != "vasa-integration.purestorage.com":
                volume = voltags[voltag]["name"]
                tagdict = {
                    "key": voltags[voltag]["key"],
                    "value": voltags[voltag]["value"],
                    "copyable": voltags[voltag]["copyable"],
                    "namespace": voltags[voltag]["namespace"],
                }
                volume_info[volume]["tags"].append(tagdict)
    return volume_info
+
+
def generate_host_dict(module, array):
    """Return host details (ports, personality, connected volumes, CHAP
    users, NQN, preferred arrays, VLAN) keyed by host name."""
    api_version = array._list_available_rest_versions()
    host_info = {}
    hosts = array.list_hosts()
    for host in range(0, len(hosts)):
        hostname = hosts[host]["name"]
        tports = []
        all_tports = []
        host_all_info = None
        try:
            host_all_info = array.get_host(hostname, all=True)
        except purestorage.PureHTTPError as err:
            if err.code == 400:
                # Host vanished between list and get - skip it entirely
                continue
        if host_all_info:
            # Flatten the per-connection target_port lists ...
            for tport in range(0, len(host_all_info)):
                for itport in range(0, len(host_all_info[tport]["target_port"])):
                    tports.append(host_all_info[tport]["target_port"][itport])
            # ... then de-duplicate while preserving first-seen order
            all_tports = list(dict.fromkeys(tports))
        host_info[hostname] = {
            "hgroup": hosts[host]["hgroup"],
            "iqn": hosts[host]["iqn"],
            "wwn": hosts[host]["wwn"],
            "personality": array.get_host(hostname, personality=True)["personality"],
            "target_port": all_tports,
            "volumes": [],
        }
        host_connections = array.list_host_connections(hostname)
        for connection in range(0, len(host_connections)):
            connection_dict = {
                "hostgroup": host_connections[connection]["hgroup"],
                "volume": host_connections[connection]["vol"],
                "lun": host_connections[connection]["lun"],
            }
            host_info[hostname]["volumes"].append(connection_dict)
        # CHAP credentials only exist for iSCSI (IQN-bearing) hosts
        if host_info[hostname]["iqn"]:
            chap_data = array.get_host(hostname, chap=True)
            host_info[hostname]["target_user"] = chap_data["target_user"]
            host_info[hostname]["host_user"] = chap_data["host_user"]
        if NVME_API_VERSION in api_version:
            host_info[hostname]["nqn"] = hosts[host]["nqn"]
    if PREFERRED_API_VERSION in api_version:
        hosts = array.list_hosts(preferred_array=True)
        for host in range(0, len(hosts)):
            hostname = hosts[host]["name"]
            host_info[hostname]["preferred_array"] = hosts[host]["preferred_array"]
    if VLAN_VERSION in api_version:
        # VLAN data requires the v6 REST client; only local hosts carry it
        arrayv6 = get_array(module)
        hosts = list(arrayv6.get_hosts().items)
        for host in range(0, len(hosts)):
            if hosts[host].is_local:
                hostname = hosts[host].name
                host_info[hostname]["vlan"] = getattr(hosts[host], "vlan", None)
    return host_info
+
+
def generate_pgroups_dict(module, array):
    """Return protection group details (members, schedules, retention,
    snapshot transfers, space metrics) keyed by protection group name."""
    pgroups_info = {}
    api_version = array._list_available_rest_versions()
    pgroups = array.list_pgroups()
    if SHARED_CAP_API_VERSION in api_version:
        # v6 client is needed for destroyed-member and retention-lock data
        array_v6 = get_array(module)
        deleted_enabled = True
    else:
        deleted_enabled = False
    for pgroup in range(0, len(pgroups)):
        protgroup = pgroups[pgroup]["name"]
        pgroups_info[protgroup] = {
            "hgroups": pgroups[pgroup]["hgroups"],
            "hosts": pgroups[pgroup]["hosts"],
            "source": pgroups[pgroup]["source"],
            "targets": pgroups[pgroup]["targets"],
            "volumes": pgroups[pgroup]["volumes"],
        }
        try:
            prot_sched = array.get_pgroup(protgroup, schedule=True)
            prot_reten = array.get_pgroup(protgroup, retention=True)
            snap_transfers = array.get_pgroup(
                protgroup, snap=True, transfer=True, pending=True
            )
        except purestorage.PureHTTPError as err:
            # Group vanished between list and get - skip it
            # NOTE(review): if err.code != 400 execution falls through with
            # prot_sched/prot_reten/snap_transfers unbound (NameError) - confirm
            if err.code == 400:
                continue
        # Schedule details are only meaningful when a schedule is active
        if prot_sched["snap_enabled"] or prot_sched["replicate_enabled"]:
            pgroups_info[protgroup]["snap_frequency"] = prot_sched["snap_frequency"]
            pgroups_info[protgroup]["replicate_frequency"] = prot_sched[
                "replicate_frequency"
            ]
            pgroups_info[protgroup]["snap_enabled"] = prot_sched["snap_enabled"]
            pgroups_info[protgroup]["replicate_enabled"] = prot_sched[
                "replicate_enabled"
            ]
            pgroups_info[protgroup]["snap_at"] = prot_sched["snap_at"]
            pgroups_info[protgroup]["replicate_at"] = prot_sched["replicate_at"]
            pgroups_info[protgroup]["replicate_blackout"] = prot_sched[
                "replicate_blackout"
            ]
        pgroups_info[protgroup]["per_day"] = prot_reten["per_day"]
        pgroups_info[protgroup]["target_per_day"] = prot_reten["target_per_day"]
        pgroups_info[protgroup]["target_days"] = prot_reten["target_days"]
        pgroups_info[protgroup]["days"] = prot_reten["days"]
        pgroups_info[protgroup]["all_for"] = prot_reten["all_for"]
        pgroups_info[protgroup]["target_all_for"] = prot_reten["target_all_for"]
        pgroups_info[protgroup]["snaps"] = {}
        for snap_transfer in range(0, len(snap_transfers)):
            snap = snap_transfers[snap_transfer]["name"]
            pgroups_info[protgroup]["snaps"][snap] = {
                "time_remaining": snap_transfers[snap_transfer]["time_remaining"],
                "created": snap_transfers[snap_transfer]["created"],
                "started": snap_transfers[snap_transfer]["started"],
                "completed": snap_transfers[snap_transfer]["completed"],
                "physical_bytes_written": snap_transfers[snap_transfer][
                    "physical_bytes_written"
                ],
                "data_transferred": snap_transfers[snap_transfer]["data_transferred"],
                "progress": snap_transfers[snap_transfer]["progress"],
            }
        if deleted_enabled:
            # Collect member volumes that are destroyed but not yet eradicated
            pgroups_info[protgroup]["deleted_volumes"] = []
            volumes = list(
                array_v6.get_protection_groups_volumes(group_names=[protgroup]).items
            )
            if volumes:
                for volume in range(0, len(volumes)):
                    if volumes[volume].member["destroyed"]:
                        pgroups_info[protgroup]["deleted_volumes"].append(
                            volumes[volume].member["name"]
                        )
            else:
                pgroups_info[protgroup]["deleted_volumes"] = None
        if PER_PG_VERSION in api_version:
            # Best-effort: retention lock / eradication config may be absent
            try:
                pgroups_info[protgroup]["retention_lock"] = list(
                    array_v6.get_protection_groups(names=[protgroup]).items
                )[0].retention_lock
                pgroups_info[protgroup]["manual_eradication"] = list(
                    array_v6.get_protection_groups(names=[protgroup]).items
                )[0].eradication_config.manual_eradication
            except Exception:
                pass
    if V6_MINIMUM_API_VERSION in api_version:
        # Merge v6 per-group space metrics into the entries built above
        pgroups = list(array_v6.get_protection_groups().items)
        for pgroup in range(0, len(pgroups)):
            name = pgroups[pgroup].name
            pgroups_info[name]["snapshots"] = getattr(
                pgroups[pgroup].space, "snapshots", None
            )
            pgroups_info[name]["shared"] = getattr(
                pgroups[pgroup].space, "shared", None
            )
            pgroups_info[name]["data_reduction"] = getattr(
                pgroups[pgroup].space, "data_reduction", None
            )
            pgroups_info[name]["thin_provisioning"] = getattr(
                pgroups[pgroup].space, "thin_provisioning", None
            )
            pgroups_info[name]["total_physical"] = getattr(
                pgroups[pgroup].space, "total_physical", None
            )
            pgroups_info[name]["total_provisioned"] = getattr(
                pgroups[pgroup].space, "total_provisioned", None
            )
            pgroups_info[name]["total_reduction"] = getattr(
                pgroups[pgroup].space, "total_reduction", None
            )
            pgroups_info[name]["unique"] = getattr(
                pgroups[pgroup].space, "unique", None
            )
            pgroups_info[name]["virtual"] = getattr(
                pgroups[pgroup].space, "virtual", None
            )
            pgroups_info[name]["replication"] = getattr(
                pgroups[pgroup].space, "replication", None
            )
            pgroups_info[name]["used_provisioned"] = getattr(
                pgroups[pgroup].space, "used_provisioned", None
            )
    return pgroups_info
+
+
def generate_rl_dict(module, array):
    """Return pod replica link details, keyed by local pod name.

    Emits an Ansible warning (and returns what was gathered so far) if the
    installed purestorage SDK is too old to list replica links.
    """
    rl_info = {}
    api_version = array._list_available_rest_versions()
    if ACTIVE_DR_API in api_version:
        try:
            rlinks = array.list_pod_replica_links()
            for rlink in range(0, len(rlinks)):
                link_name = rlinks[rlink]["local_pod_name"]
                # recovery_point is epoch-ms; render it as a local timestamp
                since_epoch = rlinks[rlink]["recovery_point"] / 1000
                recovery_datetime = time.strftime(
                    "%Y-%m-%d %H:%M:%S", time.localtime(since_epoch)
                )
                rl_info[link_name] = {
                    "status": rlinks[rlink]["status"],
                    "direction": rlinks[rlink]["direction"],
                    "lag": str(rlinks[rlink]["lag"] / 1000) + "s",
                    "remote_pod_name": rlinks[rlink]["remote_pod_name"],
                    "remote_names": rlinks[rlink]["remote_names"],
                    "recovery_point": recovery_datetime,
                }
        except Exception:
            # BUGFIX: corrected "hisher" typo in the user-facing warning
            module.warn("Replica Links info requires purestorage SDK 1.19 or higher")
    return rl_info
+
+
def generate_del_pods_dict(module, array):
    """Return details of destroyed (pending eradication) pods, keyed by name.

    Mirrors generate_pods_dict but for pods awaiting eradication, adding
    time_remaining and (where supported) the pod quota limit.
    """
    pods_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        pods = array.list_pods(mediator=True, pending_only=True)
        for pod in range(0, len(pods)):
            acpod = pods[pod]["name"]
            pods_info[acpod] = {
                "source": pods[pod]["source"],
                "arrays": pods[pod]["arrays"],
                "mediator": pods[pod]["mediator"],
                "mediator_version": pods[pod]["mediator_version"],
                "time_remaining": pods[pod]["time_remaining"],
            }
            if ACTIVE_DR_API in api_version:
                if pods_info[acpod]["arrays"][0]["frozen_at"]:
                    # frozen_at is epoch-ms; render as a local timestamp
                    frozen_time = pods_info[acpod]["arrays"][0]["frozen_at"] / 1000
                    frozen_datetime = time.strftime(
                        "%Y-%m-%d %H:%M:%S", time.localtime(frozen_time)
                    )
                    pods_info[acpod]["arrays"][0]["frozen_at"] = frozen_datetime
                pods_info[acpod]["link_source_count"] = pods[pod]["link_source_count"]
                pods_info[acpod]["link_target_count"] = pods[pod]["link_target_count"]
                pods_info[acpod]["promotion_status"] = pods[pod]["promotion_status"]
                pods_info[acpod]["requested_promotion_state"] = pods[pod][
                    "requested_promotion_state"
                ]
        if PREFERRED_API_VERSION in api_version:
            pods_fp = array.list_pods(failover_preference=True, pending_only=True)
            for pod in range(0, len(pods_fp)):
                acpod = pods_fp[pod]["name"]
                pods_info[acpod]["failover_preference"] = pods_fp[pod][
                    "failover_preference"
                ]
        if V6_MINIMUM_API_VERSION in api_version:
            arrayv6 = get_array(module)
            pods = list(arrayv6.get_pods(destroyed=True).items)
            for pod in range(0, len(pods)):
                name = pods[pod].name
                # Guard space metrics with getattr/None, consistent with
                # generate_pods_dict, so a missing field cannot raise
                for space_field in (
                    "snapshots",
                    "shared",
                    "data_reduction",
                    "thin_provisioning",
                    "total_physical",
                    "total_provisioned",
                    "total_reduction",
                    "unique",
                    "virtual",
                    "replication",
                    "used_provisioned",
                ):
                    pods_info[name][space_field] = getattr(
                        pods[pod].space, space_field, None
                    )
                if POD_QUOTA_VERSION in api_version:
                    pods_info[name]["quota_limit"] = pods[pod].quota_limit
    return pods_info
+
+
def generate_pods_dict(module, array):
    """Return details of all live (non-destroyed) pods, keyed by pod name."""
    pods_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        for pod in array.list_pods(mediator=True):
            acpod = pod["name"]
            pods_info[acpod] = {
                "source": pod["source"],
                "arrays": pod["arrays"],
                "mediator": pod["mediator"],
                "mediator_version": pod["mediator_version"],
            }
            if ACTIVE_DR_API in api_version:
                if pods_info[acpod]["arrays"][0]["frozen_at"]:
                    # frozen_at is epoch-ms; render as a local timestamp
                    frozen_time = pods_info[acpod]["arrays"][0]["frozen_at"] / 1000
                    pods_info[acpod]["arrays"][0]["frozen_at"] = time.strftime(
                        "%Y-%m-%d %H:%M:%S", time.localtime(frozen_time)
                    )
                for dr_field in (
                    "link_source_count",
                    "link_target_count",
                    "promotion_status",
                    "requested_promotion_state",
                ):
                    pods_info[acpod][dr_field] = pod[dr_field]
        if PREFERRED_API_VERSION in api_version:
            for pod in array.list_pods(failover_preference=True):
                pods_info[pod["name"]]["failover_preference"] = pod[
                    "failover_preference"
                ]
        if V6_MINIMUM_API_VERSION in api_version:
            # Merge v6 per-pod space metrics into the entries built above
            arrayv6 = get_array(module)
            for pod in list(arrayv6.get_pods(destroyed=False).items):
                entry = pods_info[pod.name]
                for space_field in (
                    "snapshots",
                    "shared",
                    "data_reduction",
                    "thin_provisioning",
                    "total_physical",
                    "total_provisioned",
                    "total_reduction",
                    "unique",
                    "virtual",
                    "replication",
                    "used_provisioned",
                ):
                    entry[space_field] = getattr(pod.space, space_field, None)
    return pods_info
+
+
def generate_conn_array_dict(module, array):
    """Return details of arrays connected to this one, keyed by array name.

    Pre-FC-replication arrays are queried via the REST 1.x client
    (``array``); newer arrays via the REST 2.x client obtained from
    ``get_array(module)``.
    """
    conn_array_info = {}
    api_version = array._list_available_rest_versions()
    if FC_REPL_API_VERSION not in api_version:
        # Legacy (REST 1.x) path.
        carrays = array.list_array_connections()
        for carray in range(0, len(carrays)):
            arrayname = carrays[carray]["array_name"]
            conn_array_info[arrayname] = {
                "array_id": carrays[carray]["id"],
                "throttled": carrays[carray]["throttled"],
                "version": carrays[carray]["version"],
                "type": carrays[carray]["type"],
                "mgmt_ip": carrays[carray]["management_address"],
                "repl_ip": carrays[carray]["replication_address"],
            }
            # Purity 5.3+ reports "status"; older releases only "connected".
            if P53_API_VERSION in api_version:
                conn_array_info[arrayname]["status"] = carrays[carray]["status"]
            else:
                conn_array_info[arrayname]["connected"] = carrays[carray]["connected"]
        # Second pass: attach throttling detail for throttled connections.
        throttles = array.list_array_connections(throttle=True)
        for throttle in range(0, len(throttles)):
            arrayname = throttles[throttle]["array_name"]
            if conn_array_info[arrayname]["throttled"]:
                conn_array_info[arrayname]["throttling"] = {
                    "default_limit": throttles[throttle]["default_limit"],
                    "window_limit": throttles[throttle]["window_limit"],
                    "window": throttles[throttle]["window"],
                }
    else:
        # REST 2.x path.
        arrayv6 = get_array(module)
        carrays = list(arrayv6.get_array_connections().items)
        for carray in range(0, len(carrays)):
            arrayname = carrays[carray].name
            conn_array_info[arrayname] = {
                "array_id": carrays[carray].id,
                "version": getattr(carrays[carray], "version", None),
                "status": carrays[carray].status,
                "type": carrays[carray].type,
                "mgmt_ip": getattr(carrays[carray], "management_address", "-"),
                "repl_ip": getattr(carrays[carray], "replication_addresses", "-"),
                "transport": carrays[carray].replication_transport,
            }

            # A non-empty throttle object means throttling is configured;
            # each sub-field may be individually absent, hence the
            # per-field try/except AttributeError guards below.
            if bool(carrays[carray].throttle.to_dict()):
                conn_array_info[arrayname]["throttled"] = True
                conn_array_info[arrayname]["throttling"] = {}
                try:
                    if bool(carrays[carray].throttle.window):
                        conn_array_info[arrayname]["throttling"]["window"] = carrays[
                            carray
                        ].throttle.window.to_dict()
                except AttributeError:
                    pass
                try:
                    if bool(carrays[carray].throttle.default_limit):
                        conn_array_info[arrayname]["throttling"][
                            "default_limit"
                        ] = carrays[carray].throttle.default_limit
                except AttributeError:
                    pass
                try:
                    if bool(carrays[carray].throttle.window_limit):
                        conn_array_info[arrayname]["throttling"][
                            "window_limit"
                        ] = carrays[carray].throttle.window_limit
                except AttributeError:
                    pass
            else:
                conn_array_info[arrayname]["throttled"] = False
    return conn_array_info
+
+
def generate_apps_dict(array):
    """Collect details of installed array apps, keyed by app name."""
    apps_info = {}
    api_version = array._list_available_rest_versions()
    if SAN_REQUIRED_API_VERSION in api_version:
        for app_data in array.list_apps():
            apps_info[app_data["name"]] = {
                "version": app_data["version"],
                "status": app_data["status"],
                "description": app_data["description"],
            }
        if P53_API_VERSION in api_version:
            # Purity 5.3+ exposes per-app node details as well.
            for node_data in array.list_app_nodes():
                node_name = node_data["name"]
                apps_info[node_name]["index"] = node_data["index"]
                apps_info[node_name]["vnc"] = node_data["vnc"]
    return apps_info
+
+
def generate_vgroups_dict(module, array):
    """Build a dict of live volume groups, keyed by vgroup name.

    REST 1.x supplies the member volumes; REST 2.x (when available) adds
    space and QoS data, and SafeMode-capable arrays add the priority
    adjustment string.
    """
    vgroups_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        vgroups = array.list_vgroups(pending=False)
        for vgroup in range(0, len(vgroups)):
            virtgroup = vgroups[vgroup]["name"]
            vgroups_info[virtgroup] = {
                "volumes": vgroups[vgroup]["volumes"],
            }
    if V6_MINIMUM_API_VERSION in api_version:
        arrayv6 = get_array(module)
        vgroups = list(arrayv6.get_volume_groups(destroyed=False).items)
        for vgroup in range(0, len(vgroups)):
            name = vgroups[vgroup].name
            vgroups_info[name]["snapshots_space"] = vgroups[vgroup].space.snapshots
            # Provide system as this matches the old naming convention
            vgroups_info[name]["system"] = vgroups[vgroup].space.unique
            vgroups_info[name]["unique_space"] = vgroups[vgroup].space.unique
            vgroups_info[name]["virtual_space"] = vgroups[vgroup].space.virtual
            vgroups_info[name]["data_reduction"] = vgroups[vgroup].space.data_reduction
            vgroups_info[name]["total_reduction"] = vgroups[
                vgroup
            ].space.total_reduction
            vgroups_info[name]["total_provisioned"] = vgroups[
                vgroup
            ].space.total_provisioned
            vgroups_info[name]["thin_provisioning"] = vgroups[
                vgroup
            ].space.thin_provisioning
            # BUGFIX: this value was previously wrapped in a 1-tuple by an
            # accidental trailing comma; report a scalar like every other
            # space metric.
            vgroups_info[name]["used_provisioned"] = getattr(
                vgroups[vgroup].space, "used_provisioned", None
            )
            vgroups_info[name]["bandwidth_limit"] = getattr(
                vgroups[vgroup].qos, "bandwidth_limit", ""
            )
            vgroups_info[name]["iops_limit"] = getattr(
                vgroups[vgroup].qos, "iops_limit", ""
            )
        if SAFE_MODE_VERSION in api_version:
            for vgroup in range(0, len(vgroups)):
                name = vgroups[vgroup].name
                vgroups_info[name]["priority_adjustment"] = vgroups[
                    vgroup
                ].priority_adjustment.priority_adjustment_operator + str(
                    vgroups[vgroup].priority_adjustment.priority_adjustment_value
                )
    return vgroups_info
+
+
def generate_del_vgroups_dict(module, array):
    """Build a dict of destroyed (pending eradication) volume groups.

    Same layout as generate_vgroups_dict, plus ``time_remaining`` giving
    the time until the group is eradicated.
    """
    vgroups_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        vgroups = array.list_vgroups(pending_only=True)
        for vgroup in range(0, len(vgroups)):
            virtgroup = vgroups[vgroup]["name"]
            vgroups_info[virtgroup] = {
                "volumes": vgroups[vgroup]["volumes"],
            }
    if V6_MINIMUM_API_VERSION in api_version:
        arrayv6 = get_array(module)
        vgroups = list(arrayv6.get_volume_groups(destroyed=True).items)
        for vgroup in range(0, len(vgroups)):
            name = vgroups[vgroup].name
            vgroups_info[name]["snapshots_space"] = vgroups[vgroup].space.snapshots
            # Provide system as this matches the old naming convention
            vgroups_info[name]["system"] = vgroups[vgroup].space.unique
            vgroups_info[name]["unique_space"] = vgroups[vgroup].space.unique
            vgroups_info[name]["virtual_space"] = vgroups[vgroup].space.virtual
            vgroups_info[name]["data_reduction"] = vgroups[vgroup].space.data_reduction
            vgroups_info[name]["total_reduction"] = vgroups[
                vgroup
            ].space.total_reduction
            vgroups_info[name]["total_provisioned"] = vgroups[
                vgroup
            ].space.total_provisioned
            vgroups_info[name]["thin_provisioning"] = vgroups[
                vgroup
            ].space.thin_provisioning
            # BUGFIX: both values below were previously wrapped in 1-tuples
            # by accidental trailing commas; report scalars like the other
            # fields.
            vgroups_info[name]["used_provisioned"] = getattr(
                vgroups[vgroup].space, "used_provisioned", None
            )
            vgroups_info[name]["time_remaining"] = vgroups[vgroup].time_remaining
            vgroups_info[name]["bandwidth_limit"] = getattr(
                vgroups[vgroup].qos, "bandwidth_limit", ""
            )
            vgroups_info[name]["iops_limit"] = getattr(
                vgroups[vgroup].qos, "iops_limit", ""
            )
        if SAFE_MODE_VERSION in api_version:
            for vgroup in range(0, len(vgroups)):
                name = vgroups[vgroup].name
                vgroups_info[name]["priority_adjustment"] = vgroups[
                    vgroup
                ].priority_adjustment.priority_adjustment_operator + str(
                    vgroups[vgroup].priority_adjustment.priority_adjustment_value
                )
    return vgroups_info
+
+
def generate_certs_dict(array):
    """Return SSL certificate details, keyed by certificate name."""
    certs_info = {}
    api_version = array._list_available_rest_versions()
    if P53_API_VERSION in api_version:
        stamp_format = "%a, %d %b %Y %H:%M:%S %Z"
        for cert_data in array.list_certificates():
            certs_info[cert_data["name"]] = {
                "status": cert_data["status"],
                "issued_to": cert_data["issued_to"],
                # Validity timestamps arrive as epoch milliseconds.
                "valid_from": time.strftime(
                    stamp_format, time.localtime(cert_data["valid_from"] / 1000)
                ),
                "locality": cert_data["locality"],
                "country": cert_data["country"],
                "issued_by": cert_data["issued_by"],
                "valid_to": time.strftime(
                    stamp_format, time.localtime(cert_data["valid_to"] / 1000)
                ),
                "state": cert_data["state"],
                "key_size": cert_data["key_size"],
                "org_unit": cert_data["organizational_unit"],
                "common_name": cert_data["common_name"],
                "organization": cert_data["organization"],
                "email": cert_data["email"],
            }
    return certs_info
+
+
def generate_kmip_dict(array):
    """Return KMIP server configuration, keyed by connection name."""
    kmip_info = {}
    api_version = array._list_available_rest_versions()
    if P53_API_VERSION in api_version:
        for kmip_data in array.list_kmip():
            kmip_info[kmip_data["name"]] = {
                "certificate": kmip_data["certificate"],
                "ca_cert_configured": kmip_data["ca_certificate_configured"],
                "uri": kmip_data["uri"],
            }
    return kmip_info
+
+
def generate_nfs_offload_dict(module, array):
    """Return NFS offload target details, keyed by target name."""
    offload_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        for target_data in array.list_nfs_offload():
            offload_info[target_data["name"]] = {
                "status": target_data["status"],
                "mount_point": target_data["mount_point"],
                "protocol": target_data["protocol"],
                "mount_options": target_data["mount_options"],
                "address": target_data["address"],
            }
    if V6_MINIMUM_API_VERSION in api_version:
        # REST 2.x adds per-target space metrics; attributes missing on
        # older Purity releases are reported as None.
        arrayv6 = get_array(module)
        for offload_data in list(arrayv6.get_offloads(protocol="nfs").items):
            target_name = offload_data.name
            for space_attr in (
                "snapshots",
                "shared",
                "data_reduction",
                "thin_provisioning",
                "total_physical",
                "total_provisioned",
                "total_reduction",
                "unique",
                "virtual",
                "replication",
                "used_provisioned",
            ):
                offload_info[target_name][space_attr] = getattr(
                    offload_data.space, space_attr, None
                )
    return offload_info
+
+
def generate_s3_offload_dict(module, array):
    """Return S3 offload target details, keyed by target name."""
    offload_info = {}
    api_version = array._list_available_rest_versions()
    if S3_REQUIRED_API_VERSION in api_version:
        for target_data in array.list_s3_offload():
            target_name = target_data["name"]
            offload_info[target_name] = {
                "status": target_data["status"],
                "bucket": target_data["bucket"],
                "protocol": target_data["protocol"],
                "access_key_id": target_data["access_key_id"],
            }
            if P53_API_VERSION in api_version:
                # Placement strategy only exists on Purity 5.3+.
                offload_info[target_name]["placement_strategy"] = target_data[
                    "placement_strategy"
                ]
    if V6_MINIMUM_API_VERSION in api_version:
        # REST 2.x adds per-target space metrics; attributes missing on
        # older Purity releases are reported as None.
        arrayv6 = get_array(module)
        for offload_data in list(arrayv6.get_offloads(protocol="s3").items):
            target_name = offload_data.name
            for space_attr in (
                "snapshots",
                "shared",
                "data_reduction",
                "thin_provisioning",
                "total_physical",
                "total_provisioned",
                "total_reduction",
                "unique",
                "virtual",
                "replication",
                "used_provisioned",
            ):
                offload_info[target_name][space_attr] = getattr(
                    offload_data.space, space_attr, None
                )
    return offload_info
+
+
def generate_azure_offload_dict(module, array):
    """Return Azure offload target details, keyed by target name."""
    offload_info = {}
    api_version = array._list_available_rest_versions()
    if P53_API_VERSION in api_version:
        for target_data in array.list_azure_offload():
            offload_info[target_data["name"]] = {
                "status": target_data["status"],
                "account_name": target_data["account_name"],
                "protocol": target_data["protocol"],
                "secret_access_key": target_data["secret_access_key"],
                "container_name": target_data["container_name"],
            }
    if V6_MINIMUM_API_VERSION in api_version:
        # REST 2.x adds per-target space metrics; attributes missing on
        # older Purity releases are reported as None.
        arrayv6 = get_array(module)
        for offload_data in list(arrayv6.get_offloads(protocol="azure").items):
            target_name = offload_data.name
            for space_attr in (
                "snapshots",
                "shared",
                "data_reduction",
                "thin_provisioning",
                "total_physical",
                "total_provisioned",
                "total_reduction",
                "unique",
                "virtual",
                "replication",
                "used_provisioned",
            ):
                offload_info[target_name][space_attr] = getattr(
                    offload_data.space, space_attr, None
                )
    return offload_info
+
+
def generate_google_offload_dict(array):
    """Return Google Cloud offload target space details, keyed by name.

    Space attributes missing on older Purity releases are reported as None.
    """
    offload_info = {}
    for offload_data in list(array.get_offloads(protocol="google-cloud").items):
        target_space = offload_data.space
        offload_info[offload_data.name] = {
            space_attr: getattr(target_space, space_attr, None)
            for space_attr in (
                "snapshots",
                "shared",
                "data_reduction",
                "thin_provisioning",
                "total_physical",
                "total_provisioned",
                "total_reduction",
                "unique",
                "virtual",
                "replication",
                "used_provisioned",
            )
        }
    return offload_info
+
+
def generate_hgroups_dict(module, array):
    """Return host group details (hosts, protection groups, volumes, space)."""
    hgroups_info = {}
    api_version = array._list_available_rest_versions()
    for hgroup_data in array.list_hgroups():
        hgroups_info[hgroup_data["name"]] = {
            "hosts": hgroup_data["hosts"],
            "pgs": [],
            "vols": [],
        }
    # Second pass: protection groups each host group belongs to.
    for pg_data in array.list_hgroups(protect=True):
        hgroups_info[pg_data["name"]]["pgs"].append(pg_data["protection_group"])
    # Third pass: [volume, LUN] pairs connected to each host group.
    for conn_data in array.list_hgroups(connect=True):
        hgroups_info[conn_data["name"]]["vols"].append(
            [conn_data["vol"], conn_data["lun"]]
        )
    if V6_MINIMUM_API_VERSION in api_version:
        # REST 2.x adds per-group space metrics.
        arrayv6 = get_array(module)
        for hgroup_v6 in list(arrayv6.get_host_groups().items):
            name = hgroup_v6.name
            space_info = hgroup_v6.space
            hgroups_info[name]["snapshots"] = space_info.snapshots
            hgroups_info[name]["data_reduction"] = space_info.data_reduction
            hgroups_info[name]["thin_provisioning"] = space_info.thin_provisioning
            hgroups_info[name]["total_physical"] = space_info.total_physical
            hgroups_info[name]["total_provisioned"] = space_info.total_provisioned
            hgroups_info[name]["total_reduction"] = space_info.total_reduction
            hgroups_info[name]["unique"] = space_info.unique
            hgroups_info[name]["virtual"] = space_info.virtual
            hgroups_info[name]["used_provisioned"] = getattr(
                space_info, "used_provisioned", None
            )
    return hgroups_info
+
+
def generate_interfaces_dict(array):
    """Map each port name to its WWN, IQN or (NVMe-capable arrays) NQN."""
    api_version = array._list_available_rest_versions()
    int_info = {}
    for port_data in array.list_ports():
        port_name = port_data["name"]
        if port_data["wwn"]:
            int_info[port_name] = port_data["wwn"]
        if port_data["iqn"]:
            int_info[port_name] = port_data["iqn"]
        if NVME_API_VERSION in api_version:
            if port_data["nqn"]:
                int_info[port_name] = port_data["nqn"]
    return int_info
+
+
def generate_vm_dict(array):
    """Return vVol virtual machine details, keyed by VM name."""
    vm_info = {}
    for machine_data in list(array.get_virtual_machines(vm_type="vvol").items):
        recover_ctx = machine_data.recover_context
        vm_info[machine_data.name] = {
            "vm_type": machine_data.vm_type,
            "vm_id": machine_data.vm_id,
            "destroyed": machine_data.destroyed,
            "created": machine_data.created,
            "time_remaining": getattr(machine_data, "time_remaining", None),
            # Most recent snapshot recovery context, when one exists.
            "latest_snapshot_name": getattr(recover_ctx, "name", None),
            "latest_snapshot_id": getattr(recover_ctx, "id", None),
        }
    return vm_info
+
+
def generate_alerts_dict(array):
    """Return array alert details, keyed by alert name.

    Timestamps are converted from epoch milliseconds to local-time
    "YYYY-MM-DD HH:MM:SS" strings; an alert lacking a given timestamp
    attribute gets an empty string for that field.
    """

    def _local_time(alert, attribute):
        # Epoch-ms attribute -> formatted local time, or "" when absent.
        try:
            return time.strftime(
                "%Y-%m-%d %H:%M:%S",
                time.localtime(getattr(alert, attribute) / 1000),
            )
        except AttributeError:
            return ""

    alerts_info = {}
    alerts = list(array.get_alerts().items)
    for alert in range(0, len(alerts)):
        name = alerts[alert].name
        alerts_info[name] = {
            "flagged": alerts[alert].flagged,
            "category": alerts[alert].category,
            "code": alerts[alert].code,
            "issue": alerts[alert].issue,
            "kb_url": alerts[alert].knowledge_base_url,
            "summary": alerts[alert].summary,
            "id": alerts[alert].id,
            "state": alerts[alert].state,
            "severity": alerts[alert].severity,
            "component_name": alerts[alert].component_name,
            "component_type": alerts[alert].component_type,
            # BUGFIX: the original's "created" fallback assigned
            # updated_datetime instead of created_datetime, causing a
            # NameError (and clobbering "updated") for alerts with no
            # created timestamp.
            "created": _local_time(alerts[alert], "created"),
            "closed": _local_time(alerts[alert], "closed"),
            "notified": _local_time(alerts[alert], "notified"),
            "updated": _local_time(alerts[alert], "updated"),
            "actual": getattr(alerts[alert], "actual", ""),
            "expected": getattr(alerts[alert], "expected", ""),
        }
    return alerts_info
+
+
def generate_vmsnap_dict(array):
    """Return vVol virtual machine snapshot details, keyed by snapshot name."""
    vmsnap_info = {}
    for snap_data in list(array.get_virtual_machine_snapshots(vm_type="vvol").items):
        recover_ctx = snap_data.recover_context
        vmsnap_info[snap_data.name] = {
            "vm_type": snap_data.vm_type,
            "vm_id": snap_data.vm_id,
            "destroyed": snap_data.destroyed,
            "created": snap_data.created,
            "time_remaining": getattr(snap_data, "time_remaining", None),
            # Protection-group snapshot recovery context, when one exists.
            "latest_pgsnapshot_name": getattr(recover_ctx, "name", None),
            "latest_pgsnapshot_id": getattr(recover_ctx, "id", None),
        }
    return vmsnap_info
+
+
def main():
    """Module entry point: validate gather_subset and assemble the info dict."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(gather_subset=dict(default="minimum", type="list", elements="str"))
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)
    array = get_system(module)
    api_version = array._list_available_rest_versions()

    subset = [test.lower() for test in module.params["gather_subset"]]
    # BUGFIX: "alerts" was checked below but missing from valid_subsets,
    # so requesting it explicitly always failed validation.
    valid_subsets = (
        "all",
        "minimum",
        "config",
        "performance",
        "capacity",
        "network",
        "subnet",
        "interfaces",
        "hgroups",
        "pgroups",
        "hosts",
        "admins",
        "volumes",
        "snapshots",
        "pods",
        "replication",
        "vgroups",
        "offload",
        "apps",
        "arrays",
        "certs",
        "kmip",
        "clients",
        "policies",
        "dir_snaps",
        "filesystems",
        "alerts",
        "virtual_machines",
    )
    subset_test = (test in valid_subsets for test in subset)
    if not all(subset_test):
        module.fail_json(
            # BUGFIX: message previously read "value must gather_subset must be".
            msg="value of gather_subset must be one or more of: %s, got: %s"
            % (",".join(valid_subsets), ",".join(subset))
        )

    info = {}

    # "apps" needs the default dict too (array_model check below).
    if "minimum" in subset or "all" in subset or "apps" in subset:
        info["default"] = generate_default_dict(module, array)
    if "performance" in subset or "all" in subset:
        info["performance"] = generate_perf_dict(array)
    if "config" in subset or "all" in subset:
        info["config"] = generate_config_dict(module, array)
    if "capacity" in subset or "all" in subset:
        info["capacity"] = generate_capacity_dict(module, array)
    if "network" in subset or "all" in subset:
        info["network"] = generate_network_dict(module, array)
    if "subnet" in subset or "all" in subset:
        info["subnet"] = generate_subnet_dict(array)
    if "interfaces" in subset or "all" in subset:
        info["interfaces"] = generate_interfaces_dict(array)
    if "hosts" in subset or "all" in subset:
        info["hosts"] = generate_host_dict(module, array)
    if "volumes" in subset or "all" in subset:
        info["volumes"] = generate_vol_dict(module, array)
        info["deleted_volumes"] = generate_del_vol_dict(module, array)
    if "snapshots" in subset or "all" in subset:
        info["snapshots"] = generate_snap_dict(module, array)
        info["deleted_snapshots"] = generate_del_snap_dict(module, array)
    if "hgroups" in subset or "all" in subset:
        info["hgroups"] = generate_hgroups_dict(module, array)
    if "pgroups" in subset or "all" in subset:
        info["pgroups"] = generate_pgroups_dict(module, array)
    if "pods" in subset or "all" in subset or "replication" in subset:
        info["replica_links"] = generate_rl_dict(module, array)
        info["pods"] = generate_pods_dict(module, array)
        info["deleted_pods"] = generate_del_pods_dict(module, array)
    if "admins" in subset or "all" in subset:
        info["admins"] = generate_admin_dict(array)
    if "vgroups" in subset or "all" in subset:
        info["vgroups"] = generate_vgroups_dict(module, array)
        info["deleted_vgroups"] = generate_del_vgroups_dict(module, array)
    if "offload" in subset or "all" in subset:
        info["azure_offload"] = generate_azure_offload_dict(module, array)
        info["nfs_offload"] = generate_nfs_offload_dict(module, array)
        info["s3_offload"] = generate_s3_offload_dict(module, array)
    if "apps" in subset or "all" in subset:
        # Cloud Block Store has no hardware apps.
        if "CBS" not in info["default"]["array_model"]:
            info["apps"] = generate_apps_dict(array)
        else:
            info["apps"] = {}
    if "arrays" in subset or "all" in subset:
        info["arrays"] = generate_conn_array_dict(module, array)
    if "certs" in subset or "all" in subset:
        info["certs"] = generate_certs_dict(array)
    if "kmip" in subset or "all" in subset:
        info["kmip"] = generate_kmip_dict(array)
    if FILES_API_VERSION in api_version:
        # These subsets require the REST 2.x client.
        array_v6 = get_array(module)
        if "offload" in subset or "all" in subset:
            info["google_offload"] = generate_google_offload_dict(array_v6)
        if "filesystems" in subset or "all" in subset:
            info["filesystems"] = generate_filesystems_dict(array_v6)
        if "policies" in subset or "all" in subset:
            if NFS_USER_MAP_VERSION in api_version:
                user_map = True
            else:
                user_map = False
            if DIR_QUOTA_API_VERSION in api_version:
                quota = True
            else:
                quota = False
            info["policies"] = generate_policies_dict(array_v6, quota, user_map)
        if "clients" in subset or "all" in subset:
            info["clients"] = generate_clients_dict(array_v6)
        if "dir_snaps" in subset or "all" in subset:
            info["dir_snaps"] = generate_dir_snaps_dict(array_v6)
        if "snapshots" in subset or "all" in subset:
            info["pg_snapshots"] = generate_pgsnaps_dict(array_v6)
        if "alerts" in subset or "all" in subset:
            info["alerts"] = generate_alerts_dict(array_v6)
        if VM_VERSION in api_version and (
            "virtual_machines" in subset or "all" in subset
        ):
            info["virtual_machines"] = generate_vm_dict(array_v6)
            info["virtual_machines_snaps"] = generate_vmsnap_dict(array_v6)

    module.exit_json(changed=False, purefa_info=info)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py
new file mode 100644
index 000000000..8e65ee07e
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_inventory
+short_description: Collect information from Pure Storage FlashArray
+version_added: '1.0.0'
+description:
+ - Collect hardware inventory information from a Pure Storage Flasharray
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: collect FlashArray inventory
+ purestorage.flasharray.purefa_inventory:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: show inventory information
+ debug:
+ msg: "{{ array_info['purefa_inv'] }}"
+
+"""
+
+RETURN = r"""
+purefa_inventory:
+ description: Returns the inventory information for the FlashArray
+ returned: always
+ type: dict
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+
+NEW_API_VERSION = "2.2"
+SFP_API_VERSION = "2.16"
+
+
+def generate_new_hardware_dict(array, versions):
+ hw_info = {
+ "fans": {},
+ "controllers": {},
+ "temps": {},
+ "drives": {},
+ "interfaces": {},
+ "power": {},
+ "chassis": {},
+ "tempatures": {},
+ }
+ components = list(array.get_hardware().items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ if components[component].type == "chassis":
+ hw_info["chassis"][component_name] = {
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "identify_enabled": components[component].identify_enabled,
+ }
+ if components[component].type == "controller":
+ hw_info["controllers"][component_name] = {
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "identify_enabled": components[component].identify_enabled,
+ }
+ if components[component].type == "cooling":
+ hw_info["fans"][component_name] = {
+ "status": components[component].status,
+ }
+ if components[component].type == "temp_sensor":
+ hw_info["controllers"][component_name] = {
+ "status": components[component].status,
+ "temperature": components[component].temperature,
+ }
+ if components[component].type == "drive_bay":
+ hw_info["drives"][component_name] = {
+ "status": components[component].status,
+ "identify_enabled": components[component].identify_enabled,
+ "serial": getattr(components[component], "serial", None),
+ }
+ if components[component].type in [
+ "sas_port",
+ "fc_port",
+ "eth_port",
+ "ib_port",
+ ]:
+ hw_info["interfaces"][component_name] = {
+ "type": components[component].type,
+ "status": components[component].status,
+ "speed": components[component].speed,
+ "connector_type": None,
+ "rx_los": None,
+ "rx_power": None,
+ "static": {},
+ "temperature": None,
+ "tx_bias": None,
+ "tx_fault": None,
+ "tx_power": None,
+ "voltage": None,
+ }
+ if components[component].type == "power_supply":
+ hw_info["power"][component_name] = {
+ "status": components[component].status,
+ "voltage": components[component].voltage,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ }
+ drives = list(array.get_drives().items)
+ for drive in range(0, len(drives)):
+ drive_name = drives[drive].name
+ hw_info["drives"][drive_name] = {
+ "capacity": drives[drive].capacity,
+ "status": drives[drive].status,
+ "protocol": getattr(drives[drive], "protocol", None),
+ "type": drives[drive].type,
+ }
+ if SFP_API_VERSION in versions:
+ port_details = list(array.get_network_interfaces_port_details().items)
+ for port_detail in range(0, len(port_details)):
+ port_name = port_details[port_detail].name
+ hw_info["interfaces"][port_name]["interface_type"] = port_details[
+ port_detail
+ ].interface_type
+ hw_info["interfaces"][port_name]["rx_los"] = (
+ port_details[port_detail].rx_los[0].flag
+ )
+ hw_info["interfaces"][port_name]["rx_power"] = (
+ port_details[port_detail].rx_power[0].measurement
+ )
+ hw_info["interfaces"][port_name]["static"] = {
+ "connector_type": port_details[port_detail].static.connector_type,
+ "vendor_name": port_details[port_detail].static.vendor_name,
+ "vendor_oui": port_details[port_detail].static.vendor_oui,
+ "vendor_serial_number": port_details[
+ port_detail
+ ].static.vendor_serial_number,
+ "vendor_part_number": port_details[
+ port_detail
+ ].static.vendor_part_number,
+ "vendor_date_code": port_details[port_detail].static.vendor_date_code,
+ "signaling_rate": port_details[port_detail].static.signaling_rate,
+ "wavelength": port_details[port_detail].static.wavelength,
+ "rate_identifier": port_details[port_detail].static.rate_identifier,
+ "identifier": port_details[port_detail].static.identifier,
+ "link_length": port_details[port_detail].static.link_length,
+ "voltage_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.voltage_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.voltage_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.voltage_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.voltage_thresholds.warn_low,
+ },
+ "tx_power_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.tx_power_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.tx_power_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.tx_power_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.tx_power_thresholds.warn_low,
+ },
+ "rx_power_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.rx_power_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.rx_power_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.rx_power_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.rx_power_thresholds.warn_low,
+ },
+ "tx_bias_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.warn_low,
+ },
+ "temperature_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.temperature_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.temperature_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.temperature_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.temperature_thresholds.warn_low,
+ },
+ "fc_speeds": port_details[port_detail].static.fc_speeds,
+ "fc_technology": port_details[port_detail].static.fc_technology,
+ "encoding": port_details[port_detail].static.encoding,
+ "fc_link_lengths": port_details[port_detail].static.fc_link_lengths,
+ "fc_transmission_media": port_details[
+ port_detail
+ ].static.fc_transmission_media,
+ "extended_identifier": port_details[
+ port_detail
+ ].static.extended_identifier,
+ }
+ hw_info["interfaces"][port_name]["temperature"] = (
+ port_details[port_detail].temperature[0].measurement
+ )
+ hw_info["interfaces"][port_name]["tx_bias"] = (
+ port_details[port_detail].tx_bias[0].measurement
+ )
+ hw_info["interfaces"][port_name]["tx_fault"] = (
+ port_details[port_detail].tx_fault[0].flag
+ )
+ hw_info["interfaces"][port_name]["tx_power"] = (
+ port_details[port_detail].tx_power[0].measurement
+ )
+ hw_info["interfaces"][port_name]["voltage"] = (
+ port_details[port_detail].voltage[0].measurement
+ )
+ return hw_info
+
+
+def generate_hardware_dict(array):
+ hw_info = {
+ "fans": {},
+ "controllers": {},
+ "temps": {},
+ "drives": {},
+ "interfaces": {},
+ "power": {},
+ "chassis": {},
+ }
+ components = array.list_hardware()
+ for component in range(0, len(components)):
+ component_name = components[component]["name"]
+ if "FAN" in component_name:
+ fan_name = component_name
+ hw_info["fans"][fan_name] = {"status": components[component]["status"]}
+ if "PWR" in component_name:
+ pwr_name = component_name
+ hw_info["power"][pwr_name] = {
+ "status": components[component]["status"],
+ "voltage": components[component]["voltage"],
+ "serial": components[component]["serial"],
+ "model": components[component]["model"],
+ }
+ if "IB" in component_name:
+ ib_name = component_name
+ hw_info["interfaces"][ib_name] = {
+ "status": components[component]["status"],
+ "speed": components[component]["speed"],
+ }
+ if "SAS" in component_name:
+ sas_name = component_name
+ hw_info["interfaces"][sas_name] = {
+ "status": components[component]["status"],
+ "speed": components[component]["speed"],
+ }
+ if "ETH" in component_name:
+ eth_name = component_name
+ hw_info["interfaces"][eth_name] = {
+ "status": components[component]["status"],
+ "speed": components[component]["speed"],
+ }
+ if "FC" in component_name:
+ eth_name = component_name
+ hw_info["interfaces"][eth_name] = {
+ "status": components[component]["status"],
+ "speed": components[component]["speed"],
+ }
+ if "TMP" in component_name:
+ tmp_name = component_name
+ hw_info["temps"][tmp_name] = {
+ "status": components[component]["status"],
+ "temperature": components[component]["temperature"],
+ }
+ if component_name in ["CT0", "CT1"]:
+ cont_name = component_name
+ hw_info["controllers"][cont_name] = {
+ "status": components[component]["status"],
+ "serial": components[component]["serial"],
+ "model": components[component]["model"],
+ }
+ if component_name in ["CH0"]:
+ cont_name = component_name
+ hw_info["chassis"][cont_name] = {
+ "status": components[component]["status"],
+ "serial": components[component]["serial"],
+ "model": components[component]["model"],
+ }
+
+ drives = array.list_drives()
+ for drive in range(0, len(drives)):
+ drive_name = drives[drive]["name"]
+ hw_info["drives"][drive_name] = {
+ "capacity": drives[drive]["capacity"],
+ "status": drives[drive]["status"],
+ "protocol": drives[drive]["protocol"],
+ "type": drives[drive]["type"],
+ }
+ for disk in range(0, len(components)):
+ if components[disk]["name"] == drive_name:
+ hw_info["drives"][drive_name]["serial"] = components[disk]["serial"]
+
+ return hw_info
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ inv_info = {}
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if NEW_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ inv_info = generate_new_hardware_dict(arrayv6, api_version)
+ else:
+ inv_info = generate_hardware_dict(array)
+ module.exit_json(changed=False, purefa_inv=inv_info)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py
new file mode 100644
index 000000000..8774abe87
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_kmip
+version_added: '1.10.0'
+short_description: Manage FlashArray KMIP server objects
+description:
+- Manage FlashArray KMIP Server objects
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the KMIP server object
+ type: str
+ required: true
+ certificate:
+ description:
+    - Name of existing certificate used to verify FlashArray
+ authenticity to the KMIP server.
+ - Use the I(purestorage.flasharray.purefa_certs) module to create certificates.
+ type: str
+ state:
+ description:
+ - Action for the module to perform
+ default: present
+ choices: [ absent, present ]
+ type: str
+ ca_certificate:
+ type: str
+ description:
+ - The text of the CA certificate for the KMIP server.
+ - Includes the "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----" lines
+ - Does not exceed 3000 characters in length
+ uris:
+ type: list
+ elements: str
+ description:
+ - A list of URIs for the configured KMIP servers.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create KMIP object
+ purestorage.flasharray.purefa_kmip:
+ name: foo
+ certificate: bar
+ ca_certificate: "{{lookup('file', 'example.crt') }}"
+ uris:
+ - 1.1.1.1:8888
+ - 2.3.3.3:9999
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete KMIP object
+ purestorage.flasharray.purefa_kmip:
+ name: foo
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update KMIP object
+ purestorage.flasharray.purefa_kmip:
+ name: foo
+ ca_certificate: "{{lookup('file', 'example2.crt') }}"
+ uris:
+ - 3.3.3.3:8888
+ - 4.4.4.4:9999
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+
+
+def update_kmip(module, array):
+ """Update existing KMIP object"""
+ changed = False
+ current_kmip = list(array.get_kmip(names=[module.params["name"]]).items)[0]
+ if (
+ module.params["certificate"]
+ and current_kmip.certificate.name != module.params["certificate"]
+ ):
+ if (
+ array.get_certificates(names=[module.params["certificate"]]).status_code
+ != 200
+ ):
+ module.fail_json(
+ msg="Array certificate {0} does not exist.".format(
+ module.params["certificate"]
+ )
+ )
+ changed = True
+ certificate = module.params["certificate"]
+ else:
+ certificate = current_kmip.certificate.name
+ if module.params["uris"] and sorted(current_kmip.uris) != sorted(
+ module.params["uris"]
+ ):
+ changed = True
+ uris = sorted(module.params["uris"])
+ else:
+ uris = sorted(current_kmip.uris)
+ if (
+ module.params["ca_certificate"]
+ and module.params["ca_certificate"] != current_kmip.ca_certificate
+ ):
+ changed = True
+ ca_cert = module.params["ca_certificate"]
+ else:
+ ca_cert = current_kmip.ca_certificate
+ if not module.check_mode:
+ if changed:
+ kmip = flasharray.KmipPost(
+ uris=uris,
+ ca_certificate=ca_cert,
+ certificate=flasharray.ReferenceNoId(name=certificate),
+ )
+ res = array.patch_kmip(names=[module.params["name"]], kmip=kmip)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Updating existing KMIP object {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def create_kmip(module, array):
+ """Create KMIP object"""
+ if array.get_certificates(names=[module.params["certificate"]]).status_code != 200:
+ module.fail_json(
+ msg="Array certificate {0} does not exist.".format(
+ module.params["certificate"]
+ )
+ )
+ changed = True
+ kmip = flasharray.KmipPost(
+ uris=sorted(module.params["uris"]),
+ ca_certificate=module.params["ca_certificate"],
+ certificate=flasharray.ReferenceNoId(name=module.params["certificate"]),
+ )
+ if not module.check_mode:
+ res = array.post_kmip(names=[module.params["name"]], kmip=kmip)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Creating KMIP object {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_kmip(module, array):
+ """Delete existing KMIP object"""
+ changed = True
+ if not module.check_mode:
+ res = array.delete_kmip(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete {0} KMIP object. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type="str",
+ default="present",
+ choices=["absent", "present"],
+ ),
+ name=dict(type="str", required=True),
+ certificate=dict(type="str"),
+ ca_certificate=dict(type="str", no_log=True),
+ uris=dict(type="list", elements="str"),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ array = get_array(module)
+ state = module.params["state"]
+ exists = bool(array.get_kmip(names=[module.params["name"]]).status_code == 200)
+ if module.params["certificate"] and len(module.params["certificate"]) > 3000:
+ module.fail_json(msg="Certificate exceeds 3000 characters")
+
+ if not exists and state == "present":
+ create_kmip(module, array)
+ elif exists and state == "present":
+ update_kmip(module, array)
+ elif exists and state == "absent":
+ delete_kmip(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py
new file mode 100644
index 000000000..a2f8e136d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["deprecated"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_logging
+version_added: '1.19.0'
+short_description: Manage Pure Storage FlashArray Audit and Session logs
+description:
+- View the FlashArray audit trail or session logs, newest to oldest based on (start) time
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ log_type:
+ description:
+ - The type of logs to be viewed
+ type: str
+ default: audit
+ choices: [audit, session]
+ limit:
+ description:
+ - The maximum number of audit events returned
+ default: 1000
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: List last 100 audit events
+ purestorage.flasharray.purefa_audit:
+ limit: 100
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: List last 24 session events
+ purestorage.flasharray.purefa_audit:
+ limit: 24
+ log_type: session
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+AUDIT_API_VERSION = "2.2"
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ limit=dict(type="int", default=1000),
+ log_type=dict(type="str", default="audit", choices=["audit", "session"]),
+ )
+ )
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ audits = []
+ changed = False
+ if AUDIT_API_VERSION in api_version:
+ changed = True
+ array = get_array(module)
+ if not module.check_mode:
+ if module.params["log_type"] == "audit":
+ all_audits = list(
+ array.get_audits(
+ limit=module.params["limit"],
+ sort=flasharray.Property("time-"),
+ ).items
+ )
+ else:
+ all_audits = list(
+ array.get_sessions(
+ limit=module.params["limit"],
+ sort=flasharray.Property("start_time-"),
+ ).items
+ )
+ for audit in range(0, len(all_audits)):
+ if module.params["log_type"] == "session":
+ start_time = getattr(all_audits[audit], "start_time", None)
+ end_time = getattr(all_audits[audit], "end_time", None)
+ if start_time:
+ human_start_time = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(start_time / 1000)
+ )
+ else:
+ human_start_time = None
+ if end_time:
+ human_end_time = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(end_time / 1000)
+ )
+ else:
+ human_end_time = None
+
+ data = {
+ "start_time": human_start_time,
+ "end_time": human_end_time,
+ "location": getattr(all_audits[audit], "location", None),
+ "user": getattr(all_audits[audit], "user", None),
+ "event": all_audits[audit].event,
+ "event_count": all_audits[audit].event_count,
+ "user_interface": getattr(
+ all_audits[audit], "user_interface", None
+ ),
+ }
+ else:
+ event_time = getattr(all_audits[audit], "time", None)
+ if event_time:
+ human_event_time = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(event_time / 1000)
+ )
+ else:
+ human_event_time = None
+ data = {
+ "time": human_event_time,
+ "arguments": all_audits[audit].arguments,
+ "command": all_audits[audit].command,
+ "subcommand": all_audits[audit].subcommand,
+ "user": all_audits[audit].user,
+ "origin": all_audits[audit].origin.name,
+ }
+ audits.append(data)
+ else:
+ module.fail_json(msg="Purity version does not support audit log return")
+ if module.params["log_type"] == "audit":
+ module.exit_json(changed=changed, audits=audits)
+ else:
+ module.exit_json(changed=changed, sessions=audits)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_maintenance.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_maintenance.py
new file mode 100644
index 000000000..8aa5c76f9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_maintenance.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_maintenance
+version_added: '1.7.0'
+short_description: Configure Pure Storage FlashArray Maintenance Windows
+description:
+- Configuration for Pure Storage FlashArray Maintenance Windows.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+    - Create or delete maintenance window
+ type: str
+ default: present
+ choices: [ absent, present ]
+ timeout :
+ type: int
+ default: 3600
+ description:
+ - Maintenance window period, specified in seconds.
+ - Range allowed is 1 minute (60 seconds) to 24 hours (86400 seconds)
+ - Default setting is 1 hour (3600 seconds)
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing maintenance window
+ purestorage.flasharray.purefa_maintenance:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set maintenance window to default of 1 hour
+ purestorage.flasharray.purefa_maintenance:
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update existing maintenance window
+ purestorage.flasharray.purefa_maintenance:
+ state: present
+ timeout: 86400
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ purefa_argument_spec,
+)
+
+
+def delete_window(module, array):
+ """Delete Maintenance Window"""
+ changed = False
+ if list(array.get_maintenance_windows().items):
+ changed = True
+ if not module.check_mode:
+ state = array.delete_maintenance_windows(names=["environment"])
+ if state.status_code != 200:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def set_window(module, array):
+ """Set Maintenace Window"""
+ changed = True
+ if not 60 <= module.params["timeout"] <= 86400:
+ module.fail_json(msg="Maintenance Window Timeout is out of range (60 to 86400)")
+ window = flasharray.MaintenanceWindowPost(timeout=module.params["timeout"] * 1000)
+ if not module.check_mode:
+ state = array.post_maintenance_windows(
+ names=["environment"], maintenance_window=window
+ )
+ if state.status_code != 200:
+ module.fail_json(msg="Setting maintenance window failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ timeout=dict(type="int", default=3600),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_array(module)
+
+ if module.params["state"] == "absent":
+ delete_window(module, array)
+ else:
+ set_window(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py
new file mode 100644
index 000000000..a28bd56b2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_messages
+version_added: '1.14.0'
+short_description: List FlashArray Alert Messages
+description:
+- List Alert messages based on filters provided
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ severity:
+ description:
+ - severity of the alerts to show
+ type: list
+ elements: str
+ choices: [ all, critical, warning, info ]
+ default: [ all ]
+ state:
+ description:
+ - State of alerts to show
+ default: open
+ choices: [ all, open, closed ]
+ type: str
+ flagged:
+ description:
+ - Show alerts that have been acknowledged or not
+ default: false
+ type: bool
+ history:
+ description:
+ - Historical time period to show alerts for, from present time
+    - Allowed time periods are hour(h), day(d), week(w) and year(y)
+ type: str
+ default: 1w
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Show critical alerts from past 4 weeks that haven't been acknowledged
+ purefa_messages:
+ history: 4w
+ flagged : false
+ severity:
+ - critical
+ fa_url: 10.10.10.2
+ api_token: 89a9356f-c203-d263-8a89-c229486a13ba
+"""
+
+RETURN = r"""
+"""
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+ALLOWED_PERIODS = ["h", "d", "w", "y"]
+# Time periods in micro-seconds
+HOUR = 3600000
+DAY = HOUR * 24
+WEEK = DAY * 7
+YEAR = WEEK * 52
+
+
+def _create_time_window(window):
+ period = window[-1].lower()
+ multiple = int(window[0:-1])
+ if period == "h":
+ return HOUR * multiple
+ if period == "d":
+ return DAY * multiple
+ if period == "w":
+ return WEEK * multiple
+ if period == "y":
+ return YEAR * multiple
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="open", choices=["all", "open", "closed"]),
+ history=dict(type="str", default="1w"),
+ flagged=dict(type="bool", default=False),
+ severity=dict(
+ type="list",
+ elements="str",
+ default=["all"],
+ choices=["all", "critical", "warning", "info"],
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ time_now = int(time.time() * 1000)
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ array_v6 = get_array(module)
+ if module.params["history"][-1].lower() not in ALLOWED_PERIODS:
+ module.fail_json(msg="historical window value is not an allowsd time period")
+ since_time = str(time_now - _create_time_window(module.params["history"].lower()))
+ if module.params["flagged"]:
+ flagged = " and flagged='True'"
+ else:
+ flagged = " and flagged='False'"
+
+ multi_sev = False
+ if len(module.params["severity"]) > 1:
+ if "all" in module.params["severity"]:
+ module.params["severity"] = ["*"]
+ else:
+ multi_sev = True
+ if multi_sev:
+ severity = " and ("
+ for level in range(0, len(module.params["severity"])):
+ severity += "severity='" + str(module.params["severity"][level]) + "' or "
+ severity = severity[0:-4] + ")"
+ else:
+ if module.params["severity"] == ["all"]:
+ severity = " and severity='*'"
+ else:
+ severity = " and severity='" + str(module.params["severity"][0]) + "'"
+ messages = {}
+ if module.params["state"] == "all":
+ state = " and state='*'"
+ else:
+ state = " and state='" + module.params["state"] + "'"
+ filter_string = "notified>" + since_time + state + flagged + severity
+ try:
+ res = array_v6.get_alerts(filter=filter_string)
+ alerts = list(res.items)
+ except Exception:
+ module.fail_json(
+ msg="Failed to get alert messages. Error: {0}".format(res.errors[0].message)
+ )
+ for message in range(0, len(alerts)):
+ name = alerts[message].name
+ messages[name] = {
+ "summary": alerts[message].summary,
+ "component_type": alerts[message].component_type,
+ "component_name": alerts[message].component_name,
+ "code": alerts[message].code,
+ "severity": alerts[message].severity,
+ "actual": alerts[message].actual,
+ "issue": alerts[message].issue,
+ "state": alerts[message].state,
+ "flagged": alerts[message].flagged,
+ "closed": None,
+ "created": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(alerts[message].created / 1000),
+ )
+ + " UTC",
+ "updated": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(alerts[message].updated / 1000),
+ )
+ + " UTC",
+ }
+ if alerts[message].state == "closed":
+ messages[name]["closed"] = (
+ time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.gmtime(alerts[message].closed / 1000)
+ )
+ + " UTC"
+ )
+ module.exit_json(changed=False, purefa_messages=messages)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py
new file mode 100644
index 000000000..e5004568a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefa_network
+short_description: Manage network interfaces in a Pure Storage FlashArray
+version_added: '1.0.0'
+description:
+ - This module manages the physical and virtual network interfaces on a Pure Storage FlashArray.
+ - To manage VLAN interfaces use the I(purestorage.flasharray.purefa_vlan) module.
+ - To manage network subnets use the I(purestorage.flasharray.purefa_subnet) module.
+ - To remove an IP address from a non-management port use 0.0.0.0/0
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Interface name (physical or virtual).
+ required: true
+ type: str
+ state:
+ description:
+ - State of existing interface (on/off).
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ address:
+ description:
+ - IPv4 or IPv6 address of interface in CIDR notation.
+ - To remove an IP address from a non-management port use 0.0.0.0/0
+ required: false
+ type: str
+ gateway:
+ description:
+ - IPv4 or IPv6 address of interface gateway.
+ required: false
+ type: str
+ mtu:
+ description:
+ - MTU size of the interface. Range is 1280 to 9216.
+ required: false
+ default: 1500
+ type: int
+ servicelist:
+ description:
+ - Assigns the specified (comma-separated) service list to one or more specified interfaces.
+ - Replaces the previous service list.
+ - Supported service lists depend on whether the network interface is Ethernet or Fibre Channel.
+ - Note that I(system) is only valid for Cloud Block Store.
+ elements: str
+ type: list
+ choices: [ "replication", "management", "ds", "file", "iscsi", "scsi-fc", "nvme-fc", "nvme-tcp", "nvme-roce", "system"]
+ version_added: '1.15.0'
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = """
+- name: Configure and enable network interface ct0.eth8
+ purestorage.flasharray.purefa_network:
+ name: ct0.eth8
+ gateway: 10.21.200.1
+ address: "10.21.200.18/24"
+ mtu: 9000
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Disable physical interface ct1.eth2
+ purestorage.flasharray.purefa_network:
+ name: ct1.eth2
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Enable virtual network interface vir0
+ purestorage.flasharray.purefa_network:
+ name: vir0
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Remove an IP address from iSCSI interface ct0.eth4
+ purestorage.flasharray.purefa_network:
+ name: ct0.eth4
+ address: 0.0.0.0/0
+ gateway: 0.0.0.0
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Change service list for FC interface ct0.fc1
+ purestorage.flasharray.purefa_network:
+ name: ct0.fc1
+ servicelist:
+ - replication
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+"""
+
+RETURN = """
+"""
+
+try:
+ from netaddr import IPAddress, IPNetwork
+
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+try:
+ from pypureclient.flasharray import NetworkInterfacePatch
+
+ HAS_PYPURECLIENT = True
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+FC_ENABLE_API = "2.4"
+
+
+def _is_cbs(array, is_cbs=False):
+ """Is the selected array a Cloud Block Store"""
+ model = array.get(controllers=True)[0]["model"]
+ is_cbs = bool("CBS" in model)
+ return is_cbs
+
+
+def _get_fc_interface(module, array):
+ """Return FC Interface or None"""
+ interface = {}
+ interface_list = array.get_network_interfaces(names=[module.params["name"]])
+ if interface_list.status_code == 200:
+ interface = list(interface_list.items)[0]
+ return interface
+ else:
+ return None
+
+
+def _get_interface(module, array):
+ """Return Network Interface or None"""
+ interface = {}
+ if module.params["name"][0] == "v":
+ try:
+ interface = array.get_network_interface(module.params["name"])
+ except Exception:
+ return None
+ else:
+ try:
+ interfaces = array.list_network_interfaces()
+ except Exception:
+ return None
+ for ints in range(0, len(interfaces)):
+ if interfaces[ints]["name"] == module.params["name"]:
+ interface = interfaces[ints]
+ break
+ return interface
+
+
def update_fc_interface(module, array, interface, api_version):
    """Modify FC Interface settings.

    Reconciles the enabled/disabled state with the requested ``state``
    parameter (when the REST API supports FC enablement) and then applies
    any requested service-list change.
    """
    changed = False
    iface_name = module.params["name"]
    if FC_ENABLE_API in api_version:
        want_enabled = module.params["state"] == "present"
        if want_enabled != interface.enabled:
            changed = True
            if not module.check_mode:
                patch = NetworkInterfacePatch(
                    enabled=want_enabled, override_npiv_check=True
                )
                res = array.patch_network_interfaces(
                    names=[iface_name], network=patch
                )
                if res.status_code != 200:
                    action = "enable" if want_enabled else "disable"
                    module.fail_json(
                        msg="Failed to {0} interface {1}.".format(action, iface_name)
                    )
    requested_services = module.params["servicelist"]
    if requested_services and sorted(requested_services) != sorted(interface.services):
        changed = True
        if not module.check_mode:
            res = array.patch_network_interfaces(
                names=[iface_name],
                network=NetworkInterfacePatch(services=requested_services),
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to update interface service list {0}. Error: {1}".format(
                        iface_name, res.errors[0].message
                    )
                )

    module.exit_json(changed=changed)
+
+
def update_interface(module, array, interface):
    """Modify Ethernet/virtual interface settings.

    Builds the desired address/netmask/gateway/MTU/service state, compares
    it with the live interface and applies any difference, then reconciles
    the enabled state and (on REST 2.x capable arrays) the service list.
    """
    changed = False
    current_state = {
        "mtu": interface["mtu"],
        "gateway": interface["gateway"],
        "address": interface["address"],
        "netmask": interface["netmask"],
        "services": sorted(interface["services"]),
    }
    if not module.params["servicelist"]:
        services = sorted(interface["services"])
    else:
        services = sorted(module.params["servicelist"])
    # BUG FIX: ip_version was previously only assigned when a new address was
    # supplied, raising a NameError on the "keep current address" path.
    ip_version = "4"
    if not module.params["address"]:
        address = interface["address"]
        if address:
            ip_version = str(IPAddress(address).version)
    else:
        # NOTE: the original also had an `elif not module.params["gateway"]`
        # branch here, which was unreachable (it sat inside a block only
        # entered when the gateway WAS set) and has been removed.
        if module.params["gateway"] and module.params["gateway"] not in IPNetwork(
            module.params["address"]
        ):
            module.fail_json(msg="Gateway and subnet are not compatible.")
        address = str(module.params["address"].split("/", 1)[0])
        ip_version = str(IPAddress(address).version)
    if not module.params["mtu"]:
        mtu = interface["mtu"]
    else:
        if not 1280 <= module.params["mtu"] <= 9216:
            module.fail_json(
                msg="MTU {0} is out of range (1280 to 9216)".format(
                    module.params["mtu"]
                )
            )
        else:
            mtu = module.params["mtu"]
    if module.params["address"]:
        netmask = str(IPNetwork(module.params["address"]).netmask)
    else:
        netmask = interface["netmask"]
    if not module.params["gateway"]:
        gateway = interface["gateway"]
    else:
        cidr = str(IPAddress(netmask).netmask_bits())
        full_addr = address + "/" + cidr
        if module.params["gateway"] not in IPNetwork(full_addr):
            module.fail_json(msg="Gateway and subnet are not compatible.")
        gateway = module.params["gateway"]
    if ip_version == "6":
        # Purity expects a prefix length rather than a dotted netmask for IPv6.
        netmask = str(IPAddress(netmask).netmask_bits())
    new_state = {
        "address": address,
        "mtu": mtu,
        "gateway": gateway,
        "netmask": netmask,
        "services": services,
    }
    if new_state != current_state:
        changed = True
        # BUG FIX: "address" has already been stripped of its CIDR suffix, so
        # the old comparison against "0.0.0.0/0" could never match.
        if (
            "management" in interface["services"] or "app" in interface["services"]
        ) and address == "0.0.0.0":
            module.fail_json(
                msg="Removing IP address from a management or app port is not supported"
            )
        if not module.check_mode:
            try:
                if new_state["gateway"] is not None:
                    array.set_network_interface(
                        interface["name"],
                        address=new_state["address"],
                        mtu=new_state["mtu"],
                        netmask=new_state["netmask"],
                        gateway=new_state["gateway"],
                    )
                else:
                    array.set_network_interface(
                        interface["name"],
                        address=new_state["address"],
                        mtu=new_state["mtu"],
                        netmask=new_state["netmask"],
                    )
            except Exception:
                module.fail_json(
                    msg="Failed to change settings for interface {0}.".format(
                        interface["name"]
                    )
                )
    if not interface["enabled"] and module.params["state"] == "present":
        changed = True
        if not module.check_mode:
            try:
                array.enable_network_interface(interface["name"])
            except Exception:
                module.fail_json(
                    msg="Failed to enable interface {0}.".format(interface["name"])
                )
    if interface["enabled"] and module.params["state"] == "absent":
        changed = True
        if not module.check_mode:
            try:
                array.disable_network_interface(interface["name"])
            except Exception:
                module.fail_json(
                    msg="Failed to disable interface {0}.".format(interface["name"])
                )
    # Sort both sides so an order-only difference does not trigger a patch
    # (previously only the requested list was sorted).
    if module.params["servicelist"] and sorted(module.params["servicelist"]) != sorted(
        interface["services"]
    ):
        api_version = array._list_available_rest_versions()
        if FC_ENABLE_API in api_version:
            if HAS_PYPURECLIENT:
                array = get_array(module)
                changed = True
                if not module.check_mode:
                    network = NetworkInterfacePatch(
                        services=module.params["servicelist"]
                    )
                    res = array.patch_network_interfaces(
                        names=[module.params["name"]], network=network
                    )
                    if res.status_code != 200:
                        module.fail_json(
                            msg="Failed to update interface service list {0}. Error: {1}".format(
                                module.params["name"], res.errors[0].message
                            )
                        )
            else:
                # BUG FIX: AnsibleModule has no warn_json() method — calling it
                # raised AttributeError; use warn() instead.
                module.warn(
                    "Servicelist not updated as pypureclient module is required"
                )

    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: validate parameters, locate the interface and
    dispatch to the FC or Ethernet/virtual update handler."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            state=dict(type="str", default="present", choices=["present", "absent"]),
            address=dict(type="str"),
            gateway=dict(type="str"),
            mtu=dict(type="int", default=1500),
            servicelist=dict(
                type="list",
                elements="str",
                choices=[
                    "replication",
                    "management",
                    "ds",
                    "file",
                    "iscsi",
                    "scsi-fc",
                    "nvme-fc",
                    "nvme-tcp",
                    "nvme-roce",
                    "system",
                ],
            ),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    if not HAS_NETADDR:
        module.fail_json(msg="netaddr module is required")

    array = get_system(module)
    api_version = array._list_available_rest_versions()
    if not _is_cbs(array):
        # The "system" service is a Cloud Block Store concept only.
        if module.params["servicelist"] and "system" in module.params["servicelist"]:
            module.fail_json(
                msg="Only Cloud Block Store supports the 'system' service type"
            )
    # Fibre Channel ports are named <controller>.fc<n>; everything else
    # (physical ethernet and virtual interfaces) uses the generic handler.
    # The original duplicated the generic branch twice; merged here.
    name = module.params["name"]
    if "." in name and name.split(".")[1][0].lower() == "f":
        if not HAS_PYPURECLIENT:
            module.fail_json(msg="pypureclient module is required")
        array = get_array(module)
        interface = _get_fc_interface(module, array)
        if not interface:
            module.fail_json(msg="Invalid network interface specified.")
        update_fc_interface(module, array, interface, api_version)
    else:
        interface = _get_interface(module, array)
        if not interface:
            module.fail_json(msg="Invalid network interface specified.")
        update_interface(module, array, interface)

    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py
new file mode 100644
index 000000000..e2a5c8f18
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_ntp
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray NTP settings
+description:
+- Set or erase NTP configuration for Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete NTP servers configuration
+ type: str
+ default: present
+ choices: [ absent, present ]
+ ntp_servers:
+ type: list
+ elements: str
+ description:
+ - A list of up to 4 alternate NTP servers. These may include IPv4,
+ IPv6 or FQDNs. Invalid IP addresses will cause the module to fail.
+ No validation is performed for FQDNs.
+ - If more than 4 servers are provided, only the first 4 unique
+ nameservers will be used.
+ - if no servers are given a default of I(0.pool.ntp.org) will be used.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing NTP server entries
+ purestorage.flasharray.purefa_ntp:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set array NTP servers
+ purestorage.flasharray.purefa_ntp:
+ state: present
+ ntp_servers:
+ - "0.pool.ntp.org"
+ - "1.pool.ntp.org"
+ - "2.pool.ntp.org"
+ - "3.pool.ntp.org"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def _is_cbs(array, is_cbs=False):
+ """Is the selected array a Cloud Block Store"""
+ model = array.get(controllers=True)[0]["model"]
+ is_cbs = bool("CBS" in model)
+ return is_cbs
+
+
def remove(duplicate):
    """Return *duplicate* with repeated entries dropped, keeping first-seen order.

    dict.fromkeys preserves insertion order (Python 3.7+), replacing the
    original O(n^2) membership scan with a single O(n) pass.  Entries must
    be hashable, which NTP server strings always are.
    """
    return list(dict.fromkeys(duplicate))
+
+
def delete_ntp(module, array):
    """Clear the array's NTP server list if any servers are configured."""
    configured = array.get(ntpserver=True)["ntpserver"]
    changed = configured != []
    if changed and not module.check_mode:
        try:
            array.set(ntpserver=[])
        except Exception:
            module.fail_json(msg="Deletion of NTP servers failed")
    module.exit_json(changed=changed)
+
+
def create_ntp(module, array):
    """Apply the requested NTP servers (maximum of four) to the array."""
    if not module.check_mode:
        servers = module.params["ntp_servers"] or ["0.pool.ntp.org"]
        module.params["ntp_servers"] = servers
        try:
            array.set(ntpserver=servers[:4])
        except Exception:
            module.fail_json(msg="Update of NTP servers failed")
    module.exit_json(changed=True)
+
+
def main():
    """Module entry point: reconcile the array's NTP configuration."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            ntp_servers=dict(type="list", elements="str"),
            state=dict(type="str", default="present", choices=["absent", "present"]),
        )
    )

    module = AnsibleModule(
        argument_spec,
        required_if=[["state", "present", ["ntp_servers"]]],
        supports_check_mode=True,
    )

    array = get_system(module)
    # Cloud Block Store manages its own time source; nothing to configure.
    if _is_cbs(array):
        module.warn("NTP settings are not necessary for a CBS array - ignoring...")
        module.exit_json(changed=False)

    if module.params["state"] == "absent":
        delete_ntp(module, array)
    else:
        wanted = remove(module.params["ntp_servers"])
        module.params["ntp_servers"] = wanted
        current = array.get(ntpserver=True)["ntpserver"]
        # Purity honours at most four servers.
        if sorted(current) != sorted(wanted[0:4]):
            create_ntp(module, array)

    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py
new file mode 100644
index 000000000..1265911fe
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_offload
+version_added: '1.0.0'
+short_description: Create, modify and delete NFS, S3 or Azure offload targets
+description:
+- Create, modify and delete NFS, S3 or Azure offload targets.
+- Only supported on Purity v5.2.0 or higher.
+- You must have a correctly configured offload network for offload to work.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of offload
+ default: present
+ choices: [ absent, present ]
+ type: str
+ name:
+ description:
+ - The name of the offload target
+ required: true
+ type: str
+ protocol:
+ description:
+ - Define which protocol the offload engine uses
+ default: nfs
+ choices: [ nfs, s3, azure, gcp ]
+ type: str
+ address:
+ description:
+ - The IP or FQDN address of the NFS server
+ type: str
+ share:
+ description:
+ - NFS export on the NFS server
+ type: str
+ options:
+ description:
+    - Additional mount options for the NFS share
+ - Supported mount options include I(port), I(rsize),
+ I(wsize), I(nfsvers), and I(tcp) or I(udp)
+ required: false
+ default: ""
+ type: str
+ access_key:
+ description:
+ - Access Key ID of the offload target
+ type: str
+ container:
+ description:
+ - Name of the blob container of the Azure target
+ default: offload
+ type: str
+ bucket:
+ description:
+ - Name of the bucket for the S3 or GCP target
+ type: str
+ account:
+ description:
+ - Name of the Azure blob storage account
+ type: str
+ secret:
+ description:
+ - Secret Access Key for the offload target
+ type: str
+ initialize:
+ description:
+ - Define whether to initialize the offload bucket
+ type: bool
+ default: true
+ placement:
+ description:
+ - AWS S3 placement strategy
+ type: str
+ choices: ['retention-based', 'aws-standard-class']
+ default: retention-based
+
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create NFS offload target
+ purestorage.flasharray.purefa_offload:
+ name: nfs-offload
+ protocol: nfs
+ address: 10.21.200.4
+ share: "/offload_target"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create S3 offload target
+ purestorage.flasharray.purefa_offload:
+ name: s3-offload
+ protocol: s3
+ access_key: "3794fb12c6204e19195f"
+ bucket: offload-bucket
+ secret: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+ placement: aws-standard-class
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create Azure offload target
+ purestorage.flasharray.purefa_offload:
+ name: azure-offload
+ protocol: azure
+ secret: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+ container: offload-container
+ account: user1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete offload target
+ purestorage.flasharray.purefa_offload:
+ name: nfs-offload
+ protocol: nfs
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PACKAGING = True
+try:
+ from packaging import version
+except ImportError:
+ HAS_PACKAGING = False
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "1.16"
+REGEX_TARGET_NAME = re.compile(r"^[a-zA-Z0-9\-]*$")
+P53_API_VERSION = "1.17"
+GCP_API_VERSION = "2.3"
+MULTIOFFLOAD_API_VERSION = "2.11"
+MULTIOFFLOAD_LIMIT = 5
+
+
def get_target(module, array):
    """Fetch the named offload target, or None when it does not exist."""
    name = module.params["name"]
    try:
        target = array.get_offload(name)
    except Exception:
        return None
    return target
+
+
def create_offload(module, array):
    """Create an NFS, S3, Azure or GCP offload target.

    Verifies that the @offload.data network interface exists and is enabled
    first, then dispatches on the requested protocol.
    """
    changed = True
    api_version = array._list_available_rest_versions()
    # First check if the offload network interface is there and enabled
    try:
        if not array.get_network_interface("@offload.data")["enabled"]:
            module.fail_json(
                msg="Offload Network interface not enabled. Please resolve."
            )
    except Exception:
        module.fail_json(
            msg="Offload Network interface not correctly configured. Please resolve."
        )
    if not module.check_mode:
        if module.params["protocol"] == "nfs":
            try:
                array.connect_nfs_offload(
                    module.params["name"],
                    mount_point=module.params["share"],
                    address=module.params["address"],
                    mount_options=module.params["options"],
                )
            except Exception:
                module.fail_json(
                    msg="Failed to create NFS offload {0}. "
                    "Please perform diagnostic checks.".format(module.params["name"])
                )
        if module.params["protocol"] == "s3":
            if P53_API_VERSION in api_version:
                try:
                    array.connect_s3_offload(
                        module.params["name"],
                        access_key_id=module.params["access_key"],
                        secret_access_key=module.params["secret"],
                        bucket=module.params["bucket"],
                        placement_strategy=module.params["placement"],
                        initialize=module.params["initialize"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to create S3 offload {0}. "
                        "Please perform diagnostic checks.".format(
                            module.params["name"]
                        )
                    )
            else:
                # Pre-5.3 Purity has no placement_strategy support.
                try:
                    array.connect_s3_offload(
                        module.params["name"],
                        access_key_id=module.params["access_key"],
                        secret_access_key=module.params["secret"],
                        bucket=module.params["bucket"],
                        initialize=module.params["initialize"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to create S3 offload {0}. "
                        "Please perform diagnostic checks.".format(
                            module.params["name"]
                        )
                    )
        if module.params["protocol"] == "azure" and P53_API_VERSION in api_version:
            try:
                array.connect_azure_offload(
                    module.params["name"],
                    container_name=module.params["container"],
                    secret_access_key=module.params["secret"],
                    # BUG FIX: previously read module.params[".bucket"], a key
                    # that never exists (KeyError at runtime); the Azure
                    # storage account name comes from the "account" option.
                    account_name=module.params["account"],
                    initialize=module.params["initialize"],
                )
            except Exception:
                module.fail_json(
                    msg="Failed to create Azure offload {0}. "
                    "Please perform diagnostic checks.".format(module.params["name"])
                )
        if module.params["protocol"] == "gcp" and GCP_API_VERSION in api_version:
            arrayv6 = get_array(module)
            bucket = flasharray.OffloadGoogleCloud(
                access_key_id=module.params["access_key"],
                bucket=module.params["bucket"],
                secret_access_key=module.params["secret"],
            )
            offload = flasharray.OffloadPost(google_cloud=bucket)
            res = arrayv6.post_offloads(
                offload=offload,
                initialize=module.params["initialize"],
                names=[module.params["name"]],
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to create GCP offload {0}. Error: {1}"
                    "Please perform diagnostic checks.".format(
                        module.params["name"], res.errors[0].message
                    )
                )
    module.exit_json(changed=changed)
+
+
def update_offload(module, array):
    """Placeholder: offload targets cannot currently be modified in place."""
    module.exit_json(changed=False)
+
+
def delete_offload(module, array):
    """Disconnect an offload target, dispatching on its protocol."""
    changed = True
    api_version = array._list_available_rest_versions()
    if not module.check_mode:
        protocol = module.params["protocol"]
        target_name = module.params["name"]
        if protocol == "nfs":
            try:
                array.disconnect_nfs_offload(target_name)
            except Exception:
                module.fail_json(
                    msg="Failed to delete NFS offload {0}.".format(target_name)
                )
        if protocol == "s3":
            try:
                array.disconnect_s3_offload(target_name)
            except Exception:
                module.fail_json(
                    msg="Failed to delete S3 offload {0}.".format(target_name)
                )
        # Azure disconnect requires the 5.3+ REST API.
        if protocol == "azure" and P53_API_VERSION in api_version:
            try:
                array.disconnect_azure_offload(target_name)
            except Exception:
                module.fail_json(
                    msg="Failed to delete Azure offload {0}.".format(target_name)
                )
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: validate inputs, then create/update/delete the
    offload target."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            state=dict(type="str", default="present", choices=["present", "absent"]),
            protocol=dict(
                type="str", default="nfs", choices=["nfs", "s3", "azure", "gcp"]
            ),
            placement=dict(
                type="str",
                default="retention-based",
                choices=["retention-based", "aws-standard-class"],
            ),
            name=dict(type="str", required=True),
            initialize=dict(default=True, type="bool"),
            access_key=dict(type="str", no_log=False),
            secret=dict(type="str", no_log=True),
            bucket=dict(type="str"),
            container=dict(type="str", default="offload"),
            account=dict(type="str"),
            share=dict(type="str"),
            address=dict(type="str"),
            options=dict(type="str", default=""),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    # BUG FIX: the old code tested `argument_spec["state"] == "present"` —
    # comparing a spec dict to a string, which is never true — so the
    # per-protocol required options were silently not enforced.  required_if
    # cannot express "only when state=present", so validate after parsing.
    if module.params["state"] == "present":
        required_options = {
            "nfs": ["address", "share"],
            "s3": ["access_key", "secret", "bucket"],
            "gcp": ["access_key", "secret", "bucket"],
            "azure": ["account", "secret"],
        }[module.params["protocol"]]
        missing = [opt for opt in required_options if not module.params[opt]]
        if missing:
            module.fail_json(
                msg="protocol is {0} but the following are missing: {1}".format(
                    module.params["protocol"], ", ".join(missing)
                )
            )

    if not HAS_PACKAGING:
        module.fail_json(msg="packaging sdk is required for this module")
    if not HAS_PURESTORAGE and module.params["protocol"] == "gcp":
        module.fail_json(msg="py-pure-client sdk is required for this module")

    array = get_system(module)
    api_version = array._list_available_rest_versions()

    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(
            msg="FlashArray REST version not supported. "
            "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
        )

    if (
        not re.match(r"^[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9]$", module.params["name"])
        or len(module.params["name"]) > 56
    ):
        module.fail_json(
            msg="Target name invalid. "
            "Target name must be between 1 and 56 characters (alphanumeric and -) in length "
            "and begin and end with a letter or number. The name must include at least one letter."
        )
    # BUG FIX: only validate the bucket when one was supplied — it is None
    # for state=absent, which previously crashed re.match with a TypeError.
    if module.params["protocol"] in ["s3", "gcp"] and module.params["bucket"]:
        if (
            not re.match(r"^[a-z0-9][a-z0-9.\-]*[a-z0-9]$", module.params["bucket"])
            or len(module.params["bucket"]) > 63
        ):
            module.fail_json(
                msg="Bucket name invalid. "
                "Bucket name must be between 3 and 63 characters "
                "(lowercase, alphanumeric, dash or period) in length "
                "and begin and end with a letter or number."
            )

    # The Offload app must be installed, healthy, and match the Purity version.
    app_version = 0
    all_good = False
    for app in array.list_apps():
        if app["name"] == "offload":
            if (
                app["enabled"]
                and app["status"] == "healthy"
                and version.parse(app["version"]) >= version.parse("5.2.0")
            ):
                all_good = True
                app_version = app["version"]
            break

    if not all_good:
        module.fail_json(
            msg="Correct Offload app not installed or incorrectly configured"
        )
    elif version.parse(array.get()["version"]) != version.parse(app_version):
        module.fail_json(
            msg="Offload app version must match Purity version. Please upgrade."
        )

    target = get_target(module, array)
    if module.params["state"] == "present" and not target:
        offloads = array.list_offload()
        # BUG FIX: assigning MULTIOFFLOAD_LIMIT inside main() made it a local
        # variable, so the read below raised UnboundLocalError on the common
        # path; it also tested MIN_REQUIRED_API_VERSION (already guaranteed
        # above) instead of the multi-offload capability version.
        offload_limit = (
            MULTIOFFLOAD_LIMIT if MULTIOFFLOAD_API_VERSION in api_version else 1
        )
        if len(offloads) >= offload_limit:
            module.fail_json(
                msg="Cannot add offload target {0}. Offload Target Limit of {1} would be exceeded.".format(
                    module.params["name"], offload_limit
                )
            )
        # TODO: (SD) Remove this check when multi-protocol offloads are supported
        # BUG FIX: guard against IndexError when this is the first target.
        if offloads and offloads[0].protocol != module.params["protocol"]:
            module.fail_json(msg="Currently all offloads must be of the same type.")
        create_offload(module, array)
    elif module.params["state"] == "present" and target:
        update_offload(module, array)
    elif module.params["state"] == "absent" and target:
        delete_offload(module, array)

    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
new file mode 100644
index 000000000..3fa51ebbb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
@@ -0,0 +1,909 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_pg
+version_added: '1.0.0'
+short_description: Manage protection groups on Pure Storage FlashArrays
+description:
+- Create, delete or modify protection groups on Pure Storage FlashArrays.
+- If a protection group exists and you try to add non-valid types, eg. a host
+ to a volume protection group the module will ignore the invalid types.
+- Protection Groups on Offload targets are supported.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the protection group.
+ type: str
+ aliases: [ pgroup ]
+ required: true
+ state:
+ description:
+ - Define whether the protection group should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ volume:
+ description:
+ - List of existing volumes to add to protection group.
+ - Note that volume are case-sensitive however FlashArray volume names are unique
+ and ignore case - you cannot have I(volumea) and I(volumeA)
+ type: list
+ elements: str
+ host:
+ description:
+ - List of existing hosts to add to protection group.
+ - Note that hostnames are case-sensitive however FlashArray hostnames are unique
+ and ignore case - you cannot have I(hosta) and I(hostA)
+ type: list
+ elements: str
+ hostgroup:
+ description:
+ - List of existing hostgroups to add to protection group.
+ - Note that hostgroups are case-sensitive however FlashArray hostgroup names are unique
+ and ignore case - you cannot have I(groupa) and I(groupA)
+ type: list
+ elements: str
+ eradicate:
+ description:
+    - Define whether to eradicate the protection group on delete or leave in trash.
+ type : bool
+ default: false
+ enabled:
+ description:
+    - Define whether to enable snapshots for the protection group.
+ type : bool
+ default: true
+ target:
+ description:
+ - List of remote arrays or offload target for replication protection group
+ to connect to.
+ - Note that all replicated protection groups are asynchronous.
+ - Target arrays or offload targets must already be connected to the source array.
+    - Maximum number of targets per Protection Group is 4, assuming your
+      configuration supports this.
+ type: list
+ elements: str
+ rename:
+ description:
+ - Rename a protection group
+ - If the source protection group is in a Pod or Volume Group 'container'
+ you only need to provide the new protection group name in the same 'container'
+ type: str
+ safe_mode:
+ description:
+ - Enables SafeMode restrictions on the protection group
+ - B(Once set disabling this can only be performed by Pure Technical Support)
+ type: bool
+ default: false
+ version_added: '1.13.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new local protection group
+ purestorage.flasharray.purefa_pg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new protection group called bar in pod called foo
+ purestorage.flasharray.purefa_pg:
+ name: "foo::bar"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new replicated protection group
+ purestorage.flasharray.purefa_pg:
+ name: foo
+ target:
+ - arrayb
+ - arrayc
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new replicated protection group to offload target and remote array
+ purestorage.flasharray.purefa_pg:
+ name: foo
+ target:
+ - offload
+ - arrayc
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new protection group with snapshots disabled
+ purestorage.flasharray.purefa_pg:
+ name: foo
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete protection group
+ purestorage.flasharray.purefa_pg:
+ name: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Eradicate protection group foo on offload target where source array is arrayA
+ purestorage.flasharray.purefa_pg:
+ name: "arrayA:foo"
+ target: offload
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Rename protection group foo in pod arrayA to bar
+ purestorage.flasharray.purefa_pg:
+ name: "arrayA::foo"
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create protection group for hostgroups
+ purestorage.flasharray.purefa_pg:
+ name: bar
+ hostgroup:
+ - hg1
+ - hg2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create protection group for hosts
+ purestorage.flasharray.purefa_pg:
+ name: bar
+ host:
+ - host1
+ - host2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create replicated protection group for volumes
+ purestorage.flasharray.purefa_pg:
+ name: bar
+ volume:
+ - vol1
+ - vol2
+ target: arrayb
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+OFFLOAD_API_VERSION = "1.16"
+P53_API_VERSION = "1.17"
+AC_PG_API_VERSION = "1.13"
+RETENTION_LOCK_VERSION = "2.13"
+
+
def get_pod(module, array):
    """Return the ActiveCluster pod for the PG name, or None if lookup fails.

    The pod name is the portion of the protection group name before the
    ``::`` separator (e.g. ``mypod`` for ``mypod::mypg``).
    """
    pod = module.params["name"].partition("::")[0]
    try:
        result = array.get_pod(pod=pod)
    except Exception:
        result = None
    return result
+
+
def get_targets(array):
    """Return names of connected offload targets, or None on lookup failure.

    A target counts as connected when its status is either ``connected``
    or ``partially_connected``.
    """
    try:
        details = array.list_offload()
    except Exception:
        return None
    good_states = ("connected", "partially_connected")
    return [tgt["name"] for tgt in details if tgt["status"] in good_states]
+
+
def get_arrays(array):
    """Return names of connected remote arrays.

    On REST >= 1.17 the connection record exposes a ``status`` string;
    older APIs expose a boolean ``connected`` flag instead.
    """
    array_details = array.list_array_connections()
    api_version = array._list_available_rest_versions()
    use_status = P53_API_VERSION in api_version
    arrays = []
    for conn in array_details:
        if use_status:
            if conn["status"] in ["connected", "partially_connected"]:
                arrays.append(conn["array_name"])
        elif conn["connected"]:
            arrays.append(conn["array_name"])
    return arrays
+
+
def get_pending_pgroup(module, array):
    """Return a destroyed-but-recoverable protection group, or None.

    Offload-target groups (name contains ``:`` but not ``::``) are looked
    up across all targets by name alone; pod-scoped and plain local groups
    are matched on the local array and must also have a truthy
    ``time_remaining``. Matching is case-insensitive.
    """
    name = module.params["name"]
    wanted = name.casefold()
    if ":" in name and "::" not in name:
        candidates = (
            pgrp
            for pgrp in array.list_pgroups(pending=True, on="*")
            if pgrp["name"].casefold() == wanted
        )
    else:
        candidates = (
            pgrp
            for pgrp in array.list_pgroups(pending=True)
            if pgrp["name"].casefold() == wanted and pgrp["time_remaining"]
        )
    return next(candidates, None)
+
+
def get_pgroup(module, array):
    """Return a live protection group matching the module name, or None.

    Matching is case-insensitive. Groups on offload targets (``:`` without
    ``::``) are searched across all targets via ``on='*'``.
    """
    name = module.params["name"]
    wanted = name.casefold()
    if ":" in name and "::" not in name:
        pgroups = array.list_pgroups(on="*")
    else:
        pgroups = array.list_pgroups()
    for pgrp in pgroups:
        if pgrp["name"].casefold() == wanted:
            return pgrp
    return None
+
+
def get_pgroup_sched(module, array):
    """Return the schedule record for the named PG (case-insensitive), or None."""
    wanted = module.params["name"].casefold()
    return next(
        (
            pgrp
            for pgrp in array.list_pgroups(schedule=True)
            if pgrp["name"].casefold() == wanted
        ),
        None,
    )
+
+
def check_pg_on_offload(module, array):
    """Return the offload target that already holds this PG, or None.

    On an offload target the group is namespaced as
    ``<source-array-name>:<pgroup-name>``.
    """
    array_name = array.get()["array_name"]
    remote_pg = array_name + ":" + module.params["name"]
    # BUGFIX: get_targets() returns None when the offload list cannot be
    # read; iterating None raised TypeError. Treat that as "no targets".
    for target in get_targets(array) or []:
        for rpg in array.list_pgroups(pending=True, on=target):
            if remote_pg == rpg["name"]:
                return target
    return None
+
+
def make_pgroup(module, array):
    """Create a new protection group and apply its initial configuration.

    Creates either a replicated group (when ``target`` is supplied, capped
    at the first four targets) or a plain local group, then sets the
    snapshot/replication enabled state, initial members (volumes, hosts or
    hostgroups) and, optionally, SafeMode retention lock.
    Exits the module via ``exit_json``/``fail_json``; never returns.
    """
    changed = True
    if module.params["target"]:
        api_version = array._list_available_rest_versions()
        connected_targets = []
        connected_arrays = get_arrays(array)
        if OFFLOAD_API_VERSION in api_version:
            connected_targets = get_targets(array)
            offload_name = check_pg_on_offload(module, array)
            # A PG of the same name already present on one of the requested
            # offload targets is a hard error.
            if offload_name and offload_name in module.params["target"][0:4]:
                module.fail_json(
                    msg="Protection Group {0} already exists on offload target {1}.".format(
                        module.params["name"], offload_name
                    )
                )

        connected_arrays = connected_arrays + connected_targets
        if connected_arrays == []:
            module.fail_json(msg="No connected targets on source array.")
        # Only the first 4 requested targets are honoured (per-PG limit).
        if set(module.params["target"][0:4]).issubset(connected_arrays):
            if not module.check_mode:
                try:
                    array.create_pgroup(
                        module.params["name"], targetlist=module.params["target"][0:4]
                    )
                except Exception:
                    module.fail_json(
                        msg="Creation of replicated pgroup {0} failed. {1}".format(
                            module.params["name"], module.params["target"][0:4]
                        )
                    )
        else:
            module.fail_json(
                msg="Check all selected targets are connected to the source array."
            )
    else:
        if not module.check_mode:
            try:
                array.create_pgroup(module.params["name"])
            except Exception:
                module.fail_json(
                    msg="Creation of pgroup {0} failed.".format(module.params["name"])
                )
    # NOTE(review): everything below runs even in check mode, so a
    # check-mode run may issue real set_pgroup/patch calls against a group
    # that was never created -- confirm whether these should be guarded.
    try:
        if module.params["target"]:
            array.set_pgroup(
                module.params["name"],
                replicate_enabled=module.params["enabled"],
            )
        else:
            array.set_pgroup(
                module.params["name"], snap_enabled=module.params["enabled"]
            )
    except Exception:
        module.fail_json(
            msg="Enabling pgroup {0} failed.".format(module.params["name"])
        )
    if module.params["volume"]:
        try:
            array.set_pgroup(
                module.params["name"], vollist=module.params["volume"]
            )
        except Exception:
            module.fail_json(
                msg="Adding volumes to pgroup {0} failed.".format(
                    module.params["name"]
                )
            )
    if module.params["host"]:
        try:
            array.set_pgroup(
                module.params["name"], hostlist=module.params["host"]
            )
        except Exception:
            module.fail_json(
                msg="Adding hosts to pgroup {0} failed.".format(
                    module.params["name"]
                )
            )
    if module.params["hostgroup"]:
        try:
            array.set_pgroup(
                module.params["name"], hgrouplist=module.params["hostgroup"]
            )
        except Exception:
            module.fail_json(
                msg="Adding hostgroups to pgroup {0} failed.".format(
                    module.params["name"]
                )
            )
    if module.params["safe_mode"]:
        # SafeMode requires the REST v2 (py-pure-client) connection.
        arrayv6 = get_array(module)
        try:
            arrayv6.patch_protection_groups(
                names=[module.params["name"]],
                protection_group=flasharray.ProtectionGroup(
                    retention_lock="ratcheted"
                ),
            )
        except Exception:
            module.fail_json(
                msg="Failed to set SafeMode on pgroup {0}".format(
                    module.params["name"]
                )
            )
    module.exit_json(changed=changed)
+
+
def rename_exists(module, array):
    """Return True if the rename target name is already taken.

    The candidate keeps the source group's container prefix: ``pod::name``
    for pod-scoped groups, ``target:name`` for offload-namespaced groups.
    Both live and pending (destroyed) groups count as taken; matching is
    case-insensitive.
    """
    source = module.params["name"]
    candidate = module.params["rename"]
    if ":" in source:
        container = source.split(":")[0]
        separator = "::" if "::" in source else ":"
        candidate = container + separator + candidate
    wanted = candidate.casefold()
    return any(
        pgroup["name"].casefold() == wanted
        for pgroup in array.list_pgroups(pending=True)
    )
+
+
def update_pgroup(module, array):
    """Reconcile an existing protection group with the requested state.

    Handles, in order: replication target membership, the enabled flag for
    the replication or snapshot schedule, volume/host/hostgroup membership
    (each only applied when the other two member types are unset on the
    group), an optional rename, and SafeMode retention lock (REST 2.13+).
    Exits the module via ``exit_json``/``fail_json``; never returns.
    """
    changed = renamed = False
    api_version = array._list_available_rest_versions()
    if module.params["target"]:
        connected_targets = []
        connected_arrays = get_arrays(array)

        if OFFLOAD_API_VERSION in api_version:
            connected_targets = get_targets(array)
            connected_arrays = connected_arrays + connected_targets
        if connected_arrays == []:
            module.fail_json(msg="No targets connected to source array.")
        current_connects = array.get_pgroup(module.params["name"])["targets"]
        current_targets = []

        if current_connects:
            for targetcnt in range(0, len(current_connects)):
                current_targets.append(current_connects[targetcnt]["name"])

        # Only the first 4 requested targets are honoured (per-PG limit).
        if set(module.params["target"][0:4]) != set(current_targets):
            if not set(module.params["target"][0:4]).issubset(connected_arrays):
                module.fail_json(
                    msg="Check all selected targets are connected to the source array."
                )
            changed = True
            if not module.check_mode:
                try:
                    array.set_pgroup(
                        module.params["name"],
                        targetlist=module.params["target"][0:4],
                    )
                except Exception:
                    module.fail_json(
                        msg="Changing targets for pgroup {0} failed.".format(
                            module.params["name"]
                        )
                    )

    # Toggle the replication schedule (replicated PGs) or the snapshot
    # schedule (local PGs) when the requested enabled state differs.
    if (
        module.params["target"]
        and module.params["enabled"]
        != get_pgroup_sched(module, array)["replicate_enabled"]
    ):
        changed = True
        if not module.check_mode:
            try:
                array.set_pgroup(
                    module.params["name"], replicate_enabled=module.params["enabled"]
                )
            except Exception:
                module.fail_json(
                    msg="Changing enabled status of pgroup {0} failed.".format(
                        module.params["name"]
                    )
                )
    elif (
        not module.params["target"]
        and module.params["enabled"] != get_pgroup_sched(module, array)["snap_enabled"]
    ):
        changed = True
        if not module.check_mode:
            try:
                array.set_pgroup(
                    module.params["name"], snap_enabled=module.params["enabled"]
                )
            except Exception:
                module.fail_json(
                    msg="Changing enabled status of pgroup {0} failed.".format(
                        module.params["name"]
                    )
                )

    # Volume membership -- only when the PG holds no hosts or hostgroups
    # (a PG may contain exactly one member type).
    if (
        module.params["volume"]
        and get_pgroup(module, array)["hosts"] is None
        and get_pgroup(module, array)["hgroups"] is None
    ):
        if get_pgroup(module, array)["volumes"] is None:
            # NOTE(review): in this and the sibling membership branches
            # below, ``changed`` is only set inside the check_mode guard,
            # so check-mode runs report changed=False for membership
            # updates -- confirm intent.
            if not module.check_mode:
                changed = True
                try:
                    array.set_pgroup(
                        module.params["name"], vollist=module.params["volume"]
                    )
                except Exception:
                    module.fail_json(
                        msg="Adding volumes to pgroup {0} failed.".format(
                            module.params["name"]
                        )
                    )
        else:
            cased_vols = list(module.params["volume"])
            cased_pgvols = list(get_pgroup(module, array)["volumes"])
            if not all(x in cased_pgvols for x in cased_vols):
                if not module.check_mode:
                    changed = True
                    try:
                        array.set_pgroup(
                            module.params["name"], addvollist=module.params["volume"]
                        )
                    except Exception:
                        module.fail_json(
                            msg="Changing volumes in pgroup {0} failed.".format(
                                module.params["name"]
                            )
                        )

    # Host membership -- only when the PG holds no volumes or hostgroups.
    if (
        module.params["host"]
        and get_pgroup(module, array)["volumes"] is None
        and get_pgroup(module, array)["hgroups"] is None
    ):
        if get_pgroup(module, array)["hosts"] is None:
            if not module.check_mode:
                changed = True
                try:
                    array.set_pgroup(
                        module.params["name"], hostlist=module.params["host"]
                    )
                except Exception:
                    module.fail_json(
                        msg="Adding hosts to pgroup {0} failed.".format(
                            module.params["name"]
                        )
                    )
        else:
            cased_hosts = list(module.params["host"])
            cased_pghosts = list(get_pgroup(module, array)["hosts"])
            if not all(x in cased_pghosts for x in cased_hosts):
                if not module.check_mode:
                    changed = True
                    try:
                        array.set_pgroup(
                            module.params["name"], addhostlist=module.params["host"]
                        )
                    except Exception:
                        module.fail_json(
                            msg="Changing hosts in pgroup {0} failed.".format(
                                module.params["name"]
                            )
                        )

    # Hostgroup membership -- only when the PG holds no hosts or volumes.
    if (
        module.params["hostgroup"]
        and get_pgroup(module, array)["hosts"] is None
        and get_pgroup(module, array)["volumes"] is None
    ):
        if get_pgroup(module, array)["hgroups"] is None:
            if not module.check_mode:
                changed = True
                try:
                    array.set_pgroup(
                        module.params["name"], hgrouplist=module.params["hostgroup"]
                    )
                except Exception:
                    module.fail_json(
                        msg="Adding hostgroups to pgroup {0} failed.".format(
                            module.params["name"]
                        )
                    )
        else:
            cased_hostg = list(module.params["hostgroup"])
            cased_pghostg = list(get_pgroup(module, array)["hgroups"])
            if not all(x in cased_pghostg for x in cased_hostg):
                if not module.check_mode:
                    changed = True
                    try:
                        array.set_pgroup(
                            module.params["name"],
                            addhgrouplist=module.params["hostgroup"],
                        )
                    except Exception:
                        module.fail_json(
                            msg="Changing hostgroups in pgroup {0} failed.".format(
                                module.params["name"]
                            )
                        )
    if module.params["rename"]:
        if not rename_exists(module, array):
            # Preserve the container prefix (pod '::' or offload ':') when
            # building the new name.
            if ":" in module.params["name"]:
                container = module.params["name"].split(":")[0]
                if "::" in module.params["name"]:
                    rename = container + "::" + module.params["rename"]
                else:
                    rename = container + ":" + module.params["rename"]
            else:
                rename = module.params["rename"]
            renamed = True
            if not module.check_mode:
                try:
                    array.rename_pgroup(module.params["name"], rename)
                    # Later sections (e.g. SafeMode) must see the new name.
                    module.params["name"] = rename
                except Exception:
                    module.fail_json(msg="Rename to {0} failed.".format(rename))
        else:
            module.warn(
                "Rename failed. Protection group {0} already exists in container. Continuing with other changes...".format(
                    module.params["rename"]
                )
            )
    if RETENTION_LOCK_VERSION in api_version:
        # SafeMode handling requires the REST v2 (py-pure-client) connection.
        arrayv6 = get_array(module)
        current_pg = list(
            arrayv6.get_protection_groups(names=[module.params["name"]]).items
        )[0]
        if current_pg.retention_lock == "unlocked" and module.params["safe_mode"]:
            changed = True
            if not module.check_mode:
                res = arrayv6.patch_protection_groups(
                    names=[module.params["name"]],
                    protection_group=flasharray.ProtectionGroup(
                        retention_lock="ratcheted"
                    ),
                )
                if res.status_code != 200:
                    module.fail_json(
                        msg="Failed to set SafeMode on protection group {0}. Error: {1}".format(
                            module.params["name"],
                            res.errors[0].message,
                        )
                    )
        if current_pg.retention_lock == "ratcheted" and not module.params["safe_mode"]:
            # Once ratcheted, SafeMode cannot be disabled via the API.
            module.warn(
                "Disabling SafeMode on protection group {0} can only be performed by Pure Technical Support".format(
                    module.params["name"]
                )
            )
    changed = changed or renamed
    module.exit_json(changed=changed)
+
+
def eradicate_pgroup(module, array):
    """Permanently eradicate a (destroyed) protection group.

    Offload-namespaced groups (name contains ``:`` but not ``::``) are
    eradicated on the remote target named by the ``target`` parameter;
    pod-scoped and local groups are eradicated on the local array.
    Exits the module via ``exit_json``/``fail_json``; never returns.
    """
    changed = True
    if not module.check_mode:
        if ":" in module.params["name"]:
            if "::" not in module.params["name"]:
                try:
                    # NOTE(review): assumes ``target`` was supplied for the
                    # offload case -- "".join(None) raises and is reported
                    # only as a generic eradication failure. Confirm callers.
                    target = "".join(module.params["target"])
                    array.destroy_pgroup(
                        module.params["name"], on=target, eradicate=True
                    )
                except Exception:
                    module.fail_json(
                        msg="Eradicating pgroup {0} failed.".format(
                            module.params["name"]
                        )
                    )
            else:
                try:
                    array.destroy_pgroup(module.params["name"], eradicate=True)
                except Exception:
                    module.fail_json(
                        msg="Eradicating pgroup {0} failed.".format(
                            module.params["name"]
                        )
                    )
        else:
            try:
                array.destroy_pgroup(module.params["name"], eradicate=True)
            except Exception:
                module.fail_json(
                    msg="Eradicating pgroup {0} failed.".format(module.params["name"])
                )
    module.exit_json(changed=changed)
+
+
def delete_pgroup(module, array):
    """Destroy a protection group, optionally eradicating it afterwards.

    Offload-namespaced groups (``:`` without ``::``) are destroyed on the
    remote target named by the ``target`` parameter; pod-scoped and local
    groups are destroyed on the local array. Eradication is delegated to
    eradicate_pgroup(), which performs its own check-mode handling.
    Exits the module via ``exit_json``/``fail_json``; never returns.
    """
    changed = True
    if not module.check_mode:
        name = module.params["name"]
        on_offload = ":" in name and "::" not in name
        try:
            if on_offload:
                array.destroy_pgroup(name, on="".join(module.params["target"]))
            else:
                array.destroy_pgroup(name)
        except Exception:
            module.fail_json(msg="Deleting pgroup {0} failed.".format(name))
    if module.params["eradicate"]:
        eradicate_pgroup(module, array)

    module.exit_json(changed=changed)
+
+
def recover_pgroup(module, array):
    """Recover (undestroy) a deleted protection group.

    Groups namespaced to an offload target (name contains ``:`` but not
    ``::``) are recovered on the remote target named by the ``target``
    parameter; pod-scoped and local groups are recovered locally.
    Exits the module via ``exit_json``/``fail_json``; never returns.
    """
    changed = True
    if not module.check_mode:
        if ":" in module.params["name"] and "::" not in module.params["name"]:
            try:
                # Offload-target group: recover it on the named target.
                target = "".join(module.params["target"])
                array.recover_pgroup(module.params["name"], on=target)
            except Exception:
                module.fail_json(
                    msg="Recover pgroup {0} failed.".format(module.params["name"])
                )
        else:
            try:
                array.recover_pgroup(module.params["name"])
            except Exception:
                # BUGFIX: this message previously read "ecover pgroup".
                module.fail_json(
                    msg="Recover pgroup {0} failed.".format(module.params["name"])
                )
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: validate parameters and dispatch the action."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True, aliases=["pgroup"]),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            volume=dict(type="list", elements="str"),
            host=dict(type="list", elements="str"),
            hostgroup=dict(type="list", elements="str"),
            target=dict(type="list", elements="str"),
            safe_mode=dict(type="bool", default=False),
            eradicate=dict(type="bool", default=False),
            enabled=dict(type="bool", default=True),
            rename=dict(type="str"),
        )
    )

    # A protection group may contain only one member type.
    mutually_exclusive = [["volume", "host", "hostgroup"]]
    module = AnsibleModule(
        argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True
    )
    if not HAS_PURESTORAGE and module.params["safe_mode"]:
        module.fail_json(
            msg="py-pure-client sdk is required to support 'safe_mode' parameter"
        )

    state = module.params["state"]
    array = get_system(module)
    # Purity object naming convention: 1-63 alphanumeric/dash characters,
    # starting and ending with an alphanumeric.
    pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$")
    if module.params["rename"]:
        if not pattern.match(module.params["rename"]):
            module.fail_json(
                msg="Rename value {0} does not conform to naming convention".format(
                    module.params["rename"]
                )
            )
        if not pattern.match(module.params["name"].split(":")[-1]):
            module.fail_json(
                msg="Protection Group name {0} does not conform to naming convention".format(
                    module.params["name"]
                )
            )
    api_version = array._list_available_rest_versions()
    if module.params["safe_mode"] and RETENTION_LOCK_VERSION not in api_version:
        module.fail_json(
            msg="API version does not support setting SafeMode on a protection group."
        )
    if ":" in module.params["name"] and OFFLOAD_API_VERSION not in api_version:
        module.fail_json(msg="API version does not support offload protection groups.")
    if "::" in module.params["name"] and AC_PG_API_VERSION not in api_version:
        module.fail_json(
            msg="API version does not support ActiveCluster protection groups."
        )
    # Validate the bare PG name after stripping any pod/offload prefix.
    if ":" in module.params["name"]:
        if "::" in module.params["name"]:
            pgname = module.params["name"].split("::")[1]
        else:
            pgname = module.params["name"].split(":")[1]
        if not pattern.match(pgname):
            module.fail_json(
                msg="Protection Group name {0} does not conform to naming convention".format(
                    pgname
                )
            )
    else:
        if not pattern.match(module.params["name"]):
            module.fail_json(
                msg="Protection Group name {0} does not conform to naming convention".format(
                    module.params["name"]
                )
            )

    pgroup = get_pgroup(module, array)  # live group, if any
    xpgroup = get_pending_pgroup(module, array)  # destroyed-but-recoverable group
    if "::" in module.params["name"]:
        if not get_pod(module, array):
            module.fail_json(
                msg="Pod {0} does not exist.".format(
                    module.params["name"].split("::")[0]
                )
            )

    if module.params["host"]:
        try:
            for hst in module.params["host"]:
                array.get_host(hst)
        except Exception:
            module.fail_json(msg="Host {0} not found".format(hst))

    if module.params["hostgroup"]:
        try:
            for hstg in module.params["hostgroup"]:
                array.get_hgroup(hstg)
        except Exception:
            module.fail_json(msg="Hostgroup {0} not found".format(hstg))

    # Dispatch: each helper exits the module itself via exit_json/fail_json.
    if pgroup and state == "present":
        update_pgroup(module, array)
    elif pgroup and state == "absent":
        delete_pgroup(module, array)
    elif xpgroup and state == "absent" and module.params["eradicate"]:
        eradicate_pgroup(module, array)
    elif (
        not pgroup
        and not xpgroup
        and state == "present"
        and not module.params["rename"]
    ):
        make_pgroup(module, array)
    elif not pgroup and state == "present" and module.params["rename"]:
        module.exit_json(changed=False)
    elif xpgroup and state == "present":
        recover_pgroup(module, array)
    elif pgroup is None and state == "absent":
        module.exit_json(changed=False)

    module.exit_json(changed=False)
+
+
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py
new file mode 100644
index 000000000..dc0a488d4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py
@@ -0,0 +1,527 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_pgsched
+short_description: Manage protection groups replication schedules on Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+- Modify or delete protection groups replication schedules on Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the protection group.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether to set or delete the protection group schedule.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ schedule:
+ description:
+ - Which schedule to change.
+ type: str
+ choices: ['replication', 'snapshot']
+ required: true
+ enabled:
+ description:
+ - Enable the schedule being configured.
+ type: bool
+ default: true
+ replicate_at:
+ description:
+ - Specifies the preferred time as HH:MM:SS, using 24-hour clock, at which to generate snapshots.
+ type: int
+ blackout_start:
+ description:
+ - Specifies the time at which to suspend replication.
+ - Provide a time in 12-hour AM/PM format, eg. 11AM
+ type: str
+ blackout_end:
+ description:
+ - Specifies the time at which to restart replication.
+ - Provide a time in 12-hour AM/PM format, eg. 5PM
+ type: str
+ replicate_frequency:
+ description:
+ - Specifies the replication frequency in seconds.
+ - Range 900 - 34560000 (FA-405, //M10, //X10i and Cloud Block Store).
+ - Range 300 - 34560000 (all other arrays).
+ type: int
+ snap_at:
+ description:
+ - Specifies the preferred time as HH:MM:SS, using 24-hour clock, at which to generate snapshots.
+ - Only valid if I(snap_frequency) is an exact multiple of 86400, ie 1 day.
+ type: int
+ snap_frequency:
+ description:
+ - Specifies the snapshot frequency in seconds.
+ - Range available 300 - 34560000.
+ type: int
+ days:
+ description:
+ - Specifies the number of days to keep the I(per_day) snapshots beyond the
+ I(all_for) period before they are eradicated
+ - Max retention period is 4000 days
+ type: int
+ all_for:
+ description:
+ - Specifies the length of time, in seconds, to keep the snapshots on the
+ source array before they are eradicated.
+ - Range available 1 - 34560000.
+ type: int
+ per_day:
+ description:
+ - Specifies the number of I(per_day) snapshots to keep beyond the I(all_for) period.
+ - Maximum number is 1440
+ type: int
+ target_all_for:
+ description:
+ - Specifies the length of time, in seconds, to keep the replicated snapshots on the targets.
+ - Range is 1 - 34560000 seconds.
+ type: int
+ target_per_day:
+ description:
+ - Specifies the number of I(per_day) replicated snapshots to keep beyond the I(target_all_for) period.
+ - Maximum number is 1440
+ type: int
+ target_days:
+ description:
+ - Specifies the number of days to keep the I(target_per_day) replicated snapshots
+ beyond the I(target_all_for) period before they are eradicated.
+ - Max retention period is 4000 days
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Update protection group snapshot schedule
+ purestorage.flasharray.purefa_pgsched:
+ name: foo
+ schedule: snapshot
+ enabled: true
+ snap_frequency: 86400
+ snap_at: 15:30:00
+ per_day: 5
+ all_for: 5
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update protection group replication schedule
+ purestorage.flasharray.purefa_pgsched:
+ name: foo
+ schedule: replication
+ enabled: true
+ replicate_frequency: 86400
+ replicate_at: 15:30:00
+ target_per_day: 5
+ target_all_for: 5
+ blackout_start: 2AM
+ blackout_end: 5AM
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete protection group snapshot schedule
+ purestorage.flasharray.purefa_pgsched:
+ name: foo
+ schedule: snapshot
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete protection group replication schedule
+ purestorage.flasharray.purefa_pgsched:
+ name: foo
+ schedule: replication
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
def get_pending_pgroup(module, array):
    """Return a destroyed-but-recoverable protection group, or None.

    Groups whose name carries a container prefix (contains ``:``) are
    searched across all offload targets; otherwise only the local array is
    searched. Only groups still inside their eradication window (truthy
    ``time_remaining``) are returned. Matching is exact (case-sensitive).
    """
    name = module.params["name"]
    if ":" in name:
        pending = array.list_pgroups(pending=True, on="*")
    else:
        pending = array.list_pgroups(pending=True)
    return next(
        (pgrp for pgrp in pending if pgrp["name"] == name and pgrp["time_remaining"]),
        None,
    )
+
+
def get_pgroup(module, array):
    """Return the live protection group with the exact module name, or None.

    Groups on offload targets (name contains ``:`` but not ``::``) are
    searched across all targets via ``on='*'``; pod-scoped and local
    groups are searched on the local array only.
    """
    name = module.params["name"]
    if ":" in name and "::" not in name:
        pgroups = array.list_pgroups(on="*")
    else:
        pgroups = array.list_pgroups()
    for pgrp in pgroups:
        if pgrp["name"] == name:
            return pgrp
    return None
+
+
+def _convert_to_minutes(hour):
+ if hour[-2:] == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:] == "AM":
+ return int(hour[:-2]) * 3600
+ elif hour[-2:] == "PM" and hour[:2] == "12":
+ return 43200
+ return (int(hour[:-2]) + 12) * 3600
+
+
def update_schedule(module, array):
    """Update a protection group's snapshot or replication schedule.

    Reads the pgroup's current schedule and retention settings, builds the
    requested state (falling back to current values for omitted options),
    and applies only the differences. Exits the module with changed status.

    BUG FIX: when ``blackout_end`` was omitted, the original code reused
    ``current_repl["blackout_start"]`` as the end time instead of
    ``current_repl["blackout_end"]``, silently corrupting the blackout
    window on unrelated updates.
    """
    changed = False
    try:
        schedule = array.get_pgroup(module.params["name"], schedule=True)
        retention = array.get_pgroup(module.params["name"], retention=True)
        if not schedule["replicate_blackout"]:
            # Normalize "no blackout" to an explicit zero-length window so
            # the comparison dicts below always have start/end keys.
            schedule["replicate_blackout"] = [{"start": 0, "end": 0}]
    except Exception:
        module.fail_json(
            msg="Failed to get current schedule for pgroup {0}.".format(
                module.params["name"]
            )
        )
    current_repl = {
        "replicate_frequency": schedule["replicate_frequency"],
        "replicate_enabled": schedule["replicate_enabled"],
        "target_days": retention["target_days"],
        "replicate_at": schedule["replicate_at"],
        "target_per_day": retention["target_per_day"],
        "target_all_for": retention["target_all_for"],
        "blackout_start": schedule["replicate_blackout"][0]["start"],
        "blackout_end": schedule["replicate_blackout"][0]["end"],
    }
    current_snap = {
        "days": retention["days"],
        "snap_frequency": schedule["snap_frequency"],
        "snap_enabled": schedule["snap_enabled"],
        "snap_at": schedule["snap_at"],
        "per_day": retention["per_day"],
        "all_for": retention["all_for"],
    }
    if module.params["schedule"] == "snapshot":
        if not module.params["snap_frequency"]:
            snap_frequency = current_snap["snap_frequency"]
        else:
            if not 300 <= module.params["snap_frequency"] <= 34560000:
                module.fail_json(
                    msg="Snap Frequency support is out of range (300 to 34560000)"
                )
            else:
                snap_frequency = module.params["snap_frequency"]

        if not module.params["snap_at"]:
            snap_at = current_snap["snap_at"]
        else:
            snap_at = module.params["snap_at"]

        if not module.params["days"]:
            # days=0 is a legitimate explicit value; only fall back on None.
            if isinstance(module.params["days"], int):
                days = module.params["days"]
            else:
                days = current_snap["days"]
        else:
            if module.params["days"] > 4000:
                module.fail_json(msg="Maximum value for days is 4000")
            else:
                days = module.params["days"]

        if module.params["per_day"] is None:
            per_day = current_snap["per_day"]
        else:
            if module.params["per_day"] > 1440:
                module.fail_json(msg="Maximum value for per_day is 1440")
            else:
                per_day = module.params["per_day"]

        if not module.params["all_for"]:
            all_for = current_snap["all_for"]
        else:
            if module.params["all_for"] > 34560000:
                module.fail_json(msg="Maximum all_for value is 34560000")
            else:
                all_for = module.params["all_for"]
        new_snap = {
            "days": days,
            "snap_frequency": snap_frequency,
            "snap_enabled": module.params["enabled"],
            "snap_at": snap_at,
            "per_day": per_day,
            "all_for": all_for,
        }
        if current_snap != new_snap:
            changed = True
            if not module.check_mode:
                try:
                    array.set_pgroup(
                        module.params["name"], snap_enabled=module.params["enabled"]
                    )
                    array.set_pgroup(
                        module.params["name"],
                        snap_frequency=snap_frequency,
                        snap_at=snap_at,
                    )
                    array.set_pgroup(
                        module.params["name"],
                        days=days,
                        per_day=per_day,
                        all_for=all_for,
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to change snapshot schedule for pgroup {0}.".format(
                            module.params["name"]
                        )
                    )
    else:
        if not module.params["replicate_frequency"]:
            replicate_frequency = current_repl["replicate_frequency"]
        else:
            # Entry-level models (x05/x10/CBS) have a higher minimum
            # replication interval.
            model = array.get(controllers=True)[0]["model"]
            if "405" in model or "10" in model or "CBS" in model:
                if not 900 <= module.params["replicate_frequency"] <= 34560000:
                    module.fail_json(
                        msg="Replication Frequency support is out of range (900 to 34560000)"
                    )
                else:
                    replicate_frequency = module.params["replicate_frequency"]
            else:
                if not 300 <= module.params["replicate_frequency"] <= 34560000:
                    module.fail_json(
                        msg="Replication Frequency support is out of range (300 to 34560000)"
                    )
                else:
                    replicate_frequency = module.params["replicate_frequency"]

        if not module.params["replicate_at"]:
            replicate_at = current_repl["replicate_at"]
        else:
            replicate_at = module.params["replicate_at"]

        if not module.params["target_days"]:
            if isinstance(module.params["target_days"], int):
                target_days = module.params["target_days"]
            else:
                target_days = current_repl["target_days"]
        else:
            if module.params["target_days"] > 4000:
                module.fail_json(msg="Maximum value for target_days is 4000")
            else:
                target_days = module.params["target_days"]

        if not module.params["target_per_day"]:
            if isinstance(module.params["target_per_day"], int):
                target_per_day = module.params["target_per_day"]
            else:
                target_per_day = current_repl["target_per_day"]
        else:
            if module.params["target_per_day"] > 1440:
                module.fail_json(msg="Maximum value for target_per_day is 1440")
            else:
                target_per_day = module.params["target_per_day"]

        if not module.params["target_all_for"]:
            target_all_for = current_repl["target_all_for"]
        else:
            if module.params["target_all_for"] > 34560000:
                module.fail_json(msg="Maximum target_all_for value is 34560000")
            else:
                target_all_for = module.params["target_all_for"]
        if not module.params["blackout_end"]:
            # FIX: was current_repl["blackout_start"] (copy/paste error).
            blackout_end = current_repl["blackout_end"]
        else:
            blackout_end = _convert_to_minutes(module.params["blackout_end"])
        if not module.params["blackout_start"]:
            blackout_start = current_repl["blackout_start"]
        else:
            blackout_start = _convert_to_minutes(module.params["blackout_start"])

        new_repl = {
            "replicate_frequency": replicate_frequency,
            "replicate_enabled": module.params["enabled"],
            "target_days": target_days,
            "replicate_at": replicate_at,
            "target_per_day": target_per_day,
            "target_all_for": target_all_for,
            "blackout_start": blackout_start,
            "blackout_end": blackout_end,
        }
        if current_repl != new_repl:
            changed = True
            if not module.check_mode:
                blackout = {"start": blackout_start, "end": blackout_end}
                try:
                    array.set_pgroup(
                        module.params["name"],
                        replicate_enabled=module.params["enabled"],
                    )
                    array.set_pgroup(
                        module.params["name"],
                        replicate_frequency=replicate_frequency,
                        replicate_at=replicate_at,
                    )
                    if blackout_start == 0:
                        # A zero start means "no blackout window".
                        array.set_pgroup(module.params["name"], replicate_blackout=None)
                    else:
                        array.set_pgroup(
                            module.params["name"], replicate_blackout=blackout
                        )
                    array.set_pgroup(
                        module.params["name"],
                        target_days=target_days,
                        target_per_day=target_per_day,
                        target_all_for=target_all_for,
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to change replication schedule for pgroup {0}.".format(
                            module.params["name"]
                        )
                    )

    module.exit_json(changed=changed)
+
+
def delete_schedule(module, array):
    """Disable a protection group's snapshot or replication schedule.

    "Delete" means: turn the schedule off and reset its retention and
    frequency settings back to factory defaults.
    """
    changed = False
    pgname = module.params["name"]
    try:
        current_state = array.get_pgroup(pgname, schedule=True)
        if module.params["schedule"] == "replication":
            if current_state["replicate_enabled"]:
                changed = True
                if not module.check_mode:
                    array.set_pgroup(pgname, replicate_enabled=False)
                    # Reset retention, then frequency/blackout, to defaults.
                    array.set_pgroup(
                        pgname, target_days=0, target_per_day=0, target_all_for=1
                    )
                    array.set_pgroup(
                        pgname, replicate_frequency=14400, replicate_blackout=None
                    )
        elif current_state["snap_enabled"]:
            changed = True
            if not module.check_mode:
                array.set_pgroup(pgname, snap_enabled=False)
                array.set_pgroup(pgname, days=0, per_day=0, all_for=1)
                array.set_pgroup(pgname, snap_frequency=300)
    except Exception:
        module.fail_json(
            msg="Deleting pgroup {0} {1} schedule failed.".format(
                pgname, module.params["schedule"]
            )
        )
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: validate parameters and apply the requested schedule state."""
    spec = purefa_argument_spec()
    spec.update(
        dict(
            name=dict(type="str", required=True),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            schedule=dict(
                type="str", required=True, choices=["replication", "snapshot"]
            ),
            blackout_start=dict(type="str"),
            blackout_end=dict(type="str"),
            snap_at=dict(type="int"),
            replicate_at=dict(type="int"),
            replicate_frequency=dict(type="int"),
            snap_frequency=dict(type="int"),
            all_for=dict(type="int"),
            days=dict(type="int"),
            per_day=dict(type="int"),
            target_all_for=dict(type="int"),
            target_per_day=dict(type="int"),
            target_days=dict(type="int"),
            enabled=dict(type="bool", default=True),
        )
    )

    module = AnsibleModule(
        spec,
        required_together=[["blackout_start", "blackout_end"]],
        supports_check_mode=True,
    )

    state = module.params["state"]
    array = get_system(module)
    pgroup = get_pgroup(module, array)

    # snap_at is only meaningful when the frequency is a whole number of days.
    if module.params["snap_at"] and module.params["snap_frequency"]:
        if module.params["snap_frequency"] % 86400 != 0:
            module.fail_json(
                msg="snap_at not valid unless snapshot frequency is measured in days, ie. a multiple of 86400"
            )

    if pgroup is None:
        module.fail_json(
            msg="Specified protection group {0} does not exist.".format(
                module.params["name"]
            )
        )
    if state == "present":
        update_schedule(module, array)
    else:
        delete_schedule(module, array)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py
new file mode 100644
index 000000000..822b0491f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py
@@ -0,0 +1,481 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_pgsnap
+version_added: '1.0.0'
+short_description: Manage protection group snapshots on Pure Storage FlashArrays
+description:
+- Create or delete protection group snapshots on Pure Storage FlashArray.
+- Recovery of replicated snapshots on the replica target array is enabled.
+- Support for ActiveCluster and Volume Group protection groups is supported.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the source protection group.
+ type: str
+ required: true
+ suffix:
+ description:
+ - Suffix of snapshot name.
+ - Special case. If I(latest) the module will select the latest snapshot created in the group
+ type: str
+ state:
+ description:
+ - Define whether the protection group snapshot should exist or not.
+ Copy (added in 2.7) will create a full read/write clone of the
+ snapshot.
+ type: str
+ choices: [ absent, present, copy ]
+ default: present
+ eradicate:
+ description:
+ - Define whether to eradicate the snapshot on delete or leave in trash.
+ type: bool
+ default: false
+ restore:
+ description:
+ - Restore a specific volume from a protection group snapshot.
+ - The protection group name is not required. Only provide the name of the
+ volume to be restored.
+ type: str
+ overwrite:
+ description:
+ - Define whether to overwrite the target volume if it already exists.
+ type: bool
+ default: false
+ target:
+ description:
+ - Volume to restore a specified volume to.
+ - If not supplied this will default to the volume defined in I(restore)
+ type: str
+ offload:
+ description:
+ - Name of offload target on which the snapshot exists.
+ - This is only applicable for deletion and erasure of snapshots
+ type: str
+ now:
+ description:
+    - Whether to initiate a snapshot of the protection group immediately
+ type: bool
+ default: false
+ apply_retention:
+ description:
+ - Apply retention schedule settings to the snapshot
+ type: bool
+ default: false
+ remote:
+ description:
+    - Force immediate snapshot to remote targets
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create protection group snapshot foo.ansible
+ purestorage.flasharray.purefa_pgsnap:
+ name: foo
+ suffix: ansible
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate protection group snapshot named foo.snap
+ purestorage.flasharray.purefa_pgsnap:
+ name: foo
+ suffix: snap
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Restore volume data from local protection group snapshot named foo.snap to volume data2
+ purestorage.flasharray.purefa_pgsnap:
+ name: foo
+ suffix: snap
+ restore: data
+ target: data2
+ overwrite: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Restore remote protection group snapshot arrayA:pgname.snap.data to local copy
+ purestorage.flasharray.purefa_pgsnap:
+ name: arrayA:pgname
+ suffix: snap
+ restore: data
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Restore AC pod protection group snapshot pod1::pgname.snap.data to pdo1::data2
+ purestorage.flasharray.purefa_pgsnap:
+ name: pod1::pgname
+ suffix: snap
+ restore: data
+ target: pod1::data2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Create snapshot of existing pgroup foo with suffix and force immediate copy to remote targets
+ purestorage.flasharray.purefa_pgsnap:
+ name: pgname
+ suffix: force
+ now: true
+ apply_retention: true
+ remote: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete and eradicate snapshot named foo.snap on offload target bar from arrayA
+ purestorage.flasharray.purefa_pgsnap:
+ name: "arrayA:foo"
+ suffix: snap
+ offload: bar
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+"""
+
+RETURN = r"""
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+from datetime import datetime
+
+OFFLOAD_API = "1.16"
+POD_SNAPSHOT = "2.4"
+
+
def _check_offload(module, array):
    """Return True if the offload target is connected, False if not, None on error."""
    try:
        status = array.get_offload(module.params["offload"])["status"]
    except Exception:
        return None
    return status == "connected"
+
+
def get_pgroup(module, array):
    """Look up the protection group named in the task; None if it does not exist."""
    pgname = module.params["name"]
    try:
        pgroup = array.get_pgroup(pgname)
    except Exception:
        pgroup = None
    return pgroup
+
+
def get_pgroupvolume(module, array):
    """Return the restore volume if it is a member of the pgroup, else None."""
    try:
        pgroup = array.get_pgroup(module.params["name"])
        if "::" in module.params["name"]:
            # Pod pgroup: the member volume carries the same pod prefix.
            pod_name = module.params["name"].split("::")[0]
            wanted = pod_name + "::" + module.params["restore"]
        else:
            wanted = module.params["restore"]
        for volume in pgroup["volumes"]:
            if volume == wanted:
                return volume
    except Exception:
        return None
+
+
def get_rpgsnapshot(module, array):
    """Return the replicated volume snapshot name if it exists, else None."""
    try:
        snapname = ".".join(
            (
                module.params["name"],
                module.params["suffix"],
                module.params["restore"],
            )
        )
        for snap in array.list_volumes(snap=True):
            if snap["name"] == snapname:
                return snapname
    except Exception:
        return None
+
+
def get_offload_snapshot(module, array):
    """Return the snapshot name on the offload target (active or deleted), else None."""
    try:
        snapname = module.params["name"] + "." + module.params["suffix"]
        offload_snaps = array.get_pgroup(
            module.params["name"], snap=True, on=module.params["offload"]
        )
        for snap in offload_snaps:
            if snap["name"] == snapname:
                return snapname
    except Exception:
        return None
+
+
def get_pgsnapshot(module, array):
    """Return the pgroup snapshot name (active or pending) if present, else None."""
    try:
        snapname = module.params["name"] + "." + module.params["suffix"]
        snaps = array.get_pgroup(module.params["name"], pending=True, snap=True)
        for snap in snaps:
            if snap["name"] == snapname:
                return snapname
    except Exception:
        return None
+
+
def create_pgsnapshot(module, array):
    """Create a protection group snapshot, optionally replicating it immediately."""
    changed = True
    if not module.check_mode:
        snap_args = dict(
            source=module.params["name"],
            suffix=module.params["suffix"],
            snap=True,
            apply_retention=module.params["apply_retention"],
        )
        try:
            if (
                module.params["now"]
                and array.get_pgroup(module.params["name"])["targets"] is not None
            ):
                # Immediate replication only makes sense when the pgroup
                # actually has replication targets.
                snap_args["replicate_now"] = module.params["remote"]
            array.create_pgroup_snapshot(**snap_args)
        except Exception:
            module.fail_json(
                msg="Snapshot of pgroup {0} failed.".format(module.params["name"])
            )
    module.exit_json(changed=changed)
+
+
def restore_pgsnapvolume(module, array):
    """Restore a volume from a protection group snapshot.

    Supports local, replicated ("array:pg") and pod ("pod::pg") pgroups.
    With suffix=latest, the newest usable snapshot in the group is selected.

    BUG FIX: ``list.reverse()`` reverses in place and returns None, so the
    original ``all_snaps = array.get_pgroup(...).reverse()`` left
    ``all_snaps`` as None and the loop below raised TypeError whenever
    suffix=latest was used. Reverse via slicing instead.
    """
    api_version = array._list_available_rest_versions()
    changed = True
    if module.params["suffix"] == "latest":
        # Newest transfer records first.
        all_snaps = array.get_pgroup(module.params["name"], snap=True, transfer=True)[
            ::-1
        ]
        latest_snap = None
        for snap in all_snaps:
            # NOTE(review): "completed" appears to be the transfer-completion
            # marker; the original selects entries where it is falsy -
            # confirm intent against the REST transfer schema.
            if not snap["completed"]:
                latest_snap = snap["name"]
                break
        if latest_snap is None:
            module.fail_json(msg="There is no completed snapshot available.")
        # Snapshot names are "<pgroup>.<suffix>"; adopt the found suffix.
        module.params["suffix"] = latest_snap.split(".")[1]
    if ":" in module.params["name"] and "::" not in module.params["name"]:
        # Replicated pgroup: the snapshot object itself must exist locally.
        if get_rpgsnapshot(module, array) is None:
            module.fail_json(
                msg="Selected restore snapshot {0} does not exist in the Protection Group".format(
                    module.params["restore"]
                )
            )
    else:
        # Local or pod pgroup: the source volume must be a member.
        if get_pgroupvolume(module, array) is None:
            module.fail_json(
                msg="Selected restore volume {0} does not exist in the Protection Group".format(
                    module.params["restore"]
                )
            )
    volume = (
        module.params["name"]
        + "."
        + module.params["suffix"]
        + "."
        + module.params["restore"]
    )
    if "::" in module.params["target"]:
        target_pod_name = module.params["target"].split(":")[0]
        if "::" in module.params["name"]:
            source_pod_name = module.params["name"].split(":")[0]
        else:
            source_pod_name = ""
        if source_pod_name != target_pod_name:
            # Restoring into a stretched pod requires POD_SNAPSHOT API support.
            if (
                len(array.get_pod(target_pod_name, mediator=True)["arrays"]) > 1
                and POD_SNAPSHOT not in api_version
            ):
                module.fail_json(msg="Volume cannot be restored to a stretched pod")
    if not module.check_mode:
        try:
            array.copy_volume(
                volume, module.params["target"], overwrite=module.params["overwrite"]
            )
        except Exception:
            module.fail_json(
                msg="Failed to restore {0} from pgroup {1}".format(
                    volume, module.params["name"]
                )
            )
    module.exit_json(changed=changed)
+
+
def delete_offload_snapshot(module, array):
    """Delete Offloaded Protection Group Snapshot

    Requires the pgroup name in remote format ("array:pgroup") and a
    connected offload target; otherwise the task fails. The destroy call
    itself is best-effort, but a failed eradicate is reported.
    """
    changed = False
    # Offloaded snapshot objects are named "<pgroup>.<suffix>".
    snapname = module.params["name"] + "." + module.params["suffix"]
    if ":" in module.params["name"] and module.params["offload"]:
        if _check_offload(module, array):
            changed = True
            if not module.check_mode:
                try:
                    array.destroy_pgroup(snapname, on=module.params["offload"])
                    if module.params["eradicate"]:
                        try:
                            array.eradicate_pgroup(
                                snapname, on=module.params["offload"]
                            )
                        except Exception:
                            module.fail_json(
                                msg="Failed to eradicate offloaded snapshot {0} on target {1}".format(
                                    snapname, module.params["offload"]
                                )
                            )
                except Exception:
                    # Deliberate best-effort: a destroy failure (e.g. the
                    # snapshot is already destroyed) is silently ignored.
                    pass
        else:
            module.fail_json(
                msg="Offload target {0} does not exist or not connected".format(
                    module.params["offload"]
                )
            )
    else:
        module.fail_json(msg="Protection Group name not in the correct format")

    module.exit_json(changed=changed)
+
+
def delete_pgsnapshot(module, array):
    """Destroy (and optionally eradicate) a protection group snapshot."""
    changed = True
    if not module.check_mode:
        snapname = "{0}.{1}".format(module.params["name"], module.params["suffix"])
        try:
            array.destroy_pgroup(snapname)
            if module.params["eradicate"]:
                try:
                    array.eradicate_pgroup(snapname)
                except Exception:
                    module.fail_json(
                        msg="Failed to eradicate pgroup {0}".format(snapname)
                    )
        except Exception:
            module.fail_json(msg="Failed to delete pgroup {0}".format(snapname))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: dispatch snapshot create/copy/delete based on state."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            suffix=dict(type="str"),
            restore=dict(type="str"),
            offload=dict(type="str"),
            overwrite=dict(type="bool", default=False),
            target=dict(type="str"),
            eradicate=dict(type="bool", default=False),
            now=dict(type="bool", default=False),
            apply_retention=dict(type="bool", default=False),
            remote=dict(type="bool", default=False),
            state=dict(
                type="str", default="present", choices=["absent", "present", "copy"]
            ),
        )
    )

    # state=copy needs both a snapshot suffix and a volume to restore.
    required_if = [("state", "copy", ["suffix", "restore"])]

    module = AnsibleModule(
        argument_spec, required_if=required_if, supports_check_mode=True
    )
    # Valid suffix: alphanumeric/hyphen, <= 65 chars, at least one letter
    # or hyphen, no leading/trailing hyphen.
    pattern = re.compile("^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,63}[a-zA-Z0-9])?$")
    state = module.params["state"]
    if state == "present":
        if module.params["suffix"] is None:
            # Default suffix: "snap-<epoch seconds>" with the dot removed.
            suffix = "snap-" + str(
                (datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds()
            )
            module.params["suffix"] = suffix.replace(".", "")
        else:
            if not pattern.match(module.params["suffix"]):
                module.fail_json(
                    msg="Suffix name {0} does not conform to suffix name rules".format(
                        module.params["suffix"]
                    )
                )

    # For restores, the target volume defaults to the source volume name.
    if not module.params["target"] and module.params["restore"]:
        module.params["target"] = module.params["restore"]

    array = get_system(module)
    api_version = array._list_available_rest_versions()
    if OFFLOAD_API not in api_version and module.params["offload"]:
        module.fail_json(
            msg="Minimum version {0} required for offload support".format(OFFLOAD_API)
        )
    pgroup = get_pgroup(module, array)
    if pgroup is None:
        module.fail_json(
            msg="Protection Group {0} does not exist.".format(module.params["name"])
        )
    pgsnap = get_pgsnapshot(module, array)
    # NOTE: the branch order below matters - the offload guard must come
    # first so that offload is rejected for present/copy before dispatch.
    if state != "absent" and module.params["offload"]:
        module.fail_json(
            msg="offload parameter not supported for state {0}".format(state)
        )
    elif state == "copy":
        restore_pgsnapvolume(module, array)
    elif state == "present" and not pgsnap:
        create_pgsnapshot(module, array)
    elif state == "present" and pgsnap:
        module.exit_json(changed=False)
    elif (
        state == "absent"
        and module.params["offload"]
        and get_offload_snapshot(module, array)
    ):
        delete_offload_snapshot(module, array)
    elif state == "absent" and pgsnap:
        delete_pgsnapshot(module, array)
    elif state == "absent" and not pgsnap:
        module.exit_json(changed=False)

    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py
new file mode 100644
index 000000000..b428b3e33
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_phonehome
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashArray Phonehome
+description:
+- Enable or Disable Phonehome for a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of phonehome
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable Phonehome
+ purestorage.flasharray.purefa_phonehome:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable Phonehome
+ purestorage.flasharray.purefa_phonehome:
+    state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
def enable_ph(module, array):
    """Turn phonehome on when it is not already enabled."""
    changed = False
    currently_enabled = array.get_phonehome()["phonehome"] == "enabled"
    if not currently_enabled:
        try:
            if not module.check_mode:
                array.enable_phonehome()
            changed = True
        except Exception:
            module.fail_json(msg="Enabling Phonehome failed")
    module.exit_json(changed=changed)
+
+
def disable_ph(module, array):
    """Turn phonehome off when it is currently enabled.

    BUG FIX: the failure message previously said "Disabling Remote Assist
    failed" - a copy/paste from the remote-assist module; this module
    manages phonehome.
    """
    changed = False
    if array.get_phonehome()["phonehome"] == "enabled":
        try:
            if not module.check_mode:
                array.disable_phonehome()
            changed = True
        except Exception:
            module.fail_json(msg="Disabling Phonehome failed")
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: enable or disable phonehome based on the state parameter."""
    spec = purefa_argument_spec()
    spec.update(
        dict(
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )
    )

    module = AnsibleModule(spec, supports_check_mode=True)
    array = get_system(module)

    if module.params["state"] == "present":
        enable_ph(module, array)
    else:
        disable_ph(module, array)
    # Both helpers exit the module; this is an unreachable safety net.
    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py
new file mode 100644
index 000000000..75c4eb6c9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py
@@ -0,0 +1,664 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_pod
+short_description: Manage AC pods in Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+- Manage AC pods in a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the pod.
+ type: str
+ required: true
+ stretch:
+ description:
+    - The name of the array to stretch to/unstretch from. Must be synchronously replicated.
+ - To unstretch an array use state I(absent)
+ - You can only specify a remote array, ie you cannot unstretch a pod from the
+ current array and then restretch back to the current array.
+ - To restretch a pod you must perform this from the remaining array the pod
+ resides on.
+ type: str
+ failover:
+ description:
+    - The name of the array given priority to stay online if arrays lose
+      contact with each other.
+    - Options are either an array in the cluster, or I(auto)
+ type: list
+ elements: str
+ state:
+ description:
+ - Define whether the pod should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the pod on delete or leave in trash.
+ type: bool
+ default: false
+ target:
+ description:
+ - Name of clone target pod.
+ type: str
+ mediator:
+ description:
+ - Name of the mediator to use for a pod
+ type: str
+ default: purestorage
+ promote:
+ description:
+    - Promote/demote any pod not in a stretched relationship.
+ - Demoting a pod will render it read-only.
+ required: false
+ type: bool
+ quiesce:
+ description:
+ - Quiesce/Skip quiesce when I(promote) is false and demoting an ActiveDR pod.
+ - Quiesce will ensure all local data has been replicated before demotion.
+    - Skipping quiesce loses all pending data to be replicated to the remote pod.
+    - Can only demote the pod if it is in an Active DR replica link relationship.
+ - This will default to True
+ required: false
+ type: bool
+ undo:
+ description:
+ - Use the I(undo-remote) pod when I(promote) is true and promoting an ActiveDR pod.
+ - This will default to True
+ required: false
+ type: bool
+ quota:
+ description:
+ - Logical quota limit of the pod in K, M, G, T or P units, or bytes.
+ type: str
+ version_added: '1.18.0'
+ ignore_usage:
+ description:
+ - Flag used to override checks for quota management
+ operations.
+ - If set to true, pod usage is not checked against the
+ quota_limits that are set.
+ - If set to false, the actual logical bytes in use are prevented
+ from exceeding the limits set on the pod.
+ - Client operations might be impacted.
+ - If the limit exceeds the quota, the operation is not allowed.
+ default: false
+ type: bool
+ version_added: '1.18.0'
+
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new pod named foo
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate pod named foo
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Set failover array for pod named foo
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ failover:
+ - array1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set mediator for pod named foo
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ mediator: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Stretch a pod named foo to array2
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ stretch: array2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Unstretch a pod named foo from array2
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ stretch: array2
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create clone of pod foo named bar
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ target: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+
+POD_API_VERSION = "1.13"
+POD_QUOTA_VERSION = "2.23"
+
+
def human_to_bytes(size):
    """Convert a human-readable size string (e.g. "2G", "30M") to bytes.

    Accepts a suffix of K, M, G, T or P (case-insensitive, binary
    multiples) or a plain digit string meaning bytes - the ``quota``
    option documents both forms. Returns 0 when the argument has an
    unexpected form.

    Fixes: the original shadowed the builtin ``bytes``, raised IndexError
    on an empty string, and returned 0 for a documented plain-bytes value.
    """
    multipliers = {
        "P": 1125899906842624,
        "T": 1099511627776,
        "G": 1073741824,
        "M": 1048576,
        "K": 1024,
    }
    if not size:
        return 0
    if size.isdigit():
        # Plain byte count, no unit suffix.
        return int(size)
    digits, unit = size[:-1], size[-1].upper()
    if digits.isdigit() and unit in multipliers:
        return int(digits) * multipliers[unit]
    return 0
+
+
def get_pod(module, array):
    """Return the pod named in the task parameters, or None if absent."""
    try:
        pod = array.get_pod(module.params["name"])
    except Exception:
        pod = None
    return pod
+
+
def get_undo_pod(module, array):
    """Return the pending ".undo-demote" pod left by a demotion, or None."""
    undo_name = module.params["name"] + ".undo-demote"
    try:
        return array.get_pod(undo_name, pending_only=True)
    except Exception:
        return None
+
+
def get_target(module, array):
    """Return the clone-target pod, or None if it does not exist."""
    try:
        target = array.get_pod(module.params["target"])
    except Exception:
        target = None
    return target
+
+
def get_destroyed_pod(module, array):
    """Return True when the named pod exists in the destroyed state, else False."""
    try:
        pending = array.get_pod(module.params["name"], pending=True)
        # A destroyed pod reports a non-empty eradication countdown.
        return pending["time_remaining"] != ""
    except Exception:
        return False
+
+
def get_destroyed_target(module, array):
    """Return True when the target pod exists in the destroyed state, else False."""
    try:
        pending = array.get_pod(module.params["target"], pending=True)
        # A destroyed pod reports a non-empty eradication countdown.
        return pending["time_remaining"] != ""
    except Exception:
        return False
+
+
def check_arrays(module, array):
    """Check if array name provided are sync-replicated

    Fails the module when a requested failover or stretch array is not
    this array or one of its sync-replicated peers.
    """
    # Valid targets: this array plus every sync-replicated peer.
    good_arrays = [array.get()["array_name"]]
    for conn in array.list_array_connections():
        if conn["type"] == "sync-replication":
            good_arrays.append(conn["array_name"])
    failover = module.params["failover"]
    if failover is not None and failover != ["auto"]:
        # "auto" means no explicit failover preference - nothing to check.
        for arr_name in failover:
            if arr_name not in good_arrays:
                module.fail_json(
                    msg="Failover array {0} is not valid.".format(arr_name)
                )
    stretch = module.params["stretch"]
    if stretch is not None and stretch not in good_arrays:
        module.fail_json(
            msg="Stretch: Array {0} is not connected.".format(stretch)
        )
    return None
+
+
def create_pod(module, array):
    """Create a new pod, then apply optional mediator, stretch and quota settings.

    Exits the module via exit_json/fail_json; never returns normally to callers
    other than through module exit.
    """
    changed = True
    if module.params["target"]:
        # Cloning requires an existing source pod; we only reach here when
        # the named pod does not exist.
        module.fail_json(msg="Cannot clone non-existent pod.")
    if not module.check_mode:
        try:
            if module.params["failover"]:
                array.create_pod(
                    module.params["name"], failover_list=module.params["failover"]
                )
            else:
                array.create_pod(module.params["name"])
        except Exception:
            module.fail_json(
                msg="Pod {0} creation failed.".format(module.params["name"])
            )
        if module.params["mediator"] != "purestorage":
            try:
                array.set_pod(module.params["name"], mediator=module.params["mediator"])
            except Exception:
                # Mediator failure is non-fatal: the pod keeps the default mediator.
                module.warn(
                    "Failed to communicate with mediator {0}, using default value".format(
                        module.params["mediator"]
                    )
                )
        if module.params["stretch"]:
            current_array = array.get()["array_name"]
            if module.params["stretch"] != current_array:
                try:
                    # BUGFIX: was module.params["rrays"], a nonexistent key that
                    # raised KeyError and made stretch-at-creation always fail.
                    array.add_pod(module.params["name"], module.params["stretch"])
                except Exception:
                    module.fail_json(
                        msg="Failed to stretch pod {0} to array {1}.".format(
                            module.params["name"], module.params["stretch"]
                        )
                    )
        if module.params["quota"]:
            # Quota requires the v2 REST client (py-pure-client).
            arrayv6 = get_array(module)
            res = arrayv6.patch_pods(
                names=[module.params["name"]],
                pod=flasharray.PodPatch(
                    quota_limit=human_to_bytes(module.params["quota"])
                ),
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to apply quota to pod {0}. Error: {1}".format(
                        module.params["name"], res.errors[0].message
                    )
                )
    module.exit_json(changed=changed)
+
+
def clone_pod(module, array):
    """Clone the source pod into a new target pod.

    Fails when the target name exists as a destroyed pod; no-op (changed=False)
    when the target already exists live.
    """
    changed = False
    if get_target(module, array) is None:
        if get_destroyed_target(module, array):
            # A destroyed pod still owns the name until eradicated.
            module.fail_json(
                msg="Target pod {0} already exists but deleted.".format(
                    module.params["target"]
                )
            )
        changed = True
        if not module.check_mode:
            try:
                array.clone_pod(module.params["name"], module.params["target"])
            except Exception:
                module.fail_json(
                    msg="Clone pod {0} to pod {1} failed.".format(
                        module.params["name"], module.params["target"]
                    )
                )

    module.exit_json(changed=changed)
+
+
def update_pod(module, array):
    """Update Pod configuration

    Reconciles failover preference, mediator, promotion/demotion state and
    quota against the module parameters. Exits via module.exit_json.
    """
    changed = False
    current_config = array.get_pod(module.params["name"], failover_preference=True)
    if module.params["failover"]:
        current_failover = current_config["failover_preference"]
        # Order-insensitive comparison; an empty current preference always
        # counts as a difference when a preference was requested.
        if current_failover == [] or sorted(module.params["failover"]) != sorted(
            current_failover
        ):
            changed = True
            if not module.check_mode:
                try:
                    if module.params["failover"] == ["auto"]:
                        # "auto" means clear any explicit preference.
                        if current_failover != []:
                            array.set_pod(module.params["name"], failover_preference=[])
                    else:
                        array.set_pod(
                            module.params["name"],
                            failover_preference=module.params["failover"],
                        )
                except Exception:
                    module.fail_json(
                        msg="Failed to set failover preference for pod {0}.".format(
                            module.params["name"]
                        )
                    )
    # Re-fetch with mediator info; reused below for promotion checks too.
    current_config = array.get_pod(module.params["name"], mediator=True)
    if current_config["mediator"] != module.params["mediator"]:
        changed = True
        if not module.check_mode:
            try:
                array.set_pod(module.params["name"], mediator=module.params["mediator"])
            except Exception:
                # Mediator failure is non-fatal; keep the existing setting.
                module.warn(
                    "Failed to communicate with mediator {0}. Setting unchanged.".format(
                        module.params["mediator"]
                    )
                )
    if module.params["promote"] is not None:
        # Promotion/demotion is only valid for unstretched (single-array) pods.
        if len(current_config["arrays"]) > 1:
            module.fail_json(
                msg="Promotion/Demotion not permitted. Pod {0} is stretched".format(
                    module.params["name"]
                )
            )
        else:
            if (
                current_config["promotion_status"] == "demoted"
                and module.params["promote"]
            ):
                try:
                    # Default undo to True when the caller did not specify it.
                    if module.params["undo"] is None:
                        module.params["undo"] = True
                    # NOTE(review): this branch is only entered when
                    # promotion_status == "demoted", so the "quiescing" check
                    # below looks unreachable — confirm intended guard.
                    if current_config["promotion_status"] == "quiescing":
                        module.fail_json(
                            msg="Cannot promote pod {0} as it is still quiesing".format(
                                module.params["name"]
                            )
                        )
                    elif module.params["undo"]:
                        changed = True
                        if not module.check_mode:
                            if get_undo_pod(module, array):
                                # Restore from the preserved undo-demote snapshot.
                                array.promote_pod(
                                    module.params["name"],
                                    promote_from=module.params["name"] + ".undo-demote",
                                )
                            else:
                                array.promote_pod(module.params["name"])
                                # NOTE(review): this warn fires when NO undo-demote
                                # pod was found, yet warns one remains — verify.
                                module.warn(
                                    "undo-demote pod remaining for {0}. Consider eradicating this.".format(
                                        module.params["name"]
                                    )
                                )
                    else:
                        changed = True
                        if not module.check_mode:
                            array.promote_pod(module.params["name"])
                except Exception:
                    module.fail_json(
                        msg="Failed to promote pod {0}.".format(module.params["name"])
                    )
            elif (
                current_config["promotion_status"] != "demoted"
                and not module.params["promote"]
            ):
                try:
                    # A leftover undo-demote pod blocks a new demotion.
                    if get_undo_pod(module, array):
                        module.fail_json(
                            msg="Cannot demote pod {0} due to associated undo-demote pod not being eradicated".format(
                                module.params["name"]
                            )
                        )
                    # Default quiesce to True when the caller did not specify it.
                    if module.params["quiesce"] is None:
                        module.params["quiesce"] = True
                    if current_config["link_target_count"] == 0:
                        # No replica links: plain demote, quiescing irrelevant.
                        changed = True
                        if not module.check_mode:
                            array.demote_pod(module.params["name"])
                    elif not module.params["quiesce"]:
                        changed = True
                        if not module.check_mode:
                            array.demote_pod(module.params["name"], skip_quiesce=True)
                    else:
                        changed = True
                        if not module.check_mode:
                            array.demote_pod(module.params["name"], quiesce=True)
                except Exception:
                    module.fail_json(
                        msg="Failed to demote pod {0}.".format(module.params["name"])
                    )
    if module.params["quota"]:
        # Quota management needs the v2 REST client (py-pure-client).
        arrayv6 = get_array(module)
        current_pod = list(arrayv6.get_pods(names=[module.params["name"]]).items)[0]
        quota = human_to_bytes(module.params["quota"])
        if current_pod.quota_limit != quota:
            changed = True
            if not module.check_mode:
                res = arrayv6.patch_pods(
                    names=[module.params["name"]],
                    pod=flasharray.PodPatch(
                        quota_limit=quota, ignore_usage=module.params["ignore_usage"]
                    ),
                )
                if res.status_code != 200:
                    module.fail_json(
                        msg="Failed to update quota on pod {0}. Error: {1}".format(
                            module.params["name"], res.errors[0].message
                        )
                    )

    module.exit_json(changed=changed)
+
+
def stretch_pod(module, array):
    """Stretch a pod to, or unstretch it from, a peer array.

    state=present adds the named array to the pod; state=absent removes it.
    """
    changed = False
    config = array.get_pod(module.params["name"], failover_preference=True)
    stretch_target = module.params["stretch"]
    if stretch_target:
        member_arrays = [entry["name"] for entry in config["arrays"]]
        is_member = stretch_target in member_arrays
        if module.params["state"] == "present" and not is_member:
            changed = True
            if not module.check_mode:
                try:
                    array.add_pod(module.params["name"], stretch_target)
                except Exception:
                    module.fail_json(
                        msg="Failed to stretch pod {0} to array {1}.".format(
                            module.params["name"], stretch_target
                        )
                    )

        if module.params["state"] == "absent" and is_member:
            changed = True
            if not module.check_mode:
                try:
                    array.remove_pod(module.params["name"], stretch_target)
                except Exception:
                    module.fail_json(
                        msg="Failed to unstretch pod {0} from array {1}.".format(
                            module.params["name"], stretch_target
                        )
                    )

    module.exit_json(changed=changed)
+
+
def delete_pod(module, array):
    """Move a pod to the destroyed state, optionally eradicating it as well."""
    changed = True
    if not module.check_mode:
        pod_name = module.params["name"]
        try:
            array.destroy_pod(pod_name)
            if module.params["eradicate"]:
                try:
                    array.eradicate_pod(pod_name)
                except Exception:
                    module.fail_json(
                        msg="Eradicate pod {0} failed.".format(pod_name)
                    )
        except Exception:
            module.fail_json(msg="Delete pod {0} failed.".format(pod_name))
    module.exit_json(changed=changed)
+
+
def eradicate_pod(module, array):
    """Permanently remove a destroyed pod when eradicate is requested."""
    changed = True
    if not module.check_mode and module.params["eradicate"]:
        try:
            array.eradicate_pod(module.params["name"])
        except Exception:
            module.fail_json(
                msg="Eradication of pod {0} failed".format(module.params["name"])
            )
    module.exit_json(changed=changed)
+
+
def recover_pod(module, array):
    """Restore a destroyed pod back to the live state."""
    changed = True
    if not module.check_mode:
        try:
            array.recover_pod(module.params["name"])
        except Exception:
            module.fail_json(
                msg="Recovery of pod {0} failed".format(module.params["name"])
            )
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: validate parameters and dispatch to a pod handler."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            stretch=dict(type="str"),
            target=dict(type="str"),
            mediator=dict(type="str", default="purestorage"),
            failover=dict(type="list", elements="str"),
            promote=dict(type="bool"),
            undo=dict(type="bool"),
            quiesce=dict(type="bool"),
            eradicate=dict(type="bool", default=False),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            quota=dict(type="str"),
            ignore_usage=dict(type="bool", default=False),
        )
    )

    mutually_exclusive = [
        ["stretch", "failover"],
        ["stretch", "eradicate"],
        ["stretch", "mediator"],
        ["target", "mediator"],
        ["target", "stretch"],
        ["target", "failover"],
        ["target", "eradicate"],
    ]

    module = AnsibleModule(
        argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True
    )

    state = module.params["state"]
    array = get_system(module)

    api_version = array._list_available_rest_versions()
    if POD_API_VERSION not in api_version:
        module.fail_json(
            msg="FlashArray REST version not supported. "
            "Minimum version required: {0}".format(POD_API_VERSION)
        )

    if module.params["quota"] and POD_QUOTA_VERSION in api_version:
        if not HAS_PURESTORAGE:
            # BUGFIX: message previously referred to a nonexistent 'count'
            # parameter; the guarded parameter is 'quota'.
            module.fail_json(
                msg="py-pure-client sdk is required to support 'quota' parameter"
            )
    pod = get_pod(module, array)
    destroyed = ""
    if not pod:
        destroyed = get_destroyed_pod(module, array)
    # The previous guard (failover truthy OR failover != "auto") was always
    # true, since failover is a list or None; validate unconditionally.
    check_arrays(module, array)

    # Destroyed-pod states are checked first: a destroyed pod makes `pod`
    # falsy, so the create/absent branches below would otherwise shadow
    # recover/eradicate and those branches could never run.
    if state == "present" and destroyed:
        recover_pod(module, array)
    elif state == "absent" and destroyed:
        eradicate_pod(module, array)
    elif state == "present" and not pod:
        create_pod(module, array)
    elif pod and module.params["stretch"]:
        stretch_pod(module, array)
    elif state == "present" and pod and module.params["target"]:
        clone_pod(module, array)
    elif state == "present" and pod:
        update_pod(module, array)
    elif state == "absent" and pod and not module.params["stretch"]:
        delete_pod(module, array)
    elif state == "absent" and not pod:
        module.exit_json(changed=False)


diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py
new file mode 100644
index 000000000..87ace4eb3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefa_pod_replica
+short_description: Manage ActiveDR pod replica links between Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+ - This module manages ActiveDR pod replica links between Pure Storage FlashArrays.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - ActiveDR source pod name
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or modifies a pod replica link
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ target_array:
+ description:
+ - Remote array name to create replica on.
+ required: false
+ type: str
+ target_pod:
+ description:
+ - Name of target pod
+ - Must not be the same as the local pod.
+ type: str
+ required: false
+ pause:
+ description:
+ - Pause/unpause a pod replica link
+ required: false
+ type: bool
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = """
+- name: Create new pod replica link from foo to bar on arrayB
+ purestorage.flasharray.purefa_pod_replica:
+ name: foo
+ target_array: arrayB
+ target_pod: bar
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
- name: Pause a pod replica link
+ purestorage.flasharray.purefa_pod_replica:
+ name: foo
+ pause: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
- name: Delete pod replica link
  purestorage.flasharray.purefa_pod_replica:
    name: foo
    state: absent
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
"""
+"""
+
+RETURN = """
+"""
+
+MIN_REQUIRED_API_VERSION = "1.19"
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
def get_local_pod(module, array):
    """Look up the local pod named in the module params; None when absent."""
    pod_name = module.params["name"]
    try:
        return array.get_pod(pod_name)
    except Exception:
        return None
+
+
def get_local_rl(module, array):
    """Return the replica link whose local pod matches the module params, or None."""
    try:
        for link in array.list_pod_replica_links():
            if link["local_pod_name"] == module.params["name"]:
                return link
        return None
    except Exception:
        return None
+
+
+def _get_arrays(array):
+ """Get Connected Arrays"""
+ arrays = []
+ array_details = array.list_array_connections()
+ for arraycnt in range(0, len(array_details)):
+ arrays.append(array_details[arraycnt]["array_name"])
+ return arrays
+
+
def update_rl(module, array, local_rl):
    """Pause or resume an existing pod replica link according to the pause param."""
    changed = False
    pause = module.params["pause"]
    if pause is not None:
        currently_paused = local_rl["status"] == "paused"
        if pause and not currently_paused:
            changed = True
            if not module.check_mode:
                try:
                    array.pause_pod_replica_link(
                        local_pod_name=module.params["name"],
                        remote_pod_name=local_rl["remote_pod_name"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to pause replica link {0}.".format(
                            module.params["name"]
                        )
                    )
        elif currently_paused and not pause:
            changed = True
            if not module.check_mode:
                try:
                    array.resume_pod_replica_link(
                        local_pod_name=module.params["name"],
                        remote_pod_name=local_rl["remote_pod_name"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to resume replica link {0}.".format(
                            module.params["name"]
                        )
                    )
    module.exit_json(changed=changed)
+
+
def create_rl(module, array):
    """Create a pod replica link from the local pod to a connected remote array."""
    changed = True
    if not module.params["target_pod"]:
        module.fail_json(msg="target_pod required to create a new replica link.")
    if not module.params["target_array"]:
        module.fail_json(msg="target_array required to create a new replica link.")
    try:
        connections = array.list_array_connections()
        if connections == []:
            module.fail_json(msg="No connected arrays.")
        else:
            # The target must be a connection in a usable state.
            usable_states = ("connected", "connecting", "partially_connected")
            target_ok = False
            for conn in connections:
                if (
                    conn["array_name"] == module.params["target_array"]
                    and conn["status"] in usable_states
                ):
                    target_ok = True
                    break
            if not target_ok:
                module.fail_json(
                    msg="Target array {0} is not connected to the source array.".format(
                        module.params["target_array"]
                    )
                )
            else:
                if not module.check_mode:
                    try:
                        array.create_pod_replica_link(
                            local_pod_name=module.params["name"],
                            remote_name=module.params["target_array"],
                            remote_pod_name=module.params["target_pod"],
                        )
                    except Exception:
                        module.fail_json(
                            msg="Failed to create replica link {0} to target array {1}".format(
                                module.params["name"], module.params["target_array"]
                            )
                        )
    except Exception:
        module.fail_json(
            msg="Failed to create replica link for pod {0}.".format(
                module.params["name"]
            )
        )
    module.exit_json(changed=changed)
+
+
def delete_rl(module, array, local_rl):
    """Remove the replica link attached to the local pod."""
    changed = True
    if not module.check_mode:
        pod_name = module.params["name"]
        try:
            array.delete_pod_replica_link(
                pod_name, remote_pod_name=local_rl["remote_pod_name"]
            )
        except Exception:
            module.fail_json(
                msg="Failed to delete replica link for pod {0}.".format(pod_name)
            )
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: create, pause/resume or delete a pod replica link."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            target_pod=dict(type="str"),
            target_array=dict(type="str"),
            pause=dict(type="bool"),
            state=dict(default="present", choices=["present", "absent"]),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    state = module.params["state"]
    array = get_system(module)
    api_version = array._list_available_rest_versions()

    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(msg="Purity v6.0.0 or higher required.")

    local_pod = get_local_pod(module, array)
    local_replica_link = get_local_rl(module, array)

    if not local_pod:
        module.fail_json(
            msg="Selected local pod {0} does not exist.".format(module.params["name"])
        )

    # Replica links are only supported on unstretched pods.
    if len(local_pod["arrays"]) > 1:
        module.fail_json(
            msg="Local Pod {0} is already stretched.".format(module.params["name"])
        )

    if local_replica_link:
        if local_replica_link["status"] == "unhealthy":
            # BUGFIX: user-facing message misspelled "Replica".
            module.fail_json(msg="Replica Link unhealthy - please check remote array")
    if state == "present" and not local_replica_link:
        create_rl(module, array)
    elif state == "present" and local_replica_link:
        update_rl(module, array, local_replica_link)
    elif state == "absent" and local_replica_link:
        delete_rl(module, array, local_replica_link)

    # Only reached when no handler exited: state=absent with no link present.
    module.exit_json(changed=False)


diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py
new file mode 100644
index 000000000..37017e4df
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py
@@ -0,0 +1,1606 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_policy
+version_added: '1.5.0'
+short_description: Manage FlashArray File System Policies
+description:
+- Manage FlashArray file system policies for NFS, SMB and snapshot
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the policy
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the policy should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ policy:
+ description:
+ - The type of policy to use
+ choices: [ nfs, smb, snapshot, quota ]
+ required: true
+ type: str
+ enabled:
+ description:
+ - Define if policy is enabled or not
+ type: bool
+ default: true
+ smb_anon_allowed:
+ description:
+ - Specifies whether access to information is allowed for anonymous users
+ type: bool
+ default: false
+ client:
+ description:
+ - Specifies which SMB or NFS clients are given access
+ - Accepted notation, IP, IP mask, or hostname
+ type: str
+ smb_encrypt:
+ description:
+ - Specifies whether the remote client is required to use SMB encryption
+ type: bool
+ default: false
+ nfs_access:
+ description:
+ - Specifies access control for the export
+ choices: [ root-squash, no-root-squash, all-squash ]
+ type: str
+ default: no-root-squash
+ nfs_permission:
+ description:
+ - Specifies which read-write client access permissions are allowed for the export
+ choices: [ ro, rw ]
+ default: rw
+ type: str
+ user_mapping:
+ description:
+ - Defines if user mapping is enabled
+ type: bool
+ version_added: 1.14.0
+ snap_at:
+ description:
+ - Specifies the number of hours since midnight at which to take a snapshot
+ or the hour including AM/PM
+ - Can only be set on the rule with the smallest I(snap_every) value.
+ - Cannot be set if the I(snap_every) value is not measured in days.
+ - Can only be set for at most one rule in the same policy.
+ type: str
+ snap_every:
+ description:
+ - Specifies the interval between snapshots, in minutes.
+ - The value for all rules must be multiples of one another.
+ - Must be unique for each rule in the same policy.
+ - Value must be between 5 and 525600.
+ type: int
+ snap_keep_for:
+ description:
+ - Specifies the period that snapshots are retained before they are eradicated, in minutes.
+ - Cannot be less than the I(snap_every) value of the rule.
+ - Value must be unique for each rule in the same policy.
+ - Value must be between 5 and 525600.
+ type: int
+ snap_client_name:
+ description:
+ - The customizable portion of the client visible snapshot name.
+ type: str
+ snap_suffix:
+ description:
+ - The snapshot suffix name
+ - The suffix value can only be set for one rule in the same policy
+ - The suffix value can only be set on a rule with the same ``keep_for`` value and ``every`` value
+ - The suffix value can only be set on the rule with the largest ``keep_for`` value
+ - If not specified, defaults to a monotonically increasing number generated by the system.
+ type: str
+ version_added: 1.10.0
+ rename:
+ description:
+ - New name of policy
+ type: str
+ directory:
+ description:
+ - Directories to have the quota rule applied to.
+ type: list
+ elements: str
+ version_added: 1.9.0
+ quota_limit:
+ description:
+ - Logical space limit of the share in M, G, T or P units. See examples.
+ - If size is not set at filesystem creation time the filesystem size becomes unlimited.
+ - This value cannot be set to 0.
+ type: str
+ version_added: 1.9.0
+ quota_notifications:
+ description:
+ - Targets to notify when usage approaches the quota limit.
+ - The list of notification targets is a comma-separated string
+ - If not specified, notification targets are not assigned.
+ type: list
+ elements: str
+ choices: [ user, group ]
+ version_added: 1.9.0
+ quota_enforced:
+ description:
+ - Defines if the directory quota is enforced.
+ default: true
+ type: bool
+ ignore_usage:
+ description:
+ - Flag used to override checks for quota management
+ operations.
+ - If set to true, directory usage is not checked against the
+ quota_limits that are set.
+ - If set to false, the actual logical bytes in use are prevented
+ from exceeding the limits set on the directory.
+ - Client operations might be impacted.
+ - If the limit exceeds the quota, the client operation is not allowed.
+ default: false
+ type: bool
+ version_added: 1.9.0
+ anonuid:
+ description:
+ - The ID to which any users whose UID is affected by I(access) of
+ I(root-squash) or I(all-squash) will be mapped to.
+ - Clear using "".
+ type: str
+ default: "65534"
+ version_added: 1.14.0
+ anongid:
+ description:
+ - The ID to which any users whose GID is affected by I(access) of
+ I(root-squash) or I(all-squash) will be mapped to.
+ - This is ignored when I(user_mapping) is enabled.
+ - Clear using "".
+ type: str
+ default: "65534"
+ version_added: 1.14.0
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create an NFS policy with initial rule
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ nfs_access: root-squash
+ nfs_permission: ro
+ client: client1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create an empty NFS policy with no rules
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create an empty snapshot policy with no rules
+ purestorage.flasharray.purefa_policy:
+ name: snap1
+ policy: snapshot
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create an empty snapshot policy with single directory member
+ purestorage.flasharray.purefa_policy:
+ name: snap1
+ policy: snapshot
+ directory: "foo:bar"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable a policy
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add rule to existing NFS export policy
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ nfs_access: root-squash
+ nfs_permission: ro
+ client: client2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add rule to existing SMB export policy
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: smb
+ smb_encrypt: true
+ smb_anon_allowed: false
+ client: client1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add non-suffix rule to existing snapshot export policy
+ purestorage.flasharray.purefa_policy:
+ name: snap1
+ policy: snapshot
+ snap_client_name: foo
+ snap_every: 15
+ snap_keep_for: 1440
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add suffix rule to existing snapshot export policy
+ purestorage.flasharray.purefa_policy:
+ name: snap1
+ policy: snapshot
+ snap_client_name: foo
+ snap_suffix: bar
+ snap_every: 1440
+ snap_keep_for: 1440
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete policy rule for a client
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ client: client2
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete policy
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory quota policy for directory bar
+ purestorage.flasharray.purefa_policy:
+ name: foo
+ directory:
+ - "foo:root"
+ - "bar:bin"
+ policy: quota
+ quota_limit: 10G
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete directory quota policy foo
+ purestorage.flasharray.purefa_policy:
+ name: foo
+ policy: quota
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create empty directory quota policy foo
+ purestorage.flasharray.purefa_policy:
+ name: foo
+ policy: quota
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Detach directory "foo:bar" from quota policy quota1
+ purestorage.flasharray.purefa_policy:
+ name: quota1
+ directory:
+ - "foo:bar"
+ state: absent
+ policy: quota
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Remove quota rule from quota policy foo
+ purestorage.flasharray.purefa_policy:
+ name: foo
+ policy: quota
+ quota_limit: 10G
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PACKAGING = True
+try:
+ from packaging import version
+except ImportError:
+ HAS_PACKAGING = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.3"
+MIN_QUOTA_API_VERSION = "2.7"
+MIN_SUFFIX_API_VERSION = "2.9"
+USER_MAP_VERSION = "2.15"
+ALL_SQUASH_VERSION = "2.16"
+
+
+def _human_to_bytes(size):
+ """Given a human-readable byte string (e.g. 2G, 30M),
+ return the number of bytes. Will return 0 if the argument has
+ unexpected form.
+ """
+ bytes = size[:-1]
+ unit = size[-1].upper()
+ if bytes.isdigit():
+ bytes = int(bytes)
+ if unit == "P":
+ bytes *= 1125899906842624
+ elif unit == "T":
+ bytes *= 1099511627776
+ elif unit == "G":
+ bytes *= 1073741824
+ elif unit == "M":
+ bytes *= 1048576
+ elif unit == "K":
+ bytes *= 1024
+ else:
+ bytes = 0
+ else:
+ bytes = 0
+ return bytes
+
+
+def _convert_to_millisecs(hour):
+ if hour[-2:].upper() == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:].upper() == "AM":
+ return int(hour[:-2]) * 3600000
+ elif hour[-2:].upper() == "PM" and hour[:2] == "12":
+ return 43200000
+ return (int(hour[:-2]) + 12) * 3600000
+
+
def rename_policy(module, array):
    """Rename a file system policy.

    Fails when a policy with the target name already exists. Dispatches to the
    patch endpoint matching the policy type (nfs/smb/snapshot/quota).
    """
    target_exists = bool(
        array.get_policies(names=[module.params["rename"]]).status_code == 200
    )
    if target_exists:
        module.fail_json(
            msg="Rename failed - Target policy {0} already exists".format(
                module.params["rename"]
            )
        )
    # BUGFIX: report the rename in check mode too. Previously `changed` was
    # only set inside the not-check_mode branch, so check mode always
    # reported changed=False, unlike every other handler in these modules.
    changed = True
    if not module.check_mode:
        # (label for the error message, patch call) per policy type;
        # anything other than nfs/smb/snapshot falls through to quota,
        # matching the original if/elif/else chain.
        renamers = {
            "nfs": ("NFS", array.patch_policies_nfs),
            "smb": ("SMB", array.patch_policies_smb),
            "snapshot": ("snapshot", array.patch_policies_snapshot),
        }
        label, patch_call = renamers.get(
            module.params["policy"], ("quota", array.patch_policies_quota)
        )
        res = patch_call(
            names=[module.params["name"]],
            policy=flasharray.PolicyPatch(name=module.params["rename"]),
        )
        if res.status_code != 200:
            module.fail_json(
                msg="Failed to rename {0} policy {1} to {2}".format(
                    label, module.params["name"], module.params["rename"]
                )
            )
    module.exit_json(changed=changed)
+
+
+def delete_policy(module, array):
+ """Delete a file system policy or rule within a policy"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ if module.params["policy"] == "nfs":
+ if not module.params["client"]:
+ res = array.delete_policies_nfs(names=[module.params["name"]])
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Deletion of NFS policy {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ rules = list(
+ array.get_policies_nfs_client_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client == module.params["client"]:
+ rule_name = rules[rule].name
+ break
+ if rule_name:
+ deleted = bool(
+ array.delete_policies_nfs_client_rules(
+ policy_names=[module.params["name"]], names=[rule_name]
+ ).status_code
+ == 200
+ )
+ if deleted:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Failed to delete client {0} from NFS policy {1}. Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ deleted.errors[0].message,
+ )
+ )
+ elif module.params["policy"] == "smb":
+ if not module.params["client"]:
+ res = array.delete_policies_smb(names=[module.params["name"]])
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Deletion of SMB policy {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ rules = list(
+ array.get_policies_smb_client_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client == module.params["client"]:
+ rule_name = rules[rule].name
+ break
+ if rule_name:
+ deleted = bool(
+ array.delete_policies_smb_client_rules(
+ policy_names=[module.params["name"]], names=[rule_name]
+ ).status_code
+ == 200
+ )
+ if deleted:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Failed to delete client {0} from SMB policy {1}. Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ deleted.errors[0].message,
+ )
+ )
+ elif module.params["policy"] == "snapshot":
+ if not module.params["snap_client_name"] and not module.params["directory"]:
+ res = array.delete_policies_snapshot(names=[module.params["name"]])
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Deletion of Snapshot policy {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if module.params["directory"]:
+ dirs = []
+ old_dirs = []
+ current_dirs = list(
+ array.get_directories_policies_snapshot(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if current_dirs:
+ for current_dir in range(0, len(current_dirs)):
+ dirs.append(current_dirs[current_dir].member.name)
+ for old_dir in range(0, len(module.params["directory"])):
+ if module.params["directory"][old_dir] in dirs:
+ old_dirs.append(module.params["directory"][old_dir])
+ else:
+ old_dirs = module.params["directory"]
+ if old_dirs:
+ changed = True
+ for rem_dir in range(0, len(old_dirs)):
+ if not module.check_mode:
+ directory_removed = (
+ array.delete_directories_policies_snapshot(
+ member_names=[old_dirs[rem_dir]],
+ policy_names=module.params["name"],
+ )
+ )
+ if directory_removed.status_code != 200:
+ module.fail_json(
+ msg="Failed to remove directory from Snapshot policy {0}. Error: {1}".format(
+ module.params["name"],
+ directory_removed.errors[0].message,
+ )
+ )
+ if module.params["snap_client_name"]:
+ rules = list(
+ array.get_policies_snapshot_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client_name == module.params["snap_client_name"]:
+ rule_name = rules[rule].name
+ break
+ if rule_name:
+ deleted = bool(
+ array.delete_policies_snapshot_rules(
+ policy_names=[module.params["name"]], names=[rule_name]
+ ).status_code
+ == 200
+ )
+ if deleted:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Failed to delete client {0} from Snapshot policy {1}. Error: {2}".format(
+ module.params["snap_client_name"],
+ module.params["name"],
+ deleted.errors[0].message,
+ )
+ )
+ else:
+ if module.params["quota_limit"]:
+ quota_limit = _human_to_bytes(module.params["quota_limit"])
+ rules = list(
+ array.get_policies_quota_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ for rule in range(0, len(rules)):
+ if rules[rule].quota_limit == quota_limit:
+ if (
+ module.params["quota_enforced"] == rules[rule].enforced
+ and ",".join(module.params["quota_notifications"])
+ == rules[rule].notifications
+ ):
+ res = array.delete_policies_quota_rules(
+ policy_names=[module.params["name"]],
+ names=[rules[rule].name],
+ )
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Deletion of Quota rule failed. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ if module.params["directory"]:
+ members = list(
+ array.get_policies_quota_members(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if members:
+ for member in range(0, len(members)):
+ if members[member].member.name in module.params["directory"]:
+ res = array.delete_policies_quota_members(
+ policy_names=[module.params["name"]],
+ member_names=[members[member].member.name],
+ member_types="directories",
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Deletion of Quota member {0} from policy {1}. Error: {2}".format(
+ members[member].member.name,
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ changed = True
+ if not module.params["quota_limit"] and not module.params["directory"]:
+ members = list(
+ array.get_policies_quota_members(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if members:
+ member_names = []
+ for member in range(0, len(members)):
+ member_names.append(members[member].member.name)
+ res = array.delete_policies_quota_members(
+ policy_names=[module.params["name"]],
+ member_names=member_names,
+ member_types="directories",
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Deletion of Quota members {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ res = array.delete_policies_quota(names=[module.params["name"]])
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Deletion of Quota policy {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_policy(module, array, all_squash):
+ """Create a file system export"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ if module.params["policy"] == "nfs":
+ created = array.post_policies_nfs(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
+ )
+
+ if created.status_code == 200:
+ policy = flasharray.PolicyNfsPost(
+ user_mapping_enabled=module.params["user_mapping"],
+ )
+ res = array.patch_policies_nfs(
+ names=[module.params["name"]], policy=policy
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set NFS policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if module.params["client"]:
+ if all_squash:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ access=module.params["nfs_access"],
+ anongid=module.params["anongid"],
+ anonuid=module.params["anonuid"],
+ client=module.params["client"],
+ permission=module.params["nfs_permission"],
+ )
+ else:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ access=module.params["nfs_access"],
+ client=module.params["client"],
+ permission=module.params["nfs_permission"],
+ )
+ rule = flasharray.PolicyRuleNfsClientPost(rules=[rules])
+ rule_created = array.post_policies_nfs_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for NFS policy {0}. Error: {1}".format(
+ module.params["name"], rule_created.errors[0].message
+ )
+ )
+ changed = True
+ else:
+ module.fail_json(
+ msg="Failed to create NFS policy {0}. Error: {1}".format(
+ module.params["name"], created.errors[0].message
+ )
+ )
+ elif module.params["policy"] == "smb":
+ created = array.post_policies_smb(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
+ )
+ if created.status_code == 200:
+ changed = True
+ if module.params["client"]:
+ rules = flasharray.PolicyrulesmbclientpostRules(
+ anonymous_access_allowed=module.params["smb_anon_allowed"],
+ client=module.params["client"],
+ smb_encryption_required=module.params["smb_encrypt"],
+ )
+ rule = flasharray.PolicyRuleSmbClientPost(rules=[rules])
+ rule_created = array.post_policies_smb_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for SMB policy {0}. Error: {1}".format(
+ module.params["name"], rule_created.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to create SMB policy {0}. Error: {1}".format(
+ module.params["name"], created.errors[0].message
+ )
+ )
+ elif module.params["policy"] == "snapshot":
+ if HAS_PACKAGING:
+ suffix_enabled = version.parse(
+ array.get_rest_version()
+ ) >= version.parse(MIN_SUFFIX_API_VERSION)
+ else:
+ suffix_enabled = False
+ created = array.post_policies_snapshot(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
+ )
+ if created.status_code == 200:
+ changed = True
+ if module.params["snap_client_name"]:
+ if module.params["snap_keep_for"] < module.params["snap_every"]:
+ module.fail_json(
+ msg="Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every)."
+ )
+ if module.params["snap_at"]:
+ if not module.params["snap_every"] % 1440 == 0:
+ module.fail_json(
+ msg="snap_at time can only be set if snap_every is multiple of 1440"
+ )
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
+ else:
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
+ rule = flasharray.PolicyRuleSnapshotPost(rules=[rules])
+ rule_created = array.post_policies_snapshot_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for Snapshot policy {0}. Error: {1}".format(
+ module.params["name"], rule_created.errors[0].message
+ )
+ )
+ if module.params["directory"]:
+ policies = flasharray.DirectoryPolicyPost(
+ policies=[
+ flasharray.DirectorypolicypostPolicies(
+ policy=flasharray.Reference(name=module.params["name"])
+ )
+ ]
+ )
+ directory_added = array.post_directories_policies_snapshot(
+ member_names=module.params["directory"], policies=policies
+ )
+ if directory_added.status_code != 200:
+ module.fail_json(
+ msg="Failed to add directory for Snapshot policy {0}. Error: {1}".format(
+ module.params["name"],
+ directory_added.errors[0].message,
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to create Snapshot policy {0}. Error: {1}".format(
+ module.params["name"], created.errors[0].message
+ )
+ )
+ else:
+ created = array.post_policies_quota(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
+ )
+ if created.status_code == 200:
+ changed = True
+ if module.params["quota_limit"]:
+ quota = _human_to_bytes(module.params["quota_limit"])
+ rules = flasharray.PolicyrulequotapostRules(
+ enforced=module.params["quota_enforced"],
+ quota_limit=quota,
+ notifications=",".join(module.params["quota_notifications"]),
+ )
+ rule = flasharray.PolicyRuleQuotaPost(rules=[rules])
+ quota_created = array.post_policies_quota_rules(
+ policy_names=[module.params["name"]],
+ rules=rule,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if quota_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for Quota policy {0}. Error: {1}".format(
+ module.params["name"], quota_created.errors[0].message
+ )
+ )
+ if module.params["directory"]:
+ members = []
+ for mem in range(0, len(module.params["directory"])):
+ members.append(
+ flasharray.PolicymemberpostMembers(
+ member=flasharray.ReferenceWithType(
+ name=module.params["directory"][mem],
+ resource_type="directories",
+ )
+ )
+ )
+ member = flasharray.PolicyMemberPost(members=members)
+ members_created = array.post_policies_quota_members(
+ policy_names=[module.params["name"]],
+ members=member,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if members_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to add members to Quota policy {0}. Error: {1}".format(
+ module.params["name"],
+ members_created.errors[0].message,
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to create Quota policy {0}. Error: {1}".format(
+ module.params["name"], created.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_policy(module, array, api_version, all_squash):
+ """Update an existing policy including add/remove rules"""
+ changed = (
+ changed_dir
+ ) = (
+ changed_rule
+ ) = changed_enable = changed_quota = changed_member = changed_user_map = False
+ if module.params["policy"] == "nfs":
+ try:
+ current_enabled = list(
+ array.get_policies_nfs(names=[module.params["name"]]).items
+ )[0].enabled
+ if USER_MAP_VERSION in api_version:
+ current_user_map = list(
+ array.get_policies_nfs(names=[module.params["name"]]).items
+ )[0].user_mapping_enabled
+ except Exception:
+ module.fail_json(
+ msg="Incorrect policy type specified for existing policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if (
+ module.params["user_mapping"]
+ and current_user_map != module.params["user_mapping"]
+ ):
+ changed_user_map = True
+ if not module.check_mode:
+ res = array.patch_policies_nfs(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyNfsPatch(
+ user_mapping_enabled=module.params["user_mapping"]
+ ),
+ )
+ if res.status_code != 200:
+ module.exit_json(
+ msg="Failed to enable/disable User Mapping for NFS policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if current_enabled != module.params["enabled"]:
+ changed_enable = True
+ if not module.check_mode:
+ res = array.patch_policies_nfs(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(enabled=module.params["enabled"]),
+ )
+ if res.status_code != 200:
+ module.exit_json(
+ msg="Failed to enable/disable NFS policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if module.params["client"]:
+ rules = list(
+ array.get_policies_nfs_client_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client == module.params["client"]:
+ rule_name = rules[rule].name
+ break
+ if not rule_name:
+ if all_squash:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ anongid=module.params["anongid"],
+ anonuid=module.params["anonuid"],
+ access=module.params["nfs_access"],
+ )
+ else:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ access=module.params["nfs_access"],
+ )
+ rule = flasharray.PolicyRuleNfsClientPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_nfs_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create new rule for NFS policy {0}. Error: {1}".format(
+ module.params["name"],
+ rule_created.errors[0].message,
+ )
+ )
+ else:
+ if all_squash:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ anongid=module.params["anongid"],
+ anonuid=module.params["anonuid"],
+ client=module.params["client"],
+ access=module.params["nfs_access"],
+ )
+ else:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ access=module.params["nfs_access"],
+ )
+ rule = flasharray.PolicyRuleNfsClientPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_nfs_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create new rule for SMB policy {0}. Error: {1}".format(
+ module.params["name"], rule_created.errors[0].message
+ )
+ )
+ elif module.params["policy"] == "smb":
+ try:
+ current_enabled = list(
+ array.get_policies_smb(names=[module.params["name"]]).items
+ )[0].enabled
+ except Exception:
+ module.fail_json(
+ msg="Incorrect policy type specified for existing policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if current_enabled != module.params["enabled"]:
+ changed_enable = True
+ if not module.check_mode:
+ res = array.patch_policies_smb(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(enabled=module.params["enabled"]),
+ )
+ if res.status_code != 200:
+ module.exit_json(
+ msg="Failed to enable/disable SMB policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if module.params["client"]:
+ rules = list(
+ array.get_policies_smb_client_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client == module.params["client"]:
+ rule_name = rules[rule].name
+ break
+ if not rule_name:
+ rules = flasharray.PolicyrulesmbclientpostRules(
+ anonymous_access_allowed=module.params["smb_anon_allowed"],
+ client=module.params["client"],
+ smb_encryption_required=module.params["smb_encrypt"],
+ )
+ rule = flasharray.PolicyRuleSmbClientPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_smb_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create new rule for SMB policy {0}. Error: {1}".format(
+ module.params["name"],
+ rule_created.errors[0].message,
+ )
+ )
+ else:
+ rules = flasharray.PolicyrulesmbclientpostRules(
+ anonymous_access_allowed=module.params["smb_anon_allowed"],
+ client=module.params["client"],
+ smb_encryption_required=module.params["smb_encrypt"],
+ )
+ rule = flasharray.PolicyRuleSmbClientPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_smb_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create new rule for SMB policy {0}. Error: {1}".format(
+ module.params["name"], rule_created.errors[0].message
+ )
+ )
+ elif module.params["policy"] == "snapshot":
+ if HAS_PACKAGING:
+ suffix_enabled = version.parse(array.get_rest_version()) >= version.parse(
+ MIN_SUFFIX_API_VERSION
+ )
+ else:
+ suffix_enabled = False
+ try:
+ current_enabled = list(
+ array.get_policies_snapshot(names=[module.params["name"]]).items
+ )[0].enabled
+ except Exception:
+ module.fail_json(
+ msg="Incorrect policy type specified for existing policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if current_enabled != module.params["enabled"]:
+ changed_enable = True
+ if not module.check_mode:
+ res = array.patch_policies_snapshot(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(enabled=module.params["enabled"]),
+ )
+ if res.status_code != 200:
+ module.exit_json(
+ msg="Failed to enable/disable snapshot policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if module.params["directory"]:
+ dirs = []
+ new_dirs = []
+ current_dirs = list(
+ array.get_directories_policies_snapshot(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if current_dirs:
+ for current_dir in range(0, len(current_dirs)):
+ dirs.append(current_dirs[current_dir].member.name)
+ for new_dir in range(0, len(module.params["directory"])):
+ if module.params["directory"][new_dir] not in dirs:
+ changed_dir = True
+ new_dirs.append(module.params["directory"][new_dir])
+ else:
+ new_dirs = module.params["directory"]
+ if new_dirs:
+ policies = flasharray.DirectoryPolicyPost(
+ policies=[
+ flasharray.DirectorypolicypostPolicies(
+ policy=flasharray.Reference(name=module.params["name"])
+ )
+ ]
+ )
+ changed_dir = True
+ for add_dir in range(0, len(new_dirs)):
+ if not module.check_mode:
+ directory_added = array.post_directories_policies_snapshot(
+ member_names=[new_dirs[add_dir]], policies=policies
+ )
+ if directory_added.status_code != 200:
+ module.fail_json(
+ msg="Failed to add new directory to Snapshot policy {0}. Error: {1}".format(
+ module.params["name"],
+ directory_added.errors[0].message,
+ )
+ )
+ if module.params["snap_client_name"]:
+ if module.params["snap_at"]:
+ if not module.params["snap_every"] % 1440 == 0:
+ module.fail_json(
+ msg="snap_at time can only be set if snap_every is multiple of 1440"
+ )
+ if module.params["snap_keep_for"] < module.params["snap_every"]:
+ module.fail_json(
+ msg="Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every)."
+ )
+ if (
+ module.params["snap_keep_for"] != module.params["snap_every"]
+ and module.params["snap_suffix"]
+ ):
+ module.fail_json(
+ msg="Suffix (snap_suufix) can only be applied when `snap_keep_for` and `snap_every` are equal."
+ )
+ rules = list(
+ array.get_policies_snapshot_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client_name == module.params["snap_client_name"]:
+ rule_name = rules[rule].name
+ break
+ if not rule_name:
+ if module.params["snap_keep_for"] < module.params["snap_every"]:
+ module.fail_json(
+ msg="Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every)."
+ )
+ if module.params["snap_at"]:
+ if not module.params["snap_every"] % 1440 == 0:
+ module.fail_json(
+ msg="snap_at time can only be set if snap_every is multiple of 1440"
+ )
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
+ else:
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
+ rule = flasharray.PolicyRuleSnapshotPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_snapshot_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ err_no = len(rule_created.errors) - 1
+ module.fail_json(
+ msg="Failed to create new rule for Snapshot policy {0}. Error: {1}".format(
+ module.params["name"],
+ rule_created.errors[err_no].message,
+ )
+ )
+ else:
+ if module.params["snap_keep_for"] < module.params["snap_every"]:
+ module.fail_json(
+ msg="Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every)."
+ )
+ if module.params["snap_at"]:
+ if not module.params["snap_every"] % 1440 == 0:
+ module.fail_json(
+ msg="snap_at time can only be set if snap_every is multiple of 1440"
+ )
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
+ else:
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
+ rule = flasharray.PolicyRuleSnapshotPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_snapshot_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ err_no = len(rule_created.errors) - 1
+ module.fail_json(
+ msg="Failed to create new rule for Snapshot policy {0}. Error: {1}".format(
+ module.params["name"],
+ rule_created.errors[err_no].message,
+ )
+ )
+ else:
+ current_enabled = list(
+ array.get_policies_quota(names=[module.params["name"]]).items
+ )[0].enabled
+ if current_enabled != module.params["enabled"]:
+ changed_quota = True
+ if not module.check_mode:
+ res = array.patch_policies_quota(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(enabled=module.params["enabled"]),
+ )
+ if res.status_code != 200:
+ module.exit_json(
+ msg="Failed to enable/disable snapshot policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if module.params["directory"]:
+ current_members = list(
+ array.get_policies_quota_members(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if current_members:
+ if module.params["state"] == "absent":
+ for member in range(0, len(current_members)):
+ if (
+ current_members[member].member.name
+ in module.params["directory"]
+ ):
+ changed_member = True
+ if not module.check_mode:
+ res = array.delete_policies_quota_members(
+ policy_names=[module.params["name"]],
+ member_names=[current_members[member].member.name],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule {0} from quota policy {1}. Error: {2}".format(
+ current_members[member].member.name,
+ module.params["name"],
+ rule_created.errors[0].message,
+ )
+ )
+ else:
+ members = []
+ cmembers = []
+ for cmem in range(0, len(current_members)):
+ cmembers.append(current_members[cmem].member.name)
+ mem_diff = list(set(module.params["directory"]) - set(cmembers))
+ if mem_diff:
+ for mem in range(0, len(mem_diff)):
+ members.append(
+ flasharray.PolicymemberpostMembers(
+ member=flasharray.ReferenceWithType(
+ name=mem_diff[mem],
+ resource_type="directories",
+ )
+ )
+ )
+ member = flasharray.PolicyMemberPost(members=members)
+ changed_member = True
+ if not module.check_mode:
+ members_created = array.post_policies_quota_members(
+ policy_names=[module.params["name"]],
+ members=member,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if members_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to update members for Quota policy {0}. Error: {1}".format(
+ module.params["name"],
+ members_created.errors[0].message,
+ )
+ )
+ else:
+ members = []
+ for mem in range(0, len(module.params["directory"])):
+ members.append(
+ flasharray.PolicymemberpostMembers(
+ member=flasharray.ReferenceWithType(
+ name=module.params["directory"][mem],
+ resource_type="directories",
+ )
+ )
+ )
+ member = flasharray.PolicyMemberPost(members=members)
+ changed_member = True
+ if not module.check_mode:
+ members_created = array.post_policies_quota_members(
+ policy_names=[module.params["name"]],
+ members=member,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if members_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to update members for Quota policy {0}. Error: {1}".format(
+ module.params["name"],
+ members_created.errors[0].message,
+ )
+ )
+ if module.params["quota_limit"]:
+ quota = _human_to_bytes(module.params["quota_limit"])
+ current_rules = list(
+ array.get_policies_quota_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if current_rules:
+ one_enforced = False
+ for check_rule in range(0, len(current_rules)):
+ if current_rules[check_rule].enforced:
+ one_enforced = True
+ for rule in range(0, len(current_rules)):
+ rule_exists = False
+ if not module.params["quota_notifications"]:
+ current_notifications = "none"
+ else:
+ current_notifications = ",".join(
+ module.params["quota_notifications"]
+ )
+ if bool(
+ (current_rules[rule].quota_limit == quota)
+ and (
+ current_rules[rule].enforced
+ == module.params["quota_enforced"]
+ )
+ and (current_rules[rule].notifications == current_notifications)
+ ):
+ rule_exists = True
+ break
+
+ if not rule_exists:
+ if module.params["quota_enforced"] and one_enforced:
+ module.fail_json(
+ msg="Only one enforced rule can be defined per policy"
+ )
+ rules = flasharray.PolicyrulequotapostRules(
+ enforced=module.params["quota_enforced"],
+ quota_limit=quota,
+ notifications=",".join(module.params["quota_notifications"]),
+ )
+ rule = flasharray.PolicyRuleQuotaPost(rules=[rules])
+ changed_quota = True
+ if not module.check_mode:
+ quota_created = array.post_policies_quota_rules(
+ policy_names=[module.params["name"]],
+ rules=rule,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if quota_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to add new rule to Quota policy {0}. Error: {1}".format(
+ module.params["name"],
+ quota_created.errors[0].message,
+ )
+ )
+ else:
+ rules = flasharray.PolicyrulequotapostRules(
+ enforced=module.params["quota_enforced"],
+ quota_limit=quota,
+ notifications=",".join(module.params["quota_notifications"]),
+ )
+ rule = flasharray.PolicyRuleQuotaPost(rules=[rules])
+ changed_quota = True
+ if not module.check_mode:
+ quota_created = array.post_policies_quota_rules(
+ policy_names=[module.params["name"]],
+ rules=rule,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if quota_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to add rule to Quota policy {0}. Error: {1}".format(
+ module.params["name"], quota_created.errors[0].message
+ )
+ )
+
+ if (
+ changed_rule
+ or changed_enable
+ or changed_quota
+ or changed_member
+ or changed_dir
+ or changed_user_map
+ ):
+ changed = True
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ nfs_access=dict(
+ type="str",
+ default="no-root-squash",
+ choices=["root-squash", "no-root-squash", "all-squash"],
+ ),
+ nfs_permission=dict(type="str", default="rw", choices=["rw", "ro"]),
+ policy=dict(
+ type="str", required=True, choices=["nfs", "smb", "snapshot", "quota"]
+ ),
+ name=dict(type="str", required=True),
+ rename=dict(type="str"),
+ client=dict(type="str"),
+ enabled=dict(type="bool", default=True),
+ snap_at=dict(type="str"),
+ snap_every=dict(type="int"),
+ snap_keep_for=dict(type="int"),
+ snap_client_name=dict(type="str"),
+ snap_suffix=dict(type="str"),
+ smb_anon_allowed=dict(type="bool", default=False),
+ smb_encrypt=dict(type="bool", default=False),
+ ignore_usage=dict(type="bool", default=False),
+ quota_enforced=dict(type="bool", default=True),
+ quota_limit=dict(type="str"),
+ anongid=dict(type="str", default="65534"),
+ anonuid=dict(type="str", default="65534"),
+ quota_notifications=dict(
+ type="list", elements="str", choices=["user", "group"]
+ ),
+ user_mapping=dict(type="bool"),
+ directory=dict(type="list", elements="str"),
+ )
+ )
+
+ required_together = [["snap_keep_for", "snap_every"]]
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ if module.params["policy"] == "quota" and MIN_QUOTA_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supportedi for directory quotas. "
+ "Minimum version required: {0}".format(MIN_QUOTA_API_VERSION)
+ )
+ array = get_array(module)
+ state = module.params["state"]
+ if module.params["quota_notifications"]:
+ module.params["quota_notifications"].sort(reverse=True)
+ quota_notifications = []
+ [
+ quota_notifications.append(x)
+ for x in module.params["quota_notifications"]
+ if x not in quota_notifications
+ ]
+ module.params["quota_notifications"] = quota_notifications
+ else:
+ module.params["quota_notifications"] = []
+
+ if (
+ module.params["nfs_access"] == "all-squash"
+ and ALL_SQUASH_VERSION not in api_version
+ ):
+ module.fail_json(
+ msg="all-squash is not supported in this version of Purity//FA"
+ )
+
+ all_squash = ALL_SQUASH_VERSION in api_version
+ exists = bool(array.get_policies(names=[module.params["name"]]).status_code == 200)
+
+ if state == "present" and not exists:
+ create_policy(module, array, all_squash)
+ elif state == "present" and exists and module.params["rename"]:
+ rename_policy(module, array)
+ elif state == "present" and exists:
+ update_policy(module, array, api_version, all_squash)
+ elif state == "absent" and exists:
+ delete_policy(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py
new file mode 100644
index 000000000..37dd7ac6a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_proxy
+version_added: '1.0.0'
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+short_description: Configure FlashArray phonehome HTTPs proxy settings
+description:
+- Set or erase configuration for the HTTPS phonehome proxy settings.
+options:
+ state:
+ description:
+ - Set or delete proxy configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ host:
+ description:
+ - The proxy host name.
+ type: str
+ port:
+ description:
+ - The proxy TCP/IP port number.
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing proxy settings
+ purestorage.flasharray.purefa_proxy:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set proxy settings
+ purestorage.flasharray.purefa_proxy:
+ host: purestorage.com
+ port: 8080
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def delete_proxy(module, array):
+ """Delete proxy settings"""
+ changed = False
+ current_proxy = array.get(proxy=True)["proxy"]
+ if current_proxy != "":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set(proxy="")
+ except Exception:
+                module.fail_json(msg="Delete proxy settings failed")
+ module.exit_json(changed=changed)
+
+
+def create_proxy(module, array):
+ """Set proxy settings"""
+ changed = False
+ current_proxy = array.get(proxy=True)
+ if current_proxy is not None:
+ new_proxy = (
+ "https://" + module.params["host"] + ":" + str(module.params["port"])
+ )
+ if new_proxy != current_proxy["proxy"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set(proxy=new_proxy)
+ except Exception:
+ module.fail_json(msg="Set phone home proxy failed.")
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ host=dict(type="str"),
+ port=dict(type="int"),
+ )
+ )
+
+ required_together = [["host", "port"]]
+
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ array = get_system(module)
+
+ if state == "absent":
+ delete_proxy(module, array)
+ elif state == "present":
+ create_proxy(module, array)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py
new file mode 100644
index 000000000..4899b0797
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_ra
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashArray Remote Assist
+description:
+- Enable or Disable Remote Assist for a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of remote assist
+ - When set to I(enable) the RA port can be exposed using the
+ I(debug) module.
+ type: str
+ default: enable
+ choices: [ enable, disable ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable Remote Assist port
+ purestorage.flasharray.purefa_ra:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: result
+
+ debug:
+    msg: "Remote Assist: {{ result['ra_info'] }}"
+
+- name: Disable Remote Assist port
+ purestorage.flasharray.purefa_ra:
+ state: disable
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def enable_ra(module, array):
+ """Enable Remote Assist"""
+ changed = False
+ ra_facts = {}
+ if not array.get_remote_assist_status()["status"] in ["connected", "enabled"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ ra_data = array.enable_remote_assist()
+ ra_facts["fa_ra"] = {"name": ra_data["name"], "port": ra_data["port"]}
+ except Exception:
+ module.fail_json(msg="Enabling Remote Assist failed")
+ else:
+ if not module.check_mode:
+ try:
+ ra_data = array.get_remote_assist_status()
+ ra_facts["fa_ra"] = {"name": ra_data["name"], "port": ra_data["port"]}
+ except Exception:
+ module.fail_json(msg="Getting Remote Assist failed")
+ module.exit_json(changed=changed, ra_info=ra_facts)
+
+
+def disable_ra(module, array):
+ """Disable Remote Assist"""
+ changed = False
+ if array.get_remote_assist_status()["status"] in ["connected", "enabled"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.disable_remote_assist()
+ except Exception:
+ module.fail_json(msg="Disabling Remote Assist failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="enable", choices=["enable", "disable"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ array = get_system(module)
+
+ if module.params["state"] == "enable":
+ enable_ra(module, array)
+ else:
+ disable_ra(module, array)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py
new file mode 100644
index 000000000..9d5fc7443
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_saml
+version_added: '1.12.0'
+short_description: Manage FlashArray SAML2 service and identity providers
+description:
+- Enable or disable FlashArray SAML2 providers
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the SAML2 identity provider (IdP)
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the API client should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ url:
+ description:
+ - The URL of the identity provider
+ type: str
+ array_url:
+ description:
+ - The URL of the FlashArray
+ type: str
+ metadata_url:
+ description:
+ - The URL of the identity provider metadata
+ type: str
+ enabled:
+ description:
+ - Defines the enabled state of the identity provider
+ default: false
+ type: bool
+ encrypt_asserts:
+ description:
+ - If set to true, SAML assertions will be encrypted by the identity provider
+ default: false
+ type: bool
+ sign_request:
+ description:
+ - If set to true, SAML requests will be signed by the service provider.
+ default: false
+ type: bool
+ x509_cert:
+ description:
+ - The X509 certificate that the service provider uses to verify the SAML
+ response signature from the identity provider
+ type: str
+ decryption_credential:
+ description:
+ - The credential used by the service provider to decrypt encrypted SAML assertions from the identity provider
+ type: str
+ signing_credential:
+ description:
+ - The credential used by the service provider to sign SAML requests
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create (disabled) SAML2 SSO with only metadata URL
+ purestorage.flasharray.purefa_saml:
+ name: myIDP
+ array_url: "https://10.10.10.2"
+ metadata_url: "https://myidp.acme.com/adfs/ls"
+ x509_cert: "{{lookup('file', 'x509_cert_file') }}"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Enable SAML2 SSO
+ purestorage.flasharray.purefa_saml:
+ name: myISO
+ enabled: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete SAML2 SSO
+ purestorage.flasharray.purefa_saml:
+ state: absent
+ name: myIDP
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import (
+ Saml2Sso,
+ Saml2SsoPost,
+ Saml2SsoSp,
+ Saml2SsoIdp,
+ ReferenceNoId,
+ )
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.11"
+
+
+def delete_saml(module, array):
+ """Delete SSO SAML2 IdP"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.delete_sso_saml2_idps(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete SAML2 IdP {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_saml(module, array):
+ """Update SSO SAML2 IdP"""
+ changed = False
+ current_idp = list(array.get_sso_saml2_idps(names=[module.params["name"]]).items)[0]
+ old_idp = {
+ "array_url": current_idp.array_url,
+ "enabled": current_idp.enabled,
+ "sp_sign_cred": getattr(current_idp.sp.signing_credential, "name", None),
+ "sp_decrypt_cred": getattr(current_idp.sp.decryption_credential, "name", None),
+ "id_metadata": current_idp.idp.metadata_url,
+ "id_url": getattr(current_idp.idp, "url", None),
+ "id_sign_enabled": current_idp.idp.sign_request_enabled,
+ "id_encrypt_enabled": current_idp.idp.encrypt_assertion_enabled,
+ "id_cert": current_idp.idp.verification_certificate,
+ }
+ if module.params["url"]:
+ new_url = module.params["url"]
+ else:
+ new_url = old_idp["id_url"]
+ if module.params["array_url"]:
+ new_array_url = module.params["array_url"]
+ else:
+ new_array_url = old_idp["array_url"]
+ if module.params["enabled"] != old_idp["enabled"]:
+ new_enabled = module.params["enabled"]
+ else:
+ new_enabled = old_idp["enabled"]
+ if module.params["sign_request"] != old_idp["id_sign_enabled"]:
+ new_sign = module.params["sign_request"]
+ else:
+ new_sign = old_idp["id_sign_enabled"]
+ if module.params["encrypt_asserts"] != old_idp["id_encrypt_enabled"]:
+ new_encrypt = module.params["encrypt_asserts"]
+ else:
+ new_encrypt = old_idp["id_encrypt_enabled"]
+ if module.params["signing_credential"]:
+ new_sign_cred = module.params["signing_credential"]
+ else:
+ new_sign_cred = old_idp["sp_sign_cred"]
+ if module.params["decryption_credential"]:
+ new_decrypt_cred = module.params["decryption_credential"]
+ else:
+ new_decrypt_cred = old_idp["sp_decrypt_cred"]
+ if module.params["metadata_url"]:
+ new_meta_url = module.params["metadata_url"]
+ else:
+ new_meta_url = old_idp["id_metadata"]
+ if module.params["x509_cert"]:
+ new_cert = module.params["x509_cert"]
+ else:
+ new_cert = old_idp["id_cert"]
+ new_idp = {
+ "array_url": new_array_url,
+ "enabled": new_enabled,
+ "sp_sign_cred": new_sign_cred,
+ "sp_decrypt_cred": new_decrypt_cred,
+ "id_metadata": new_meta_url,
+ "id_sign_enabled": new_sign,
+ "id_encrypt_enabled": new_encrypt,
+ "id_url": new_url,
+ "id_cert": new_cert,
+ }
+ if old_idp != new_idp:
+ changed = True
+ if not module.check_mode:
+ sp = Saml2SsoSp(
+ decryption_credential=ReferenceNoId(name=new_idp["sp_decrypt_cred"]),
+ signing_credential=ReferenceNoId(name=new_idp["sp_sign_cred"]),
+ )
+ idp = Saml2SsoIdp(
+ url=new_idp["id_url"],
+ metadata_url=new_idp["id_metadata"],
+ sign_request_enabled=new_idp["id_sign_enabled"],
+ encrypt_assertion_enabled=new_idp["id_encrypt_enabled"],
+ verification_certificate=new_idp["id_cert"],
+ )
+ res = array.patch_sso_saml2_idps(
+ idp=Saml2Sso(
+ array_url=new_idp["array_url"],
+ idp=idp,
+ sp=sp,
+ enabled=new_idp["enabled"],
+ ),
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update SAML2 IdP {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_saml(module, array):
+ """Create SAML2 IdP"""
+ changed = True
+ if not module.check_mode:
+ sp = Saml2SsoSp(
+ decryption_credential=ReferenceNoId(
+ name=module.params["decryption_credential"]
+ ),
+ signing_credential=ReferenceNoId(name=module.params["signing_credential"]),
+ )
+ idp = Saml2SsoIdp(
+ url=module.params["url"],
+ metadata_url=module.params["metadata_url"],
+ sign_request_enabled=module.params["sign_request"],
+ encrypt_assertion_enabled=module.params["encrypt_asserts"],
+ verification_certificate=module.params["x509_cert"],
+ )
+ if not module.check_mode:
+ res = array.post_sso_saml2_idps(
+ idp=Saml2SsoPost(array_url=module.params["array_url"], idp=idp, sp=sp),
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create SAML2 Identity Provider {0}. Error message: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if module.params["enabled"]:
+ res = array.patch_sso_saml2_idps(
+ idp=Saml2Sso(enabled=module.params["enabled"]),
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ array.delete_sso_saml2_idps(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to create SAML2 Identity Provider {0}. Error message: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str", required=True),
+ url=dict(type="str"),
+ array_url=dict(type="str"),
+ metadata_url=dict(type="str"),
+ x509_cert=dict(type="str", no_log=True),
+ signing_credential=dict(type="str"),
+ decryption_credential=dict(type="str"),
+ enabled=dict(type="bool", default=False),
+ encrypt_asserts=dict(type="bool", default=False),
+ sign_request=dict(type="bool", default=False),
+ )
+ )
+
+ required_if = [
+ ["encrypt_asserts", True, ["decryption_credential"]],
+ ["sign_request", True, ["signing_credential"]],
+ ]
+
+ module = AnsibleModule(
+ argument_spec, supports_check_mode=True, required_if=required_if
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ array = get_array(module)
+ state = module.params["state"]
+
+ try:
+ list(array.get_sso_saml2_idps(names=[module.params["name"]]).items)[0]
+ exists = True
+ except AttributeError:
+ exists = False
+ if not exists and state == "present":
+ create_saml(module, array)
+ elif exists and state == "present":
+ update_saml(module, array)
+ elif exists and state == "absent":
+ delete_saml(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py
new file mode 100644
index 000000000..f752cb950
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_smis
+version_added: '1.0.0'
+short_description: Enable or disable FlashArray SMI-S features
+description:
+- Enable or disable FlashArray SMI-S Provider and/or SLP
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ slp:
+ description:
+ - Enable/Disable Service Locator Protocol
+ - Ports used are TCP 427 and UDP 427
+ type: bool
+ default: true
+ smis:
+ description:
+ - Enable/Disable SMI-S Provider
+ - Port used is TCP 5989
+ type: bool
+ default: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable SMI-S and SLP
+ purestorage.flasharray.purefa_smis:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable SMI-S and SLP
+ purestorage.flasharray.purefa_smis:
+ smis: false
+ slp: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+
+
+def update_smis(module, array):
+ """Update SMI-S features"""
+ changed = smis_changed = False
+ try:
+ current = list(array.get_smi_s().items)[0]
+ except Exception:
+ module.fail_json(msg="Failed to get current SMI-S settings.")
+ slp_enabled = current.slp_enabled
+ wbem_enabled = current.wbem_https_enabled
+ if slp_enabled != module.params["slp"]:
+ slp_enabled = module.params["slp"]
+ smis_changed = True
+ if wbem_enabled != module.params["smis"]:
+ wbem_enabled = module.params["smis"]
+ smis_changed = True
+ if smis_changed:
+ smi_s = flasharray.Smis(
+ slp_enabled=slp_enabled, wbem_https_enabled=wbem_enabled
+ )
+ changed = True
+ if not module.check_mode:
+ try:
+ array.patch_smi_s(smi_s=smi_s)
+ except Exception:
+ module.fail_json(msg="Failed to change SMI-S settings.")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ smis=dict(type="bool", default=True),
+ slp=dict(type="bool", default=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ array = get_array(module)
+
+ update_smis(module, array)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py
new file mode 100644
index 000000000..d2c1a5e2b
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_smtp
+version_added: '1.0.0'
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+short_description: Configure FlashArray SMTP settings
+description:
+- Set or erase configuration for the SMTP settings.
+- If username/password are set this will always force a change as there is
+  no way to see if the password is different from the current SMTP configuration.
+- This module manages the SMTP relay host, sender domain and user credentials.
+options:
+ state:
+ description:
+ - Set or delete SMTP configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ password:
+ description:
+ - The SMTP password.
+ type: str
+ user:
+ description:
+ - The SMTP username.
+ type: str
+ relay_host:
+ description:
+ - IPv4 or IPv6 address or FQDN. A port number may be appended.
+ type: str
+ sender_domain:
+ description:
+ - Domain name.
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing SMTP settings
+ purestorage.flasharray.purefa_smtp:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Set SMTP settings
+ purestorage.flasharray.purefa_smtp:
+ sender_domain: purestorage.com
+ password: account_password
+ user: smtp_account
+ relay_host: 10.2.56.78:2345
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def delete_smtp(module, array):
+ """Delete SMTP settings"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_smtp(sender_domain="", user_name="", password="", relay_host="")
+ except Exception:
+            module.fail_json(msg="Delete SMTP settings failed")
+ module.exit_json(changed=changed)
+
+
+def create_smtp(module, array):
+ """Set SMTP settings"""
+ changed = changed_sender = changed_relay = changed_creds = False
+ current_smtp = array.get_smtp()
+ if (
+ module.params["sender_domain"]
+ and current_smtp["sender_domain"] != module.params["sender_domain"]
+ ):
+ changed_sender = True
+ if not module.check_mode:
+ try:
+ array.set_smtp(sender_domain=module.params["sender_domain"])
+ except Exception:
+ module.fail_json(msg="Set SMTP sender domain failed.")
+ if (
+ module.params["relay_host"]
+ and current_smtp["relay_host"] != module.params["relay_host"]
+ ):
+ changed_relay = True
+ if not module.check_mode:
+ try:
+ array.set_smtp(relay_host=module.params["relay_host"])
+ except Exception:
+ module.fail_json(msg="Set SMTP relay host failed.")
+ if module.params["user"]:
+ changed_creds = True
+ if not module.check_mode:
+ try:
+ array.set_smtp(
+ user_name=module.params["user"], password=module.params["password"]
+ )
+ except Exception:
+ module.fail_json(msg="Set SMTP username/password failed.")
+ changed = bool(changed_sender or changed_relay or changed_creds)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ sender_domain=dict(type="str"),
+ password=dict(type="str", no_log=True),
+ user=dict(type="str"),
+ relay_host=dict(type="str"),
+ )
+ )
+
+ required_together = [["user", "password"]]
+
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ array = get_system(module)
+
+ if state == "absent":
+ delete_smtp(module, array)
+ elif state == "present":
+ create_smtp(module, array)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py
new file mode 100644
index 000000000..db567a398
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py
@@ -0,0 +1,640 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_snap
+version_added: '1.0.0'
+short_description: Manage volume snapshots on Pure Storage FlashArrays
+description:
+- Create or delete volumes and volume snapshots on Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the source volume.
+ type: str
+ required: true
+ suffix:
+ description:
+ - Suffix of snapshot name.
+ - Not used during creation if I(offload) is provided.
+ type: str
+ target:
+ description:
+ - Name of target volume if creating from snapshot.
+ - Name of new snapshot suffix if renaming a snapshot
+ type: str
+ overwrite:
+ description:
+ - Define whether to overwrite existing volume when creating from snapshot.
+ type: bool
+ default: false
+ offload:
+ description:
+ - Only valid for Purity//FA 6.1 or higher
+ - Name of offload target for the snapshot.
+ - Target can be either another FlashArray or an Offload Target
+ - This is only applicable for creation, deletion and eradication of snapshots
+ - I(state) of I(copy) is not supported.
+ - I(suffix) is not supported for offload snapshots.
+ type: str
+ state:
+ description:
+ - Define whether the volume snapshot should exist or not.
+ choices: [ absent, copy, present, rename ]
+ type: str
+ default: present
+ eradicate:
+ description:
+ - Define whether to eradicate the snapshot on delete or leave in trash.
+ type: bool
+ default: false
+ ignore_repl:
+ description:
+ - Only valid for Purity//FA 6.1 or higher
+ - If set to true, allow destruction/eradication of snapshots in use by replication.
+ - If set to false, allow destruction/eradication of snapshots not in use by replication
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create snapshot foo.ansible
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: ansible
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create R/W clone foo_clone from snapshot foo.snap
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: snap
+ target: foo_clone
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Create R/W clone foo_clone from remote snapshot arrayB:foo.snap
+ purestorage.flasharray.purefa_snap:
+ name: arrayB:foo
+ suffix: snap
+ target: foo_clone
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Overwrite existing volume foo_clone with snapshot foo.snap
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: snap
+ target: foo_clone
+ overwrite: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Delete and eradicate snapshot named foo.snap
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: snap
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Rename snapshot foo.fred to foo.dave
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: fred
+ target: dave
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: rename
+
+- name: Create a remote volume snapshot on offload device arrayB
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ offload: arrayB
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete and eradicate a volume snapshot foo.1 on offload device arrayB
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: 1
+ offload: arrayB
+ eradicate: true
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PUREERROR = True
+try:
+ from purestorage import PureHTTPError
+except ImportError:
+ HAS_PUREERROR = False
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+from datetime import datetime
+
+GET_SEND_API = "2.4"
+
+
+def _check_offload(module, array):
+ try:
+ offload = list(array.get_offloads(names=[module.params["offload"]]).items)[0]
+ if offload.status == "connected":
+ return True
+ return False
+ except Exception:
+ return False
+
+
+def _check_target(module, array):
+ try:
+ target = list(
+ array.get_array_connections(names=[module.params["offload"]]).items
+ )[0]
+ if target.status == "connected":
+ return True
+ return False
+ except Exception:
+ return False
+
+
+def _check_offload_snapshot(module, array):
+ """Return Remote Snapshot (active or deleted) or None"""
+ source_array = list(array.get_arrays().items)[0].name
+ snapname = (
+ source_array + ":" + module.params["name"] + "." + module.params["suffix"]
+ )
+ if _check_offload(module, array):
+ res = array.get_remote_volume_snapshots(
+ on=module.params["offload"], names=[snapname], destroyed=False
+ )
+ else:
+ res = array.get_volume_snapshots(names=[snapname], destroyed=False)
+ if res.status_code != 200:
+ return None
+ return list(res.items)[0]
+
+
+def get_volume(module, array):
+ """Return Volume or None"""
+ try:
+ return array.get_volume(module.params["name"])
+ except Exception:
+ return None
+
+
+def get_target(module, array):
+ """Return Volume or None"""
+ try:
+ return array.get_volume(module.params["target"])
+ except Exception:
+ return None
+
+
+def get_deleted_snapshot(module, array, arrayv6):
+ """Return Deleted Snapshot"""
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ if module.params["offload"]:
+ source_array = list(arrayv6.get_arrays().items)[0].name
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ full_snapname = source_array + ":" + snapname
+ if _check_offload(module, arrayv6):
+ res = arrayv6.get_remote_volume_snapshots(
+ on=module.params["offload"], names=[full_snapname], destroyed=True
+ )
+ else:
+ res = arrayv6.get_volume_snapshots(names=[snapname], destroyed=True)
+ if res.status_code == 200:
+ return list(res.items)[0].destroyed
+ else:
+ return False
+ else:
+ try:
+ return bool(
+ array.get_volume(snapname, snap=True, pending=True)[0]["time_remaining"]
+ != ""
+ )
+ except Exception:
+ return False
+
+
def get_snapshot(module, array):
    """Return Snapshot or None"""
    try:
        wanted = "{0}.{1}".format(module.params["name"], module.params["suffix"])
        snaps = array.get_volume(module.params["name"], snap=True, pending=False)
        if any(entry["name"] == wanted for entry in snaps):
            return True
        # No matching snapshot: fall through, implicitly returning None,
        # matching the original contract.
    except Exception:
        return False
+
+
def create_snapshot(module, array, arrayv6):
    """Create Snapshot

    For offload targets the suffix is assigned by the array, so the
    user-supplied value is discarded up front and the generated one is read
    back from the response.  Local snapshots use the v1 API with the given
    suffix.  Always exits the module, reporting the suffix actually used.
    """
    changed = False
    if module.params["offload"]:
        # Remote snapshots get an array-generated suffix; reset so the
        # value reported to the caller comes from the response.
        module.params["suffix"] = None
        changed = True
        if not module.check_mode:
            res = arrayv6.post_remote_volume_snapshots(
                source_names=[module.params["name"]], on=module.params["offload"]
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to create remote snapshot for volume {0}. Error: {1}".format(
                        module.params["name"], res.errors[0].message
                    )
                )
            else:
                # Remote snapshot name is "<vol>.<suffix>"; extract the suffix.
                remote_snap = list(res.items)[0].name
                module.params["suffix"] = remote_snap.split(".")[1]
    else:
        changed = True
        if not module.check_mode:
            try:
                array.create_snapshot(
                    module.params["name"], suffix=module.params["suffix"]
                )
            except Exception:
                module.fail_json(
                    msg="Failed to create snapshot for volume {0}".format(
                        module.params["name"]
                    )
                )
    module.exit_json(changed=changed, suffix=module.params["suffix"])
+
+
def create_from_snapshot(module, array):
    """Copy a snapshot onto the target volume, honouring overwrite and
    check mode, then exit the module."""
    source = "{0}.{1}".format(module.params["name"], module.params["suffix"])
    target_name = module.params["target"]
    existing = get_target(module, array)
    if existing is None:
        # Target volume does not exist yet: plain copy.
        changed = True
        if not module.check_mode:
            array.copy_volume(source, target_name)
    elif module.params["overwrite"]:
        # Target exists and the caller asked to overwrite it.
        changed = True
        if not module.check_mode:
            array.copy_volume(
                source, target_name, overwrite=module.params["overwrite"]
            )
    else:
        # Target exists and overwrite was not requested: nothing to do.
        changed = False
    module.exit_json(changed=changed)
+
+
def recover_snapshot(module, array, arrayv6):
    """Recover a destroyed snapshot, locally or on an offload target.

    Offload path uses the v6 REST client to clear the destroyed flag on the
    remote copy; the local path uses the v1 recover call.  Always exits the
    module with the change status.
    """
    changed = False
    snapname = module.params["name"] + "." + module.params["suffix"]
    if module.params["offload"] and _check_offload(module, arrayv6):
        # BUGFIX: use the v6 client (the v1 client has no get_arrays), and
        # include the ":" separator in the remote snapshot name —
        # "<source-array>:<vol>.<suffix>" — matching delete/eradicate.
        # Previously the name was "<source-array><vol>.<suffix>", which could
        # never match an existing remote snapshot.
        source_array = list(arrayv6.get_arrays().items)[0].name
        snapname = (
            source_array + ":" + module.params["name"] + "." + module.params["suffix"]
        )
        changed = True
        if not module.check_mode:
            res = arrayv6.patch_remote_volume_snapshots(
                names=[snapname],
                on=module.params["offload"],
                remote_volume_snapshot=flasharray.DestroyedPatchPost(destroyed=False),
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to recover remote snapshot {0}".format(snapname)
                )
    else:
        changed = True
        if not module.check_mode:
            try:
                array.recover_volume(snapname)
            except Exception:
                module.fail_json(msg="Recovery of snapshot {0} failed".format(snapname))
    module.exit_json(changed=changed)
+
+
def update_snapshot(module, array):
    """Rename an existing snapshot suffix (the only supported update)."""
    changed = True
    if not module.check_mode:
        old_name = module.params["name"] + "." + module.params["suffix"]
        renamed = module.params["name"] + "." + module.params["target"]
        res = array.patch_volume_snapshots(
            names=[old_name],
            volume_snapshot=flasharray.VolumeSnapshotPatch(name=renamed),
        )
        if res.status_code != 200:
            module.fail_json(
                msg="Failed to rename {0} to {1}. Error: {2}".format(
                    old_name, renamed, res.errors[0].message
                )
            )
    module.exit_json(changed=changed)
+
+
def delete_snapshot(module, array, arrayv6):
    """Soft-delete a snapshot, optionally eradicating it immediately.

    Handles three locations in order: a copy replicated to a non-FlashArray
    offload target, a snapshot on a connected FlashArray target, and a local
    snapshot (v6 REST when available, otherwise the v1 API).  Always exits
    the module with the change status.
    """
    changed = False
    snapname = module.params["name"] + "." + module.params["suffix"]
    if module.params["offload"] and _check_offload(module, arrayv6):
        # Remote copies are addressed as "<source-array>:<snapshot>".
        source_array = list(arrayv6.get_arrays().items)[0].name
        full_snapname = source_array + ":" + snapname
        changed = True
        if not module.check_mode:
            res = arrayv6.patch_remote_volume_snapshots(
                names=[full_snapname],
                on=module.params["offload"],
                volume_snapshot=flasharray.VolumeSnapshotPatch(destroyed=True),
                replication_snapshot=module.params["ignore_repl"],
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to delete remote snapshot {0}. Error: {1}".format(
                        snapname, res.errors[0].message
                    )
                )
            if module.params["eradicate"]:
                res = arrayv6.delete_remote_volume_snapshots(
                    names=[full_snapname],
                    on=module.params["offload"],
                    replication_snapshot=module.params["ignore_repl"],
                )
                if res.status_code != 200:
                    module.fail_json(
                        msg="Failed to eradicate remote snapshot {0}. Error: {1}".format(
                            snapname, res.errors[0].message
                        )
                    )
    elif module.params["offload"] and _check_target(module, arrayv6):
        # Connected FlashArray target: snapshot is a normal volume snapshot.
        changed = True
        if not module.check_mode:
            res = arrayv6.patch_volume_snapshots(
                names=[snapname],
                volume_snapshot=flasharray.DestroyedPatchPost(destroyed=True),
                replication_snapshot=module.params["ignore_repl"],
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to delete remote snapshot {0}. Error: {1}".format(
                        snapname, res.errors[0].message
                    )
                )
            if module.params["eradicate"]:
                res = arrayv6.delete_volume_snapshots(
                    names=[snapname], replication_snapshot=module.params["ignore_repl"]
                )
                if res.status_code != 200:
                    module.fail_json(
                        msg="Failed to eradicate remote snapshot {0}. Error: {1}".format(
                            snapname, res.errors[0].message
                        )
                    )
    else:
        changed = True
        if not module.check_mode:
            api_version = array._list_available_rest_versions()
            if GET_SEND_API in api_version:
                # BUGFIX: removed leftover debug statement module.warn("here")
                # that surfaced a spurious warning on every local v6 delete.
                res = arrayv6.patch_volume_snapshots(
                    names=[snapname],
                    volume_snapshot=flasharray.DestroyedPatchPost(destroyed=True),
                    replication_snapshot=module.params["ignore_repl"],
                )
                if res.status_code != 200:
                    module.fail_json(
                        msg="Failed to delete remote snapshot {0}. Error: {1}".format(
                            snapname, res.errors[0].message
                        )
                    )
                if module.params["eradicate"]:
                    res = arrayv6.delete_volume_snapshots(
                        names=[snapname],
                        replication_snapshot=module.params["ignore_repl"],
                    )
                    if res.status_code != 200:
                        module.fail_json(
                            msg="Failed to eradicate remote snapshot {0}. Error: {1}".format(
                                snapname, res.errors[0].message
                            )
                        )
            else:
                # Pre-6.1 Purity: fall back to the v1 API.
                try:
                    array.destroy_volume(snapname)
                    if module.params["eradicate"]:
                        try:
                            array.eradicate_volume(snapname)
                        except PureHTTPError as err:
                            module.fail_json(
                                msg="Error eradicating snapshot. Error: {0}".format(
                                    err.text
                                )
                            )
                except PureHTTPError as err:
                    module.fail_json(
                        msg="Error deleting snapshot. Error: {0}".format(err.text)
                    )
    module.exit_json(changed=changed)
+
+
def eradicate_snapshot(module, array, arrayv6):
    """Eradicate snapshot

    Permanently removes an already-destroyed snapshot.  Handles, in order:
    a replicated copy on a non-FlashArray offload target, a snapshot on a
    connected FlashArray target, and finally a local snapshot via the v1
    API.  Always exits the module with the change status.
    """
    changed = True
    snapname = module.params["name"] + "." + module.params["suffix"]
    if not module.check_mode:
        if module.params["offload"] and _check_offload(module, arrayv6):
            # Remote copies are addressed as "<source-array>:<snapshot>".
            source_array = list(arrayv6.get_arrays().items)[0].name
            full_snapname = source_array + ":" + snapname
            res = arrayv6.delete_remote_volume_snapshots(
                names=[full_snapname],
                on=module.params["offload"],
                replication_snapshot=module.params["ignore_repl"],
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to eradicate remote snapshot {0}. Error: {1}".format(
                        snapname, res.errors[0].message
                    )
                )
        elif module.params["offload"] and _check_target(module, arrayv6):
            # Connected FlashArray target: snapshot is a normal volume snapshot.
            res = arrayv6.delete_volume_snapshots(
                names=[snapname], replication_snapshot=module.params["ignore_repl"]
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to eradicate remote snapshot {0}. Error: {1}".format(
                        snapname, res.errors[0].message
                    )
                )
        else:
            try:
                array.eradicate_volume(snapname)
            except Exception:
                # Best effort: report no change rather than failing.
                changed = False
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: validate parameters and dispatch on state.

    Builds the argument spec, enforces Purity suffix/target naming rules,
    selects the correct REST client(s) based on available API versions, and
    routes to the create/recover/rename/copy/delete/eradicate handlers, each
    of which exits the module itself.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            suffix=dict(type="str"),
            target=dict(type="str"),
            offload=dict(type="str"),
            ignore_repl=dict(type="bool", default=False),
            overwrite=dict(type="bool", default=False),
            eradicate=dict(type="bool", default=False),
            state=dict(
                type="str",
                default="present",
                choices=["absent", "copy", "present", "rename"],
            ),
        )
    )

    required_if = [("state", "copy", ["target", "suffix"])]

    module = AnsibleModule(
        argument_spec, required_if=required_if, supports_check_mode=True
    )
    if not HAS_PUREERROR:
        module.fail_json(msg="purestorage sdk is required for this module")
    # Purity object-name rules: alphanumeric/hyphen names, or purely numeric.
    pattern1 = re.compile(
        "^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,63}[a-zA-Z0-9])?$"
    )
    pattern2 = re.compile("^([1-9])([0-9]{0,63}[0-9])?$")

    state = module.params["state"]
    if module.params["suffix"] is None:
        # No suffix supplied: generate a unique epoch-seconds-based one.
        suffix = "snap-" + str(
            (datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds()
        )
        module.params["suffix"] = suffix.replace(".", "")
    else:
        if not module.params["offload"]:
            # Suffix rules are not enforced for absent/rename, where the
            # suffix names an existing object.
            if not (
                pattern1.match(module.params["suffix"])
                or pattern2.match(module.params["suffix"])
            ) and state not in [
                "absent",
                "rename",
            ]:
                module.fail_json(
                    msg="Suffix name {0} does not conform to suffix name rules".format(
                        module.params["suffix"]
                    )
                )
    if state == "rename" and module.params["target"] is not None:
        # For rename, "target" carries the new suffix and must be valid.
        if not pattern1.match(module.params["target"]):
            module.fail_json(
                msg="Suffix target {0} does not conform to suffix name rules".format(
                    module.params["target"]
                )
            )

    array = get_system(module)
    api_version = array._list_available_rest_versions()
    if GET_SEND_API not in api_version:
        # Pre-6.1 Purity: no v6 client; offload and rename are unsupported.
        arrayv6 = None
        if module.params["offload"]:
            module.fail_json(
                msg="Purity 6.1, or higher, is required to support single volume offload snapshots"
            )
        if state == "rename":
            module.fail_json(
                msg="Purity 6.1, or higher, is required to support snapshot rename"
            )
    else:
        if not HAS_PURESTORAGE:
            module.fail_json(msg="py-pure-client sdk is required for this module")
        arrayv6 = get_array(module)
        if module.params["offload"]:
            # Offload must be a connected offload or FlashArray target.
            if not _check_offload(module, arrayv6) and not _check_target(
                module, arrayv6
            ):
                module.fail_json(
                    msg="Selected offload {0} not connected.".format(
                        module.params["offload"]
                    )
                )
    if (
        state == "copy"
        and module.params["offload"]
        and not _check_target(module, arrayv6)
    ):
        module.fail_json(
            msg="Snapshot copy is not supported when an offload target is defined"
        )
    # Determine current snapshot existence (live or destroyed) before routing.
    destroyed = False
    array_snap = False
    offload_snap = False
    volume = get_volume(module, array)
    if module.params["offload"] and not _check_target(module, arrayv6):
        offload_snap = _check_offload_snapshot(module, arrayv6)
        if offload_snap is None:
            offload_snap = False
        else:
            offload_snap = not offload_snap.destroyed
    else:
        array_snap = get_snapshot(module, array)
    snap = array_snap or offload_snap

    if not snap:
        destroyed = get_deleted_snapshot(module, array, arrayv6)
    if state == "present" and volume and not destroyed:
        create_snapshot(module, array, arrayv6)
    elif state == "present" and destroyed:
        recover_snapshot(module, array, arrayv6)
    elif state == "rename" and volume and snap:
        update_snapshot(module, arrayv6)
    elif state == "copy" and snap:
        create_from_snapshot(module, array)
    elif state == "absent" and snap and not destroyed:
        delete_snapshot(module, array, arrayv6)
    elif state == "absent" and destroyed and module.params["eradicate"]:
        eradicate_snapshot(module, array, arrayv6)
    elif state == "absent" and not snap:
        module.exit_json(changed=False)

    module.exit_json(changed=False)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py
new file mode 100644
index 000000000..b422f4835
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_snmp
+version_added: '1.0.0'
+short_description: Configure FlashArray SNMP Managers
+description:
+- Manage SNMP managers on a Pure Storage FlashArray.
+- Changing of a named SNMP managers version is not supported.
+- This module is not idempotent and will always modify an
+ existing SNMP manager due to hidden parameters that cannot
+ be compared to the play parameters.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of SNMP Manager
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete SNMP manager
+ type: str
+ default: present
+ choices: [ absent, present ]
+ auth_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase of 8 - 32 characters.
+ auth_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Hash algorithm to use
+ choices: [ MD5, SHA ]
+ community:
+ type: str
+ description:
+ - SNMP v2c only. Manager community ID. Between 1 and 32 characters long.
+ host:
+ type: str
+ description:
+ - IPv4 or IPv6 address or FQDN to send trap messages to.
+ user:
+ type: str
+ description:
+ - SNMP v3 only. User ID recognized by the specified SNMP manager.
+ Must be between 1 and 32 characters.
+ version:
+ type: str
+ description:
+ - Version of SNMP protocol to use for the manager.
+ choices: [ v2c, v3 ]
+ default: v2c
+ notification:
+ type: str
+ description:
+ - Action to perform on event.
+ default: trap
+ choices: [ inform, trap ]
+ privacy_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase to encrypt SNMP messages.
+ Must be between 8 and 63 non-space ASCII characters.
+ privacy_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Encryption protocol to use
+ choices: [ AES, DES ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing SNMP manager
+ purestorage.flasharray.purefa_snmp:
+ name: manager1
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Create v2c SNMP manager
+  purestorage.flasharray.purefa_snmp:
+ name: manager1
+ community: public
+ host: 10.21.22.23
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Create v3 SNMP manager
+  purestorage.flasharray.purefa_snmp:
+ name: manager2
+ version: v3
+ auth_protocol: MD5
+ auth_passphrase: password
+ host: 10.21.22.23
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Update existing SNMP manager
+ purestorage.flasharray.purefa_snmp:
+ name: manager1
+ community: private
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
def update_manager(module, array):
    """Update SNMP Manager

    Refuses a version change, then re-applies the manager settings.  For v3,
    one of four branches is taken depending on which of auth/privacy
    protocols are supplied, since the v1 API rejects unset keyword pairs.
    Always exits the module (this module is intentionally not idempotent).
    """
    changed = False
    try:
        mgr = array.get_snmp_manager(module.params["name"])
    except Exception:
        module.fail_json(
            msg="Failed to get configuration for SNMP manager {0}.".format(
                module.params["name"]
            )
        )
    if mgr["version"] != module.params["version"]:
        # The array cannot switch an existing manager between v2c and v3.
        module.fail_json(msg="Changing an SNMP managers version is not supported.")
    elif module.params["version"] == "v2c":
        changed = True
        if not module.check_mode:
            try:
                array.set_snmp_manager(
                    module.params["name"],
                    community=module.params["community"],
                    notification=module.params["notification"],
                    host=module.params["host"],
                )
            except Exception:
                module.fail_json(
                    msg="Failed to update SNMP manager {0}.".format(
                        module.params["name"]
                    )
                )
    else:
        # v3: pass only the auth/privacy keyword pairs that were supplied.
        if module.params["auth_protocol"] and module.params["privacy_protocol"]:
            changed = True
            if not module.check_mode:
                try:
                    array.set_snmp_manager(
                        module.params["name"],
                        auth_passphrase=module.params["auth_passphrase"],
                        auth_protocol=module.params["auth_protocol"],
                        privacy_passphrase=module.params["privacy_passphrase"],
                        privacy_protocol=module.params["privacy_protocol"],
                        notification=module.params["notification"],
                        user=module.params["user"],
                        host=module.params["host"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to update SNMP manager {0}.".format(
                            module.params["name"]
                        )
                    )
        elif module.params["auth_protocol"] and not module.params["privacy_protocol"]:
            changed = True
            if not module.check_mode:
                try:
                    array.set_snmp_manager(
                        module.params["name"],
                        version=module.params["version"],
                        auth_passphrase=module.params["auth_passphrase"],
                        auth_protocol=module.params["auth_protocol"],
                        notification=module.params["notification"],
                        user=module.params["user"],
                        host=module.params["host"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to update SNMP manager {0}.".format(
                            module.params["name"]
                        )
                    )
        elif not module.params["auth_protocol"] and module.params["privacy_protocol"]:
            changed = True
            if not module.check_mode:
                try:
                    array.set_snmp_manager(
                        module.params["name"],
                        version=module.params["version"],
                        privacy_passphrase=module.params["privacy_passphrase"],
                        privacy_protocol=module.params["privacy_protocol"],
                        notification=module.params["notification"],
                        user=module.params["user"],
                        host=module.params["host"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to update SNMP manager {0}.".format(
                            module.params["name"]
                        )
                    )
        elif (
            not module.params["auth_protocol"] and not module.params["privacy_protocol"]
        ):
            changed = True
            if not module.check_mode:
                try:
                    array.set_snmp_manager(
                        module.params["name"],
                        version=module.params["version"],
                        notification=module.params["notification"],
                        user=module.params["user"],
                        host=module.params["host"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to update SNMP manager {0}.".format(
                            module.params["name"]
                        )
                    )
        else:
            # Unreachable given the four exhaustive branches above; kept as a
            # defensive guard.
            module.fail_json(
                msg="Invalid parameters selected in update. Please raise issue in Ansible GitHub"
            )

    module.exit_json(changed=changed)
+
+
def delete_manager(module, array):
    """Remove the named SNMP manager from the array and exit the module."""
    changed = True
    if not module.check_mode:
        manager_name = module.params["name"]
        try:
            array.delete_snmp_manager(manager_name)
        except Exception:
            module.fail_json(
                msg="Delete SNMP manager {0} failed".format(manager_name)
            )
    module.exit_json(changed=changed)
+
+
def create_manager(module, array):
    """Create SNMP Manager

    For v2c a single call suffices; for v3 one of four branches is taken
    depending on which of the auth/privacy keyword pairs were supplied,
    since the v1 API rejects unset keyword pairs.  Always exits the module.
    """
    changed = True
    if not module.check_mode:
        if module.params["version"] == "v2c":
            try:
                array.create_snmp_manager(
                    module.params["name"],
                    version=module.params["version"],
                    community=module.params["community"],
                    notification=module.params["notification"],
                    host=module.params["host"],
                )
            except Exception:
                module.fail_json(
                    msg="Failed to create SNMP manager {0}.".format(
                        module.params["name"]
                    )
                )
        else:
            # v3: pass only the auth/privacy keyword pairs that were supplied.
            if module.params["auth_protocol"] and module.params["privacy_protocol"]:
                try:
                    array.create_snmp_manager(
                        module.params["name"],
                        version=module.params["version"],
                        auth_passphrase=module.params["auth_passphrase"],
                        auth_protocol=module.params["auth_protocol"],
                        privacy_passphrase=module.params["privacy_passphrase"],
                        privacy_protocol=module.params["privacy_protocol"],
                        notification=module.params["notification"],
                        user=module.params["user"],
                        host=module.params["host"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to create SNMP manager {0}.".format(
                            module.params["name"]
                        )
                    )
            elif (
                module.params["auth_protocol"] and not module.params["privacy_protocol"]
            ):
                try:
                    array.create_snmp_manager(
                        module.params["name"],
                        version=module.params["version"],
                        auth_passphrase=module.params["auth_passphrase"],
                        auth_protocol=module.params["auth_protocol"],
                        notification=module.params["notification"],
                        user=module.params["user"],
                        host=module.params["host"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to create SNMP manager {0}.".format(
                            module.params["name"]
                        )
                    )
            elif (
                not module.params["auth_protocol"] and module.params["privacy_protocol"]
            ):
                try:
                    array.create_snmp_manager(
                        module.params["name"],
                        version=module.params["version"],
                        privacy_passphrase=module.params["privacy_passphrase"],
                        privacy_protocol=module.params["privacy_protocol"],
                        notification=module.params["notification"],
                        user=module.params["user"],
                        host=module.params["host"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to create SNMP manager {0}.".format(
                            module.params["name"]
                        )
                    )
            elif (
                not module.params["auth_protocol"]
                and not module.params["privacy_protocol"]
            ):
                try:
                    array.create_snmp_manager(
                        module.params["name"],
                        version=module.params["version"],
                        notification=module.params["notification"],
                        user=module.params["user"],
                        host=module.params["host"],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to create SNMP manager {0}.".format(
                            module.params["name"]
                        )
                    )
            else:
                # Unreachable given the four exhaustive branches above; kept
                # as a defensive guard.
                module.fail_json(
                    msg="Invalid parameters selected in create. Please raise issue in Ansible GitHub"
                )
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: validate parameters and dispatch create/update/delete."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            host=dict(type="str"),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            user=dict(type="str"),
            notification=dict(type="str", choices=["inform", "trap"], default="trap"),
            auth_passphrase=dict(type="str", no_log=True),
            auth_protocol=dict(type="str", choices=["MD5", "SHA"]),
            privacy_passphrase=dict(type="str", no_log=True),
            privacy_protocol=dict(type="str", choices=["AES", "DES"]),
            version=dict(type="str", default="v2c", choices=["v2c", "v3"]),
            community=dict(type="str"),
        )
    )

    required_together = [
        ["auth_passphrase", "auth_protocol"],
        ["privacy_passphrase", "privacy_protocol"],
    ]
    required_if = [
        ["version", "v2c", ["community", "host"]],
        ["version", "v3", ["host", "user"]],
    ]

    module = AnsibleModule(
        argument_spec,
        required_together=required_together,
        required_if=required_if,
        supports_check_mode=True,
    )

    state = module.params["state"]
    array = get_system(module)
    # Determine whether a manager with this name already exists.
    mgr_configured = False
    for mgr in array.list_snmp_managers():
        if mgr["name"] == module.params["name"]:
            mgr_configured = True
            break
    if module.params["version"] == "v3":
        # BUGFIX: the original used chained comparisons of the form
        # `8 > len(x) > 32`, which can never be true, so out-of-range
        # passphrases were silently accepted instead of rejected.
        if module.params["auth_passphrase"] and not (
            8 <= len(module.params["auth_passphrase"]) <= 32
        ):
            module.fail_json(msg="auth_password must be between 8 and 32 characters")
        if module.params["privacy_passphrase"] and not (
            8 <= len(module.params["privacy_passphrase"]) <= 63
        ):
            module.fail_json(msg="privacy_password must be between 8 and 63 characters")
    if state == "absent" and mgr_configured:
        delete_manager(module, array)
    elif mgr_configured and state == "present":
        update_manager(module, array)
    elif not mgr_configured and state == "present":
        create_manager(module, array)
    else:
        module.exit_json(changed=False)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py
new file mode 100644
index 000000000..b9dc8ca94
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_snmp_agent
+version_added: '1.16.0'
+short_description: Configure the FlashArray SNMP Agent
+description:
+- Manage the I(localhost) SNMP Agent on a Pure Storage FlashArray.
+- This module is not idempotent and will always modify the SNMP Agent
+ due to hidden parameters that cannot be compared to the task parameters.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ type: str
+ description:
+ - Used to set or clear the SNMP v2c community string or the SNMP v3
+ auth and privacy protocols.
+ choices: [ absent, present ]
+ default: present
+ user:
+ type: str
+ description:
+ - SNMP v3 only. User ID which must be between 1 and 32 characters.
+ version:
+ type: str
+ description:
+ - Version of SNMP protocol to use for the manager.
+ choices: [ v2c, v3 ]
+ default: v2c
+ community:
+ type: str
+ description:
+ - SNMP v2c only. Manager community ID under which Purity//FA is to
+ communicate with the specified managers.
+ - To remove the string set I(state) to I(absent) with I(version)
+ set to I(v2c)
+ auth_passphrase:
+ type: str
+ description:
+    - SNMP v3 only. Passphrase used by Purity//FA to authenticate the
+      array with the specified managers.
+ - Must be between 8 and 63 non-space ASCII characters.
+ auth_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Encryption protocol to use
+ - To remove the privacy and auth protocols set I(state) to
+ I(absent) with I(version) set to I(v3)
+ choices: [ MD5, SHA ]
+ privacy_passphrase:
+ type: str
+ description:
+ - SNMP v3 only. Passphrase to encrypt SNMP messages.
+ Must be between 8 and 63 non-space ASCII characters.
+ privacy_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Encryption protocol to use
+ - To remove the privacy and auth protocols set I(state) to
+ I(absent) with I(version) set to I(v3)
+ choices: [ AES, DES ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Clear SNMP agent v2c community string
+ purestorage.flasharray.purefa_snmp_agent:
+ version: v2c
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Clear SNMP agent v3 auth and privacy protocols
+ purestorage.flasharray.purefa_snmp_agent:
+ version: v3
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Update v2c SNMP agent
+  purestorage.flasharray.purefa_snmp_agent:
+ version: v2c
+ community: public
+ host: 10.21.22.23
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Update v3 SNMP manager
+  purestorage.flasharray.purefa_snmp_agent:
+ version: v3
+ auth_protocol: MD5
+ auth_passphrase: password
+ host: 10.21.22.23
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.1"
+
+
def update_agent(module, array, agent):
    """Update the localhost SNMP agent.

    state=absent clears the v2c community string or the v3 auth/privacy
    protocols; state=present (re)applies the supplied settings.  Always
    exits the module (this module is intentionally not idempotent).
    """
    changed = False
    if module.params["version"] == "v2c":
        changed = True
        if not module.check_mode:
            # BUGFIX: state is constrained to "absent"/"present" by the
            # argument spec; the original compared against "delete", which
            # never matched (the final else accidentally gave the right
            # result here, but the comparison is corrected for clarity).
            if module.params["state"] == "absent":
                community = ""
            elif module.params["state"] == "present" and module.params["community"]:
                community = module.params["community"]
            else:
                community = ""
            res = array.patch_snmp_agents(
                snmp_agent=flasharray.SnmpAgentPatch(
                    name="localhost",
                    version="v2c",
                    v2c=flasharray.SnmpV2c(community=community),
                )
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to update SNMP agent.Error: {0}".format(
                        res.errors[0].message
                    )
                )
    else:
        # BUGFIX: as above, "delete" could never match, so the v3
        # clear-protocols path was unreachable; state=absent now clears the
        # auth and privacy protocols as documented.
        if module.params["state"] == "absent":
            changed = True
            v3 = flasharray.SnmpV3Patch(
                auth_protocol="",
                privacy_protocol="",
                user=module.params["user"],
            )
        elif module.params["auth_protocol"] and module.params["privacy_protocol"]:
            changed = True
            v3 = flasharray.SnmpV3Patch(
                auth_passphrase=module.params["auth_passphrase"],
                auth_protocol=module.params["auth_protocol"],
                privacy_passphrase=module.params["privacy_passphrase"],
                privacy_protocol=module.params["privacy_protocol"],
                user=module.params["user"],
            )
        elif module.params["auth_protocol"] and not module.params["privacy_protocol"]:
            changed = True
            v3 = flasharray.SnmpV3Patch(
                auth_passphrase=module.params["auth_passphrase"],
                auth_protocol=module.params["auth_protocol"],
                user=module.params["user"],
            )
        elif not module.params["auth_protocol"] and module.params["privacy_protocol"]:
            changed = True
            v3 = flasharray.SnmpV3Patch(
                privacy_passphrase=module.params["privacy_passphrase"],
                privacy_protocol=module.params["privacy_protocol"],
                user=module.params["user"],
            )
        elif (
            not module.params["auth_protocol"] and not module.params["privacy_protocol"]
        ):
            changed = True
            v3 = flasharray.SnmpV3Patch(user=module.params["user"])

        if not module.check_mode:
            res = array.patch_snmp_agents(
                snmp_agent=flasharray.SnmpAgentPatch(
                    name="localhost",
                    version=module.params["version"],
                    v3=v3,
                )
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to update SNMP agent.Error: {0}".format(
                        res.errors[0].message
                    )
                )

    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: validate parameters and update the localhost SNMP agent."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            state=dict(type="str", default="present", choices=["absent", "present"]),
            user=dict(type="str"),
            auth_passphrase=dict(type="str", no_log=True),
            auth_protocol=dict(type="str", choices=["MD5", "SHA"]),
            privacy_passphrase=dict(type="str", no_log=True),
            privacy_protocol=dict(type="str", choices=["AES", "DES"]),
            version=dict(type="str", default="v2c", choices=["v2c", "v3"]),
            community=dict(type="str"),
        )
    )

    required_together = [
        ["auth_passphrase", "auth_protocol"],
        ["privacy_passphrase", "privacy_protocol"],
    ]
    required_if = [
        ["version", "v3", ["user"]],
    ]

    module = AnsibleModule(
        argument_spec,
        required_together=required_together,
        required_if=required_if,
        supports_check_mode=True,
    )

    array = get_system(module)
    api_version = array._list_available_rest_versions()

    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(
            msg="FlashArray REST version not supported. "
            "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
        )
    # Switch to the v2 REST client for the SNMP agent endpoints.
    array = get_array(module)

    agent = list(array.get_snmp_agents().items)
    if module.params["version"] == "v3":
        # BUGFIX: the original used chained comparisons of the form
        # `8 > len(x) > 32`, which can never be true, so out-of-range
        # passphrases were silently accepted instead of rejected.
        if module.params["auth_passphrase"] and not (
            8 <= len(module.params["auth_passphrase"]) <= 32
        ):
            module.fail_json(msg="auth_password must be between 8 and 32 characters")
        if module.params["privacy_passphrase"] and not (
            8 <= len(module.params["privacy_passphrase"]) <= 63
        ):
            module.fail_json(msg="privacy_password must be between 8 and 63 characters")
    update_agent(module, array, agent)
    module.exit_json(changed=False)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py
new file mode 100644
index 000000000..c1199215f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py
@@ -0,0 +1,119 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["deprecated"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_sso
+version_added: '1.9.0'
+deprecated:
+ removed_in: '2.0.0'
  why: Superseded by M(purestorage.flasharray.purefa_admin)
+ alternative: Use M(purestorage.flasharray.purefa_admin) instead.
+short_description: Configure Pure Storage FlashArray Single Sign-On
+description:
+- Enable or disable Single Sign-On (SSO) to give LDAP users the ability
+ to navigate seamlessly from Pure1 Manage to the current array through a
+ single login.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
    - Enable or disable the array Single Sign-On from Pure1 Manage
+ default: present
+ type: str
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable SSO
+ purestorage.flasharray.purefa_sso:
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable SSO
+ purestorage.flasharray.purefa_sso:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import AdminSettings
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+SSO_API_VERSION = "2.2"
+
+
def main():
    """Enable or disable Pure1 Manage Single Sign-On (SSO) on the array."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    if not HAS_PURESTORAGE:
        module.fail_json(msg="py-pure-client sdk is required for this module")

    desired = module.params["state"] == "present"
    array = get_system(module)
    api_version = array._list_available_rest_versions()
    changed = False
    if SSO_API_VERSION not in api_version:
        # Guard clause: older Purity releases have no SSO setting at all.
        module.fail_json(msg="Purity version does not support Single Sign-On")
    array = get_array(module)
    current_sso = list(array.get_admins_settings().items)[0].single_sign_on_enabled
    if current_sso != desired:
        changed = True
        if not module.check_mode:
            res = array.patch_admins_settings(
                admin_settings=AdminSettings(single_sign_on_enabled=desired)
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to change Single Sign-On status. Error: {0}".format(
                        res.errors[0].message
                    )
                )
    module.exit_json(changed=changed)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py
new file mode 100644
index 000000000..efce8db9e
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefa_subnet
+version_added: '1.0.0'
+short_description: Manage network subnets in a Pure Storage FlashArray
+description:
+ - This module manages the network subnets on a Pure Storage FlashArray.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Subnet name.
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete subnet.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ enabled:
+ description:
+ - whether the subnet should be enabled or not
+ default: true
+ type: bool
+ prefix:
+ description:
+ - Set the IPv4 or IPv6 address to be associated with the subnet.
+ required: false
+ type: str
+ gateway:
+ description:
+ - IPv4 or IPv6 address of subnet gateway.
+ required: false
+ type: str
+ mtu:
+ description:
+ - MTU size of the subnet. Range is 568 to 9000.
+ required: false
+ default: 1500
+ type: int
+ vlan:
+ description:
+ - VLAN ID. Range is 0 to 4094.
+ required: false
+ type: int
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = """
+- name: Create subnet subnet100
+ purestorage.flasharray.purefa_subnet:
+ name: subnet100
+ vlan: 100
+ gateway: 10.21.200.1
+ prefix: "10.21.200.0/24"
+ mtu: 9000
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Disable subnet subnet100
+ purestorage.flasharray.purefa_subnet:
+ name: subnet100
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Delete subnet subnet100
+ purestorage.flasharray.purefa_subnet:
+ name: subnet100
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40"""
+
+RETURN = """
+"""
+
+try:
+ from netaddr import IPNetwork
+
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def _get_subnet(module, array):
+ """Return subnet or None"""
+ subnet = {}
+ try:
+ subnet = array.get_subnet(module.params["name"])
+ except Exception:
+ return None
+ return subnet
+
+
def update_subnet(module, array, subnet):
    """Modify subnet settings.

    Builds the desired state from module params, falling back to the
    subnet's current values for anything not supplied, validates
    gateway/prefix compatibility and VLAN/MTU ranges, applies any
    difference, then reconciles the enabled/disabled state.
    Exits the module via exit_json.
    """
    changed = False
    # Snapshot of the mutable attributes, used for the idempotency diff below.
    current_state = {
        "mtu": subnet["mtu"],
        "vlan": subnet["vlan"],
        "prefix": subnet["prefix"],
        "gateway": subnet["gateway"],
    }
    if not module.params["prefix"]:
        prefix = subnet["prefix"]
    else:
        # A new prefix must contain whichever gateway will be in effect:
        # the requested one, or the existing one when none was given.
        if module.params["gateway"] and module.params["gateway"] not in IPNetwork(
            module.params["prefix"]
        ):
            module.fail_json(msg="Gateway and subnet are not compatible.")
        elif (
            not module.params["gateway"]
            and subnet["gateway"]
            and subnet["gateway"] not in IPNetwork(module.params["prefix"])
        ):
            module.fail_json(msg="Gateway and subnet are not compatible.")
        prefix = module.params["prefix"]
    if not module.params["vlan"]:
        vlan = subnet["vlan"]
    else:
        if not 0 <= module.params["vlan"] <= 4094:
            module.fail_json(
                msg="VLAN {0} is out of range (0 to 4094)".format(module.params["vlan"])
            )
        else:
            vlan = module.params["vlan"]
    if not module.params["mtu"]:
        mtu = subnet["mtu"]
    else:
        if not 568 <= module.params["mtu"] <= 9000:
            module.fail_json(
                msg="MTU {0} is out of range (568 to 9000)".format(module.params["mtu"])
            )
        else:
            mtu = module.params["mtu"]
    if not module.params["gateway"]:
        gateway = subnet["gateway"]
    else:
        # Re-check against the (possibly updated) prefix chosen above.
        if module.params["gateway"] not in IPNetwork(prefix):
            module.fail_json(msg="Gateway and subnet are not compatible.")
        gateway = module.params["gateway"]
    new_state = {"prefix": prefix, "mtu": mtu, "gateway": gateway, "vlan": vlan}
    if new_state != current_state:
        changed = True
        if not module.check_mode:
            try:
                array.set_subnet(
                    subnet["name"],
                    prefix=new_state["prefix"],
                    mtu=new_state["mtu"],
                    vlan=new_state["vlan"],
                    gateway=new_state["gateway"],
                )
            except Exception:
                module.fail_json(
                    msg="Failed to change settings for subnet {0}.".format(
                        subnet["name"]
                    )
                )
    # Enabled state is managed by separate enable/disable API calls.
    if subnet["enabled"] != module.params["enabled"]:
        if module.params["enabled"]:
            changed = True
            if not module.check_mode:
                try:
                    array.enable_subnet(subnet["name"])
                except Exception:
                    module.fail_json(
                        msg="Failed to enable subnet {0}.".format(subnet["name"])
                    )
        else:
            changed = True
            if not module.check_mode:
                try:
                    array.disable_subnet(subnet["name"])
                except Exception:
                    module.fail_json(
                        msg="Failed to disable subnet {0}.".format(subnet["name"])
                    )
    module.exit_json(changed=changed)
+
+
def create_subnet(module, array):
    """Create a subnet and set its enabled state.

    Validates prefix/gateway compatibility and VLAN/MTU ranges before
    issuing the create call.  Exits the module via exit_json.
    """
    changed = True
    if not module.params["prefix"]:
        module.fail_json(msg="Prefix required when creating subnet.")
    if module.params["gateway"] and module.params["gateway"] not in IPNetwork(
        module.params["prefix"]
    ):
        module.fail_json(msg="Gateway and subnet are not compatible.")
    prefix = module.params["prefix"]
    # VLAN defaults to 0 (untagged) when not supplied.
    vlan = module.params["vlan"] or 0
    if not 0 <= vlan <= 4094:
        module.fail_json(
            msg="VLAN {0} is out of range (0 to 4094)".format(module.params["vlan"])
        )
    # BUGFIX: the original left `mtu` unbound (NameError at the create call)
    # when the parameter was falsy (e.g. an explicit 0); fall back to the
    # documented default of 1500 and always range-check.
    mtu = module.params["mtu"] or 1500
    if not 568 <= mtu <= 9000:
        module.fail_json(
            msg="MTU {0} is out of range (568 to 9000)".format(module.params["mtu"])
        )
    # Gateway compatibility was already verified above; empty string means none.
    gateway = module.params["gateway"] or ""
    if not module.check_mode:
        try:
            array.create_subnet(
                module.params["name"],
                prefix=prefix,
                mtu=mtu,
                vlan=vlan,
                gateway=gateway,
            )
        except Exception:
            module.fail_json(
                msg="Failed to create subnet {0}.".format(module.params["name"])
            )
        if module.params["enabled"]:
            try:
                array.enable_subnet(module.params["name"])
            except Exception:
                module.fail_json(
                    msg="Failed to enable subnet {0}.".format(module.params["name"])
                )
        else:
            try:
                array.disable_subnet(module.params["name"])
            except Exception:
                module.fail_json(
                    msg="Failed to disable subnet {0}.".format(module.params["name"])
                )
    module.exit_json(changed=changed)
+
+
def delete_subnet(module, array):
    """Remove the named subnet from the array."""
    if not module.check_mode:
        name = module.params["name"]
        try:
            array.delete_subnet(name)
        except Exception:
            module.fail_json(msg="Failed to delete subnet {0}".format(name))
    module.exit_json(changed=True)
+
+
def main():
    """Entry point - create, update or delete a FlashArray subnet."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            prefix=dict(type="str"),
            state=dict(type="str", default="present", choices=["present", "absent"]),
            gateway=dict(type="str"),
            enabled=dict(type="bool", default=True),
            mtu=dict(type="int", default=1500),
            vlan=dict(type="int"),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    if not HAS_NETADDR:
        module.fail_json(msg="netaddr module is required")

    # Subnet names: 1-63 chars of alphanumerics/'-', starting and ending
    # with an alphanumeric character.
    name_ok = re.match(
        r"[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$", module.params["name"]
    )
    if not name_ok:
        module.fail_json(
            msg="name must be between 1 and 63 characters in length and begin and end "
            "with a letter or number. The name must include at least one letter or '-'."
        )

    array = get_system(module)
    subnet = _get_subnet(module, array)
    if module.params["state"] == "present":
        # Each handler exits the module itself.
        if subnet:
            update_subnet(module, array, subnet)
        else:
            create_subnet(module, array)
    elif subnet:
        delete_subnet(module, array)

    module.exit_json(changed=False)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py
new file mode 100644
index 000000000..adb385ca4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_syslog
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray syslog settings
+description:
+- Configure syslog configuration for Pure Storage FlashArrays.
+- Add or delete an individual syslog server to the existing
  list of servers.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete syslog servers configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ protocol:
+ description:
+ - Protocol which server uses
+ required: true
+ type: str
+ choices: [ tcp, tls, udp ]
+ port:
+ description:
+ - Port at which the server is listening. If no port is specified
+ the system will use 514
+ type: str
+ address:
+ description:
+ - Syslog server address.
+ This field supports IPv4, IPv6 or FQDN.
+ An invalid IP addresses will cause the module to fail.
+ No validation is performed for FQDNs.
+ type: str
+ required: true
+ name:
+ description:
+ - A user-specified name.
+ The name must be locally unique and cannot be changed.
+ - Only applicable with FlashArrays running Purity//FA 6.0 or higher.
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
- name: Delete existing syslog server entries
+ purestorage.flasharray.purefa_syslog:
+ address: syslog1.com
+ protocol: tcp
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set array syslog servers
+ purestorage.flasharray.purefa_syslog:
+ state: present
+ address: syslog1.com
+ protocol: udp
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
+SYSLOG_NAME_API = "2.4"
+
+
def delete_syslog(module, array):
    """Remove a syslog server entry (protocol://address[:port]) if present."""
    changed = False
    full_address = module.params["protocol"] + "://" + module.params["address"]
    if module.params["port"]:
        full_address = full_address + ":" + module.params["port"]

    address_list = array.get(syslogserver=True)["syslogserver"]

    # BUGFIX: the original deleted from the list while iterating over its
    # original index range; in check mode (where there is no early break)
    # the shifted indices could raise IndexError. Remove the first match
    # explicitly instead.
    if address_list and full_address in address_list:
        changed = True
        if not module.check_mode:
            address_list.remove(full_address)
            try:
                array.set(syslogserver=address_list)
            except Exception:
                module.fail_json(
                    msg="Failed to remove syslog server: {0}".format(full_address)
                )

    module.exit_json(changed=changed)
+
+
def add_syslog(module, array, arrayv6):
    """Add a syslog server - via REST v2 when a name is supplied, else REST v1."""
    changed = False
    full_address = module.params["protocol"] + "://" + module.params["address"]
    if module.params["port"]:
        full_address = full_address + ":" + module.params["port"]

    address_list = array.get(syslogserver=True)["syslogserver"]
    exists = bool(address_list) and full_address in address_list

    if not exists:
        changed = True
        if arrayv6 and module.params["name"]:
            # Purity//FA 6.0+ path: named syslog server objects.
            if not module.check_mode:
                res = arrayv6.post_syslog_servers(
                    names=[module.params["name"]],
                    syslog_server=flasharray.SyslogServer(
                        name=module.params["name"], uri=full_address
                    ),
                )
                if res.status_code != 200:
                    module.fail_json(
                        msg="Adding syslog server {0} failed. Error: {1}".format(
                            module.params["name"], res.errors[0].message
                        )
                    )
        elif not module.check_mode:
            # Legacy path: append to the flat server-URI list.
            try:
                address_list.append(full_address)
                array.set(syslogserver=address_list)
            except Exception:
                module.fail_json(
                    msg="Failed to add syslog server: {0}".format(full_address)
                )

    module.exit_json(changed=changed)
+
+
def main():
    """Entry point - add or remove a FlashArray syslog server."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            address=dict(type="str", required=True),
            protocol=dict(type="str", choices=["tcp", "tls", "udp"], required=True),
            port=dict(type="str"),
            name=dict(type="str"),
            state=dict(type="str", default="present", choices=["absent", "present"]),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    array = get_system(module)

    if module.params["name"] and not HAS_PURESTORAGE:
        module.fail_json(msg="py-pure-client sdk is required for this module")

    api_version = array._list_available_rest_versions()

    # Named syslog servers need the v2 REST client (Purity//FA 6.0+).
    if module.params["name"] and SYSLOG_NAME_API in api_version:
        arrayv6 = get_array(module)
    else:
        arrayv6 = None

    if module.params["state"] == "absent":
        delete_syslog(module, array)
    else:
        add_syslog(module, array, arrayv6)

    module.exit_json(changed=False)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py
new file mode 100644
index 000000000..fce6dffa3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_syslog_settings
+version_added: '1.10.0'
+short_description: Manage FlashArray syslog servers settings
+description:
+- Manage FlashArray syslog servers settings
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ severity:
+ description:
+ - Logging severity threshold for which events will be forwarded to the
+ configured syslog servers.
+ default: info
+ choices: [ debug, info, notice ]
+ type: str
+ ca_certificate:
+ type: str
+ description:
    - The text of the CA certificate for configured syslog servers.
+ - Includes the "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----" lines
+ - Does not exceed 3000 characters in length
    - To delete the existing CA certificate use the special string `DELETE`
+ tls_audit:
+ type: bool
+ default: true
+ description:
+ - If messages that are necessary in order to audit TLS negotiations
      performed by the array are forwarded to the syslog servers.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Change syslog servers settings
    purestorage.flasharray.purefa_syslog_settings:
+ tls_audit: false
+ severity: debug
+ ca_certificate: "{{lookup('file', 'example.crt') }}"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
- name: Delete existing CA certificate for syslog servers settings
  purestorage.flasharray.purefa_syslog_settings:
+ ca_certificate: DELETE
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.9"
+
+
def main():
    """Manage array-wide syslog server settings.

    Compares the requested severity, TLS-audit flag and CA certificate
    against the current settings and issues a single PATCH when any of
    them differ.  Exits the module via exit_json.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            severity=dict(
                type="str",
                default="info",
                choices=["info", "debug", "notice"],
            ),
            tls_audit=dict(type="bool", default=True),
            ca_certificate=dict(type="str", no_log=True),
        )
    )

    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
    )

    if not HAS_PURESTORAGE:
        module.fail_json(msg="py-pure-client sdk is required for this module")

    array = get_system(module)
    api_version = array._list_available_rest_versions()

    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(
            msg="Purity//FA version not supported. Minimum version required: 6.2.0"
        )

    array = get_array(module)
    changed = cert_change = False
    # Purity rejects certificates over 3000 characters; fail early.
    if module.params["ca_certificate"] and len(module.params["ca_certificate"]) > 3000:
        module.fail_json(msg="Certificate exceeds 3000 characters")
    current = list(array.get_syslog_servers_settings().items)[0]
    # Older responses may omit the ca_certificate attribute entirely;
    # normalise to None so the comparisons below are safe.
    try:
        if current.ca_certificate:
            pass
    except AttributeError:
        current.ca_certificate = None
    if current.tls_audit_enabled != module.params["tls_audit"]:
        changed = True
        new_tls = module.params["tls_audit"]
    else:
        new_tls = current.tls_audit_enabled
    if current.logging_severity != module.params["severity"]:
        changed = True
        new_sev = module.params["severity"]
    else:
        new_sev = current.logging_severity
    # The literal value "DELETE" clears the stored CA certificate.
    if module.params["ca_certificate"]:
        if module.params["ca_certificate"].upper() == "DELETE":
            if current.ca_certificate:
                cert_change = changed = True
                new_cert = ""
        elif current.ca_certificate != module.params["ca_certificate"]:
            cert_change = changed = True
            new_cert = module.params["ca_certificate"]
    if changed and not module.check_mode:
        # Only include ca_certificate in the PATCH body when it changed,
        # otherwise an unchanged/absent cert would be overwritten.
        if cert_change:
            res = array.patch_syslog_servers_settings(
                syslog_server_settings=flasharray.SyslogServerSettings(
                    ca_certificate=new_cert,
                    tls_audit_enabled=new_tls,
                    logging_severity=new_sev,
                )
            )
        else:
            res = array.patch_syslog_servers_settings(
                syslog_server_settings=flasharray.SyslogServerSettings(
                    tls_audit_enabled=new_tls, logging_severity=new_sev
                )
            )
        if res.status_code != 200:
            module.fail_json(
                msg="Changing syslog settings failed. Error: {0}".format(
                    res.errors[0].message
                )
            )

    module.exit_json(changed=changed)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py
new file mode 100644
index 000000000..e5d041fa3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_timeout
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray GUI idle timeout
+description:
+- Configure GUI idle timeout for Pure Storage FlashArrays.
+- This does not affect existing GUI sessions.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set or disable the GUI idle timeout
+ default: present
+ type: str
+ choices: [ present, absent ]
+ timeout:
+ description:
+ - Minutes for idle timeout.
+ type: int
+ default: 30
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Set GUI idle timeout to 25 minutes
+ purestorage.flasharray.purefa_timeout:
+ timeout: 25
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable idle timeout
+ purestorage.flasharray.purefa_timeout:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
def set_timeout(module, array):
    """Apply the requested GUI idle timeout (in minutes) to the array."""
    if not module.check_mode:
        try:
            array.set(idle_timeout=module.params["timeout"])
        except Exception:
            module.fail_json(msg="Failed to set GUI idle timeout")
    module.exit_json(changed=True)
+
+
def disable_timeout(module, array):
    """Turn the GUI idle timeout off (a value of 0 disables it)."""
    if not module.check_mode:
        try:
            array.set(idle_timeout=0)
        except Exception:
            module.fail_json(msg="Failed to disable GUI idle timeout")
    module.exit_json(changed=True)
+
+
def main():
    """Entry point - set or disable the FlashArray GUI idle timeout."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            timeout=dict(type="int", default=30),
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    state = module.params["state"]
    timeout = module.params["timeout"]
    # BUGFIX: the original chained comparison `5 < timeout > 180` only
    # rejected values above 180; values below 5 slipped through despite
    # the documented 5-180 minute range (0 remains the "disabled" value).
    if timeout != 0 and not 5 <= timeout <= 180:
        module.fail_json(msg="Timeout value must be between 5 and 180 minutes")
    array = get_system(module)
    current_timeout = array.get(idle_timeout=True)["idle_timeout"]
    if state == "present" and current_timeout != timeout:
        set_timeout(module, array)
    elif state == "absent" and current_timeout != 0:
        disable_timeout(module, array)

    module.exit_json(changed=False)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py
new file mode 100644
index 000000000..fa66fe308
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_token
+version_added: '1.0.0'
+short_description: Create or delete an API token for an existing admin user
+description:
+- Create or delete an API token for an existing admin user.
+- Uses username/password to create/delete the API token.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete API token
+ type: str
+ default: present
+ choices: [ present, absent ]
+ recreate:
+ description:
+ - Recreates the API token, overwriting the existing API token if present
+ type: bool
+ default: false
+ username:
+ description:
+ - Username of the admin user to create API token for
+ type: str
+ password:
+ description:
+ - Password of the admin user to create API token for.
+ type: str
+ fa_url:
+ description:
+ - FlashArray management IPv4 address or Hostname.
+ type: str
+ timeout:
+ description:
+ - The duration of API token validity.
+ - Valid values are weeks (w), days(d), hours(h), minutes(m) and seconds(s).
+ type: str
+"""
+
+EXAMPLES = r"""
+- name: Create API token with no expiration
+ purefa_token:
+ username: pureuser
+ password: secret
+ state: present
+ fa_url: 10.10.10.2
+- name: Create API token with 23 days expiration
+ purefa_token:
+ username: pureuser
+ password: secret
+ state: present
+ timeout: 23d
+ fa_url: 10.10.10.2
+- name: Delete API token
+ purefa_token:
+ username: pureuser
+ password: secret
+ state: absent
+ fa_url: 10.10.10.2
+"""
+
+RETURN = r"""
+purefa_token:
+ description: API token for user
+ returned: changed
+ type: str
+ sample: e649f439-49be-3806-f774-a35cbbc4c2d2
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+)
+from os import environ
+import platform
+
+VERSION = 1.0
+USER_AGENT_BASE = "Ansible_token"
+TIMEOUT_API_VERSION = "2.2"
+
+HAS_PURESTORAGE = True
+try:
+ from purestorage import purestorage
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+def _convert_time_to_millisecs(timeout):
+ if timeout[-1:].lower() not in ["w", "d", "h", "m", "s"]:
+ return 0
+ try:
+ if timeout[-1:].lower() == "w":
+ return int(timeout[:-1]) * 7 * 86400000
+ elif timeout[-1:].lower() == "d":
+ return int(timeout[:-1]) * 86400000
+ elif timeout[-1:].lower() == "h":
+ return int(timeout[:-1]) * 3600000
+ elif timeout[-1:].lower() == "m":
+ return int(timeout[:-1]) * 60000
+ except Exception:
+ return 0
+
+
def get_session(module):
    """Return an authenticated purestorage.FlashArray session or fail.

    Credentials come from the fa_url/username/password module parameters,
    with the PUREFA_URL / PUREFA_USERNAME / PUREFA_PASSWORD environment
    variables as a fallback for anything missing.
    """
    user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
        "base": USER_AGENT_BASE,
        "class": __name__,
        "version": VERSION,
        "platform": platform.platform(),
    }

    if not HAS_PURESTORAGE:
        module.fail_json(msg="purestorage SDK is not installed.")

    array_name = module.params["fa_url"] or environ.get("PUREFA_URL")
    username = module.params["username"] or environ.get("PUREFA_USERNAME")
    password = module.params["password"] or environ.get("PUREFA_PASSWORD")

    # BUGFIX: the original left `system` unbound (UnboundLocalError) when
    # credentials were only partially supplied; fail with guidance instead.
    if not (array_name and username and password):
        module.fail_json(
            msg="You must set PUREFA_URL and PUREFA_USERNAME, PUREFA_PASSWORD "
            "environment variables or the fa_url, username and password "
            "module arguments"
        )

    system = purestorage.FlashArray(
        array_name, username=username, password=password, user_agent=user_agent
    )
    try:
        # Cheap round-trip to verify the credentials actually work.
        system.get()
    except Exception:
        module.fail_json(
            msg="Pure Storage FlashArray authentication failed. Check your credentials"
        )
    return system
+
+
def main():
    """Create, recreate or delete an API token for an existing admin user.

    Authenticates with username/password (or the PUREFA_* environment
    variables) rather than an API token, since the token itself is what
    is being managed.  Returns the token as `purefa_token`.
    """
    # Deliberately not purefa_argument_spec(): this module does not take
    # an api_token for authentication.
    argument_spec = dict(
        fa_url=dict(required=False),
        username=dict(type="str", required=False),
        password=dict(no_log=True, required=False),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        recreate=dict(type="bool", default=False),
        timeout=dict(type="str"),
    )

    module = AnsibleModule(argument_spec, supports_check_mode=False)
    array = get_session(module)
    changed = False

    if module.params["username"]:
        username = module.params["username"]
    else:
        username = environ.get("PUREFA_USERNAME")
    state = module.params["state"]
    recreate = module.params["recreate"]

    result = array.get_api_token(admin=username)
    api_version = array._list_available_rest_versions()
    # Create when missing, delete+create on recreate, delete when absent.
    if state == "present" and result["api_token"] is None:
        result = array.create_api_token(admin=username)
        changed = True
    elif state == "present" and recreate:
        result = array.delete_api_token(admin=username)
        result = array.create_api_token(admin=username)
        changed = True
    elif state == "absent" and result["api_token"]:
        result = array.delete_api_token(admin=username)
        changed = True

    api_token = result["api_token"]

    # Token lifetime (timeout) needs the v2 REST API; applying it re-issues
    # the token, so the returned value comes from the v2 response.
    if (
        TIMEOUT_API_VERSION in api_version
        and module.params["timeout"]
        and state == "present"
    ):
        # Reuse the freshly obtained token to open the v2 connection.
        module.params["api_token"] = api_token
        array6 = get_array(module)
        ttl = _convert_time_to_millisecs(module.params["timeout"])
        if ttl != 0:
            changed = True
            array6.delete_admins_api_tokens(names=[username])
            res = array6.post_admins_api_tokens(names=[username], timeout=ttl)
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to set token lifetime. Error: {0}".format(
                        res.errors[0].message
                    )
                )
            else:
                api_token = list(res.items)[0].api_token.token
    module.exit_json(changed=changed, purefa_token=api_token)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py
new file mode 100644
index 000000000..8544c5393
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_user
+version_added: '1.0.0'
+short_description: Create, modify or delete FlashArray local user account
+description:
+- Create, modify or delete local users on a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create, delete or update local user account
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - The name of the local user account
+ type: str
+ required: true
+ role:
+ description:
+ - Sets the local user's access level to the array
+ type: str
+ choices: [ readonly, ops_admin, storage_admin, array_admin ]
+ password:
+ description:
+ - Password for the local user.
+ type: str
+ old_password:
+ description:
+ - If changing an existing password, you must provide the old password for security
+ type: str
+ api:
+ description:
+ - Define whether to create an API token for this user
+ - Token can be exposed using the I(debug) module
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new user ansible with API token
+ purestorage.flasharray.purefa_user:
+ name: ansible
+ password: apassword
+ role: storage_admin
+ api: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: result
+
+ debug:
+ msg: "API Token: {{ result['user_info']['user_api'] }}"
+
+- name: Change role type for existing user
+ purestorage.flasharray.purefa_user:
+ name: ansible
+ role: array_admin
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Change password type for existing user (NOT IDEMPOTENT)
+ purestorage.flasharray.purefa_user:
+ name: ansible
+ password: anewpassword
+ old_password: apassword
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Change API token for existing user
+ purestorage.flasharray.purefa_user:
+ name: ansible
+ api: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: result
+
+ debug:
+ msg: "API Token: {{ result['user_info']['user_api'] }}"
+"""
+
+RETURN = r"""
+"""
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "1.14"
+
+
def get_user(module, array):
    """Return the local user account dict matching the module's name, or None."""
    wanted = module.params["name"]
    for account in array.list_admins():
        if account["name"] == wanted:
            return account
    return None
+
+
def create_user(module, array):
    """Create or update a local user account.

    New accounts get the requested role (default ``readonly``) and,
    optionally, an API token. Existing accounts may have their password,
    API token and role updated. Always exits the module via ``exit_json``;
    ``user_info`` carries a freshly generated API token when one was made.
    """
    changed = api_changed = role_changed = passwd_changed = False
    user = get_user(module, array)
    role = module.params["role"]
    user_token = {}
    if not user:
        changed = True
        if not module.check_mode:
            try:
                if not role:
                    # New accounts default to the least-privileged role.
                    role = "readonly"
                array.create_admin(
                    module.params["name"], role=role, password=module.params["password"]
                )
                if module.params["api"]:
                    try:
                        user_token["user_api"] = array.create_api_token(
                            module.params["name"]
                        )["api_token"]
                    except Exception:
                        # Token creation failed - roll back the new account.
                        # (Was array.delete_user(), which is not a purestorage
                        # SDK method; delete_admin matches delete_user() below.)
                        array.delete_admin(module.params["name"])
                        module.fail_json(
                            msg="Local User {0}: Creation failed".format(
                                module.params["name"]
                            )
                        )
            except Exception:
                module.fail_json(
                    msg="Local User {0}: Creation failed".format(module.params["name"])
                )
    else:
        # Password changes are not idempotent: without the old password we
        # cannot tell whether the new one is already set, so report changed.
        if module.params["password"] and not module.params["old_password"]:
            module.exit_json(changed=changed)
        if module.params["password"] and module.params["old_password"]:
            if module.params["old_password"] and (
                module.params["password"] != module.params["old_password"]
            ):
                passwd_changed = True
                if not module.check_mode:
                    try:
                        array.set_admin(
                            module.params["name"],
                            password=module.params["password"],
                            old_password=module.params["old_password"],
                        )
                    except Exception:
                        module.fail_json(
                            msg="Local User {0}: Password reset failed. "
                            "Check old password.".format(module.params["name"])
                        )
            else:
                module.fail_json(
                    msg="Local User Account {0}: Password change failed - "
                    "Check both old and new passwords".format(module.params["name"])
                )
        if module.params["api"]:
            # Recreate the API token: drop any existing one, then mint a new one.
            try:
                if not array.get_api_token(module.params["name"])["api_token"] is None:
                    if not module.check_mode:
                        array.delete_api_token(module.params["name"])
                api_changed = True
                if not module.check_mode:
                    user_token["user_api"] = array.create_api_token(
                        module.params["name"]
                    )["api_token"]
            except Exception:
                module.fail_json(
                    msg="Local User {0}: API token change failed".format(
                        module.params["name"]
                    )
                )
        if module.params["role"] and module.params["role"] != user["role"]:
            if module.params["name"] != "pureuser":
                role_changed = True
                if not module.check_mode:
                    try:
                        array.set_admin(
                            module.params["name"], role=module.params["role"]
                        )
                    except Exception:
                        # Message typo fixed: was "Role changed failed".
                        module.fail_json(
                            msg="Local User {0}: Role change failed".format(
                                module.params["name"]
                            )
                        )
            else:
                # The built-in pureuser account's role is immutable.
                module.warn("Role for 'pureuser' cannot be modified.")
        changed = bool(passwd_changed or role_changed or api_changed)
    module.exit_json(changed=changed, user_info=user_token)
+
+
def delete_user(module, array):
    """Delete a local user account if it exists.

    Exits the module via ``exit_json``; ``changed`` is False when the
    account was already absent.
    """
    changed = False
    if get_user(module, array):
        changed = True
        if not module.check_mode:
            try:
                array.delete_admin(module.params["name"])
            except Exception:
                # Message previously said "Object Store Account" - a
                # copy-paste from the FlashBlade collection; this module
                # manages local user accounts.
                module.fail_json(
                    msg="Local User Account {0}: Deletion failed".format(
                        module.params["name"]
                    )
                )
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: validate arguments, check REST support, dispatch on state."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type="str"),
            role=dict(
                type="str",
                choices=["readonly", "ops_admin", "storage_admin", "array_admin"],
            ),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            password=dict(type="str", no_log=True),
            old_password=dict(type="str", no_log=True),
            api=dict(type="bool", default=False),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    state = module.params["state"]
    array = get_system(module)
    # Private pypure-client attribute: the REST API versions the target
    # array supports, used to gate feature availability.
    api_version = array._list_available_rest_versions()

    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(
            msg="FlashArray REST version not supported. "
            "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
        )
    # 1-32 chars, lowercase alphanumerics plus '-', first and last char
    # must be alphanumeric.
    pattern = re.compile("^[a-z0-9]([a-z0-9-]{0,30}[a-z0-9])?$")
    if not pattern.match(module.params["name"]):
        module.fail_json(
            msg="name must contain a minimum of 1 and a maximum of 32 characters "
            "(alphanumeric or `-`). All letters must be lowercase."
        )

    # delete_user/create_user exit the module themselves via exit_json.
    if state == "absent":
        delete_user(module, array)
    elif state == "present":
        create_user(module, array)
    else:
        module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py
new file mode 100644
index 000000000..febb0d5a2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py
@@ -0,0 +1,685 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_vg
+version_added: '1.0.0'
+short_description: Manage volume groups on Pure Storage FlashArrays
+description:
+- Create, delete or modify volume groups on Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the volume group.
+ - Multi-volume-group support available from Purity//FA 6.0.0
+ B(***NOTE***) Manual deletion or eradication of individual volume groups created
+ using multi-volume-group will cause idempotency to fail
+ - Multi-volume-group support only exists for volume group creation
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the volume group should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ eradicate:
+ description:
+ - Define whether to eradicate the volume group on delete and leave in trash.
+ type : bool
+ default: false
+ bw_qos:
+ description:
+ - Bandwidth limit for vgroup in M or G units.
+ M will set MB/s
+ G will set GB/s
+ To clear an existing QoS setting use 0 (zero)
+ type: str
+ iops_qos:
+ description:
+ - IOPs limit for vgroup - use value or K or M
+ K will mean 1000
+ M will mean 1000000
+ To clear an existing IOPs setting use 0 (zero)
+ type: str
+ count:
+ description:
+ - Number of volume groups to be created in a multiple volume group creation
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ start:
+ description:
+ - Number at which to start the multiple volume group creation index
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ default: 0
+ digits:
+ description:
+ - Number of digits to use for multiple volume group count. This
+ will pad the index number with zeros where necessary
+ - Only supported from Purity//FA v6.0.0 and higher
+ - Range is between 1 and 10
+ type: int
+ default: 1
+ suffix:
+ description:
+ - Suffix string, if required, for multiple volume group create
+ - Volume group names will be formed as I(<name>#I<suffix>), where
+ I(#) is a placeholder for the volume index
+ See associated descriptions
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: str
+ priority_operator:
+ description:
+ - DMM Priority Adjustment operator
+ type: str
+ choices: [ +, '-' ]
+ default: +
+ version_added: '1.13.0'
+ priority_value:
+ description:
+ - DMM Priority Adjustment value
+ type: int
+ choices: [ 0, 10 ]
+ default: 0
+ version_added: '1.13.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new volume group
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ bw_qos: 50M
+ iops_qos: 100
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create 10 volume groups of pattern foo#bar with QoS
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ suffix: bar
+ count: 10
+ start: 10
+ digits: 3
+ bw_qos: 50M
+ iops_qos: 100
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update volume group QoS limits
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ bw_qos: 0
+ iops_qos: 5555
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update volume group DMM Priority Adjustment (Purity//FA 6.1.2+)
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ priority_operator: '-'
+ priority_value: 10
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Destroy volume group
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Recover deleted volume group - no changes are made to the volume group on recovery
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Destroy and Eradicate volume group
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
+VGROUP_API_VERSION = "1.13"
+VG_IOPS_VERSION = "1.17"
+MULTI_VG_VERSION = "2.2"
+PRIORITY_API_VERSION = "2.11"
+
+
def human_to_bytes(size):
    """Convert a human-readable size string (e.g. ``2G``, ``30M``) to bytes.

    The final character is read as a binary (1024-based) unit suffix
    (K/M/G/T/P, case-insensitive) and the leading characters as the
    magnitude. Returns 0 for any string that does not match this form.
    """
    # Table-driven rewrite; the original shadowed the builtin `bytes`.
    multipliers = {
        "K": 1024,
        "M": 1024**2,
        "G": 1024**3,
        "T": 1024**4,
        "P": 1024**5,
    }
    magnitude = size[:-1]
    unit = size[-1].upper()
    if magnitude.isdigit() and unit in multipliers:
        return int(magnitude) * multipliers[unit]
    return 0
+
+
def human_to_real(iops):
    """Convert a human-readable IOPs string (e.g. ``2K``, ``30M``) to a number.

    ``K`` multiplies by 1000 and ``M`` by 1000000; a plain digit string
    is returned as its integer value. Returns 0 if the argument has
    unexpected form.
    """
    digits = iops[:-1]
    unit = iops[-1].upper()
    if unit.isdigit():
        # No suffix: the whole string is the value. The original returned
        # the raw *string* here, which broke the numeric comparison against
        # the current iops_limit in update_vgroup() (str != int is always
        # True, defeating idempotency) and passed non-numeric input through.
        return int(iops) if iops.isdigit() else 0
    if digits.isdigit():
        value = int(digits)
        if unit == "M":
            return value * 1000000
        if unit == "K":
            return value * 1000
        return 0
    return 0
+
+
def get_multi_vgroups(module, destroyed=False):
    """Return True if every volume group in the multi-vgroup set exists."""
    array = get_array(module)
    start = module.params["start"]
    # Names are <name><zero-padded index><suffix> for each index in the run.
    names = [
        module.params["name"]
        + str(index).zfill(module.params["digits"])
        + module.params["suffix"]
        for index in range(start, start + module.params["count"])
    ]
    response = array.get_volume_groups(names=names, destroyed=destroyed)
    return bool(response.status_code == 200)
+
+
def get_pending_vgroup(module, array):
    """Return the destroyed-but-recoverable volume group dict, or None."""
    wanted = module.params["name"]
    for candidate in array.list_vgroups(pending=True):
        # Only groups still within their eradication timer are recoverable.
        if candidate["name"] == wanted and candidate["time_remaining"]:
            return candidate
    return None
+
+
def get_vgroup(module, array):
    """Return the live volume group dict matching the module's name, or None."""
    wanted = module.params["name"]
    for candidate in array.list_vgroups():
        if candidate["name"] == wanted:
            return candidate
    return None
+
+
def make_vgroup(module, array):
    """Create a volume group, optionally applying QoS limits and DMM priority.

    Exits the module via ``exit_json``. QoS limits are only attempted when
    the array's REST API supports them; DMM priority is applied via the v2
    client when available.
    """
    changed = True
    api_version = array._list_available_rest_versions()
    # BUG FIX: the original condition was
    #     bw_qos or iops_qos and VG_IOPS_VERSION in api_version
    # where `and` binds tighter, so the API-version check was skipped
    # entirely whenever bw_qos was set. QoS must only be attempted on
    # arrays whose REST API supports it.
    if (module.params["bw_qos"] or module.params["iops_qos"]) and (
        VG_IOPS_VERSION in api_version
    ):
        if module.params["bw_qos"] and not module.params["iops_qos"]:
            # Bandwidth-only create; valid range is 1MB/s to 512GB/s.
            if int(human_to_bytes(module.params["bw_qos"])) in range(
                1048576, 549755813888
            ):
                if not module.check_mode:
                    try:
                        array.create_vgroup(
                            module.params["name"],
                            bandwidth_limit=module.params["bw_qos"],
                        )
                    except Exception:
                        module.fail_json(
                            msg="Vgroup {0} creation failed.".format(
                                module.params["name"]
                            )
                        )
            else:
                module.fail_json(
                    msg="Bandwidth QoS value {0} out of range.".format(
                        module.params["bw_qos"]
                    )
                )
        elif module.params["iops_qos"] and not module.params["bw_qos"]:
            # IOPs-only create; valid range is 100 to 100M.
            if int(human_to_real(module.params["iops_qos"])) in range(100, 100000000):
                if not module.check_mode:
                    try:
                        array.create_vgroup(
                            module.params["name"], iops_limit=module.params["iops_qos"]
                        )
                    except Exception:
                        module.fail_json(
                            msg="Vgroup {0} creation failed.".format(
                                module.params["name"]
                            )
                        )
            else:
                module.fail_json(
                    msg="IOPs QoS value {0} out of range.".format(
                        module.params["iops_qos"]
                    )
                )
        else:
            # Both limits supplied - validate together before creating.
            bw_qos_size = int(human_to_bytes(module.params["bw_qos"]))
            if int(human_to_real(module.params["iops_qos"])) in range(
                100, 100000000
            ) and bw_qos_size in range(1048576, 549755813888):
                if not module.check_mode:
                    try:
                        array.create_vgroup(
                            module.params["name"],
                            iops_limit=module.params["iops_qos"],
                            bandwidth_limit=module.params["bw_qos"],
                        )
                    except Exception:
                        module.fail_json(
                            msg="Vgroup {0} creation failed.".format(
                                module.params["name"]
                            )
                        )
            else:
                module.fail_json(msg="IOPs or Bandwidth QoS value out of range.")
    else:
        # No QoS requested (or QoS unsupported by this API) - plain create.
        if not module.check_mode:
            try:
                array.create_vgroup(module.params["name"])
            except Exception:
                module.fail_json(
                    msg="creation of volume group {0} failed.".format(
                        module.params["name"]
                    )
                )
    if PRIORITY_API_VERSION in api_version:
        # Priority adjustment requires the v2 (py-pure-client) API.
        array = get_array(module)
        volume_group = flasharray.VolumeGroup(
            priority_adjustment=flasharray.PriorityAdjustment(
                priority_adjustment_operator=module.params["priority_operator"],
                priority_adjustment_value=module.params["priority_value"],
            ),
        )
        if not module.check_mode:
            res = array.patch_volume_groups(
                names=[module.params["name"]], volume_group=volume_group
            )
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to set priority adjustment for volume group {0}. Error: {1}".format(
                        module.params["name"], res.errors[0].message
                    )
                )

    module.exit_json(changed=changed)
+
+
def make_multi_vgroups(module, array):
    """Create multiple Volume Groups"""
    # Always reports changed: the caller has already verified (via
    # get_multi_vgroups) that the groups do not exist.
    changed = True
    bw_qos_size = iops_qos_size = 0
    names = []
    api_version = array._list_available_rest_versions()
    # Multi-vgroup creation uses the v2 (py-pure-client) API.
    array = get_array(module)
    # Build the full name list: <name><zero-padded index><suffix>.
    for vg_num in range(
        module.params["start"], module.params["count"] + module.params["start"]
    ):
        names.append(
            module.params["name"]
            + str(vg_num).zfill(module.params["digits"])
            + module.params["suffix"]
        )
    # Validate any requested QoS limits before attempting creation.
    if module.params["bw_qos"]:
        bw_qos = int(human_to_bytes(module.params["bw_qos"]))
        if bw_qos in range(1048576, 549755813888):  # 1MB/s .. 512GB/s
            bw_qos_size = bw_qos
        else:
            module.fail_json(msg="Bandwidth QoS value out of range.")
    if module.params["iops_qos"]:
        iops_qos = int(human_to_real(module.params["iops_qos"]))
        if iops_qos in range(100, 100000000):
            iops_qos_size = iops_qos
        else:
            module.fail_json(msg="IOPs QoS value out of range.")
    # Build the creation body for whichever QoS limits were supplied;
    # the four branches cover every bandwidth/IOPs combination.
    if bw_qos_size != 0 and iops_qos_size != 0:
        volume_group = flasharray.VolumeGroupPost(
            qos=flasharray.Qos(bandwidth_limit=bw_qos_size, iops_limit=iops_qos_size)
        )
    elif bw_qos_size == 0 and iops_qos_size == 0:
        volume_group = flasharray.VolumeGroupPost()
    elif bw_qos_size == 0 and iops_qos_size != 0:
        volume_group = flasharray.VolumeGroupPost(
            qos=flasharray.Qos(iops_limit=iops_qos_size)
        )
    elif bw_qos_size != 0 and iops_qos_size == 0:
        volume_group = flasharray.VolumeGroupPost(
            qos=flasharray.Qos(bandwidth_limit=bw_qos_size)
        )
    if not module.check_mode:
        # Single bulk POST creates all the groups at once.
        res = array.post_volume_groups(names=names, volume_group=volume_group)
        if res.status_code != 200:
            module.fail_json(
                msg="Multi-Vgroup {0}#{1} creation failed: {2}".format(
                    module.params["name"],
                    module.params["suffix"],
                    res.errors[0].message,
                )
            )
        # Apply DMM priority adjustment where the API supports it.
        if PRIORITY_API_VERSION in api_version:
            volume_group = flasharray.VolumeGroup(
                priority_adjustment=flasharray.PriorityAdjustment(
                    priority_adjustment_operator=module.params["priority_operator"],
                    priority_adjustment_value=module.params["priority_value"],
                ),
            )
            res = array.patch_volume_groups(names=names, volume_group=volume_group)
            if res.status_code != 200:
                module.fail_json(
                    msg="Failed to set priority adjustments for multi-vgroup {0}#{1}. Error: {2}".format(
                        module.params["name"],
                        module.params["suffix"],
                        res.errors[0].message,
                    )
                )
    module.exit_json(changed=changed)
+
+
def update_vgroup(module, array):
    """Update an existing volume group's DMM priority and QoS limits.

    Exits the module via ``exit_json``; ``changed`` reflects whether any
    priority or QoS setting was (or would be) modified.
    """
    changed = False
    api_version = array._list_available_rest_versions()
    if PRIORITY_API_VERSION in api_version:
        # DMM priority reconciliation uses the v2 (py-pure-client) API.
        arrayv6 = get_array(module)
        vg_prio = list(arrayv6.get_volume_groups(names=[module.params["name"]]).items)[
            0
        ].priority_adjustment
        if (
            module.params["priority_operator"]
            and vg_prio.priority_adjustment_operator
            != module.params["priority_operator"]
        ):
            changed = True
            new_operator = module.params["priority_operator"]
        else:
            new_operator = vg_prio.priority_adjustment_operator
        if vg_prio.priority_adjustment_value != module.params["priority_value"]:
            changed = True
            new_value = module.params["priority_value"]
        else:
            new_value = vg_prio.priority_adjustment_value
        if changed and not module.check_mode:
            volume_group = flasharray.VolumeGroup(
                priority_adjustment=flasharray.PriorityAdjustment(
                    priority_adjustment_operator=new_operator,
                    priority_adjustment_value=new_value,
                )
            )
            res = arrayv6.patch_volume_groups(
                names=[module.params["name"]], volume_group=volume_group
            )
            if res.status_code != 200:
                # Typo fixed: message previously read "changfe".
                module.fail_json(
                    msg="Failed to change DMM Priority for volume group {0}. Error: {1}".format(
                        module.params["name"], res.errors[0].message
                    )
                )
    if VG_IOPS_VERSION in api_version:
        try:
            vg_qos = array.get_vgroup(module.params["name"], qos=True)
        except Exception:
            module.fail_json(
                msg="Failed to get QoS settings for vgroup {0}.".format(
                    module.params["name"]
                )
            )
        # Normalise "no limit" (None) to 0 so the comparisons below work.
        # (The original re-tested VG_IOPS_VERSION here redundantly.)
        if vg_qos["bandwidth_limit"] is None:
            vg_qos["bandwidth_limit"] = 0
        if vg_qos["iops_limit"] is None:
            vg_qos["iops_limit"] = 0
    if module.params["bw_qos"] and VG_IOPS_VERSION in api_version:
        if human_to_bytes(module.params["bw_qos"]) != vg_qos["bandwidth_limit"]:
            if module.params["bw_qos"] == "0":
                # "0" means clear the limit; the API takes an empty string.
                changed = True
                if not module.check_mode:
                    try:
                        array.set_vgroup(module.params["name"], bandwidth_limit="")
                    except Exception:
                        module.fail_json(
                            msg="Vgroup {0} Bandwidth QoS removal failed.".format(
                                module.params["name"]
                            )
                        )
            elif int(human_to_bytes(module.params["bw_qos"])) in range(
                1048576, 549755813888
            ):
                changed = True
                if not module.check_mode:
                    try:
                        array.set_vgroup(
                            module.params["name"],
                            bandwidth_limit=module.params["bw_qos"],
                        )
                    except Exception:
                        module.fail_json(
                            msg="Vgroup {0} Bandwidth QoS change failed.".format(
                                module.params["name"]
                            )
                        )
            else:
                module.fail_json(
                    msg="Bandwidth QoS value {0} out of range.".format(
                        module.params["bw_qos"]
                    )
                )
    if module.params["iops_qos"] and VG_IOPS_VERSION in api_version:
        if human_to_real(module.params["iops_qos"]) != vg_qos["iops_limit"]:
            if module.params["iops_qos"] == "0":
                changed = True
                if not module.check_mode:
                    try:
                        array.set_vgroup(module.params["name"], iops_limit="")
                    except Exception:
                        module.fail_json(
                            msg="Vgroup {0} IOPs QoS removal failed.".format(
                                module.params["name"]
                            )
                        )
            elif int(human_to_real(module.params["iops_qos"])) in range(100, 100000000):
                changed = True
                if not module.check_mode:
                    try:
                        array.set_vgroup(
                            module.params["name"], iops_limit=module.params["iops_qos"]
                        )
                    except Exception:
                        module.fail_json(
                            msg="Vgroup {0} IOPs QoS change failed.".format(
                                module.params["name"]
                            )
                        )
            else:
                # BUG FIX: this branch previously reported "Bandwidth QoS
                # value ... out of range" formatted with bw_qos, even
                # though it validates the IOPs limit.
                module.fail_json(
                    msg="IOPs QoS value {0} out of range.".format(
                        module.params["iops_qos"]
                    )
                )

    module.exit_json(changed=changed)
+
+
def recover_vgroup(module, array):
    """Recover a destroyed (pending-eradication) volume group."""
    changed = True
    if not module.check_mode:
        vg_name = module.params["name"]
        try:
            array.recover_vgroup(vg_name)
        except Exception:
            module.fail_json(
                msg="Recovery of volume group {0} failed.".format(vg_name)
            )

    module.exit_json(changed=changed)
+
+
def eradicate_vgroup(module, array):
    """Permanently eradicate a destroyed volume group."""
    changed = True
    if not module.check_mode:
        vg_name = module.params["name"]
        try:
            array.eradicate_vgroup(vg_name)
        except Exception:
            module.fail_json(
                msg="Eradicating vgroup {0} failed.".format(vg_name)
            )
    module.exit_json(changed=changed)
+
+
def delete_vgroup(module, array):
    """Destroy a volume group, optionally eradicating it afterwards."""
    changed = True
    if not module.check_mode:
        vg_name = module.params["name"]
        try:
            array.destroy_vgroup(vg_name)
        except Exception:
            module.fail_json(
                msg="Deleting vgroup {0} failed.".format(vg_name)
            )
    # eradicate_vgroup() exits the module itself when invoked.
    if module.params["eradicate"]:
        eradicate_vgroup(module, array)

    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: dispatch create/update/delete/recover for vgroups."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            bw_qos=dict(type="str"),
            iops_qos=dict(type="str"),
            count=dict(type="int"),
            start=dict(type="int", default=0),
            digits=dict(type="int", default=1),
            suffix=dict(type="str"),
            priority_operator=dict(type="str", choices=["+", "-"], default="+"),
            priority_value=dict(type="int", choices=[0, 10], default=0),
            eradicate=dict(type="bool", default=False),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    state = module.params["state"]
    array = get_system(module)
    api_version = array._list_available_rest_versions()
    if VGROUP_API_VERSION not in api_version:
        module.fail_json(msg="API version does not support volume groups.")

    vgroup = get_vgroup(module, array)
    xvgroup = get_pending_vgroup(module, array)

    if module.params["count"]:
        # Multi-vgroup path requires the v2 SDK and a supporting API.
        if not HAS_PURESTORAGE:
            module.fail_json(
                msg="py-pure-client sdk is required to support 'count' parameter"
            )
        if MULTI_VG_VERSION not in api_version:
            module.fail_json(
                msg="'count' parameter is not supported until Purity//FA 6.0.0 or higher"
            )
        # BUG FIX: range(1, 10) rejected digits=10, the documented maximum;
        # range(1, 11) accepts the full 1-10 span.
        if module.params["digits"] and module.params["digits"] not in range(1, 11):
            module.fail_json(msg="'digits' must be in the range of 1 to 10")
        if module.params["start"] < 0:
            module.fail_json(msg="'start' must be a positive number")
        vgroup = get_multi_vgroups(module)
        if state == "present" and not vgroup:
            make_multi_vgroups(module, array)
        elif state == "absent" and not vgroup:
            module.exit_json(changed=False)
        else:
            module.warn("Method not yet supported for multi-vgroup")
    else:
        # Each helper exits the module itself via exit_json/fail_json.
        if xvgroup and state == "present":
            recover_vgroup(module, array)
        elif vgroup and state == "absent":
            delete_vgroup(module, array)
        elif xvgroup and state == "absent" and module.params["eradicate"]:
            eradicate_vgroup(module, array)
        elif not vgroup and not xvgroup and state == "present":
            make_vgroup(module, array)
        elif vgroup and state == "present":
            update_vgroup(module, array)
        elif vgroup is None and state == "absent":
            module.exit_json(changed=False)

    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py
new file mode 100644
index 000000000..e804e334d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefa_vlan
+version_added: '1.0.0'
+short_description: Manage network VLAN interfaces in a Pure Storage FlashArray
+description:
+ - This module manages the VLAN network interfaces on a Pure Storage FlashArray.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+      - Interface name, including controller identifier.
+ - VLANs are only supported on iSCSI, NVMe-RoCE and file
+ physical interfaces
+ required: true
+ type: str
+ state:
+ description:
+ - State of existing interface (on/off).
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ enabled:
+ description:
+ - Define if VLAN interface is enabled or not.
+ required: false
+ default: true
+ type: bool
+ address:
+ description:
+ - IPv4 or IPv6 address of interface.
+ required: false
+ type: str
+ subnet:
+ description:
+ - Name of subnet interface associated with.
+ required: true
+ type: str
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = """
+- name: Configure and enable VLAN interface ct0.eth8 for subnet test
+ purestorage.flasharray.purefa_vlan:
+ name: ct0.eth8
+ subnet: test
+ address: 10.21.200.18
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Disable VLAN interface for subnet test on ct1.eth2
+ purestorage.flasharray.purefa_vlan:
+ name: ct1.eth2
+ subnet: test
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Delete VLAN interface for subnet test on ct0.eth4
+ purestorage.flasharray.purefa_vlan:
+ name: ct0.eth4
+ subnet: test
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40"""
+
+RETURN = """
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def _get_subnet(module, array):
+ """Return subnet or None"""
+ subnet = {}
+ try:
+ subnet = array.get_subnet(module.params["subnet"])
+ except Exception:
+ return None
+ return subnet
+
+
+def _get_interface(module, array):
+ """Return Interface or None"""
+ interface = {}
+ if "ct" in module.params["name"]:
+ try:
+ interfaces = array.list_network_interfaces()
+ except Exception:
+ return None
+ for ints in range(0, len(interfaces)):
+ if interfaces[ints]["name"] == module.params["name"]:
+ interface = interfaces[ints]
+ break
+ return interface
+
+
+def _get_vif(array, interface, subnet):
+ """Return VLAN Interface or None"""
+ vif_info = {}
+ vif_name = interface["name"] + "." + str(subnet["vlan"])
+ try:
+ interfaces = array.list_network_interfaces()
+ except Exception:
+ return None
+ for ints in range(0, len(interfaces)):
+ if interfaces[ints]["name"] == vif_name:
+ vif_info = interfaces[ints]
+ break
+ return vif_info
+
+
def create_vif(module, array, interface, subnet):
    """Create a VLAN sub-interface, optionally disabling it after creation."""
    changed = True
    if not module.check_mode:
        vif_name = interface["name"] + "." + str(subnet["vlan"])
        try:
            if module.params["address"]:
                array.create_vlan_interface(
                    vif_name, module.params["subnet"], address=module.params["address"]
                )
            else:
                array.create_vlan_interface(vif_name, module.params["subnet"])
        except Exception:
            module.fail_json(
                msg="Failed to create VLAN interface {0}.".format(vif_name)
            )
        # Interfaces come up enabled; honour an explicit enabled=false.
        if not module.params["enabled"]:
            try:
                array.set_network_interface(vif_name, enabled=False)
            except Exception:
                module.fail_json(
                    msg="Failed to disable VLAN interface {0} on creation.".format(
                        vif_name
                    )
                )
    module.exit_json(changed=changed)
+
+
def update_vif(module, array, interface, subnet):
    """Reconcile a VLAN sub-interface's IP address and enabled state."""
    changed = False
    vif_info = _get_vif(array, interface, subnet)
    vif_name = vif_info["name"]
    new_address = module.params["address"]
    if new_address and new_address != vif_info["address"]:
        changed = True
        if not module.check_mode:
            try:
                array.set_network_interface(vif_name, address=new_address)
            except Exception:
                module.fail_json(
                    msg="Failed to change IP address for VLAN interface {0}.".format(
                        subnet
                    )
                )

    if module.params["enabled"] != vif_info["enabled"]:
        changed = True
        enable = module.params["enabled"]
        if not module.check_mode:
            try:
                array.set_network_interface(vif_name, enabled=enable)
            except Exception:
                if enable:
                    module.fail_json(
                        msg="Failed to enable VLAN interface {0}.".format(vif_name)
                    )
                module.fail_json(
                    msg="Failed to disable VLAN interface {0}.".format(vif_name)
                )

    module.exit_json(changed=changed)
+
+
def delete_vif(module, array, subnet):
    """Delete the VLAN sub-interface named <name>.<vlan-id>."""
    changed = True
    if not module.check_mode:
        vif_name = module.params["name"] + "." + str(subnet["vlan"])
        try:
            array.delete_vlan_interface(vif_name)
        except Exception:
            # Typo fixed: message previously read "inerface".
            module.fail_json(msg="Failed to delete VLAN interface {0}".format(vif_name))
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: validate the target interface and dispatch on state."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            subnet=dict(type="str", required=True),
            enabled=dict(type="bool", default=True),
            state=dict(type="str", default="present", choices=["present", "absent"]),
            address=dict(type="str"),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    state = module.params["state"]
    array = get_system(module)
    subnet = _get_subnet(module, array)
    interface = _get_interface(module, array)
    if not subnet:
        module.fail_json(msg="Invalid subnet specified.")
    if not interface:
        module.fail_json(msg="Invalid interface specified.")
    # BUG FIX: the original test `("iscsi" or "nvme-roce" or ...) not in
    # services` short-circuits to `"iscsi" not in services`, so interfaces
    # providing only the other service types were wrongly rejected. We need
    # at least one of the supported services to be present.
    if not {"iscsi", "nvme-roce", "nvme-tcp", "file"}.intersection(
        interface["services"]
    ):
        module.fail_json(
            msg="Invalid interface specified - must have service type of iSCSI, NVMe-RoCE, NVMe-TCP or file enabled."
        )
    # BUG FIX: `vif` was only assigned inside the `if subnet["vlan"]` branch,
    # raising NameError below for subnets without a VLAN id.
    vif = False
    if subnet["vlan"]:
        vif_name = module.params["name"] + "." + str(subnet["vlan"])
        vif = bool(vif_name in subnet["interfaces"])

    # Each helper exits the module itself via exit_json/fail_json.
    if state == "present" and not vif:
        create_vif(module, array, interface, subnet)
    elif state == "present" and vif:
        update_vif(module, array, interface, subnet)
    elif state == "absent" and vif:
        delete_vif(module, array, subnet)

    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py
new file mode 100644
index 000000000..48e154c77
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_vnc
+version_added: '1.0.0'
+short_description: Enable or Disable VNC port for installed apps
+description:
+- Enable or Disable VNC access for installed apps
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of VNC
+ type: str
+ default: present
+ choices: [ present, absent ]
+ name:
+ description:
+    - Name of app
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable VNC for application test
+ purestorage.flasharray.purefa_vnc:
+ name: test
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable VNC for application test
+ purestorage.flasharray.purefa_vnc:
+ name: test
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+vnc:
+ description: VNC port information for application
+ type: dict
+ returned: success
+ contains:
+ status:
+ description: Status of application
+ type: str
+ sample: 'healthy'
+ index:
+ description: Application index number
+ type: int
+ version:
+ description: Application version installed
+ type: str
+ sample: '5.2.1'
+ vnc:
+ description: IP address and port number for VNC connection
+ type: dict
+ sample: ['10.21.200.34:5900']
+ name:
+ description: Application name
+ type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "1.17"
+
+
def enable_vnc(module, array, app):
    """Enable the VNC port for an installed app.

    No-op (changed=False) when VNC is already enabled. In check mode the
    change is reported but not applied, so the returned ``vnc`` facts stay
    empty. Exits the module via exit_json/fail_json.
    """
    changed = False
    vnc_fact = []
    if not app["vnc_enabled"]:
        try:
            if not module.check_mode:
                array.enable_app_vnc(module.params["name"])
                # Fetch the app node info (connection IP/port) to return as facts.
                vnc_fact = array.get_app_node(module.params["name"])
            changed = True
        except Exception:
            module.fail_json(
                msg="Enabling VNC for {0} failed".format(module.params["name"])
            )
    module.exit_json(changed=changed, vnc=vnc_fact)
+
+
def disable_vnc(module, array, app):
    """Disable the VNC port for an installed app (no-op when already off)."""
    changed = False
    app_name = module.params["name"]
    if app["vnc_enabled"]:
        changed = True
        if not module.check_mode:
            try:
                array.disable_app_vnc(app_name)
            except Exception:
                module.fail_json(msg="Disabling VNC for {0} failed".format(app_name))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: toggle VNC access for an installed application."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            state=dict(type="str", default="present", choices=["present", "absent"]),
            name=dict(type="str", required=True),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    array = get_system(module)
    app_name = module.params["name"]

    # The apps API needs at least REST 1.17.
    if MIN_REQUIRED_API_VERSION not in array._list_available_rest_versions():
        module.fail_json(
            msg="FlashArray REST version not supported. "
            "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
        )
    try:
        app = array.get_app(app_name)
    except Exception:
        module.fail_json(
            msg="Selected application {0} does not exist".format(app_name)
        )
    if not app["enabled"]:
        module.fail_json(msg="Application {0} is not enabled".format(app_name))
    if module.params["state"] == "present":
        enable_vnc(module, array, app)
    else:
        disable_vnc(module, array, app)
    module.exit_json(changed=False)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
new file mode 100644
index 000000000..c3c92f6d4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
@@ -0,0 +1,1726 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_volume
+version_added: '1.0.0'
+short_description: Manage volumes on Pure Storage FlashArrays
+description:
+- Create, delete or extend the capacity of a volume on Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the volume.
+ - Volume could be created in a POD with this syntax POD_NAME::VOLUME_NAME.
+ - Volume could be created in a volume group with this syntax VG_NAME/VOLUME_NAME.
+ - Multi-volume support available from Purity//FA 6.0.0
+ B(***NOTE***) Manual deletion or eradication of individual volumes created
+ using multi-volume will cause idempotency to fail
+ - Multi-volume support only exists for volume creation
+ type: str
+ required: true
+ target:
+ description:
+ - The name of the target volume, if copying.
+ type: str
+ state:
+ description:
+ - Define whether the volume should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the volume on delete or leave in trash.
+ type: bool
+ default: false
+ overwrite:
+ description:
+    - Define whether to overwrite a target volume if it already exists.
+ type: bool
+ default: false
+ size:
+ description:
+ - Volume size in M, G, T or P units.
+ type: str
+ count:
+ description:
+ - Number of volumes to be created in a multiple volume creation
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ start:
+ description:
+ - Number at which to start the multiple volume creation index
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ default: 0
+ digits:
+ description:
+ - Number of digits to use for multiple volume count. This
+ will pad the index number with zeros where necessary
+ - Only supported from Purity//FA v6.0.0 and higher
+ - Range is between 1 and 10
+ type: int
+ default: 1
+ suffix:
+ description:
+ - Suffix string, if required, for multiple volume create
+ - Volume names will be formed as I(<name>#I<suffix>), where
+ I(#) is a placeholder for the volume index
+ See associated descriptions
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: str
+ bw_qos:
+ description:
+ - Bandwidth limit for volume in M or G units.
+ M will set MB/s
+ G will set GB/s
+ To clear an existing QoS setting use 0 (zero)
+ type: str
+ aliases: [ qos ]
+ iops_qos:
+ description:
+ - IOPs limit for volume - use value or K or M
+ K will mean 1000
+ M will mean 1000000
+ To clear an existing IOPs setting use 0 (zero)
+ type: str
+ move:
+ description:
+ - Move a volume in and out of a pod or vgroup
+ - Provide the name of pod or vgroup to move the volume to
+ - Pod and Vgroup names must be unique in the array
+ - To move to the local array, specify C(local)
+ - This is not idempotent - use C(ignore_errors) in the play
+ type: str
+ rename:
+ description:
+ - Value to rename the specified volume to.
+ - Rename only applies to the container the current volumes is in.
+ - There is no requirement to specify the pod or vgroup name as this is implied.
+ type: str
+ pgroup:
+ description:
+    - Name of existing, not deleted, protection group to add volume to
+ - Only application for volume(s) creation
+    - Superseded from Purity//FA 6.3.4 by I(add_to_pgs)
+ type: str
+ version_added: 1.8.0
+ priority_operator:
+ description:
+ - DMM Priority Adjustment operator
+ type: str
+ choices: [ '=', '+', '-' ]
+ version_added: '1.13.0'
+ priority_value:
+ description:
+ - DMM Priority Adjustment value
+ type: int
+ choices: [ -10, 0, 10 ]
+ version_added: '1.13.0'
+ with_default_protection:
+ description:
+ - Whether to add the default container protection groups to
+      those specified in I(add_to_pgs) as the initial protection
+ of a new volume.
+ type: bool
+ default: true
+ version_added: '1.14.0'
+ add_to_pgs:
+ description:
+ - A new volume will be added to the specified protection groups
+ on creation
+ type: list
+ elements: str
+ version_added: '1.14.0'
+ promotion_state:
+ description:
+ - Promote or demote the volume so that the volume starts or
+ stops accepting write requests.
+ type: str
+ choices: [ promoted, demoted ]
+ version_added: '1.16.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new volume named foo with a QoS limit
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ size: 1T
+ bw_qos: 58M
+ iops_qos: 23K
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create new volume named foo with a DMM priority (Purity//FA 6.1.2+)
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ size: 1T
+ priority_operator: +
+    priority_value: 10
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create new volume named foo in pod bar in protection group pg1
+ purestorage.flasharray.purefa_volume:
+ name: bar::foo
+    pgroup: pg1
+ size: 1T
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create 10 volumes with index starting at 10 but padded with 3 digits
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ size: 1T
+ suffix: bar
+ count: 10
+ start: 10
+ digits: 3
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Extend the size of an existing volume named foo
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ size: 2T
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate volume named foo
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Create clone of volume bar named foo
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ target: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Overwrite volume bar with volume foo
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ target: bar
+ overwrite: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Clear volume QoS from volume foo
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ bw_qos: 0
+ iops_qos: 0
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Move local volume foo from local array to pod bar
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ move: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Move volume foo in pod bar to local array
+ purestorage.flasharray.purefa_volume:
+ name: bar::foo
+ move: local
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Move volume foo in pod bar to vgroup fin
+ purestorage.flasharray.purefa_volume:
+ name: bar::foo
+ move: fin
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+volume:
+ description: A dictionary describing the changed volume. Only some
+ attributes below will be returned with various actions.
+ type: dict
+ returned: success
+ contains:
+ source:
+ description: Volume name of source volume used for volume copy
+ type: str
+ serial:
+ description: Volume serial number
+ type: str
+ sample: '361019ECACE43D83000120A4'
+ nvme_nguid:
+ description: Volume NVMe namespace globally unique identifier
+ type: str
+ sample: 'eui.00cd6b99ef25864724a937c5000be684'
+ page83_naa:
+ description: Volume NAA canonical name
+ type: str
+ sample: 'naa.624a9370361019ecace43db3000120a4'
+ created:
+ description: Volume creation time
+ type: str
+ sample: '2019-03-13T22:49:24Z'
+ name:
+ description: Volume name
+ type: str
+ size:
+ description: Volume size in bytes
+ type: int
+ bandwidth_limit:
+ description: Volume bandwidth limit in bytes/sec
+ type: int
+ iops_limit:
+ description: Volume IOPs limit
+ type: int
+ priority_operator:
+ description: DMM Priority Adjustment operator
+ type: str
+ priority_value:
+ description: DMM Priority Adjustment value
+ type: int
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
+QOS_API_VERSION = "1.14"
+VGROUPS_API_VERSION = "1.13"
+POD_API_VERSION = "1.13"
+AC_QOS_VERSION = "1.16"
+OFFLOAD_API_VERSION = "1.16"
+IOPS_API_VERSION = "1.17"
+MULTI_VOLUME_VERSION = "2.2"
+PROMOTE_API_VERSION = "1.19"
+PURE_OUI = "naa.624a9370"
+PRIORITY_API_VERSION = "2.11"
+DEFAULT_API_VERSION = "2.16"
+VOLUME_PROMOTION_API_VERSION = "2.2"
+
+
+def _create_nguid(serial):
+ nguid = "eui.00" + serial[0:14] + "24a937" + serial[-10:]
+ return nguid
+
+
def get_pod(module, array):
    """Get ActiveCluster Pod"""
    # NOTE(review): the pod name is derived from the *pgroup* parameter
    # (POD::PGROUP syntax), not from the volume name - presumably this helper
    # validates the pod containing a requested protection group; confirm
    # against callers.
    pod_name = module.params["pgroup"].split("::")[0]
    try:
        return array.get_pod(pod=pod_name)
    except Exception:
        # Pod does not exist (or lookup failed) - callers treat None as absent.
        return None
+
+
def get_pending_pgroup(module, array):
    """Return the deleted-but-not-eradicated protection group, or None.

    Remote pgroup names (ARRAY:PGROUP, but not POD::PGROUP) are searched
    across all connected arrays; any other name is looked up locally.
    """
    target = module.params["pgroup"]
    if ":" in target and "::" not in target:
        candidates = array.list_pgroups(pending=True, on="*")
    else:
        candidates = array.list_pgroups(pending=True)
    for pgrp in candidates:
        # Only pending-eradication groups carry a non-empty time_remaining.
        if pgrp["name"] == target and pgrp["time_remaining"]:
            return pgrp
    return None
+
+
def get_pgroup(module, array):
    """Return the live protection group named in I(pgroup), or None.

    Remote pgroup names (ARRAY:PGROUP, but not POD::PGROUP) are searched
    across all connected arrays; any other name is looked up locally.
    """
    target = module.params["pgroup"]
    if ":" in target and "::" not in target:
        candidates = array.list_pgroups(on="*")
    else:
        candidates = array.list_pgroups()
    for pgrp in candidates:
        if pgrp["name"] == target:
            return pgrp
    return None
+
+
def human_to_bytes(size):
    """Given a human-readable byte string (e.g. 2G, 30M),
    return the number of bytes. Will return 0 if the argument has
    unexpected form.
    """
    multipliers = {
        "P": 1125899906842624,
        "T": 1099511627776,
        "G": 1073741824,
        "M": 1048576,
        "K": 1024,
    }
    magnitude, unit = size[:-1], size[-1].upper()
    # Anything that is not <digits><known unit> maps to 0 by contract.
    if not magnitude.isdigit() or unit not in multipliers:
        return 0
    return int(magnitude) * multipliers[unit]
+
+
def human_to_real(iops):
    """Given a human-readable IOPs string (e.g. 2K, 30M),
    return the real number. Will return 0 if the argument has
    unexpected form.
    """
    prefix, unit = iops[:-1], iops[-1].upper()
    if unit.isdigit():
        # Plain number with no suffix: hand back the original string
        # unchanged (historical behaviour - callers pass it straight on).
        return iops
    if not prefix.isdigit():
        return 0
    scale = {"M": 1000000, "K": 1000}.get(unit)
    if scale is None:
        return 0
    return int(prefix) * scale
+
+
def get_multi_volumes(module, destroyed=False):
    """Return True when every volume in the generated name series exists
    (in the requested destroyed/live state), otherwise False."""
    # Build the full list of expected volume names:
    # <name><zero-padded index><suffix> for each index in [start, start+count).
    names = []
    array = get_array(module)
    for vol_num in range(
        module.params["start"], module.params["count"] + module.params["start"]
    ):
        names.append(
            module.params["name"]
            + str(vol_num).zfill(module.params["digits"])
            + module.params["suffix"]
        )
    # REST 2.x returns 200 only when all named volumes are found, so this is
    # an all-or-nothing existence check.
    return bool(array.get_volumes(names=names, destroyed=destroyed).status_code == 200)
+
+
def get_volume(module, array):
    """Return Volume or None"""
    try:
        return array.get_volume(module.params["name"])
    except Exception:
        # Volume does not exist (or lookup failed) - callers treat None as absent.
        return None
+
+
def get_endpoint(name, array):
    """Return Endpoint or None"""
    try:
        # protocol_endpoint=True restricts the lookup to protocol endpoints;
        # pending=True also matches destroyed-but-not-eradicated entries.
        return array.get_volume(name, pending=True, protocol_endpoint=True)
    except Exception:
        return None
+
+
def get_destroyed_volume(vol, array):
    """Return True when the volume exists in the destroyed (pending
    eradication) state, otherwise False."""
    try:
        # A non-empty time_remaining means the volume is awaiting eradication.
        return bool(array.get_volume(vol, pending=True)["time_remaining"] != "")
    except Exception:
        return False
+
+
def get_destroyed_endpoint(vol, array):
    """Return True when the protocol endpoint exists in the destroyed
    (pending eradication) state, otherwise False."""
    try:
        # A non-empty time_remaining means the endpoint is awaiting eradication.
        return bool(
            array.get_volume(vol, protocol_endpoint=True, pending=True)[
                "time_remaining"
            ]
            != ""
        )
    except Exception:
        return False
+
+
def get_target(module, array):
    """Return the copy-target volume's attributes, or None if it is absent."""
    try:
        return array.get_volume(module.params["target"])
    except Exception:
        return None
+
+
def check_vgroup(module, array):
    """Return True when the volume group prefix of the requested volume
    name (VG/VOLUME syntax) exists on the array."""
    api_version = array._list_available_rest_versions()
    # Guard clause: vgroups need a minimum REST API version.
    if VGROUPS_API_VERSION not in api_version:
        module.fail_json(
            msg="VG volumes are not supported. Please upgrade your FlashArray."
        )
    vg_name = module.params["name"].split("/")[0]
    try:
        vgs = array.list_vgroups()
    except Exception:
        module.fail_json(msg="Failed to get volume groups list. Check array.")
    return any(vgroup["name"] == vg_name for vgroup in vgs)
+
+
def check_pod(module, array):
    """Return True when the pod prefix of the requested volume name
    (POD::VOLUME syntax) exists on the array."""
    api_version = array._list_available_rest_versions()
    # Guard clause: pods need a minimum REST API version.
    if POD_API_VERSION not in api_version:
        module.fail_json(
            msg="Pod volumes are not supported. Please upgrade your FlashArray."
        )
    pod_name = module.params["name"].split("::")[0]
    try:
        pods = array.list_pods()
    except Exception:
        module.fail_json(msg="Failed to get pod list. Check array.")
    return any(pod["name"] == pod_name for pod in pods)
+
+
def create_volume(module, array):
    """Create a single volume with optional QoS limits, promotion state,
    DMM priority adjustment and protection-group membership (REST 1.x
    client, with REST 2.x used for promotion/priority)."""
    volfact = []
    changed = False
    api_version = array._list_available_rest_versions()
    if "/" in module.params["name"] and not check_vgroup(module, array):
        module.fail_json(
            msg="Failed to create volume {0}. Volume Group does not exist.".format(
                module.params["name"]
            )
        )
    if "::" in module.params["name"]:
        if not check_pod(module, array):
            module.fail_json(
                msg="Failed to create volume {0}. Pod does not exist".format(
                    module.params["name"]
                )
            )
        pod_name = module.params["name"].split("::")[0]
        if PROMOTE_API_VERSION in api_version:
            if array.get_pod(pod_name)["promotion_status"] == "demoted":
                module.fail_json(msg="Volume cannot be created in a demoted pod")
        if module.params["bw_qos"] or module.params["iops_qos"]:
            if AC_QOS_VERSION not in api_version:
                # Fix: duplicated word removed from the warning text.
                module.warn(
                    "Pods cannot contain volumes with QoS settings. Ignoring..."
                )
                module.params["bw_qos"] = module.params["iops_qos"] = None
    if not module.params["size"]:
        module.fail_json(msg="Size for a new volume must be specified")
    if module.params["bw_qos"] or module.params["iops_qos"]:
        if module.params["bw_qos"] and QOS_API_VERSION in api_version:
            if module.params["iops_qos"] and IOPS_API_VERSION in api_version:
                if module.params["bw_qos"] and not module.params["iops_qos"]:
                    if int(human_to_bytes(module.params["bw_qos"])) in range(
                        1048576, 549755813888
                    ):
                        changed = True
                        if not module.check_mode:
                            try:
                                volfact = array.create_volume(
                                    module.params["name"],
                                    module.params["size"],
                                    bandwidth_limit=module.params["bw_qos"],
                                )
                                volfact["page83_naa"] = (
                                    PURE_OUI + volfact["serial"].lower()
                                )
                                volfact["nvme_nguid"] = _create_nguid(
                                    volfact["serial"].lower()
                                )
                            except Exception:
                                module.fail_json(
                                    msg="Volume {0} creation failed.".format(
                                        module.params["name"]
                                    )
                                )
                    else:
                        module.fail_json(
                            msg="Bandwidth QoS value {0} out of range.".format(
                                module.params["bw_qos"]
                            )
                        )
                elif module.params["iops_qos"] and not module.params["bw_qos"]:
                    if (
                        100000000
                        >= int(human_to_real(module.params["iops_qos"]))
                        >= 100
                    ):
                        changed = True
                        if not module.check_mode:
                            try:
                                volfact = array.create_volume(
                                    module.params["name"],
                                    module.params["size"],
                                    iops_limit=module.params["iops_qos"],
                                )
                                volfact["page83_naa"] = (
                                    PURE_OUI + volfact["serial"].lower()
                                )
                                volfact["nvme_nguid"] = _create_nguid(
                                    volfact["serial"].lower()
                                )
                            except Exception:
                                module.fail_json(
                                    msg="Volume {0} creation failed.".format(
                                        module.params["name"]
                                    )
                                )
                    else:
                        module.fail_json(
                            msg="IOPs QoS value {0} out of range.".format(
                                module.params["iops_qos"]
                            )
                        )
                else:
                    # Both bandwidth and IOPs limits requested.
                    bw_qos_size = int(human_to_bytes(module.params["bw_qos"]))
                    if int(human_to_real(module.params["iops_qos"])) in range(
                        100, 100000000
                    ) and bw_qos_size in range(1048576, 549755813888):
                        changed = True
                        if not module.check_mode:
                            try:
                                volfact = array.create_volume(
                                    module.params["name"],
                                    module.params["size"],
                                    iops_limit=module.params["iops_qos"],
                                    bandwidth_limit=module.params["bw_qos"],
                                )
                                volfact["page83_naa"] = (
                                    PURE_OUI + volfact["serial"].lower()
                                )
                                volfact["nvme_nguid"] = _create_nguid(
                                    volfact["serial"].lower()
                                )
                            except Exception:
                                module.fail_json(
                                    msg="Volume {0} creation failed.".format(
                                        module.params["name"]
                                    )
                                )
                    else:
                        module.fail_json(
                            msg="IOPs or Bandwidth QoS value out of range."
                        )
            else:
                if module.params["bw_qos"]:
                    if int(human_to_bytes(module.params["bw_qos"])) in range(
                        1048576, 549755813888
                    ):
                        changed = True
                        if not module.check_mode:
                            try:
                                volfact = array.create_volume(
                                    module.params["name"],
                                    module.params["size"],
                                    bandwidth_limit=module.params["bw_qos"],
                                )
                                volfact["page83_naa"] = (
                                    PURE_OUI + volfact["serial"].lower()
                                )
                                volfact["nvme_nguid"] = _create_nguid(
                                    volfact["serial"].lower()
                                )
                            except Exception:
                                module.fail_json(
                                    msg="Volume {0} creation failed.".format(
                                        module.params["name"]
                                    )
                                )
                    else:
                        module.fail_json(
                            msg="Bandwidth QoS value {0} out of range.".format(
                                module.params["bw_qos"]
                            )
                        )
                else:
                    changed = True
                    if not module.check_mode:
                        try:
                            volfact = array.create_volume(
                                module.params["name"], module.params["size"]
                            )
                            volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
                            volfact["nvme_nguid"] = _create_nguid(
                                volfact["serial"].lower()
                            )
                        except Exception:
                            module.fail_json(
                                msg="Volume {0} creation failed.".format(
                                    module.params["name"]
                                )
                            )
    else:
        # No QoS requested - plain creation.
        changed = True
        if not module.check_mode:
            try:
                volfact = array.create_volume(
                    module.params["name"], module.params["size"]
                )
                volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
                volfact["nvme_nguid"] = _create_nguid(volfact["serial"].lower())
            except Exception:
                module.fail_json(
                    msg="Volume {0} creation failed.".format(module.params["name"])
                )
    if VOLUME_PROMOTION_API_VERSION in api_version and module.params["promotion_state"]:
        arrayv6 = get_array(module)
        volume = flasharray.VolumePatch(
            requested_promotion_state=module.params["promotion_state"]
        )
        changed = True
        if not module.check_mode:
            res = arrayv6.patch_volumes(names=[module.params["name"]], volume=volume)
            if res.status_code != 200:
                # Roll back the creation: destroy and eradicate the volume.
                arrayv6.patch_volumes(
                    names=[module.params["name"]],
                    volume=flasharray.VolumePatch(destroyed=True),
                )
                arrayv6.delete_volumes(names=[module.params["name"]])
                module.fail_json(
                    msg="Failed to set Promotion State for volume {0}.".format(
                        module.params["name"]
                    )
                )
            else:
                volfact["promotion_state"] = module.params["promotion_state"]
    if PRIORITY_API_VERSION in api_version and module.params["priority_operator"]:
        arrayv6 = get_array(module)
        volume = flasharray.VolumePatch(
            priority_adjustment=flasharray.PriorityAdjustment(
                priority_adjustment_operator=module.params["priority_operator"],
                priority_adjustment_value=module.params["priority_value"],
            )
        )
        res = arrayv6.patch_volumes(names=[module.params["name"]], volume=volume)
        if res.status_code != 200:
            # Roll back the creation: destroy and eradicate the volume.
            arrayv6.patch_volumes(
                names=[module.params["name"]],
                volume=flasharray.VolumePatch(destroyed=True),
            )
            arrayv6.delete_volumes(names=[module.params["name"]])
            module.fail_json(
                msg="Failed to set DMM Priority Adjustment on volume {0}. Error: {1}".format(
                    module.params["name"], res.errors[0].message
                )
            )
        else:
            volfact["priority_operator"] = module.params["priority_operator"]
            volfact["priority_value"] = module.params["priority_value"]
    if module.params["pgroup"] and DEFAULT_API_VERSION not in api_version:
        changed = True
        if not module.check_mode:
            try:
                array.set_pgroup(
                    module.params["pgroup"], addvollist=[module.params["name"]]
                )
            except Exception:
                # Bug fix: AnsibleModule has no warn_json() method - the
                # original call raised AttributeError instead of emitting a
                # warning. Use warn() with the same message.
                module.warn(
                    "Failed to add {0} to protection group {1}.".format(
                        module.params["name"], module.params["pgroup"]
                    )
                )

    module.exit_json(changed=changed, volume=volfact)
+
+
def create_multi_volume(module, array, single=False):
    """Create one volume (single=True) or a numbered series of volumes
    (name + zero-padded index + suffix) using the REST 2.x client."""
    volfact = {}
    changed = True
    api_version = array._list_available_rest_versions()
    bw_qos_size = iops_qos_size = 0
    names = []
    if "/" in module.params["name"] and not check_vgroup(module, array):
        module.fail_json(
            msg="Multi-volume create failed. Volume Group {0} does not exist.".format(
                module.params["name"].split("/")[0]
            )
        )
    if "::" in module.params["name"]:
        if not check_pod(module, array):
            module.fail_json(
                msg="Multi-volume create failed. Pod {0} does not exist".format(
                    module.params["name"].split(":")[0]
                )
            )
        pod_name = module.params["name"].split("::")[0]
        if PROMOTE_API_VERSION in api_version:
            if array.get_pod(pod_name)["promotion_status"] == "demoted":
                module.fail_json(msg="Volume cannot be created in a demoted pod")
    # Switch from the REST 1.x client to the 2.x client for the bulk create.
    array = get_array(module)
    if not single:
        # Generate <name><zero-padded index><suffix> for [start, start+count).
        for vol_num in range(
            module.params["start"], module.params["count"] + module.params["start"]
        ):
            names.append(
                module.params["name"]
                + str(vol_num).zfill(module.params["digits"])
                + module.params["suffix"]
            )
    else:
        names.append(module.params["name"])
    if module.params["bw_qos"]:
        bw_qos = int(human_to_bytes(module.params["bw_qos"]))
        if bw_qos in range(1048576, 549755813888):
            bw_qos_size = bw_qos
        else:
            module.fail_json(msg="Bandwidth QoS value out of range.")
    if module.params["iops_qos"]:
        iops_qos = int(human_to_real(module.params["iops_qos"]))
        if iops_qos in range(100, 100000000):
            iops_qos_size = iops_qos
        else:
            module.fail_json(msg="IOPs QoS value out of range.")
    # Build the VolumePost body with whichever QoS limits were validated above.
    if bw_qos_size != 0 and iops_qos_size != 0:
        vols = flasharray.VolumePost(
            provisioned=human_to_bytes(module.params["size"]),
            qos=flasharray.Qos(bandwidth_limit=bw_qos_size, iops_limit=iops_qos_size),
            subtype="regular",
        )
    elif bw_qos_size == 0 and iops_qos_size == 0:
        vols = flasharray.VolumePost(
            provisioned=human_to_bytes(module.params["size"]), subtype="regular"
        )
    elif bw_qos_size == 0 and iops_qos_size != 0:
        vols = flasharray.VolumePost(
            provisioned=human_to_bytes(module.params["size"]),
            qos=flasharray.Qos(iops_limit=iops_qos_size),
            subtype="regular",
        )
    elif bw_qos_size != 0 and iops_qos_size == 0:
        vols = flasharray.VolumePost(
            provisioned=human_to_bytes(module.params["size"]),
            qos=flasharray.Qos(bandwidth_limit=bw_qos_size),
            subtype="regular",
        )
    if not module.check_mode:
        if DEFAULT_API_VERSION in api_version:
            if module.params["add_to_pgs"]:
                add_to_pgs = []
                for add_pg in range(0, len(module.params["add_to_pgs"])):
                    add_to_pgs.append(
                        flasharray.FixedReference(
                            name=module.params["add_to_pgs"][add_pg]
                        )
                    )
                res = array.post_volumes(
                    names=names,
                    volume=vols,
                    with_default_protection=module.params["with_default_protection"],
                    add_to_protection_groups=add_to_pgs,
                )
            else:
                res = array.post_volumes(
                    names=names,
                    volume=vols,
                    with_default_protection=module.params["with_default_protection"],
                )
        else:
            res = array.post_volumes(names=names, volume=vols)
        if res.status_code != 200:
            module.fail_json(
                msg="Multi-Volume {0}#{1} creation failed: {2}".format(
                    module.params["name"],
                    module.params["suffix"],
                    res.errors[0].message,
                )
            )
        else:
            if (
                VOLUME_PROMOTION_API_VERSION in api_version
                and module.params["promotion_state"]
            ):
                volume = flasharray.VolumePatch(
                    requested_promotion_state=module.params["promotion_state"]
                )
                prom_res = array.patch_volumes(names=names, volume=volume)
                if prom_res.status_code != 200:
                    # Roll back: destroy and eradicate the just-created volumes.
                    array.patch_volumes(
                        names=names,
                        volume=flasharray.VolumePatch(destroyed=True),
                    )
                    array.delete_volumes(names=names)
                    module.warn(
                        "Failed to set promotion status on volumes. Error: {0}".format(
                            prom_res.errors[0].message
                        )
                    )
            if (
                PRIORITY_API_VERSION in api_version
                and module.params["priority_operator"]
            ):
                volume = flasharray.VolumePatch(
                    priority_adjustment=flasharray.PriorityAdjustment(
                        priority_adjustment_operator=module.params["priority_operator"],
                        priority_adjustment_value=module.params["priority_value"],
                    )
                )
                prio_res = array.patch_volumes(names=names, volume=volume)
                if prio_res.status_code != 200:
                    # Roll back: destroy and eradicate the just-created volumes.
                    array.patch_volumes(
                        names=names,
                        volume=flasharray.VolumePatch(destroyed=True),
                    )
                    array.delete_volumes(names=names)
                    module.fail_json(
                        msg="Failed to set DMM Priority Adjustment on volumes. Error: {0}".format(
                            prio_res.errors[0].message
                        )
                    )
            # NOTE(review): prio_res is only bound inside the priority_operator
            # branch above - when priority_operator is unset this statement
            # appears to raise NameError. Confirm callers always supply
            # priority_operator here, or guard this line.
            prio_temp = list(prio_res.items)
            temp = list(res.items)
            # Build the per-volume fact dictionary returned to the playbook.
            for count in range(0, len(temp)):
                vol_name = temp[count].name
                volfact[vol_name] = {
                    "size": temp[count].provisioned,
                    "serial": temp[count].serial,
                    "created": time.strftime(
                        "%Y-%m-%d %H:%M:%S", time.localtime(temp[count].created / 1000)
                    ),
                    "page83_naa": PURE_OUI + temp[count].serial.lower(),
                    "nvme_nguid": _create_nguid(temp[count].serial.lower()),
                }
                if bw_qos_size != 0:
                    volfact[vol_name]["bandwidth_limit"] = temp[
                        count
                    ].qos.bandwidth_limit
                if iops_qos_size != 0:
                    volfact[vol_name]["iops_limit"] = temp[count].qos.iops_limit
                if (
                    VOLUME_PROMOTION_API_VERSION in api_version
                    and module.params["promotion_state"]
                ):
                    volfact[vol_name]["promotion_status"] = prio_temp[
                        count
                    ].promotion_status
                if (
                    PRIORITY_API_VERSION in api_version
                    and module.params["priority_operator"]
                ):
                    volfact[vol_name]["priority_operator"] = prio_temp[
                        count
                    ].priority_adjustment.priority_adjustment_operator
                    volfact[vol_name]["priority_value"] = prio_temp[
                        count
                    ].priority_adjustment.priority_adjustment_value

    if module.params["pgroup"] and DEFAULT_API_VERSION not in api_version:
        if not module.check_mode:
            res = array.post_protection_groups_volumes(
                group_names=[module.params["pgroup"]], member_names=names
            )
            if res.status_code != 200:
                module.warn(
                    "Failed to add {0} to protection group {1}.".format(
                        module.params["name"], module.params["pgroup"]
                    )
                )

    module.exit_json(changed=changed, volume=volfact)
+
+
def copy_from_volume(module, array):
    """Create Volume Clone

    Copies the source volume I(name) onto I(target). If the target does not
    exist it is created; if it already exists it is only overwritten when
    I(overwrite) is set. On REST 2.x arrays (DEFAULT_API_VERSION present)
    the copy goes through the py-pure-client ``post_volumes`` call, which
    also supports default protection and adding the clone to protection
    groups; otherwise the REST 1.x ``copy_volume`` is used.
    Exits the module via ``module.exit_json``.
    """
    volfact = []
    changed = False
    tgt = get_target(module, array)
    api_version = array._list_available_rest_versions()
    arrayv6 = get_array(module)
    if tgt is None:
        # Target does not exist yet - straight clone.
        changed = True
        if not module.check_mode:
            if DEFAULT_API_VERSION in api_version:
                if module.params["add_to_pgs"]:
                    add_to_pgs = []
                    for add_pg in range(0, len(module.params["add_to_pgs"])):
                        add_to_pgs.append(
                            flasharray.FixedReference(
                                name=module.params["add_to_pgs"][add_pg]
                            )
                        )
                    res = arrayv6.post_volumes(
                        with_default_protection=module.params[
                            "with_default_protection"
                        ],
                        add_to_protection_groups=add_to_pgs,
                        names=[module.params["target"]],
                        volume=flasharray.VolumePost(
                            source=flasharray.Reference(name=module.params["name"])
                        ),
                    )
                else:
                    res = arrayv6.post_volumes(
                        with_default_protection=module.params[
                            "with_default_protection"
                        ],
                        names=[module.params["target"]],
                        volume=flasharray.VolumePost(
                            source=flasharray.Reference(name=module.params["name"])
                        ),
                    )

                if res.status_code != 200:
                    module.fail_json(
                        msg="Failed to copy volume {0} to {1}. Error: {2}".format(
                            module.params["name"],
                            module.params["target"],
                            res.errors[0].message,
                        )
                    )
                vol_data = list(res.items)
                # `created` is epoch milliseconds; the serial drives both the
                # SCSI page-83 NAA id and the NVMe NGUID.
                volfact = {
                    "size": vol_data[0].provisioned,
                    "serial": vol_data[0].serial,
                    "created": time.strftime(
                        "%Y-%m-%d %H:%M:%S", time.localtime(vol_data[0].created / 1000)
                    ),
                    "page83_naa": PURE_OUI + vol_data[0].serial.lower(),
                    "nvme_nguid": _create_nguid(vol_data[0].serial.lower()),
                }
            else:
                # REST 1.x fallback.
                try:
                    volfact = array.copy_volume(
                        module.params["name"], module.params["target"]
                    )
                    volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
                    volfact["nvme_nguid"] = _create_nguid(volfact["serial"].lower())
                    changed = True
                except Exception:
                    module.fail_json(
                        msg="Copy volume {0} to volume {1} failed.".format(
                            module.params["name"], module.params["target"]
                        )
                    )
    elif tgt is not None and module.params["overwrite"]:
        # Target exists and overwrite was requested.
        changed = True
        if not module.check_mode:
            if DEFAULT_API_VERSION not in api_version:
                try:
                    volfact = array.copy_volume(
                        module.params["name"],
                        module.params["target"],
                        overwrite=module.params["overwrite"],
                    )
                    volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
                    volfact["nvme_nguid"] = _create_nguid(volfact["serial"].lower())
                    changed = True
                except Exception:
                    module.fail_json(
                        msg="Copy volume {0} to volume {1} failed.".format(
                            module.params["name"], module.params["target"]
                        )
                    )
            else:
                res = arrayv6.post_volumes(
                    overwrite=module.params["overwrite"],
                    names=[module.params["target"]],
                    volume=flasharray.VolumePost(
                        source=flasharray.Reference(name=module.params["name"])
                    ),
                )
                if res.status_code != 200:
                    module.fail_json(
                        msg="Failed to copy volume {0} to {1}. Error: {2}".format(
                            module.params["name"],
                            module.params["target"],
                            res.errors[0].message,
                        )
                    )
                vol_data = list(res.items)
                volfact = {
                    "size": vol_data[0].provisioned,
                    "serial": vol_data[0].serial,
                    "created": time.strftime(
                        "%Y-%m-%d %H:%M:%S", time.localtime(vol_data[0].created / 1000)
                    ),
                    "page83_naa": PURE_OUI + vol_data[0].serial.lower(),
                    "nvme_nguid": _create_nguid(vol_data[0].serial.lower()),
                }
    # NOTE: target existing without overwrite falls through and reports
    # changed=False with empty facts.

    module.exit_json(changed=changed, volume=volfact)
+
+
def update_volume(module, array):
    """Update Volume size and/or QoS

    Adjusts an existing volume's size (grow only), bandwidth/IOPs QoS
    limits, ActiveDR promotion state and DMM priority adjustment. Only
    attributes whose module parameters are supplied, and which the array's
    REST API version supports, are modified. Exits the module via
    ``module.exit_json`` with the merged volume facts.
    """
    volfact = {}
    changed = False
    arrayv6 = None
    api_version = array._list_available_rest_versions()
    if MULTI_VOLUME_VERSION in api_version:
        # REST 2.x client is needed for promotion/priority handling and
        # for gathering the final volume facts.
        arrayv6 = get_array(module)
    vol = array.get_volume(module.params["name"])
    vol_qos = array.get_volume(module.params["name"], qos=True)
    # Normalize "no limit" (None) to 0 so the numeric comparisons below work.
    if QOS_API_VERSION in api_version:
        if vol_qos["bandwidth_limit"] is None:
            vol_qos["bandwidth_limit"] = 0
    if IOPS_API_VERSION in api_version:
        if vol_qos["iops_limit"] is None:
            vol_qos["iops_limit"] = 0
    if "::" in module.params["name"]:
        # Volume lives in a pod; per-volume QoS inside pods needs AC_QOS_VERSION.
        if module.params["bw_qos"] or module.params["iops_qos"]:
            if AC_QOS_VERSION not in api_version:
                # Fixed duplicated word ("cannot cannot") in the warning.
                module.warn(
                    "Pods cannot contain volumes with QoS settings. Ignoring..."
                )
                module.params["bw_qos"] = module.params["iops_qos"] = None
    if module.params["size"]:
        if human_to_bytes(module.params["size"]) != vol["size"]:
            # Volumes can only be grown here; a smaller size is ignored.
            if human_to_bytes(module.params["size"]) > vol["size"]:
                changed = True
                if not module.check_mode:
                    try:
                        volfact = array.extend_volume(
                            module.params["name"], module.params["size"]
                        )
                    except Exception:
                        module.fail_json(
                            msg="Volume {0} resize failed.".format(
                                module.params["name"]
                            )
                        )
    if module.params["bw_qos"] and QOS_API_VERSION in api_version:
        if int(human_to_bytes(module.params["bw_qos"])) != int(
            vol_qos["bandwidth_limit"]
        ):
            if module.params["bw_qos"] == "0":
                # "0" removes the bandwidth limit altogether.
                changed = True
                if not module.check_mode:
                    try:
                        volfact = array.set_volume(
                            module.params["name"], bandwidth_limit=""
                        )
                    except Exception:
                        module.fail_json(
                            msg="Volume {0} Bandwidth QoS removal failed.".format(
                                module.params["name"]
                            )
                        )
            elif int(human_to_bytes(module.params["bw_qos"])) in range(
                1048576, 549755813888
            ):
                changed = True
                if not module.check_mode:
                    try:
                        volfact = array.set_volume(
                            module.params["name"],
                            bandwidth_limit=module.params["bw_qos"],
                        )
                    except Exception:
                        module.fail_json(
                            msg="Volume {0} Bandwidth QoS change failed.".format(
                                module.params["name"]
                            )
                        )
            else:
                module.fail_json(
                    msg="Bandwidth QoS value {0} out of range.".format(
                        module.params["bw_qos"]
                    )
                )
    if module.params["iops_qos"] and IOPS_API_VERSION in api_version:
        if int(human_to_real(module.params["iops_qos"])) != int(vol_qos["iops_limit"]):
            if module.params["iops_qos"] == "0":
                # "0" removes the IOPs limit altogether.
                changed = True
                if not module.check_mode:
                    try:
                        volfact = array.set_volume(module.params["name"], iops_limit="")
                    except Exception:
                        module.fail_json(
                            msg="Volume {0} IOPs QoS removal failed.".format(
                                module.params["name"]
                            )
                        )
            elif int(human_to_real(module.params["iops_qos"])) in range(100, 100000000):
                changed = True
                if not module.check_mode:
                    try:
                        volfact = array.set_volume(
                            module.params["name"], iops_limit=module.params["iops_qos"]
                        )
                    except Exception:
                        module.fail_json(
                            msg="Volume {0} IOPs QoS change failed.".format(
                                module.params["name"]
                            )
                        )
            else:
                # Bug fix: this branch previously reported "Bandwidth QoS"
                # and formatted bw_qos for an out-of-range IOPs value.
                module.fail_json(
                    msg="IOPs QoS value {0} out of range.".format(
                        module.params["iops_qos"]
                    )
                )
    if VOLUME_PROMOTION_API_VERSION in api_version and module.params["promotion_state"]:
        vol6 = list(arrayv6.get_volumes(names=[module.params["name"]]).items)[0]
        if module.params["promotion_state"] != vol6.promotion_status:
            volume_patch = flasharray.VolumePatch(
                requested_promotion_state=module.params["promotion_state"]
            )
            changed = True
            if not module.check_mode:
                prom_res = arrayv6.patch_volumes(
                    names=[module.params["name"]], volume=volume_patch
                )
                if prom_res.status_code != 200:
                    module.fail_json(
                        msg="Failed to change promotion status for volume {0}.".format(
                            module.params["name"]
                        )
                    )
                else:
                    if not volfact:
                        volfact = array.get_volume(module.params["name"])
                    volfact["promotion_status"] = module.params["promotion_state"]
    if PRIORITY_API_VERSION in api_version and module.params["priority_operator"]:
        volv6 = list(arrayv6.get_volumes(names=[module.params["name"]]).items)[0]
        change_prio = False
        if (
            module.params["priority_operator"]
            != volv6.priority_adjustment.priority_adjustment_operator
        ):
            change_prio = True
            newop = module.params["priority_operator"]
        else:
            newop = volv6.priority_adjustment.priority_adjustment_operator
        if (
            module.params["priority_value"]
            and module.params["priority_value"]
            != volv6.priority_adjustment.priority_adjustment_value
        ):
            change_prio = True
            newval = module.params["priority_value"]
        elif (
            not module.params["priority_value"]
            and volv6.priority_adjustment.priority_adjustment_value != 0
        ):
            # No value supplied - reset a non-zero adjustment back to 0.
            change_prio = True
            newval = 0
        else:
            newval = volv6.priority_adjustment.priority_adjustment_value
        volumepatch = flasharray.VolumePatch(
            priority_adjustment=flasharray.PriorityAdjustment(
                priority_adjustment_operator=newop,
                priority_adjustment_value=newval,
            )
        )
        if change_prio:
            # Bug fix: report changed=True in check mode too; previously it
            # was only set when the API call was actually issued, so check
            # mode under-reported pending priority changes.
            changed = True
            if not module.check_mode:
                prio_res = arrayv6.patch_volumes(
                    names=[module.params["name"]], volume=volumepatch
                )
                if prio_res.status_code != 200:
                    module.fail_json(
                        msg="Failed to change DMM Priority Adjustment for {0}. Error: {1}".format(
                            module.params["name"], prio_res.errors[0].message
                        )
                    )
                else:
                    if not volfact:
                        volfact = array.get_volume(module.params["name"])
                    volfact["priority_operator"] = module.params["priority_operator"]
                    volfact["priority_value"] = module.params["priority_value"]
    if MULTI_VOLUME_VERSION in api_version:
        volume_data = list(arrayv6.get_volumes(names=[module.params["name"]]).items)[0]
        updatefacts = {
            "name": volume_data.name,
            "size": volume_data.provisioned,
            "serial": volume_data.serial,
            "created": time.strftime(
                "%Y-%m-%d %H:%M:%S", time.localtime(volume_data.created / 1000)
            ),
            "page83_naa": PURE_OUI + volume_data.serial.lower(),
            "nvme_nguid": _create_nguid(volume_data.serial.lower()),
        }
    else:
        updatefacts = array.get_volume(module.params["name"])
    vol_fact = {**volfact, **updatefacts}
    module.exit_json(changed=changed, volume=vol_fact)
+
+
def rename_volume(module, array):
    """Rename volume within a container, ie pod, vgroup or local array

    The rename target must stay inside the same container as the source,
    so the new name is prefixed with the source's pod or vgroup when one
    is present. Fails if the target name already exists, or names a live
    or destroyed protocol endpoint. Exits via ``module.exit_json``.
    """
    volfact = []
    changed = False
    pod_name = ""
    vgroup_name = ""
    target_name = module.params["rename"]
    target_exists = False
    if "::" in module.params["name"]:
        pod_name = module.params["name"].split("::")[0]
        target_name = pod_name + "::" + module.params["rename"]
        try:
            array.get_volume(target_name, pending=True)
            target_exists = True
        except Exception:
            target_exists = False
    elif "/" in module.params["name"]:
        vgroup_name = module.params["name"].split("/")[0]
        target_name = vgroup_name + "/" + module.params["rename"]
        try:
            array.get_volume(target_name, pending=True)
            target_exists = True
        except Exception:
            target_exists = False
    else:
        try:
            array.get_volume(target_name, pending=True)
            target_exists = True
        except Exception:
            target_exists = False
    if target_exists and get_endpoint(target_name, array):
        # Fixed typo in message: "endpoinnt" -> "endpoint".
        module.fail_json(
            msg="Target volume {0} is a protocol-endpoint".format(target_name)
        )
    if not target_exists:
        if get_destroyed_endpoint(target_name, array):
            module.fail_json(
                msg="Target volume {0} is a destroyed protocol-endpoint".format(
                    target_name
                )
            )
        else:
            changed = True
            if not module.check_mode:
                try:
                    volfact = array.rename_volume(
                        module.params["name"], module.params["rename"]
                    )
                except Exception:
                    module.fail_json(
                        msg="Rename volume {0} to {1} failed.".format(
                            module.params["name"], module.params["rename"]
                        )
                    )
    else:
        module.fail_json(msg="Target volume {0} already exists.".format(target_name))

    module.exit_json(changed=changed, volume=volfact)
+
+
def move_volume(module, array):
    """Move volume between pods, vgroups or local array

    I(move) is either the literal "local" (move the volume out of its
    pod/vgroup onto the array) or the name of a destination pod or vgroup.
    Validates stretched/linked/demoted pod restrictions and target name
    collisions before issuing the move. Exits via ``module.exit_json``.
    """
    volfact = []
    changed = vgroup_exists = target_exists = pod_exists = False
    api_version = array._list_available_rest_versions()
    pod_name = ""
    vgroup_name = ""
    volume_name = module.params["name"]
    # Strip any container prefix to get the bare volume name.
    if "::" in module.params["name"]:
        volume_name = module.params["name"].split("::")[1]
        pod_name = module.params["name"].split("::")[0]
    if "/" in module.params["name"]:
        volume_name = module.params["name"].split("/")[1]
        vgroup_name = module.params["name"].split("/")[0]
    if module.params["move"] == "local":
        # Moving back to the local array: the target container is empty.
        target_location = ""
        if "::" not in module.params["name"]:
            if "/" not in module.params["name"]:
                module.fail_json(
                    msg="Source and destination [local] cannot be the same."
                )
        try:
            target_exists = array.get_volume(volume_name, pending=True)
        except Exception:
            target_exists = False
        if target_exists:
            module.fail_json(msg="Target volume {0} already exists".format(volume_name))
    else:
        # Destination is a pod or a vgroup - work out which and validate it.
        try:
            pod_exists = array.get_pod(module.params["move"])
            if len(pod_exists["arrays"]) > 1:
                module.fail_json(msg="Volume cannot be moved into a stretched pod")
            if pod_exists["link_target_count"] != 0:
                module.fail_json(msg="Volume cannot be moved into a linked source pod")
            if PROMOTE_API_VERSION in api_version:
                if pod_exists["promotion_status"] == "demoted":
                    module.fail_json(msg="Volume cannot be moved into a demoted pod")
            pod_exists = bool(pod_exists)
        except Exception:
            pod_exists = False
        if pod_exists:
            try:
                target_exists = bool(
                    array.get_volume(
                        module.params["move"] + "::" + volume_name, pending=True
                    )
                )
            except Exception:
                target_exists = False
        try:
            vgroup_exists = bool(array.get_vgroup(module.params["move"]))
        except Exception:
            vgroup_exists = False
        if vgroup_exists:
            try:
                target_exists = bool(
                    array.get_volume(
                        module.params["move"] + "/" + volume_name, pending=True
                    )
                )
            except Exception:
                target_exists = False
        if target_exists:
            module.fail_json(msg="Volume of same name already exists in move location")
        if pod_exists and vgroup_exists:
            module.fail_json(
                msg="Move location {0} matches both a pod and a vgroup. Please rename one of these.".format(
                    module.params["move"]
                )
            )
        if not pod_exists and not vgroup_exists:
            module.fail_json(
                msg="Move location {0} does not exist.".format(module.params["move"])
            )
        if "::" in module.params["name"]:
            # NOTE(review): this inspects the *destination* pod; checking the
            # source pod (pod_name) may be the intent here - confirm upstream.
            pod = array.get_pod(module.params["move"])
            if len(pod["arrays"]) > 1:
                module.fail_json(msg="Volume cannot be moved out of a stretched pod")
            # Bug fix: the pod record key is "link_target_count" (as used in
            # the move-into-pod check above); "linked_target_count" raised
            # KeyError on this path.
            if pod["link_target_count"] != 0:
                module.fail_json(
                    msg="Volume cannot be moved out of a linked source pod"
                )
            if PROMOTE_API_VERSION in api_version:
                if pod["promotion_status"] == "demoted":
                    module.fail_json(msg="Volume cannot be moved out of a demoted pod")
        if "/" in module.params["name"]:
            if (
                vgroup_name == module.params["move"]
                or pod_name == module.params["move"]
            ):
                module.fail_json(msg="Source and destination cannot be the same")
        target_location = module.params["move"]
    if get_endpoint(target_location, array):
        # Fixed typo in message: "endpoinnt" -> "endpoint".
        module.fail_json(
            msg="Target volume {0} is a protocol-endpoint".format(target_location)
        )
    changed = True
    if not module.check_mode:
        try:
            volfact = array.move_volume(module.params["name"], target_location)
        except Exception:
            if target_location == "":
                target_location = "[local]"
            module.fail_json(
                msg="Move of volume {0} to {1} failed.".format(
                    module.params["name"], target_location
                )
            )
    module.exit_json(changed=changed, volume=volfact)
+
+
def delete_volume(module, array):
    """Soft-delete a volume, optionally eradicating it in the same run.

    Always reports changed=True. When I(eradicate) is set the volume is
    permanently removed immediately after deletion and the eradication
    result is returned as the volume facts.
    """
    vol_name = module.params["name"]
    volfact = []
    changed = True
    if not module.check_mode:
        try:
            array.destroy_volume(vol_name)
        except Exception:
            module.fail_json(msg="Delete volume {0} failed.".format(vol_name))
        else:
            if module.params["eradicate"]:
                try:
                    volfact = array.eradicate_volume(vol_name)
                except Exception:
                    module.fail_json(
                        msg="Eradicate volume {0} failed.".format(vol_name)
                    )
    module.exit_json(changed=changed, volume=volfact)
+
+
def eradicate_volume(module, array):
    """Permanently remove (eradicate) a previously deleted volume.

    Only issues the eradication when I(eradicate) is true and the module is
    not in check mode; changed=True is always reported.
    """
    vol_name = module.params["name"]
    volfact = []
    changed = True
    if not module.check_mode and module.params["eradicate"]:
        try:
            array.eradicate_volume(vol_name)
        except Exception:
            module.fail_json(
                msg="Eradication of volume {0} failed".format(vol_name)
            )
    module.exit_json(changed=changed, volume=volfact)
+
+
def recover_volume(module, array):
    """Restore a soft-deleted volume and return its facts.

    Always reports changed=True. After recovery the volume facts are
    augmented with the SCSI page-83 NAA identifier and the NVMe NGUID,
    both derived from the (lower-cased) volume serial number.
    """
    vol_name = module.params["name"]
    volfact = []
    changed = True
    if not module.check_mode:
        try:
            array.recover_volume(vol_name)
        except Exception:
            module.fail_json(msg="Recovery of volume {0} failed".format(vol_name))
        volfact = array.get_volume(vol_name)
        serial = volfact["serial"].lower()
        volfact["page83_naa"] = PURE_OUI + serial
        volfact["nvme_nguid"] = _create_nguid(serial)
    module.exit_json(changed=changed, volume=volfact)
+
+
def main():
    """Module entry point.

    Builds the argument spec, validates the protection-group and count
    parameters, then dispatches to the create/copy/update/move/rename/
    delete/eradicate/recover action implied by I(state) and the volume's
    current existence/destroyed status.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            target=dict(type="str"),
            move=dict(type="str"),
            rename=dict(type="str"),
            overwrite=dict(type="bool", default=False),
            eradicate=dict(type="bool", default=False),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            bw_qos=dict(type="str", aliases=["qos"]),
            iops_qos=dict(type="str"),
            pgroup=dict(type="str"),
            count=dict(type="int"),
            start=dict(type="int", default=0),
            digits=dict(type="int", default=1),
            suffix=dict(type="str"),
            priority_operator=dict(type="str", choices=["+", "-", "="]),
            priority_value=dict(type="int", choices=[-10, 0, 10]),
            size=dict(type="str"),
            with_default_protection=dict(type="bool", default=True),
            add_to_pgs=dict(type="list", elements="str"),
            promotion_state=dict(type="str", choices=["promoted", "demoted"]),
        )
    )

    mutually_exclusive = [
        ["size", "target"],
        ["move", "rename", "target", "eradicate"],
        ["rename", "move", "target", "eradicate"],
    ]
    required_together = [["priority_operator", "priority_value"]]

    module = AnsibleModule(
        argument_spec,
        mutually_exclusive=mutually_exclusive,
        required_together=required_together,
        supports_check_mode=True,
    )

    size = module.params["size"]
    bw_qos = module.params["bw_qos"]
    iops_qos = module.params["iops_qos"]
    state = module.params["state"]
    destroyed = False
    array = get_system(module)
    volume = get_volume(module, array)
    api_version = array._list_available_rest_versions()
    endpoint = get_endpoint(module.params["name"], array)

    if endpoint:
        module.fail_json(
            msg="Volume {0} is an endpoint. Use purefa_endpoint module.".format(
                module.params["name"]
            )
        )

    if module.params["pgroup"]:
        # Validate the protection group name and confirm it exists before
        # any volume is created.
        pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$")
        if ":" in module.params["pgroup"] and OFFLOAD_API_VERSION not in api_version:
            module.fail_json(
                msg="API version does not support offload protection groups."
            )
        if "::" in module.params["pgroup"] and POD_API_VERSION not in api_version:
            module.fail_json(
                msg="API version does not support ActiveCluster protection groups."
            )
        if ":" in module.params["pgroup"]:
            if "::" in module.params["pgroup"]:
                pgname = module.params["pgroup"].split("::")[1]
            else:
                pgname = module.params["pgroup"].split(":")[1]
            if not pattern.match(pgname):
                module.fail_json(
                    msg="Protection Group name {0} does not conform to naming convention".format(
                        pgname
                    )
                )
        else:
            if not pattern.match(module.params["pgroup"]):
                # Bug fix: this branch previously formatted the undefined
                # name `pgname`, raising NameError instead of failing cleanly.
                module.fail_json(
                    msg="Protection Group name {0} does not conform to naming convention".format(
                        module.params["pgroup"]
                    )
                )
        pgroup = get_pgroup(module, array)
        xpgroup = get_pending_pgroup(module, array)
        if "::" in module.params["pgroup"]:
            if not get_pod(module, array):
                module.fail_json(
                    msg="Pod {0} does not exist.".format(
                        module.params["pgroup"].split("::")[0]
                    )
                )
        if not pgroup:
            if xpgroup:
                module.fail_json(
                    msg="Protection Group {0} is currently deleted. Please restore to use.".format(
                        module.params["pgroup"]
                    )
                )
            else:
                module.fail_json(
                    msg="Protection Group {0} does not exist.".format(
                        module.params["pgroup"]
                    )
                )

    if not volume:
        destroyed = get_destroyed_volume(module.params["name"], array)
    target = get_target(module, array)
    if module.params["count"]:
        if not HAS_PURESTORAGE:
            module.fail_json(
                msg="py-pure-client sdk is required to support 'count' parameter"
            )
        if MULTI_VOLUME_VERSION not in api_version:
            module.fail_json(
                msg="'count' parameter is not supported until Purity//FA 6.0.0 or higher"
            )
        # Bug fix: range(1, 10) excluded 10, contradicting the error message.
        if module.params["digits"] and module.params["digits"] not in range(1, 11):
            module.fail_json(msg="'digits' must be in the range of 1 to 10")
        if module.params["start"] < 0:
            module.fail_json(msg="'start' must be a positive number")
        volume = get_multi_volumes(module)
        if state == "present" and not volume and size:
            create_multi_volume(module, array)
        elif state == "present" and not volume and not size:
            module.fail_json(msg="Size must be specified to create a new volume")
        elif state == "absent" and not volume:
            module.exit_json(changed=False)
        else:
            module.warn("Method not yet supported for multi-volume")
    else:
        if state == "present" and not volume and not destroyed and size:
            if DEFAULT_API_VERSION in api_version:
                create_multi_volume(module, array, True)
            else:
                create_volume(module, array)
        elif (
            state == "present"
            and volume
            and (size or bw_qos or iops_qos or module.params["promotion_state"])
        ):
            update_volume(module, array)
        elif state == "present" and not volume and module.params["move"]:
            module.fail_json(
                msg="Volume {0} cannot be moved - does not exist (maybe deleted)".format(
                    module.params["name"]
                )
            )
        elif state == "present" and volume and module.params["move"]:
            move_volume(module, array)
        elif state == "present" and volume and module.params["rename"]:
            rename_volume(module, array)
        elif (
            state == "present"
            and destroyed
            and not module.params["move"]
            and not module.params["rename"]
        ):
            recover_volume(module, array)
        elif state == "present" and destroyed and module.params["move"]:
            module.fail_json(
                msg="Volume {0} exists, but in destroyed state".format(
                    module.params["name"]
                )
            )
        elif state == "present" and volume and target:
            copy_from_volume(module, array)
        elif state == "present" and volume and not target:
            copy_from_volume(module, array)
        elif state == "absent" and volume:
            delete_volume(module, array)
        elif state == "absent" and destroyed:
            eradicate_volume(module, array)
        elif state == "present":
            if not volume and not size:
                module.fail_json(msg="Size must be specified to create a new volume")
        elif state == "absent" and not volume:
            module.exit_json(changed=False)

    module.exit_json(changed=False)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py
new file mode 100644
index 000000000..e9c7fdb7c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_volume_tags
+version_added: '1.0.0'
+short_description: Manage volume tags on Pure Storage FlashArrays
+description:
+- Manage volume tags for volumes on Pure Storage FlashArray.
+- Requires a minimum of Purity 6.0.0
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the volume.
+ type: str
+ required: true
+ namespace:
+ description:
+ - The name of tag namespace
+ default: default
+ type: str
+ copyable:
+ description:
+ - Define whether the volume tags are inherited on volume copies.
+ default: true
+ type: bool
+ kvp:
+ description:
+ - List of key value pairs to assign to the volume.
+    - Separate the key from the value using a colon (:) only.
+ - All items in list will use I(namespace) and I(copyable) settings.
+ - Maximum of 5 tags per volume
+ - See examples for exact formatting requirements
+ type: list
+ elements: str
+ required: true
+ state:
+ description:
+ - Define whether the volume tag(s) should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new tags in namespace test for volume foo
+ purestorage.flasharray.purefa_volume_tags:
+ name: foo
+ namespace: test
+ copyable: false
+ kvp:
+ - 'key1:value1'
+ - 'key2:value2'
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Remove an existing tag in namespace test for volume foo
+ purestorage.flasharray.purefa_volume_tags:
+ name: foo
+ namespace: test
+ kvp:
+ - 'key1:value1'
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Update an existing tag in namespace test for volume foo
+ purestorage.flasharray.purefa_volume_tags:
+ name: foo
+ namespace: test
+ kvp:
+ - 'key1:value2'
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+TAGS_API_VERSION = "1.19"
+
+
def get_volume(module, array):
    """Look up the named volume, including pending-eradication ones.

    Returns the volume record from the array, or None when the volume
    does not exist.
    """
    vol_name = module.params["name"]
    try:
        volume = array.get_volume(vol_name, pending=True)
    except Exception:
        volume = None
    return volume
+
+
def get_endpoint(name, array):
    """Look up *name* as a protocol endpoint (including pending deletion).

    Returns the endpoint record, or None when no such endpoint exists.
    """
    try:
        endpoint = array.get_volume(name, pending=True, protocol_endpoint=True)
    except Exception:
        endpoint = None
    return endpoint
+
+
def create_tag(module, array):
    """Add every key:value pair in I(kvp) to the volume as a new tag.

    Each pair is split on the first colon; all tags share the module's
    I(namespace) and I(copyable) settings. Always reports changed=True.
    """
    changed = True
    if not module.check_mode:
        for pair in module.params["kvp"]:
            try:
                # A malformed pair (no colon) raises inside the call and is
                # reported through fail_json like any other API error.
                array.add_tag_to_volume(
                    module.params["name"],
                    copyable=module.params["copyable"],
                    namespace=module.params["namespace"],
                    key=pair.split(":")[0],
                    value=pair.split(":")[1],
                )
            except Exception:
                module.fail_json(
                    msg="Failed to add tag KVP {0} to volume {1}".format(
                        pair, module.params["name"]
                    )
                )

    module.exit_json(changed=changed)
+
+
def update_tag(module, array, current_tags):
    """Update Volume Tag

    For every requested key:value pair: if a tag with the same key exists
    in the module's namespace, re-add it (which replaces the value) when
    the value differs; otherwise add it as a brand new tag.
    Exits via ``module.exit_json``.

    NOTE(review): values are taken as ``split(":")[1]``, so a value that
    itself contains a colon is silently truncated, and a pair with no
    colon raises an uncaught IndexError in the value comparison below -
    confirm inputs are validated upstream.
    """
    changed = False
    for tag in range(0, len(module.params["kvp"])):
        tag_exists = False
        for current_tag in range(0, len(current_tags)):
            # Match on key within the requested namespace only.
            if (
                module.params["kvp"][tag].split(":")[0]
                == current_tags[current_tag]["key"]
                and module.params["namespace"] == current_tags[current_tag]["namespace"]
            ):
                tag_exists = True
                if (
                    module.params["kvp"][tag].split(":")[1]
                    != current_tags[current_tag]["value"]
                ):
                    changed = True
                    if not module.check_mode:
                        try:
                            # Re-adding an existing key replaces its value.
                            array.add_tag_to_volume(
                                module.params["name"],
                                namespace=module.params["namespace"],
                                key=module.params["kvp"][tag].split(":")[0],
                                value=module.params["kvp"][tag].split(":")[1],
                            )
                        except Exception:
                            module.fail_json(
                                msg="Failed to update tag '{0}' from volume {1}".format(
                                    module.params["kvp"][tag].split(":")[0],
                                    module.params["name"],
                                )
                            )

        if not tag_exists:
            # No tag with this key in the namespace - create it.
            changed = True
            if not module.check_mode:
                try:
                    array.add_tag_to_volume(
                        module.params["name"],
                        namespace=module.params["namespace"],
                        key=module.params["kvp"][tag].split(":")[0],
                        value=module.params["kvp"][tag].split(":")[1],
                    )
                except Exception:
                    module.fail_json(
                        msg="Failed to add tag KVP {0} to volume {1}".format(
                            module.params["kvp"][tag].split(":")[0],
                            module.params["name"],
                        )
                    )
    module.exit_json(changed=changed)
+
+
def delete_tag(module, array, current_tags):
    """Remove each requested tag that currently exists on the volume.

    Only the key part of every key:value pair is used for matching; a tag
    is removed when its key and the module's namespace both match an
    existing tag. Exits via ``module.exit_json``.
    """
    changed = False
    for pair in module.params["kvp"]:
        key = pair.split(":")[0]
        for current in current_tags:
            if key == current["key"] and module.params["namespace"] == current["namespace"]:
                changed = True
                if not module.check_mode:
                    try:
                        array.remove_tag_from_volume(
                            module.params["name"],
                            namespace=module.params["namespace"],
                            key=key,
                        )
                    except Exception:
                        module.fail_json(
                            msg="Failed to remove tag KVP '{0}' from volume {1}".format(
                                pair, module.params["name"]
                            )
                        )
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point.

    Validates parameters and the volume, reads the tags currently set in
    the requested namespace, then dispatches to create/update/delete
    based on I(state) and whether any tags already exist.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type="str", required=True),
            copyable=dict(type="bool", default=True),
            namespace=dict(type="str", default="default"),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            kvp=dict(type="list", elements="str", required=True),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    state = module.params["state"]
    if module.params["kvp"] is not None:
        # Only the first five pairs are used (5-tags-per-volume limit);
        # sorting makes processing order deterministic.
        module.params["kvp"] = sorted(module.params["kvp"][0:5])
    else:
        module.fail_json(msg="No KVPs specified. Minimum of 1 is required.")
    array = get_system(module)
    api_version = array._list_available_rest_versions()

    if TAGS_API_VERSION not in api_version:
        module.fail_json(
            msg="Volume tags are not supported. Purity 6.0.0, or higher, is required."
        )

    volume = get_volume(module, array)
    endpoint = get_endpoint(module.params["name"], array)

    if not volume:
        module.fail_json(msg="Volume {0} does not exist.".format(module.params["name"]))
    if endpoint:
        module.fail_json(
            msg="Volume {0} is an endpoint. Tags not allowed.".format(
                module.params["name"]
            )
        )
    # A "." in the name denotes a snapshot - query with snap=True.
    if "." in module.params["name"]:
        current_tags = array.get_volume(
            module.params["name"],
            snap=True,
            pending=True,
            tags=True,
            namespace=module.params["namespace"],
        )
    else:
        current_tags = array.get_volume(
            module.params["name"],
            pending=True,
            tags=True,
            namespace=module.params["namespace"],
        )

    if state == "present" and not current_tags:
        create_tag(module, array)
    elif state == "absent" and not current_tags:
        module.exit_json(changed=False)
    elif state == "present" and current_tags:
        update_tag(module, array, current_tags)
    elif state == "absent" and current_tags:
        delete_tag(module, array, current_tags)

    module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()