author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
commit     38b7c80217c4e72b1d8988eb1e60bb6e77334114 (patch)
tree       356e9fd3762877d07cde52d21e77070aeff7e789 /ansible_collections/purestorage/flashblade/plugins
parent     Adding upstream version 7.7.0+dfsg. (diff)
Adding upstream version 9.4.0+dfsg. (upstream/9.4.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/purestorage/flashblade/plugins')
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py             48
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py           304
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py   63
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py          8
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py               28
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py               174
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py       40
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_hardware.py         187
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py             658
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py        60
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py              23
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py           908
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py            167
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py           69
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py          2
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py       1
16 files changed, 2089 insertions, 651 deletions
diff --git a/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py b/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
index cf987a3e5..87b27a821 100644
--- a/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
+++ b/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
@@ -32,6 +32,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
+HAS_DISTRO = True
+try:
+ import distro
+except ImportError:
+ HAS_DISTRO = False
+
HAS_PURITY_FB = True
try:
from purity_fb import PurityFb
@@ -47,19 +53,27 @@ except ImportError:
from os import environ
import platform
-VERSION = "1.4"
+VERSION = "1.5"
USER_AGENT_BASE = "Ansible"
API_AGENT_VERSION = "1.5"
def get_blade(module):
"""Return System Object or Fail"""
- user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
- "base": USER_AGENT_BASE,
- "class": __name__,
- "version": VERSION,
- "platform": platform.platform(),
- }
+ if HAS_DISTRO:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": distro.name(pretty=True),
+ }
+ else:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
blade_name = module.params["fb_url"]
api = module.params["api_token"]
@@ -100,12 +114,20 @@ def get_blade(module):
def get_system(module):
"""Return System Object or Fail"""
- user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
- "base": USER_AGENT_BASE,
- "class": __name__,
- "version": VERSION,
- "platform": platform.platform(),
- }
+ if HAS_DISTRO:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": distro.name(pretty=True),
+ }
+ else:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
blade_name = module.params["fb_url"]
api = module.params["api_token"]
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
index 67b6b1545..27cd7e317 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
@@ -59,11 +59,72 @@ options:
description:
    - The type of bucket to be created. Also referred to as VSO Mode.
- Requires Purity//FB 3.3.3 or higher
- - I(multi-site) type can only be used after feature is
+ - I(multi-site-writable) type can only be used after feature is
enabled by Pure Technical Support
type: str
- choices: [ "classic", "multi-site" ]
+ choices: [ "classic", "multi-site-writable" ]
version_added: '1.10.0'
+ quota:
+ description:
+ - User quota in M, G, T or P units. This cannot be 0.
+ - This value will override the object store account's default bucket quota.
+ type: str
+ version_added: '1.12.0'
+ hard_limit:
+ description:
+ - Whether the I(quota) value is enforced or not.
+ - If not provided the object store account default value will be used.
+ type: bool
+ version_added: '1.12.0'
+ retention_lock:
+ description:
+ - Set retention lock level for the bucket
+ - Once set to I(ratcheted) can only be lowered by Pure Technical Services
+ type: str
+ choices: [ "ratcheted", "unlocked" ]
+ default: unlocked
+ version_added: '1.12.0'
+ retention_mode:
+ description:
+ - The retention mode used to apply locks on new objects if none is specified by the S3 client
+ - Use "" to clear
+ - Once set to I(compliance) this can only be changed by contacting Pure Technical Services
+ type: str
+ choices: [ "compliance", "governance", "" ]
+ version_added: '1.12.0'
+ object_lock_enabled:
+ description:
+ - If set to true, then S3 APIs relating to object lock may be used
+ type: bool
+ default: false
+ version_added: '1.12.0'
+ freeze_locked_objects:
+ description:
+ - If set to true, a locked object will be read-only and no new versions of
+ the object may be created due to modifications
+ - After enabling, can be disabled only by contacting Pure Technical Services
+ type: bool
+ default: false
+ version_added: '1.12.0'
+ default_retention:
+ description:
+ - The retention period, in days, used to apply locks on new objects if
+ none is specified by the S3 client
+ - Valid values between 1 and 365000
+ - Use "" to clear
+ type: str
+ version_added: '1.12.0'
+ block_new_public_policies:
+ description:
+ - If set to true, adding bucket policies that grant public access to a bucket is not allowed.
+ type: bool
+ version_added: 1.15.0
+ block_public_access:
+ description:
+ - If set to true, access to a bucket with a public policy is restricted to only authenticated
+ users within the account that bucket belongs to.
+ type: bool
+ version_added: 1.15.0
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
@@ -125,7 +186,7 @@ try:
except ImportError:
HAS_PYPURECLIENT = False
-from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
get_blade,
get_system,
@@ -135,7 +196,9 @@ from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb impo
MIN_REQUIRED_API_VERSION = "1.5"
VERSIONING_VERSION = "1.9"
-VSO_VERSION = "2.4"
+VSO_VERSION = "2.5"
+QUOTA_VERSION = "2.8"
+MODE_VERSION = "2.12"
def get_s3acc(module, blade):
@@ -161,18 +224,56 @@ def get_bucket(module, blade):
def create_bucket(module, blade):
"""Create bucket"""
changed = True
+ bladev2 = get_system(module)
if not module.check_mode:
try:
api_version = blade.api_version.list_versions().versions
- if VSO_VERSION in api_version and module.params["mode"]:
- bladev2 = get_system(module)
- res = bladev2.post_buckets(
- names=[module.params["name"]],
- bucket=flashblade.BucketPost(
+ if VSO_VERSION in api_version:
+ account_defaults = list(
+ bladev2.get_object_store_accounts(
+ names=[module.params["account"]]
+ ).items
+ )[0]
+ if QUOTA_VERSION in api_version:
+ if not module.params["hard_limit"]:
+ module.params["hard_limit"] = (
+ account_defaults.hard_limit_enabled
+ )
+ if module.params["quota"]:
+ quota = str(human_to_bytes(module.params["quota"]))
+ else:
+ if not account_defaults.quota_limit:
+ quota = ""
+ else:
+ quota = str(account_defaults.quota_limit)
+ if not module.params["retention_mode"]:
+ module.params["retention_mode"] = ""
+ if not module.params["default_retention"]:
+ module.params["default_retention"] = ""
+ else:
+ module.params["default_retention"] = str(
+ int(module.params["default_retention"]) * 86400000
+ )
+ if module.params["object_lock_enabled"]:
+ bucket = flashblade.BucketPost(
+ account=flashblade.Reference(name=module.params["account"]),
+ bucket_type=module.params["mode"],
+ hard_limit_enabled=module.params["hard_limit"],
+ quota_limit=quota,
+ )
+ else:
+ bucket = flashblade.BucketPost(
+ account=flashblade.Reference(name=module.params["account"]),
+ bucket_type=module.params["mode"],
+ hard_limit_enabled=module.params["hard_limit"],
+ quota_limit=quota,
+ )
+ else:
+ bucket = flashblade.BucketPost(
account=flashblade.Reference(name=module.params["account"]),
bucket_type=module.params["mode"],
- ),
- )
+ )
+ res = bladev2.post_buckets(names=[module.params["name"]], bucket=bucket)
if res.status_code != 200:
module.fail_json(
msg="Object Store Bucket {0} creation failed. Error: {1}".format(
@@ -180,37 +281,79 @@ def create_bucket(module, blade):
res.errors[0].message,
)
)
- elif VERSIONING_VERSION in api_version:
- attr = BucketPost()
- attr.account = Reference(name=module.params["account"])
- blade.buckets.create_buckets(names=[module.params["name"]], bucket=attr)
- else:
- attr = Bucket()
- attr.account = Reference(name=module.params["account"])
- blade.buckets.create_buckets(
- names=[module.params["name"]], account=attr
- )
- if (
- module.params["versioning"] != "absent"
- and VERSIONING_VERSION in api_version
- ):
- try:
- blade.buckets.update_buckets(
- names=[module.params["name"]],
- bucket=BucketPatch(versioning=module.params["versioning"]),
+ if QUOTA_VERSION in api_version:
+ bucket = flashblade.BucketPatch(
+ retention_lock=module.params["retention_lock"],
+ object_lock_config=flashblade.ObjectLockConfigRequestBody(
+ default_retention_mode=module.params["retention_mode"],
+ enabled=module.params["object_lock_enabled"],
+ freeze_locked_objects=module.params[
+ "freeze_locked_objects"
+ ],
+ default_retention=module.params["default_retention"],
+ ),
+ versioning=module.params["versioning"],
)
- except Exception:
+ else:
+ bucket = flashblade.BucketPatch(
+ retention_lock=module.params["retention_lock"],
+ versioning=module.params["versioning"],
+ )
+ res = bladev2.patch_buckets(
+ names=[module.params["name"]], bucket=bucket
+ )
+ if res.status_code != 200:
module.fail_json(
- msg="Object Store Bucket {0} Created but versioning state failed".format(
- module.params["name"]
+ msg="Object Store Bucket {0} creation update failed. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
)
)
+ else:
+ attr = BucketPost()
+ attr.account = Reference(name=module.params["account"])
+ blade.buckets.create_buckets(names=[module.params["name"]], bucket=attr)
+ if module.params["versioning"] != "absent":
+ try:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]],
+ bucket=BucketPatch(versioning=module.params["versioning"]),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0} Created but versioning state failed".format(
+ module.params["name"]
+ )
+ )
except Exception:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]], bucket=BucketPatch(destroyed=True)
+ )
+ blade.buckets.delete_buckets(names=[module.params["name"]])
module.fail_json(
msg="Object Store Bucket {0}: Creation failed".format(
module.params["name"]
)
)
+ if MODE_VERSION in api_version:
+ if not module.params["block_new_public_policies"]:
+ module.params["block_new_public_policies"] = False
+ if not module.params["block_public_access"]:
+ module.params["block_public_access"] = False
+ pac = BucketPatch(
+ public_access_config=flashblade.PublicAccessConfig(
+ block_new_public_policies=module.params[
+ "block_new_public_policies"
+ ],
+ block_public_access=module.params["block_public_access"],
+ )
+ )
+ res = bladev2.patch_buckets(bucket=pac, names=[module.params["name"]])
+ if res.status_code != 200:
+ module.warn(
+                "Failed to set Public Access config correctly for bucket {0}. "
+ "Error: {1}".format(module.params["name"], res.errors[0].message)
+ )
module.exit_json(changed=changed)
@@ -272,13 +415,42 @@ def recover_bucket(module, blade):
def update_bucket(module, blade, bucket):
"""Update Bucket"""
changed = False
+ change_pac = False
+ bladev2 = get_system(module)
+ bucket_detail = list(bladev2.get_buckets(names=[module.params["name"]]).items)[0]
api_version = blade.api_version.list_versions().versions
if VSO_VERSION in api_version:
- if module.params["mode"]:
- bladev2 = get_system(module)
- bucket_detail = bladev2.get_buckets(names=[module.params["name"]])
- if list(bucket_detail.items)[0].bucket_type != module.params["mode"]:
- module.warn("Changing bucket type is not permitted.")
+ if module.params["mode"] and bucket_detail.bucket_type != module.params["mode"]:
+ module.warn("Changing bucket type is not permitted.")
+ if QUOTA_VERSION in api_version:
+ if (
+ bucket_detail.retention_lock == "ratcheted"
+ and getattr(
+ bucket_detail.object_lock_config, "default_retention_mode", None
+ )
+ == "compliance"
+ and module.params["retention_mode"] != "compliance"
+ ):
+ module.warn(
+                "Changing retention_mode can only be performed by Pure Technical Support."
+ )
+ if not module.params["object_lock_enabled"] and getattr(
+ bucket_detail.object_lock_config, "enabled", False
+ ):
+ module.warn("Object lock cannot be disabled.")
+ if not module.params["freeze_locked_objects"] and getattr(
+ bucket_detail.object_lock_config, "freeze_locked_objects", False
+ ):
+            module.warn("Freeze locked objects cannot be disabled.")
+ if getattr(bucket_detail.object_lock_config, "default_retention", 0) > 1:
+ if (
+ bucket_detail.object_lock_config.default_retention / 86400000
+ > int(module.params["default_retention"])
+ and bucket_detail.retention_lock == "ratcheted"
+ ):
+ module.warn(
+ "Default retention can only be reduced by Pure Technical Support."
+ )
if VERSIONING_VERSION in api_version:
if bucket.versioning != "none":
@@ -316,7 +488,39 @@ def update_bucket(module, blade, bucket):
module.params["name"]
)
)
- module.exit_json(changed=changed)
+ if MODE_VERSION in api_version:
+ current_pac = {
+ "block_new_public_policies": bucket_detail.public_access_config.block_new_public_policies,
+ "block_public_access": bucket_detail.public_access_config.block_public_access,
+ }
+ if module.params["block_new_public_policies"] is None:
+ new_public_policies = current_pac["block_new_public_policies"]
+ else:
+ new_public_policies = module.params["block_new_public_policies"]
+ if module.params["block_public_access"] is None:
+ new_public_access = current_pac["block_public_access"]
+ else:
+ new_public_access = module.params["block_public_access"]
+ new_pac = {
+ "block_new_public_policies": new_public_policies,
+ "block_public_access": new_public_access,
+ }
+ if current_pac != new_pac:
+ change_pac = True
+ pac = BucketPatch(
+ public_access_config=flashblade.PublicAccessConfig(
+                    block_new_public_policies=new_pac["block_new_public_policies"],
+                    block_public_access=new_pac["block_public_access"],
+ )
+ )
+ if change_pac and not module.check_mode:
+ res = bladev2.patch_buckets(bucket=pac, names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update Public Access config correctly for bucket {0}. "
+ "Error: {1}".format(module.params["name"], res.errors[0].message)
+ )
+ module.exit_json(changed=(changed or change_pac))
def eradicate_bucket(module, blade):
@@ -341,7 +545,21 @@ def main():
name=dict(required=True),
account=dict(required=True),
eradicate=dict(default="false", type="bool"),
- mode=dict(type="str", choices=["classic", "multi-site"]),
+ mode=dict(
+ type="str",
+ choices=["classic", "multi-site-writable"],
+ ),
+ retention_mode=dict(type="str", choices=["compliance", "governance", ""]),
+ default_retention=dict(type="str"),
+ retention_lock=dict(
+ type="str", choices=["ratcheted", "unlocked"], default="unlocked"
+ ),
+ hard_limit=dict(type="bool"),
+ block_new_public_policies=dict(type="bool"),
+ block_public_access=dict(type="bool"),
+ object_lock_enabled=dict(type="bool", default=False),
+ freeze_locked_objects=dict(type="bool", default=False),
+ quota=dict(type="str"),
versioning=dict(
default="absent", choices=["enabled", "suspended", "absent"]
),
@@ -362,9 +580,13 @@ def main():
api_version = blade.api_version.list_versions().versions
if MIN_REQUIRED_API_VERSION not in api_version:
module.fail_json(msg="Purity//FB must be upgraded to support this module.")
- if module.params["mode"] and VSO_VERSION not in api_version:
- module.fail_json(msg="VSO mode requires Purity//FB 3.3.3 or higher.")
+ # From REST 2.12 classic is no longer the default mode
+ if MODE_VERSION in api_version:
+ if not module.params["mode"]:
+ module.params["mode"] = "multi-site-writable"
+ elif not module.params["mode"]:
+ module.params["mode"] = "classic"
bucket = get_bucket(module, blade)
if not get_s3acc(module, blade):
module.fail_json(
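Since REST 2.12 no longer implies classic as the default (see the version check above), a play that still needs a classic bucket on newer arrays would have to request the mode explicitly. A hypothetical task, with placeholder names and credentials:

- name: Create a classic-mode bucket on arrays exposing REST 2.12 or later
  purestorage.flashblade.purefb_bucket:
    name: legacybucket
    account: legacyaccount
    mode: classic
    fb_url: 10.10.10.2
    api_token: T-00000000-0000-0000-0000-000000000000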
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
index 6ac3775ae..265fd5481 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
@@ -58,6 +58,14 @@ options:
- Name of remote credential name to use.
required: false
type: str
+ cascading:
+ description:
+ - Objects replicated to this bucket via a replica link from
+ another array will also be replicated by this link to the
+ remote bucket
+ type: bool
+ default: false
+ version_added: "1.14.0"
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
@@ -96,11 +104,19 @@ try:
except ImportError:
HAS_PURITY_FB = False
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PYPURECLIENT = False
+
MIN_REQUIRED_API_VERSION = "1.9"
+CASCADE_API_VERSION = "2.2"
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
get_blade,
+ get_system,
purefb_argument_spec,
)
@@ -167,24 +183,46 @@ def get_connected(module, blade):
def create_rl(module, blade, remote_cred):
"""Create Bucket Replica Link"""
changed = True
+ api_version = blade.api_version.list_versions().versions
if not module.check_mode:
- try:
- if not module.params["target_bucket"]:
- module.params["target_bucket"] = module.params["name"]
- else:
- module.params["target_bucket"] = module.params["target_bucket"].lower()
- blade.bucket_replica_links.create_bucket_replica_links(
+ if not module.params["target_bucket"]:
+ module.params["target_bucket"] = module.params["name"]
+ else:
+ module.params["target_bucket"] = module.params["target_bucket"].lower()
+ if CASCADE_API_VERSION in api_version:
+ bladev2 = get_system(module)
+ new_rl = flashblade.BucketReplicaLinkPost(
+ cascading_enabled=module.params["cascading"],
+ paused=module.params["paused"],
+ )
+ res = bladev2.post_bucket_replica_links(
local_bucket_names=[module.params["name"]],
remote_bucket_names=[module.params["target_bucket"]],
remote_credentials_names=[remote_cred.name],
- bucket_replica_link=BucketReplicaLink(paused=module.params["paused"]),
+ bucket_replica_link=new_rl,
)
- except Exception:
- module.fail_json(
- msg="Failed to create bucket replica link {0}.".format(
- module.params["name"]
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create bucket replica link {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ try:
+ blade.bucket_replica_links.create_bucket_replica_links(
+ local_bucket_names=[module.params["name"]],
+ remote_bucket_names=[module.params["target_bucket"]],
+ remote_credentials_names=[remote_cred.name],
+ bucket_replica_link=BucketReplicaLink(
+ paused=module.params["paused"]
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create bucket replica link {0}.".format(
+ module.params["name"]
+ )
)
- )
module.exit_json(changed=changed)
@@ -245,6 +283,7 @@ def main():
target=dict(type="str"),
target_bucket=dict(type="str"),
paused=dict(type="bool", default=False),
+ cascading=dict(type="bool", default=False),
credential=dict(type="str"),
state=dict(default="present", choices=["present", "absent"]),
)
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
index 508c6a322..846351453 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
@@ -117,7 +117,9 @@ RETURN = r"""
HAS_PURITYFB = True
try:
- from purity_fb import PurityFb, ArrayConnection, ArrayConnectionPost
+ from purity_fb import PurityFb
+ from purity_fb import ArrayConnection as ArrayConnectionv1
+ from purity_fb import ArrayConnectionPost as ArrayConnectionPostv1
except ImportError:
HAS_PURITYFB = False
@@ -224,7 +226,7 @@ def create_connection(module, blade):
.items[0]
.connection_key
)
- connection_info = ArrayConnectionPost(
+ connection_info = ArrayConnectionPostv1(
management_address=module.params["target_url"],
encrypted=module.params["encrypted"],
connection_key=connection_key,
@@ -346,7 +348,7 @@ def update_connection(module, blade, target_blade):
module.fail_json(
msg="Cannot turn array connection encryption on if file system replica links exist"
)
- new_attr = ArrayConnection(encrypted=module.params["encrypted"])
+ new_attr = ArrayConnectionv1(encrypted=module.params["encrypted"])
changed = True
if not module.check_mode:
try:
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
index 6433d3d9d..2a81648e5 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
@@ -67,6 +67,15 @@ options:
description:
- Sets the password of the bind_user user name account.
type: str
+ force_bind_password:
+ type: bool
+ default: true
+ description:
+ - Will force the bind password to be reset even if the bind user password
+ is unchanged.
+ - If set to I(false) and I(bind_user) is unchanged the password will not
+ be reset.
+ version_added: 1.16.0
bind_user:
description:
- Sets the user name that can be used to bind to and query the directory.
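An illustrative task for the new force_bind_password behaviour documented above, avoiding a password reset when the bind user is unchanged; the directory server, DNs and account values are placeholders:

- name: Update SMB directory service without forcing a bind password reset
  purestorage.flashblade.purefb_ds:
    dstype: smb
    uri: "ldaps://ad.example.com"
    base_dn: "DC=example,DC=com"
    bind_user: svc_ansible
    bind_password: example-password
    force_bind_password: false
    enable: true
    fb_url: 10.10.10.2
    api_token: T-00000000-0000-0000-0000-000000000000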
@@ -257,6 +266,8 @@ def delete_ds(module, blade):
def update_ds(module, blade):
"""Update Directory Service"""
mod_ds = False
+ changed = False
+ password_required = False
attr = {}
try:
ds_now = blade.directory_services.list_directory_services(
@@ -278,21 +289,31 @@ def update_ds(module, blade):
if sorted(module.params["uri"][0:30]) != sorted(ds_now.uris):
attr["uris"] = module.params["uri"][0:30]
mod_ds = True
+ password_required = True
if module.params["base_dn"]:
if module.params["base_dn"] != ds_now.base_dn:
attr["base_dn"] = module.params["base_dn"]
mod_ds = True
if module.params["bind_user"]:
if module.params["bind_user"] != ds_now.bind_user:
+ password_required = True
attr["bind_user"] = module.params["bind_user"]
mod_ds = True
+ elif module.params["force_bind_password"]:
+ password_required = True
+ mod_ds = True
if module.params["enable"]:
if module.params["enable"] != ds_now.enabled:
attr["enabled"] = module.params["enable"]
mod_ds = True
- if module.params["bind_password"]:
- attr["bind_password"] = module.params["bind_password"]
- mod_ds = True
+ if password_required:
+ if module.params["bind_password"]:
+ attr["bind_password"] = module.params["bind_password"]
+ mod_ds = True
+ else:
+ module.fail_json(
+ msg="'bind_password' must be provided for this task"
+ )
if module.params["dstype"] == "smb":
if module.params["join_ou"] != ds_now.smb.join_ou:
attr["smb"] = {"join_ou": module.params["join_ou"]}
@@ -397,6 +418,7 @@ def main():
state=dict(type="str", default="present", choices=["absent", "present"]),
enable=dict(type="bool", default=False),
bind_password=dict(type="str", no_log=True),
+ force_bind_password=dict(type="bool", default=True),
bind_user=dict(type="str"),
base_dn=dict(type="str"),
join_ou=dict(type="str"),
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
index a07180793..8d332e8b7 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
@@ -179,6 +179,27 @@ options:
- Only valid for Purity//FB 3.3.0 or higher
type: str
version_added: "1.9.0"
+ share_policy:
+ description:
+ - Name of SMB share policy to assign to filesystem
+ - Only valid with REST 2.10 or higher
+ - Remove policy with empty string
+ type: str
+ version_added: "1.12.0"
+ client_policy:
+ description:
+ - Name of SMB client policy to assign to filesystem
+ - Only valid with REST 2.10 or higher
+ - Remove policy with empty string
+ type: str
+ version_added: "1.12.0"
+ continuous_availability:
+ description:
+      - Defines whether the file system will be continuously available during
+        disruptive scenarios such as network disruption, blade failover, etc.
+ type: bool
+ default: true
+ version_added: "1.15.0"
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
@@ -267,6 +288,7 @@ try:
FileSystemPatch,
NfsPatch,
Reference,
+ Smb,
)
except ImportError:
HAS_PYPURECLIENT = False
@@ -290,6 +312,8 @@ NFSV4_API_VERSION = "1.6"
REPLICATION_API_VERSION = "1.9"
MULTIPROTOCOL_API_VERSION = "1.11"
EXPORT_POLICY_API_VERSION = "2.3"
+SMB_POLICY_API_VERSION = "2.10"
+CA_API_VERSION = "2.12"
def get_fs(module, blade):
@@ -488,12 +512,71 @@ def create_fs(module, blade):
res.errors[0].message,
)
)
+ if SMB_POLICY_API_VERSION in api_version:
+ system = get_system(module)
+ if module.params["client_policy"]:
+ export_attr = FileSystemPatch(
+ smb=Smb(
+ client_policy=Reference(name=module.params["client_policy"])
+ )
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=export_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Filesystem {0} created, but failed to assign client "
+ "policy {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["client_policy"],
+ res.errors[0].message,
+ )
+ )
+ if module.params["share_policy"]:
+ export_attr = FileSystemPatch(
+ smb=Smb(share_policy=Reference(name=module.params["share_policy"]))
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=export_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Filesystem {0} created, but failed to assign share "
+ "policy {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["share_policy"],
+ res.errors[0].message,
+ )
+ )
+ if CA_API_VERSION in api_version:
+ ca_attr = FileSystemPatch(
+ smb=Smb(
+ continuous_availability_enabled=module.params[
+ "continuous_availability"
+ ]
+ )
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=ca_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+                msg="Filesystem {0} created, but failed to set continuous availability. "
+ "Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+
module.exit_json(changed=changed)
def modify_fs(module, blade):
"""Modify Filesystem"""
changed = False
+ change_export = False
+ change_share = False
+ change_ca = False
mod_fs = False
attr = {}
if module.params["policy"] and module.params["policy_state"] == "present":
@@ -689,7 +772,7 @@ def modify_fs(module, blade):
module.params["name"]
)
)
- attr["requested_promotion_state"] = module.params["promote"]
+ attr["requested_promotion_state"] = "demoted"
mod_fs = True
if mod_fs:
changed = True
@@ -721,12 +804,12 @@ def modify_fs(module, blade):
module.params["name"], message
)
)
+ system = get_system(module)
+ current_fs = list(
+ system.get_file_systems(filter="name='" + module.params["name"] + "'").items
+ )[0]
if EXPORT_POLICY_API_VERSION in api_version and module.params["export_policy"]:
- system = get_system(module)
change_export = False
- current_fs = list(
- system.get_file_systems(filter="name='" + module.params["name"] + "'").items
- )[0]
if (
current_fs.nfs.export_policy.name
and current_fs.nfs.export_policy.name != module.params["export_policy"]
@@ -752,8 +835,84 @@ def modify_fs(module, blade):
res.errors[0].message,
)
)
+ if SMB_POLICY_API_VERSION in api_version and module.params["client_policy"]:
+ change_client = False
+ if (
+ current_fs.smb.client_policy.name
+ and current_fs.smb.client_policy.name != module.params["client_policy"]
+ ):
+ change_client = True
+ if not current_fs.smb.client_policy.name and module.params["client_policy"]:
+ change_client = True
+ if change_client and not module.check_mode:
+ client_attr = FileSystemPatch(
+ smb=Smb(client_policy=Reference(name=module.params["client_policy"]))
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=client_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to modify client policy {1} for "
+ "filesystem {0}. Error: {2}".format(
+ module.params["name"],
+ module.params["client_policy"],
+ res.errors[0].message,
+ )
+ )
+ if SMB_POLICY_API_VERSION in api_version and module.params["share_policy"]:
+ change_share = False
+ if (
+ current_fs.smb.share_policy.name
+ and current_fs.smb.share_policy.name != module.params["share_policy"]
+ ):
+ change_share = True
+ if not current_fs.smb.share_policy.name and module.params["share_policy"]:
+ change_share = True
+ if change_share and not module.check_mode:
+ share_attr = FileSystemPatch(
+ smb=Smb(share_policy=Reference(name=module.params["share_policy"]))
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=share_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to modify share policy {1} for "
+ "filesystem {0}. Error: {2}".format(
+ module.params["name"],
+ module.params["share_policy"],
+ res.errors[0].message,
+ )
+ )
+ if CA_API_VERSION in api_version:
+ change_ca = False
+ if (
+ module.params["continuous_availability"]
+ != current_fs.continuous_availability_enabled
+ ):
+ change_ca = True
+        if change_ca and not module.check_mode:
+ ca_attr = FileSystemPatch(
+ smb=Smb(
+ continuous_availability_enabled=module.params[
+ "continuous_availability"
+ ]
+ )
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=ca_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to modify continuous availability for "
+ "filesystem {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
- module.exit_json(changed=changed)
+ module.exit_json(changed=(changed or change_export or change_share or change_ca))
def _delete_fs(module, blade):
@@ -910,6 +1069,9 @@ def main():
),
size=dict(type="str"),
export_policy=dict(type="str"),
+ share_policy=dict(type="str"),
+ client_policy=dict(type="str"),
+ continuous_availability=dict(type="bool", default="true"),
)
)
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
index f96903788..ca52a64bd 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
@@ -53,6 +53,12 @@ options:
- Name of filesystem snapshot policy to apply to the replica link.
required: false
type: str
+ in_progress:
+ description:
+ - Confirmation that you wish to delete a filesystem replica link
    - This may cancel any in-progress replication transfers
+ type: bool
+ default: false
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
@@ -97,9 +103,12 @@ MIN_REQUIRED_API_VERSION = "1.9"
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
get_blade,
+ get_system,
purefb_argument_spec,
)
+DELETE_RL_API_VERSION = "2.10"
+
def get_local_fs(module, blade):
"""Return Filesystem or None"""
@@ -241,6 +250,30 @@ def delete_rl_policy(module, blade):
module.exit_json(changed=changed)
+def delete_rl(module, blade):
+ """Delete filesystem replica link"""
+ changed = True
+ if not module.check_mode:
+        res = blade.delete_file_system_replica_links(
+            local_file_system_names=[module.params["name"]],
+            remote_file_system_names=[module.params["target_fs"]],
+            remote_names=[module.params["target_array"]],
+            cancel_in_progress_transfers=module.params["in_progress"],
+        )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete replica link from {0} to {1}:{2}. Error: {3}".format(
+ module.params["name"],
+ module.params["target_array"],
+ module.params["target_fs"],
+ res.errors[0].message,
+ )
+ )
+    module.exit_json(changed=changed)
+
+
def main():
argument_spec = purefb_argument_spec()
argument_spec.update(
@@ -249,6 +282,7 @@ def main():
target_fs=dict(type="str"),
target_array=dict(type="str"),
policy=dict(type="str"),
+ in_progress=dict(type="bool", default=False),
state=dict(default="present", choices=["present", "absent"]),
)
)
@@ -296,6 +330,12 @@ def main():
policy = None
if state == "present" and not local_replica_link:
create_rl(module, blade)
+ elif state == "absent" and local_replica_link:
+ if DELETE_RL_API_VERSION not in versions:
+ module.fail_json("Deleting a replica link requires REST 2.10 or higher")
+ else:
+ bladev6 = get_system(module)
+ delete_rl(module, bladev6)
elif state == "present" and local_replica_link and policy:
add_rl_policy(module, blade)
elif state == "absent" and policy:
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_hardware.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_hardware.py
new file mode 100644
index 000000000..49849156b
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_hardware.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_hardware
+version_added: '1.15.0'
+short_description: Manage FlashBlade Hardware
+description:
+- Enable or disable FlashBlade visual identification lights and set connector parameters
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of hardware component
+ type: str
+ required: true
+ enabled:
+ description:
+ - State of the component identification LED
+ type: bool
+ speed:
+ description:
+ - If the component specified is a connector, set the configured speed
+ of each lane in the connector in gigabits-per-second
+ type: int
+ choices: [ 10, 25, 40 ]
+ ports:
+ description:
+    - If the component specified is a connector, the number of configured
+ ports in the connector
+ type: int
+ choices: [ 1, 4 ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set connector to be 4 x 40Gb ports
+ purestorage.flashblade.purefb_hardware:
+ name: "CH1.FM1.ETH1"
+ speed: 40
+ ports: 4
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Enable identification LED
+ purestorage.flashblade.purefb_hardware:
+ name: "CH1.FB1"
+ enabled: True
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Disable identification LED
+ purestorage.flashblade.purefb_hardware:
+ name: "CH1.FB1"
+ enabled: False
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ enabled=dict(type="bool"),
+ name=dict(type="str", required=True),
+ speed=dict(
+ type="int",
+ choices=[10, 25, 40],
+ ),
+ ports=dict(
+ type="int",
+ choices=[1, 4],
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ if module.params["speed"]:
+ speed = module.params["speed"] * 1000000000
+ changed = False
+ change_connector = False
+ hardware = None
+ res = blade.get_hardware(names=[module.params["name"]])
+ if res.status_code == 200:
+ hardware = list(res.items)[0]
+ if hardware.identify_enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_hardware(
+ names=[module.params["name"]],
+ hardware=flashblade.Hardware(
+ identify_enabled=module.params["enabled"]
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set identification LED for {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ res = blade.get_hardware_connectors(names=[module.params["name"]])
+ if res.status_code == 200:
+ if res.status_code == 200:
+ connector = list(res.items)[0]
+ if connector.port_count != module.params["ports"]:
+ new_port = module.params["ports"]
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_hardware_connectors(
+ names=[module.params["name"]],
+ hardware_connector=flashblade.HardwareConnector(
+ port_count=module.params["ports"]
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change connector port count {0}. Error: Invalid port count".format(
+ module.params["name"]
+ )
+ )
+            if module.params["speed"] and connector.lane_speed != speed:
+ new_speed = speed
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_hardware_connectors(
+ names=[module.params["name"]],
+ hardware_connector=flashblade.HardwareConnector(
+ lane_speed=speed
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change connector lane speed {0}. Error: Invalid lane speed".format(
+ module.params["name"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
index 8525bd8e3..033312e82 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
@@ -81,357 +81,7 @@ RETURN = r"""
purefb_info:
description: Returns the information collected from the FlashBlade
returned: always
- type: complex
- sample: {
- "admins": {
- "pureuser": {
- "api_token_timeout": null,
- "local": true,
- "public_key": null
- },
- "another_user": {
- "api_token_timeout": null,
- "local": false,
- "public_key": null
- },
- },
- "buckets": {
- "central": {
- "account_name": "jake",
- "bucket_type": "classic",
- "created": 1628900154000,
- "data_reduction": null,
- "destroyed": false,
- "id": "43758f09-9e71-7bf7-5757-2028a95a2b65",
- "lifecycle_rules": {},
- "object_count": 0,
- "snapshot_space": 0,
- "time_remaining": null,
- "total_physical_space": 0,
- "unique_space": 0,
- "versioning": "none",
- "virtual_space": 0
- },
- "test": {
- "account_name": "acme",
- "bucket_type": "classic",
- "created": 1630591952000,
- "data_reduction": 3.6,
- "destroyed": false,
- "id": "d5f6149c-fbef-f3c5-58b6-8fd143110ba9",
- "lifecycle_rules": {
- "test": {
- "abort_incomplete_multipart_uploads_after (days)": 1,
- "cleanup_expired_object_delete_marker": true,
- "enabled": true,
- "keep_current_version_for (days)": null,
- "keep_current_version_until": "2023-12-21",
- "keep_previous_version_for (days)": null,
- "prefix": "foo"
- }
- },
- },
- },
- "capacity": {
- "aggregate": {
- "data_reduction": 1.1179228,
- "snapshots": 0,
- "total_physical": 17519748439,
- "unique": 17519748439,
- "virtual": 19585726464
- },
- "file-system": {
- "data_reduction": 1.3642412,
- "snapshots": 0,
- "total_physical": 4748219708,
- "unique": 4748219708,
- "virtual": 6477716992
- },
- "object-store": {
- "data_reduction": 1.0263462,
- "snapshots": 0,
- "total_physical": 12771528731,
- "unique": 12771528731,
- "virtual": 6477716992
- },
- "total": 83359896948925
- },
- "config": {
- "alert_watchers": {
- "enabled": true,
- "name": "notify@acmestorage.com"
- },
- "array_management": {
- "base_dn": null,
- "bind_password": null,
- "bind_user": null,
- "enabled": false,
- "name": "management",
- "services": [
- "management"
- ],
- "uris": []
- },
- "directory_service_roles": {
- "array_admin": {
- "group": null,
- "group_base": null
- },
- "ops_admin": {
- "group": null,
- "group_base": null
- },
- "readonly": {
- "group": null,
- "group_base": null
- },
- "storage_admin": {
- "group": null,
- "group_base": null
- }
- },
- "dns": {
- "domain": "demo.acmestorage.com",
- "name": "demo-fb-1",
- "nameservers": [
- "8.8.8.8"
- ],
- "search": [
- "demo.acmestorage.com"
- ]
- },
- "nfs_directory_service": {
- "base_dn": null,
- "bind_password": null,
- "bind_user": null,
- "enabled": false,
- "name": "nfs",
- "services": [
- "nfs"
- ],
- "uris": []
- },
- "ntp": [
- "0.ntp.pool.org"
- ],
- "smb_directory_service": {
- "base_dn": null,
- "bind_password": null,
- "bind_user": null,
- "enabled": false,
- "name": "smb",
- "services": [
- "smb"
- ],
- "uris": []
- },
- "smtp": {
- "name": "demo-fb-1",
- "relay_host": null,
- "sender_domain": "acmestorage.com"
- },
- "ssl_certs": {
- "certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
- "common_name": "Acme Storage",
- "country": "US",
- "email": null,
- "intermediate_certificate": null,
- "issued_by": "Acme Storage",
- "issued_to": "Acme Storage",
- "key_size": 4096,
- "locality": null,
- "name": "global",
- "organization": "Acme Storage",
- "organizational_unit": "Acme Storage",
- "passphrase": null,
- "private_key": null,
- "state": null,
- "status": "self-signed",
- "valid_from": "1508433967000",
- "valid_to": "2458833967000"
- }
- },
- "default": {
- "blades": 15,
- "buckets": 7,
- "filesystems": 2,
- "flashblade_name": "demo-fb-1",
- "object_store_accounts": 1,
- "object_store_users": 1,
- "purity_version": "2.2.0",
- "snapshots": 1,
- "total_capacity": 83359896948925,
- "smb_mode": "native"
- },
- "filesystems": {
- "k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
- "default_group_quota": 0,
- "default_user_quota": 0,
- "destroyed": false,
- "fast_remove": false,
- "hard_limit": true,
- "nfs_rules": "10.21.255.0/24(rw,no_root_squash)",
- "provisioned": 21474836480,
- "snapshot_enabled": false
- },
- "z": {
- "default_group_quota": 0,
- "default_user_quota": 0,
- "destroyed": false,
- "fast_remove": false,
- "hard_limit": false,
- "provisioned": 1073741824,
- "snapshot_enabled": false
- }
- },
- "lag": {
- "uplink": {
- "lag_speed": 0,
- "port_speed": 40000000000,
- "ports": [
- {
- "name": "CH1.FM1.ETH1.1"
- },
- {
- "name": "CH1.FM1.ETH1.2"
- },
- ],
- "status": "healthy"
- }
- },
- "network": {
- "fm1.admin0": {
- "address": "10.10.100.6",
- "gateway": "10.10.100.1",
- "mtu": 1500,
- "netmask": "255.255.255.0",
- "services": [
- "support"
- ],
- "type": "vip",
- "vlan": 2200
- },
- "fm2.admin0": {
- "address": "10.10.100.7",
- "gateway": "10.10.100.1",
- "mtu": 1500,
- "netmask": "255.255.255.0",
- "services": [
- "support"
- ],
- "type": "vip",
- "vlan": 2200
- },
- "nfs1": {
- "address": "10.10.100.4",
- "gateway": "10.10.100.1",
- "mtu": 1500,
- "netmask": "255.255.255.0",
- "services": [
- "data"
- ],
- "type": "vip",
- "vlan": 2200
- },
- "vir0": {
- "address": "10.10.100.5",
- "gateway": "10.10.100.1",
- "mtu": 1500,
- "netmask": "255.255.255.0",
- "services": [
- "management"
- ],
- "type": "vip",
- "vlan": 2200
- }
- },
- "performance": {
- "aggregate": {
- "bytes_per_op": 0,
- "bytes_per_read": 0,
- "bytes_per_write": 0,
- "read_bytes_per_sec": 0,
- "reads_per_sec": 0,
- "usec_per_other_op": 0,
- "usec_per_read_op": 0,
- "usec_per_write_op": 0,
- "write_bytes_per_sec": 0,
- "writes_per_sec": 0
- },
- "http": {
- "bytes_per_op": 0,
- "bytes_per_read": 0,
- "bytes_per_write": 0,
- "read_bytes_per_sec": 0,
- "reads_per_sec": 0,
- "usec_per_other_op": 0,
- "usec_per_read_op": 0,
- "usec_per_write_op": 0,
- "write_bytes_per_sec": 0,
- "writes_per_sec": 0
- },
- "nfs": {
- "bytes_per_op": 0,
- "bytes_per_read": 0,
- "bytes_per_write": 0,
- "read_bytes_per_sec": 0,
- "reads_per_sec": 0,
- "usec_per_other_op": 0,
- "usec_per_read_op": 0,
- "usec_per_write_op": 0,
- "write_bytes_per_sec": 0,
- "writes_per_sec": 0
- },
- "s3": {
- "bytes_per_op": 0,
- "bytes_per_read": 0,
- "bytes_per_write": 0,
- "read_bytes_per_sec": 0,
- "reads_per_sec": 0,
- "usec_per_other_op": 0,
- "usec_per_read_op": 0,
- "usec_per_write_op": 0,
- "write_bytes_per_sec": 0,
- "writes_per_sec": 0
- }
- },
- "snapshots": {
- "z.188": {
- "destroyed": false,
- "source": "z",
- "source_destroyed": false,
- "suffix": "188"
- }
- },
- "subnet": {
- "new-mgmt": {
- "gateway": "10.10.100.1",
- "interfaces": [
- {
- "name": "fm1.admin0"
- },
- {
- "name": "fm2.admin0"
- },
- {
- "name": "nfs1"
- },
- {
- "name": "vir0"
- }
- ],
- "lag": "uplink",
- "mtu": 1500,
- "prefix": "10.10.100.0/24",
- "services": [
- "data",
- "management",
- "support"
- ],
- "vlan": 2200
- }
- }
- }
+ type: dict
"""
@@ -458,6 +108,9 @@ VSO_VERSION = "2.4"
DRIVES_API_VERSION = "2.5"
SECURITY_API_VERSION = "2.7"
BUCKET_API_VERSION = "2.8"
+SMB_CLIENT_API_VERSION = "2.10"
+SPACE_API_VERSION = "2.11"
+PUBLIC_API_VERSION = "2.12"
def _millisecs_to_time(millisecs):
@@ -711,12 +364,12 @@ def generate_config_dict(blade):
"engine_id": snmp_agents.items[agent].engine_id,
}
if config_info["snmp_agents"][agent_name]["version"] == "v3":
- config_info["snmp_agents"][agent_name][
- "auth_protocol"
- ] = snmp_agents.items[agent].v3.auth_protocol
- config_info["snmp_agents"][agent_name][
- "privacy_protocol"
- ] = snmp_agents.items[agent].v3.privacy_protocol
+ config_info["snmp_agents"][agent_name]["auth_protocol"] = (
+ snmp_agents.items[agent].v3.auth_protocol
+ )
+ config_info["snmp_agents"][agent_name]["privacy_protocol"] = (
+ snmp_agents.items[agent].v3.privacy_protocol
+ )
config_info["snmp_agents"][agent_name]["user"] = snmp_agents.items[
agent
].v3.user
@@ -730,12 +383,12 @@ def generate_config_dict(blade):
"notification": snmp_managers.items[manager].notification,
}
if config_info["snmp_managers"][mgr_name]["version"] == "v3":
- config_info["snmp_managers"][mgr_name][
- "auth_protocol"
- ] = snmp_managers.items[manager].v3.auth_protocol
- config_info["snmp_managers"][mgr_name][
- "privacy_protocol"
- ] = snmp_managers.items[manager].v3.privacy_protocol
+ config_info["snmp_managers"][mgr_name]["auth_protocol"] = (
+ snmp_managers.items[manager].v3.auth_protocol
+ )
+ config_info["snmp_managers"][mgr_name]["privacy_protocol"] = (
+ snmp_managers.items[manager].v3.privacy_protocol
+ )
config_info["snmp_managers"][mgr_name]["user"] = snmp_managers.items[
manager
].v3.user
@@ -920,33 +573,77 @@ def generate_network_dict(blade):
return net_info
-def generate_capacity_dict(blade):
+def generate_capacity_dict(module, blade):
capacity_info = {}
- total_cap = blade.arrays.list_arrays_space()
- file_cap = blade.arrays.list_arrays_space(type="file-system")
- object_cap = blade.arrays.list_arrays_space(type="object-store")
- capacity_info["total"] = total_cap.items[0].capacity
- capacity_info["aggregate"] = {
- "data_reduction": total_cap.items[0].space.data_reduction,
- "snapshots": total_cap.items[0].space.snapshots,
- "total_physical": total_cap.items[0].space.total_physical,
- "unique": total_cap.items[0].space.unique,
- "virtual": total_cap.items[0].space.virtual,
- }
- capacity_info["file-system"] = {
- "data_reduction": file_cap.items[0].space.data_reduction,
- "snapshots": file_cap.items[0].space.snapshots,
- "total_physical": file_cap.items[0].space.total_physical,
- "unique": file_cap.items[0].space.unique,
- "virtual": file_cap.items[0].space.virtual,
- }
- capacity_info["object-store"] = {
- "data_reduction": object_cap.items[0].space.data_reduction,
- "snapshots": object_cap.items[0].space.snapshots,
- "total_physical": object_cap.items[0].space.total_physical,
- "unique": object_cap.items[0].space.unique,
- "virtual": file_cap.items[0].space.virtual,
- }
+ api_version = blade.api_version.list_versions().versions
+ if SPACE_API_VERSION in api_version:
+ blade2 = get_system(module)
+ total_cap = list(blade2.get_arrays_space().items)[0]
+ file_cap = list(blade2.get_arrays_space(type="file-system").items)[0]
+ object_cap = list(blade2.get_arrays_space(type="object-store").items)[0]
+ capacity_info["total"] = total_cap.space.capacity
+ capacity_info["aggregate"] = {
+ "data_reduction": total_cap.space.data_reduction,
+ "snapshots": total_cap.space.snapshots,
+ "total_physical": total_cap.space.total_physical,
+ "unique": total_cap.space.unique,
+ "virtual": total_cap.space.virtual,
+ "total_provisioned": total_cap.space.total_provisioned,
+ "available_provisioned": total_cap.space.available_provisioned,
+ "available_ratio": total_cap.space.available_ratio,
+ "destroyed": total_cap.space.destroyed,
+ "destroyed_virtual": total_cap.space.destroyed_virtual,
+ }
+ capacity_info["file-system"] = {
+ "data_reduction": file_cap.space.data_reduction,
+ "snapshots": file_cap.space.snapshots,
+ "total_physical": file_cap.space.total_physical,
+ "unique": file_cap.space.unique,
+ "virtual": file_cap.space.virtual,
+ "total_provisioned": total_cap.space.total_provisioned,
+ "available_provisioned": total_cap.space.available_provisioned,
+ "available_ratio": total_cap.space.available_ratio,
+ "destroyed": total_cap.space.destroyed,
+ "destroyed_virtual": total_cap.space.destroyed_virtual,
+ }
+ capacity_info["object-store"] = {
+ "data_reduction": object_cap.space.data_reduction,
+ "snapshots": object_cap.space.snapshots,
+ "total_physical": object_cap.space.total_physical,
+ "unique": object_cap.space.unique,
+ "virtual": file_cap.space.virtual,
+ "total_provisioned": total_cap.space.total_provisioned,
+ "available_provisioned": total_cap.space.available_provisioned,
+ "available_ratio": total_cap.space.available_ratio,
+ "destroyed": total_cap.space.destroyed,
+ "destroyed_virtual": total_cap.space.destroyed_virtual,
+ }
+ else:
+ total_cap = blade.arrays.list_arrays_space()
+ file_cap = blade.arrays.list_arrays_space(type="file-system")
+ object_cap = blade.arrays.list_arrays_space(type="object-store")
+ capacity_info["total"] = total_cap.items[0].capacity
+ capacity_info["aggregate"] = {
+ "data_reduction": total_cap.items[0].space.data_reduction,
+ "snapshots": total_cap.items[0].space.snapshots,
+ "total_physical": total_cap.items[0].space.total_physical,
+ "unique": total_cap.items[0].space.unique,
+ "virtual": total_cap.items[0].space.virtual,
+ }
+ capacity_info["file-system"] = {
+ "data_reduction": file_cap.items[0].space.data_reduction,
+ "snapshots": file_cap.items[0].space.snapshots,
+ "total_physical": file_cap.items[0].space.total_physical,
+ "unique": file_cap.items[0].space.unique,
+ "virtual": file_cap.items[0].space.virtual,
+ }
+ capacity_info["object-store"] = {
+ "data_reduction": object_cap.items[0].space.data_reduction,
+ "snapshots": object_cap.items[0].space.snapshots,
+ "total_physical": object_cap.items[0].space.total_physical,
+ "unique": object_cap.items[0].space.unique,
+ "virtual": file_cap.items[0].space.virtual,
+ }
return capacity_info
@@ -973,6 +670,17 @@ def generate_snap_dict(blade):
snap_info[snapshot]["source_location"] = snaps.items[
snap
].source_location.name
+ snap_info[snapshot]["policies"] = []
+ if PUBLIC_API_VERSION in api_version:
+ for policy in range(0, len(snaps.items[snap].policies)):
+ snap_info[snapshot]["policies"].append(
+ {
+ "name": snaps.items[snap].policies[policy].name,
+ "location": snaps.items[snap]
+ .policies[policy]
+ .location.name,
+ }
+ )
return snap_info
@@ -1065,6 +773,19 @@ def generate_bucket_dict(module, blade):
"total_physical_space": buckets.items[bckt].space.total_physical,
"unique_space": buckets.items[bckt].space.unique,
"virtual_space": buckets.items[bckt].space.virtual,
+ "total_provisioned_space": getattr(
+ buckets.items[bckt].space, "total_provisioned", None
+ ),
+ "available_provisioned_space": getattr(
+ buckets.items[bckt].space, "available_provisioned", None
+ ),
+ "available_ratio": getattr(
+ buckets.items[bckt].space, "available_ratio", None
+ ),
+ "destroyed_space": getattr(buckets.items[bckt].space, "destroyed", None),
+ "destroyed_virtual_space": getattr(
+ buckets.items[bckt].space, "destroyed_virtual", None
+ ),
"created": buckets.items[bckt].created,
"destroyed": buckets.items[bckt].destroyed,
"time_remaining": buckets.items[bckt].time_remaining,
@@ -1139,6 +860,19 @@ def generate_bucket_dict(module, blade):
bucket
].object_lock_config.freeze_locked_objects,
}
+ if buckets[bucket].object_lock_config.enabled:
+ bucket_info[buckets[bucket].name]["object_lock_config"][
+ "default_retention"
+ ] = getattr(
+ buckets[bucket].object_lock_config, "default_retention", ""
+ )
+ bucket_info[buckets[bucket].name]["object_lock_config"][
+ "default_retention_mode"
+ ] = getattr(
+ buckets[bucket].object_lock_config,
+ "default_retention_mode",
+ "",
+ )
bucket_info[buckets[bucket].name]["eradication_config"] = {
"eradication_delay": buckets[
bucket
@@ -1147,6 +881,19 @@ def generate_bucket_dict(module, blade):
bucket
].eradication_config.manual_eradication,
}
+ if PUBLIC_API_VERSION in api_version:
+ bucket_info[buckets[bucket].name]["public_status"] = buckets[
+ bucket
+ ].public_status
+ bucket_info[buckets[bucket].name]["public_access_config"] = {
+ "block_new_public_policies": buckets[
+ bucket
+ ].public_access_config.block_new_public_policies,
+ "block_public_access": buckets[
+ bucket
+ ].public_access_config.block_public_access,
+ }
+
return bucket_info
@@ -1181,10 +928,50 @@ def generate_ad_dict(blade):
"service_principals": ad_account.service_principal_names,
"join_ou": ad_account.join_ou,
"encryption_types": ad_account.encryption_types,
+ "global_catalog_servers": getattr(
+ ad_account, "global_catalog_servers", None
+ ),
}
return ad_info
+def generate_bucket_access_policies_dict(blade):
+ policies_info = {}
+ policies = list(blade.get_buckets_bucket_access_policies().items)
+ for policy in range(0, len(policies)):
+ policy_name = policies[policy].name
+ policies_info[policy_name] = {
+ "description": policies[policy].description,
+ "enabled": policies[policy].enabled,
+ "local": policies[policy].is_local,
+ "rules": [],
+ }
+ for rule in range(0, len(policies[policy].rules)):
+ policies_info[policy_name]["rules"].append(
+ {
+ "actions": policies[policy].rules[rule].actions,
+ "resources": policies[policy].rules[rule].resources,
+ "all_principals": policies[policy].rules[rule].principals.all,
+ "effect": policies[policy].rules[rule].effect,
+ "name": policies[policy].rules[rule].name,
+ }
+ )
+ return policies_info
+
+
+def generate_bucket_cross_object_policies_dict(blade):
+ policies_info = {}
+ policies = list(blade.get_buckets_cross_origin_resource_sharing_policies().items)
+ for policy in range(0, len(policies)):
+ policy_name = policies[policy].name
+ policies_info[policy_name] = {
+ "allowed_headers": policies[policy].allowed_headers,
+ "allowed_methods": policies[policy].allowed_methods,
+ "allowed_origins": policies[policy].allowed_origins,
+ }
+ return policies_info
+
+
def generate_object_store_access_policies_dict(blade):
policies_info = {}
policies = list(blade.get_object_store_access_policies().items)
@@ -1247,6 +1034,45 @@ def generate_nfs_export_policies_dict(blade):
return policies_info
+def generate_smb_client_policies_dict(blade):
+ policies_info = {}
+ policies = list(blade.get_smb_client_policies().items)
+ for policy in range(0, len(policies)):
+ policy_name = policies[policy].name
+ policies_info[policy_name] = {
+ "local": policies[policy].is_local,
+ "enabled": policies[policy].enabled,
+ "version": policies[policy].version,
+ "rules": [],
+ }
+ for rule in range(0, len(policies[policy].rules)):
+ policies_info[policy_name]["rules"].append(
+ {
+ "name": policies[policy].rules[rule].name,
+ "change": getattr(policies[policy].rules[rule], "change", None),
+ "full_control": getattr(
+ policies[policy].rules[rule], "full_control", None
+ ),
+ "principal": getattr(
+ policies[policy].rules[rule], "principal", None
+ ),
+ "read": getattr(policies[policy].rules[rule], "read", None),
+ "client": getattr(policies[policy].rules[rule], "client", None),
+ "index": getattr(policies[policy].rules[rule], "index", None),
+ "policy_version": getattr(
+ policies[policy].rules[rule], "policy_version", None
+ ),
+ "encryption": getattr(
+ policies[policy].rules[rule], "encryption", None
+ ),
+ "permission": getattr(
+ policies[policy].rules[rule], "permission", None
+ ),
+ }
+ )
+ return policies_info
+
+
def generate_object_store_accounts_dict(blade):
account_info = {}
accounts = list(blade.get_object_store_accounts().items)
@@ -1259,6 +1085,19 @@ def generate_object_store_accounts_dict(blade):
"total_physical_space": accounts[account].space.total_physical,
"unique_space": accounts[account].space.unique,
"virtual_space": accounts[account].space.virtual,
+ "total_provisioned_space": getattr(
+ accounts[account].space, "total_provisioned", None
+ ),
+ "available_provisioned_space": getattr(
+ accounts[account].space, "available_provisioned", None
+ ),
+ "available_ratio": getattr(
+ accounts[account].space, "available_ratio", None
+ ),
+ "destroyed_space": getattr(accounts[account].space, "destroyed", None),
+ "destroyed_virtual_space": getattr(
+ accounts[account].space, "destroyed_virtual", None
+ ),
"quota_limit": getattr(accounts[account], "quota_limit", None),
"hard_limit_enabled": getattr(
accounts[account], "hard_limit_enabled", None
@@ -1277,6 +1116,17 @@ def generate_object_store_accounts_dict(blade):
}
except AttributeError:
pass
+ try:
+ account_info[acc_name]["public_access_config"] = {
+ "block_new_public_policies": accounts[
+ account
+ ].public_access_config.block_new_public_policies,
+ "block_public_access": accounts[
+ account
+ ].public_access_config.block_public_access,
+ }
+ except AttributeError:
+ pass
acc_users = list(
blade.get_object_store_users(filter='name="' + acc_name + '/*"').items
)
@@ -1413,6 +1263,24 @@ def generate_fs_dict(module, blade):
"quota": fs_user_quotas[user_quota].quota,
"usage": fs_user_quotas[user_quota].usage,
}
+ if PUBLIC_API_VERSION in api_version:
+ for v2fs in range(0, len(fsys_v2)):
+ if fsys_v2[v2fs].name == share:
+ fs_info[share]["smb_client_policy"] = getattr(
+ fsys_v2[v2fs].smb.client_policy, "name", None
+ )
+ fs_info[share]["smb_share_policy"] = getattr(
+ fsys_v2[v2fs].smb.share_policy, "name", None
+ )
+ fs_info[share]["smb_continuous_availability_enabled"] = fsys_v2[
+ v2fs
+ ].smb.continuous_availability_enabled
+ fs_info[share]["multi_protocol_access_control_style"] = getattr(
+ fsys_v2[v2fs].multi_protocol, "access_control_style", None
+ )
+ fs_info[share]["multi_protocol_safeguard_acls"] = fsys_v2[
+ v2fs
+ ].multi_protocol.safeguard_acls
return fs_info
@@ -1433,6 +1301,7 @@ def generate_drives_dict(blade):
"raw_capacity": getattr(drives[drive], "raw_capacity", None),
"status": getattr(drives[drive], "status", None),
"details": getattr(drives[drive], "details", None),
+ "type": getattr(drives[drive], "type", None),
}
return drives_info
@@ -1495,7 +1364,7 @@ def main():
if "config" in subset or "all" in subset:
info["config"] = generate_config_dict(blade)
if "capacity" in subset or "all" in subset:
- info["capacity"] = generate_capacity_dict(blade)
+ info["capacity"] = generate_capacity_dict(module, blade)
if "lags" in subset or "all" in subset:
info["lag"] = generate_lag_dict(blade)
if "network" in subset or "all" in subset:
@@ -1537,8 +1406,17 @@ def main():
info["access_policies"] = generate_object_store_access_policies_dict(
blade
)
+ if PUBLIC_API_VERSION in api_version:
+ info["bucket_access_policies"] = generate_bucket_access_policies_dict(
+ blade
+ )
+ info["bucket_cross_origin_policies"] = (
+ generate_bucket_cross_object_policies_dict(blade)
+ )
if NFS_POLICY_API_VERSION in api_version:
info["export_policies"] = generate_nfs_export_policies_dict(blade)
+ if SMB_CLIENT_API_VERSION in api_version:
+ info["share_policies"] = generate_smb_client_policies_dict(blade)
if "drives" in subset or "all" in subset and DRIVES_API_VERSION in api_version:
info["drives"] = generate_drives_dict(blade)
module.exit_json(changed=False, purefb_info=info)
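
The new bucket_access_policies, bucket_cross_origin_policies and share_policies keys are only populated when the array reports the matching REST API versions checked above. A hedged usage sketch (not part of this commit), reusing only the connection options shown elsewhere in the collection's examples:

- name: Gather FlashBlade information including the new policy sections
  purestorage.flashblade.purefb_info:
    gather_subset:
      - all
    fb_url: 10.10.10.2
    api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
  register: blade_info

- name: Show the SMB client policies reported under share_policies
  ansible.builtin.debug:
    var: blade_info.purefb_info.share_policies
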
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
index b17bc3f9e..1ef96f870 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
@@ -47,65 +47,7 @@ RETURN = r"""
purefb_inventory:
description: Returns the inventory information for the FlashBlade
returned: always
- type: complex
- sample: {
- "blades": {
- "CH1.FB1": {
- "model": "FB-17TB",
- "serial": "PPCXA1942AFF5",
- "slot": 1,
- "status": "healthy"
- }
- },
- "chassis": {
- "CH1": {
- "index": 1,
- "model": null,
- "serial": "PMPAM163402AE",
- "slot": null,
- "status": "healthy"
- }
- },
- "controllers": {},
- "ethernet": {
- "CH1.FM1.ETH1": {
- "model": "624410002",
- "serial": "APF16360021PRV",
- "slot": 1,
- "speed": 40000000000,
- "status": "healthy"
- }
- },
- "fans": {
- "CH1.FM1.FAN1": {
- "slot": 1,
- "status": "healthy"
- }
- },
- "modules": {
- "CH1.FM1": {
- "model": "EFM-110",
- "serial": "PSUFS1640002C",
- "slot": 1,
- "status": "healthy"
- },
- "CH1.FM2": {
- "model": "EFM-110",
- "serial": "PSUFS1640004A",
- "slot": 2,
- "status": "healthy"
- }
- },
- "power": {
- "CH1.PWR1": {
- "model": "DS1600SPE-3",
- "serial": "M0500E00D8AJZ",
- "slot": 1,
- "status": "healthy"
- }
- },
- "switch": {}
- }
+ type: dict
"""
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py
index e5c46e730..8bf3ce48a 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py
@@ -163,16 +163,19 @@ def update_lag(module, blade):
)
new_ports = []
for port in range(0, len(module.params["ports"])):
- new_ports.append(
- module.params["ports"][port].split(".")[0].upper()
- + ".FM1."
- + module.params["ports"][port].split(".")[1].upper()
- )
- new_ports.append(
- module.params["ports"][port].split(".")[0].upper()
- + ".FM2."
- + module.params["ports"][port].split(".")[1].upper()
- )
+ if module.params["ports"][port].split(".")[0].upper()[0] != "X":
+ new_ports.append(
+ module.params["ports"][port].split(".")[0].upper()
+ + ".FM1."
+ + module.params["ports"][port].split(".")[1].upper()
+ )
+ new_ports.append(
+ module.params["ports"][port].split(".")[0].upper()
+ + ".FM2."
+ + module.params["ports"][port].split(".")[1].upper()
+ )
+ else:
+ new_ports.append(module.params["ports"][port].upper())
ports = []
for final_port in range(0, len(new_ports)):
ports.append(flashblade.FixedReference(name=new_ports[final_port]))
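
The reworked loop above expands chassis-relative port names to both fabric modules (FM1 and FM2), while names beginning with X are only upper-cased and passed through. A hedged usage sketch assuming the module's existing name and ports options; the port names are illustrative only:

- name: Add ports to a link aggregation group
  purestorage.flashblade.purefb_lag:
    name: lag1
    ports:
      - ch1.eth2   # expanded to CH1.FM1.ETH2 and CH1.FM2.ETH2
      - x1.eth2    # X-prefixed names are used as given
    fb_url: 10.10.10.2
    api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
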
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
index 273166de8..ebe70aa48 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
@@ -52,7 +52,7 @@ options:
- Type of policy
default: snapshot
type: str
- choices: [ snapshot, access, nfs ]
+ choices: [ snapshot, access, nfs, smb_share, smb_client ]
version_added: "1.9.0"
account:
description:
@@ -72,7 +72,7 @@ options:
Rules are additive.
type: str
default: allow
- choices: [ allow ]
+ choices: [ allow, deny ]
version_added: "1.9.0"
actions:
description:
@@ -83,6 +83,7 @@ options:
choices:
- s3:*
- s3:AbortMultipartUpload
+ - s3:BypassGovernanceRetention
- s3:CreateBucket
- s3:DeleteBucket
- s3:DeleteObject
@@ -94,7 +95,12 @@ options:
- s3:GetLifecycleConfiguration
- s3:GetObject
- s3:GetObjectAcl
+ - s3:GetObjectLegalHold
+ - s3:GetObjectLockConfiguration
+ - s3:GetObjectRetention
+ - s3:GetObjectTagging
- s3:GetObjectVersion
+ - s3:GetObjectVersionTagging
- s3:ListAllMyBuckets
- s3:ListBucket
- s3:ListBucketMultipartUploads
@@ -103,6 +109,10 @@ options:
- s3:PutBucketVersioning
- s3:PutLifecycleConfiguration
- s3:PutObject
+ - s3:PutObjectLegalHold
+ - s3:PutObjectLockConfiguration
+ - s3:PutObjectRetention
+ - s3:ResolveSafemodeConflicts
version_added: "1.9.0"
object_resources:
description:
@@ -213,7 +223,7 @@ options:
description:
- Any user whose UID is affected by an I(access) of `root_squash` or `all_squash`
will have their UID mapped to anonuid.
-      The defaultis null, which means 65534.
+      The default is null, which means 65534.
Use "" to clear.
type: str
version_added: "1.9.0"
@@ -241,7 +251,6 @@ options:
- Accepted notation is a single IP address, subnet in CIDR notation, netgroup, or
anonymous (*).
type: str
- default: "*"
version_added: "1.9.0"
fileid_32bit:
description:
@@ -284,8 +293,8 @@ options:
version_added: "1.9.0"
rename:
description:
-    - New name for export policy
-    - Only applies to NFS export policies
+    - New name for policy
+    - Only applies to NFS and SMB policies
type: str
version_added: "1.10.0"
destroy_snapshots:
@@ -294,6 +303,47 @@ options:
type: bool
version_added: '1.11.0'
default: false
+ principal:
+ description:
+ - The user or group who is the subject of this rule, and their domain
+ type: str
+ version_added: '1.12.0'
+ change:
+ description:
+    - The state of the SMB share principal's Change access permission.
+ - Setting to "" will clear the current setting
+ type: str
+ choices: [ allow, deny, "" ]
+ version_added: '1.12.0'
+ read:
+ description:
+    - The state of the SMB share principal's Read access permission.
+ - Setting to "" will clear the current setting
+ type: str
+ choices: [ allow, deny, "" ]
+ version_added: '1.12.0'
+ full_control:
+ description:
+    - The state of the SMB share principal's Full Control access permission.
+ - Setting to "" will clear the current setting
+ type: str
+ choices: [ allow, deny, "" ]
+ version_added: '1.12.0'
+ smb_encryption:
+ description:
+ - The status of SMB encryption in a client policy rule
+ type: str
+ choices: [ disabled, optional, required ]
+ default: optional
+ version_added: '1.12.0'
+ desc:
+ description:
+ - A description of an object store policy,
+ optionally specified when the policy is created.
+ - Cannot be modified for an existing policy.
+ type: str
+ default: ""
+ version_added: '1.14.0'
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
@@ -359,6 +409,20 @@ EXAMPLES = r"""
object_resources: "*"
fb_url: 10.10.10.2
api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an empty SMB client policy
+ purestorage.flashblade.purefb_policy:
+ name: test_smb_client
+ policy_type: smb_client
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an SMB client policy with a client rule
+ purestorage.flashblade.purefb_policy:
+ name: test_smb_client
+ policy_type: smb_client
+ client: "10.0.1.0/24"
+ permission: rw
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
- name: Create an empty NFS export policy
purestorage.flashblade.purefb_policy:
name: test_nfs_export
@@ -460,7 +524,9 @@ RETURN = r"""
HAS_PURITYFB = True
try:
- from purity_fb import Policy, PolicyRule, PolicyPatch
+ from purity_fb import Policy as Policyv1
+ from purity_fb import PolicyRule as PolicyRulev1
+ from purity_fb import PolicyPatch as PolicyPatchv1
except ImportError:
HAS_PURITYFB = False
@@ -473,7 +539,13 @@ try:
NfsExportPolicy,
NfsExportPolicyRule,
Policy,
+ PolicyPatch,
PolicyRule,
+ SmbSharePolicyRule,
+ SmbSharePolicy,
+ SmbClientPolicyRule,
+ SmbClientPolicy,
+ ObjectStoreAccessPolicyPost,
)
except ImportError:
HAS_PYPURECLIENT = False
@@ -503,6 +575,8 @@ SNAPSHOT_POLICY_API_VERSION = "2.1"
ACCESS_POLICY_API_VERSION = "2.2"
NFS_POLICY_API_VERSION = "2.3"
NFS_RENAME_API_VERSION = "2.4"
+SMB_POLICY_API_VERSION = "2.10"
+SMB_ENCRYPT_API_VERSION = "2.11"
def _convert_to_millisecs(hour):
@@ -596,6 +670,614 @@ def _get_local_tz(module, timezone="UTC"):
return timezone
+def delete_smb_share_policy(module, blade):
+ """Delete SMB Share Policy, or Rule
+
+ If principal is provided then delete the principal rule if it exists.
+ """
+
+ changed = False
+ policy_delete = True
+ if module.params["principal"]:
+ policy_delete = False
+ prin_rule = blade.get_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="principal='" + module.params["principal"] + "'",
+ )
+ if prin_rule.status_code == 200:
+ rule = list(prin_rule.items)[0]
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_smb_share_policies_rules(names=[rule.name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule for principal {0} in policy {1}. "
+ "Error: {2}".format(
+ module.params["principal"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ if policy_delete:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_smb_share_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete SMB share policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def rename_smb_share_policy(module, blade):
+ """Rename SMB Share Policy"""
+
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_smb_share_policies(
+ names=[module.params["name"]],
+ policy=SmbSharePolicy(name=module.params["rename"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename SMB share policy {0} to {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["rename"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_smb_share_policy(module, blade):
+ """Create SMB Share Policy"""
+ changed = True
+ if not module.check_mode:
+ res = blade.post_smb_share_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create SMB share policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["enabled"]:
+ res = blade.patch_smb_share_policies(
+ policy=SmbSharePolicy(enabled=False), names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ blade.delete_smb_share_policies(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to create SMB share policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["principal"]:
+ module.fail_json(msg="principal is required to create a new rule")
+ else:
+ rule = SmbSharePolicyRule(
+ principal=module.params["principal"],
+ change=module.params["change"],
+ read=module.params["read"],
+ full_control=module.params["full_control"],
+ )
+ res = blade.post_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for policy {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_smb_share_policy(module, blade):
+ """Update SMB Share Policy Rule"""
+
+ changed = False
+ if module.params["principal"]:
+ current_policy_rule = blade.get_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="principal='" + module.params["principal"] + "'",
+ )
+ if (
+ current_policy_rule.status_code == 200
+ and current_policy_rule.total_item_count == 0
+ ):
+ rule = SmbSharePolicyRule(
+ principal=module.params["principal"],
+ change=module.params["change"],
+ read=module.params["read"],
+ full_control=module.params["full_control"],
+ )
+ changed = True
+ if not module.check_mode:
+ if module.params["before_rule"]:
+ before_name = (
+ module.params["name"] + "." + str(module.params["before_rule"])
+ )
+ res = blade.post_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ before_rule_name=before_name,
+ )
+ else:
+ res = blade.post_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for principal {0} "
+ "in policy {1}. Error: {2}".format(
+ module.params["principal"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ rules = list(current_policy_rule.items)
+ cli_count = None
+ old_policy_rule = rules[0]
+ current_rule = {
+ "principal": sorted(old_policy_rule.principal),
+ "read": sorted(old_policy_rule.read),
+ "change": sorted(old_policy_rule.change),
+ "full_control": sorted(old_policy_rule.full_control),
+ }
+ if module.params["read"]:
+ if module.params["read"] == "":
+ new_read = ""
+ else:
+ new_read = module.params["read"]
+ else:
+ new_read = current_rule["read"]
+ if module.params["full_control"]:
+ if module.params["full_control"] == "":
+ new_full_control = ""
+ else:
+ new_full_control = module.params["full_control"]
+ else:
+ new_full_control = current_rule["full_control"]
+ if module.params["change"]:
+ if module.params["change"] == "":
+ new_change = ""
+ else:
+ new_change = module.params["change"]
+ else:
+ new_change = current_rule["change"]
+ if module.params["principal"]:
+ new_principal = module.params["principal"]
+ else:
+ new_principal = current_rule["principal"]
+ new_rule = {
+ "principal": new_principal,
+ "read": new_read,
+ "change": new_change,
+ "full_control": new_full_control,
+ }
+ if current_rule != new_rule:
+ changed = True
+ if not module.check_mode:
+ rule = SmbSharePolicyRule(
+ principal=module.params["principal"],
+ change=module.params["change"],
+ read=module.params["read"],
+ full_control=module.params["full_control"],
+ )
+ res = blade.patch_smb_share_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update SMB share rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ if (
+ module.params["before_rule"]
+ and module.params["before_rule"] != old_policy_rule.index
+ ):
+ changed = True
+ if not module.check_mode:
+ before_name = (
+ module.params["name"] + "." + str(module.params["before_rule"])
+ )
+ res = blade.patch_smb_share_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=SmbSharePolicyRule(),
+ before_rule_name=before_name,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to move SMB share rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ current_policy = list(
+ blade.get_smb_share_policies(names=[module.params["name"]]).items
+ )[0]
+ if current_policy.enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_smb_share_policies(
+ policy=SmbSharePolicy(enabled=module.params["enabled"]),
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change state of SMB share policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_smb_client_policy(module, blade):
+ """Delete SMB CLient Policy, or Rule
+
+ If client is provided then delete the client rule if it exists.
+ """
+
+ changed = False
+ policy_delete = True
+ if module.params["client"]:
+ policy_delete = False
+ res = blade.get_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="client='" + module.params["client"] + "'",
+ )
+ if res.status_code == 200:
+ if res.total_item_count == 0:
+ pass
+ elif res.total_item_count == 1:
+ rule = list(res.items)[0]
+ if module.params["client"] == rule.client:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_smb_client_policies_rules(names=[rule.name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule for client {0} in policy {1}. "
+ "Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ rules = list(res.items)
+ for cli in range(0, len(rules)):
+ if rules[cli].client == "*":
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_smb_client_policies_rules(
+ names=[rules[cli].name]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule for client {0} in policy {1}. "
+ "Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ if policy_delete:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_smb_client_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete SMB client policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def rename_smb_client_policy(module, blade):
+ """Rename SMB Client Policy"""
+
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_smb_client_policies(
+ names=[module.params["name"]],
+ policy=SmbClientPolicy(name=module.params["rename"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename SMB client policy {0} to {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["rename"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_smb_client_policy(module, blade):
+ """Create SMB Client Policy"""
+ changed = True
+ versions = blade.api_version.list_versions().versions
+ if not module.check_mode:
+ res = blade.post_smb_client_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create SMB client policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["enabled"]:
+ res = blade.patch_smb_client_policies(
+ policy=SmbClientPolicy(enabled=False), names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ blade.delete_smb_client_policies(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to create SMB client policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["client"]:
+ module.fail_json(msg="client is required to create a new rule")
+ else:
+ if SMB_ENCRYPT_API_VERSION in versions:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ encryption=module.params["smb_encryption"],
+ )
+ else:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ access=module.params["access"],
+ permission=module.params["permission"],
+ )
+ res = blade.post_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rule for policy {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_smb_client_policy(module, blade):
+ """Update SMB Client Policy Rule"""
+
+ changed = False
+ versions = blade.api_version.list_versions().versions
+ if module.params["client"]:
+ current_policy_rule = blade.get_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="client='" + module.params["client"] + "'",
+ )
+ if (
+ current_policy_rule.status_code == 200
+ and current_policy_rule.total_item_count == 0
+ ):
+ if SMB_ENCRYPT_API_VERSION in versions:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ encryption=module.params["smb_encryption"],
+ )
+ else:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ )
+ changed = True
+ if not module.check_mode:
+ if module.params["before_rule"]:
+ before_name = (
+ module.params["name"] + "." + str(module.params["before_rule"])
+ )
+ res = blade.post_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ before_rule_name=before_name,
+ )
+ else:
+ res = blade.post_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for client {0} "
+ "in policy {1}. Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ rules = list(current_policy_rule.items)
+ cli_count = None
+ done = False
+ if module.params["client"] == "*":
+ for cli in range(0, len(rules)):
+ if rules[cli].client == "*":
+ cli_count = cli
+                if cli_count is None:
+ if SMB_ENCRYPT_API_VERSION in versions:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ encryption=module.params["smb_encryption"],
+ )
+ else:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ )
+ done = True
+ changed = True
+ if not module.check_mode:
+ if module.params["before_rule"]:
+ res = blade.post_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ before_rule_name=(
+ module.params["name"]
+ + "."
+                                    + str(module.params["before_rule"])
+ ),
+ )
+ else:
+ res = blade.post_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for "
+ "client {0} in policy {1}. Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ if not done:
+ old_policy_rule = rules[0]
+ if SMB_ENCRYPT_API_VERSION in versions:
+ current_rule = {
+ "client": sorted(old_policy_rule.client),
+ "permission": sorted(old_policy_rule.permission),
+ "encryption": old_policy_rule.encryption,
+ }
+ else:
+ current_rule = {
+ "client": sorted(old_policy_rule.client),
+ "permission": sorted(old_policy_rule.permission),
+ }
+ if SMB_ENCRYPT_API_VERSION in versions:
+ if module.params["smb_encryption"]:
+ new_encryption = module.params["smb_encryption"]
+ else:
+ new_encryption = current_rule["encryption"]
+ if module.params["permission"]:
+ new_permission = sorted(module.params["permission"])
+ else:
+ new_permission = sorted(current_rule["permission"])
+ if module.params["client"]:
+ new_client = sorted(module.params["client"])
+ else:
+ new_client = sorted(current_rule["client"])
+ if SMB_ENCRYPT_API_VERSION in versions:
+ new_rule = {
+ "client": new_client,
+ "permission": new_permission,
+ "encryption": new_encryption,
+ }
+ else:
+ new_rule = {
+ "client": new_client,
+ "permission": new_permission,
+ }
+ if current_rule != new_rule:
+ changed = True
+ if not module.check_mode:
+ if SMB_ENCRYPT_API_VERSION in versions:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ encryption=module.params["smb_encryption"],
+ )
+ else:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ )
+ res = blade.patch_smb_client_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update SMB client rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ if (
+ module.params["before_rule"]
+ and module.params["before_rule"] != old_policy_rule.index
+ ):
+ changed = True
+ if not module.check_mode:
+ before_name = (
+ module.params["name"]
+ + "."
+ + str(module.params["before_rule"])
+ )
+ res = blade.patch_smb_client_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=SmbClientPolicyRule(),
+ before_rule_name=before_name,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to move SMB client rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ current_policy = list(
+ blade.get_smb_client_policies(names=[module.params["name"]]).items
+ )[0]
+ if current_policy.enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_smb_client_policies(
+ policy=SmbClientPolicy(enabled=module.params["enabled"]),
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change state of SMB client policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
def delete_nfs_policy(module, blade):
"""Delete NFS Export Policy, or Rule
@@ -926,9 +1608,7 @@ def create_nfs_policy(module, blade):
module.params["name"], res.errors[0].message
)
)
- if not module.params["client"]:
- module.fail_json(msg="client is required to create a new rule")
- else:
+ if module.params["client"]:
rule = NfsExportPolicyRule(
client=module.params["client"],
permission=module.params["permission"],
@@ -1061,8 +1741,12 @@ def create_os_policy(module, blade):
"""Create Object Store Access Policy"""
changed = True
policy_name = module.params["account"] + "/" + module.params["name"]
+ versions = list(blade.get_versions().items)
if not module.check_mode:
- res = blade.post_object_store_access_policies(names=[policy_name])
+ res = blade.post_object_store_access_policies(
+ names=[policy_name],
+ policy=ObjectStoreAccessPolicyPost(description=module.params["desc"]),
+ )
if res.status_code != 200:
module.fail_json(
msg="Failed to create access policy {0}.".format(policy_name)
@@ -1078,11 +1762,19 @@ def create_os_policy(module, blade):
s3_delimiters=module.params["s3_delimiters"],
s3_prefixes=module.params["s3_prefixes"],
)
- rule = PolicyRuleObjectAccessPost(
- actions=module.params["actions"],
- resources=module.params["object_resources"],
- conditions=conditions,
- )
+ if SMB_ENCRYPT_API_VERSION in versions:
+ rule = PolicyRuleObjectAccessPost(
+ actions=module.params["actions"],
+ resources=module.params["object_resources"],
+ conditions=conditions,
+ effect=module.params["effect"],
+ )
+ else:
+ rule = PolicyRuleObjectAccessPost(
+ actions=module.params["actions"],
+ resources=module.params["object_resources"],
+ conditions=conditions,
+ )
res = blade.post_object_store_access_policies_rules(
policy_names=policy_name,
names=[module.params["rule"]],
@@ -1118,22 +1810,30 @@ def update_os_policy(module, blade):
policy_names=[policy_name], names=[module.params["rule"]]
)
if current_policy_rule.status_code != 200:
- conditions = PolicyRuleObjectAccessCondition(
- source_ips=module.params["source_ips"],
- s3_delimiters=module.params["s3_delimiters"],
- s3_prefixes=module.params["s3_prefixes"],
- )
- rule = PolicyRuleObjectAccessPost(
- actions=module.params["actions"],
- resources=module.params["object_resources"],
- conditions=conditions,
- )
- res = blade.post_object_store_access_policies_rules(
- policy_names=policy_name,
- names=[module.params["rule"]],
- enforce_action_restrictions=module.params["ignore_enforcement"],
- rule=rule,
- )
+ changed = True
+ if not module.check_mode:
+ conditions = PolicyRuleObjectAccessCondition(
+ source_ips=module.params["source_ips"],
+ s3_delimiters=module.params["s3_delimiters"],
+ s3_prefixes=module.params["s3_prefixes"],
+ )
+ rule = PolicyRuleObjectAccessPost(
+ actions=module.params["actions"],
+ resources=module.params["object_resources"],
+ conditions=conditions,
+ )
+ res = blade.post_object_store_access_policies_rules(
+ policy_names=policy_name,
+ names=[module.params["rule"]],
+ enforce_action_restrictions=module.params["ignore_enforcement"],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule {0} in policy {1}. Error: {2}".format(
+ module.params["rule"], policy_name, res.errors[0].message
+ )
+ )
else:
old_policy_rule = list(current_policy_rule.items)[0]
current_rule = {
@@ -1500,10 +2200,10 @@ def create_policy(module, blade):
msg="every parameter is out of range (300 to 34560000)"
)
if module.params["at"]:
- attr = Policy(
+ attr = Policyv1(
enabled=module.params["enabled"],
rules=[
- PolicyRule(
+ PolicyRulev1(
keep_for=module.params["keep_for"] * 1000,
every=module.params["every"] * 1000,
at=_convert_to_millisecs(module.params["at"]),
@@ -1512,17 +2212,17 @@ def create_policy(module, blade):
],
)
else:
- attr = Policy(
+ attr = Policyv1(
enabled=module.params["enabled"],
rules=[
- PolicyRule(
+ PolicyRulev1(
keep_for=module.params["keep_for"] * 1000,
every=module.params["every"] * 1000,
)
],
)
else:
- attr = Policy(enabled=module.params["enabled"])
+ attr = Policyv1(enabled=module.params["enabled"])
blade.policies.create_policies(names=[module.params["name"]], policy=attr)
except Exception:
module.fail_json(
@@ -1798,11 +2498,11 @@ def update_policy(module, blade, policy):
changed = True
if not module.check_mode:
try:
- attr = PolicyPatch()
+ attr = PolicyPatchv1()
attr.enabled = module.params["enabled"]
if at_time:
attr.add_rules = [
- PolicyRule(
+ PolicyRulev1(
keep_for=module.params["keep_for"] * 1000,
every=module.params["every"] * 1000,
at=at_time,
@@ -1811,13 +2511,13 @@ def update_policy(module, blade, policy):
]
else:
attr.add_rules = [
- PolicyRule(
+ PolicyRulev1(
keep_for=module.params["keep_for"] * 1000,
every=module.params["every"] * 1000,
)
]
attr.remove_rules = [
- PolicyRule(
+ PolicyRulev1(
keep_for=current_policy["keep_for"] * 1000,
every=current_policy["every"] * 1000,
at=current_policy["at"],
@@ -1842,7 +2542,9 @@ def main():
type="str", default="present", choices=["absent", "present", "copy"]
),
policy_type=dict(
- type="str", default="snapshot", choices=["snapshot", "access", "nfs"]
+ type="str",
+ default="snapshot",
+ choices=["snapshot", "access", "nfs", "smb_share", "smb_client"],
),
enabled=dict(type="bool", default=True),
timezone=dict(type="str"),
@@ -1858,13 +2560,14 @@ def main():
rename=dict(type="str"),
rule=dict(type="str"),
user=dict(type="str"),
- effect=dict(type="str", default="allow", choices=["allow"]),
+ effect=dict(type="str", default="allow", choices=["allow", "deny"]),
actions=dict(
type="list",
elements="str",
choices=[
"s3:*",
"s3:AbortMultipartUpload",
+ "s3:BypassGovernanceRetention",
"s3:CreateBucket",
"s3:DeleteBucket",
"s3:DeleteObject",
@@ -1876,7 +2579,12 @@ def main():
"s3:GetLifecycleConfiguration",
"s3:GetObject",
"s3:GetObjectAcl",
+ "s3:GetObjectLegalHold",
+ "s3:GetObjectLockConfiguration",
+ "s3:GetObjectRetention",
+ "s3:GetObjectTagging",
"s3:GetObjectVersion",
+ "s3:GetObjectVersionTagging",
"s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:ListBucketMultipartUploads",
@@ -1885,6 +2593,10 @@ def main():
"s3:PutBucketVersioning",
"s3:PutLifecycleConfiguration",
"s3:PutObject",
+ "s3:PutObjectLegalHold",
+ "s3:PutObjectLockConfiguration",
+ "s3:PutObjectRetention",
+ "s3:ResolveSafemodeConflicts",
],
),
object_resources=dict(type="list", elements="str"),
@@ -1901,7 +2613,7 @@ def main():
anonuid=dict(type="str"),
anongid=dict(type="str"),
atime=dict(type="bool", default=True),
- client=dict(type="str", default="*"),
+ client=dict(type="str"),
fileid_32bit=dict(type="bool", default=False),
permission=dict(type="str", choices=["rw", "ro"], default="ro"),
secure=dict(type="bool", default=False),
@@ -1913,6 +2625,16 @@ def main():
default=["sys"],
),
before_rule=dict(type="int"),
+ principal=dict(type="str"),
+ change=dict(type="str", choices=["deny", "allow", ""]),
+ read=dict(type="str", choices=["deny", "allow", ""]),
+ full_control=dict(type="str", choices=["deny", "allow", ""]),
+ smb_encryption=dict(
+ type="str",
+ default="optional",
+ choices=["disabled", "optional", "required"],
+ ),
+ desc=dict(type="str", default=""),
)
)
@@ -1920,6 +2642,8 @@ def main():
required_if = [
["policy_type", "access", ["account", "name"]],
["policy_type", "nfs", ["name"]],
+ ["policy_type", "smb_client", ["name"]],
+ ["policy_type", "smb_share", ["name"]],
]
module = AnsibleModule(
@@ -2037,6 +2761,102 @@ def main():
create_nfs_policy(module, blade)
elif state == "absent" and policy:
delete_nfs_policy(module, blade)
+ elif module.params["policy_type"] == "smb_client":
+ if SMB_POLICY_API_VERSION not in versions:
+ module.fail_json(
+ msg=(
+ "Minimum FlashBlade REST version required: {0}".format(
+ SMB_POLICY_API_VERSION
+ )
+ )
+ )
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ blade = get_system(module)
+ try:
+ policy = list(
+ blade.get_smb_client_policies(names=[module.params["name"]]).items
+ )[0]
+ except AttributeError:
+ policy = None
+ if module.params["rename"]:
+ try:
+ new_policy = list(
+ blade.get_smb_client_policies(names=[module.params["rename"]]).items
+ )[0]
+ except AttributeError:
+ new_policy = None
+ if policy and state == "present" and not module.params["rename"]:
+ if module.params["before_rule"]:
+ res = blade.get_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ names=[
+ module.params["name"] + "." + str(module.params["before_rule"])
+ ],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Rule index {0} does not exist.".format(
+ module.params["before_rule"]
+ )
+ )
+ update_smb_client_policy(module, blade)
+ elif (
+ state == "present" and module.params["rename"] and policy and not new_policy
+ ):
+ rename_smb_client_policy(module, blade)
+ elif state == "present" and not policy and not module.params["rename"]:
+ create_smb_client_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_smb_client_policy(module, blade)
+ elif module.params["policy_type"] == "smb_share":
+ if SMB_POLICY_API_VERSION not in versions:
+ module.fail_json(
+ msg=(
+ "Minimum FlashBlade REST version required: {0}".format(
+ SMB_POLICY_API_VERSION
+ )
+ )
+ )
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ blade = get_system(module)
+ try:
+ policy = list(
+ blade.get_smb_share_policies(names=[module.params["name"]]).items
+ )[0]
+ except AttributeError:
+ policy = None
+ if module.params["rename"]:
+ try:
+ new_policy = list(
+ blade.get_smb_share_policies(names=[module.params["rename"]]).items
+ )[0]
+ except AttributeError:
+ new_policy = None
+ if policy and state == "present" and not module.params["rename"]:
+ if module.params["before_rule"]:
+ res = blade.get_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ names=[
+ module.params["name"] + "." + str(module.params["before_rule"])
+ ],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Rule index {0} does not exist.".format(
+ module.params["before_rule"]
+ )
+ )
+ update_smb_share_policy(module, blade)
+ elif (
+ state == "present" and module.params["rename"] and policy and not new_policy
+ ):
+ rename_smb_share_policy(module, blade)
+ elif state == "present" and not policy and not module.params["rename"]:
+ create_smb_share_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_smb_share_policy(module, blade)
elif SNAPSHOT_POLICY_API_VERSION in versions:
if not HAS_PYPURECLIENT:
module.fail_json(msg="py-pure-client sdk is required for this module")
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
index 034731994..33aa9a30f 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
@@ -38,6 +38,7 @@ options:
quota:
description:
- The effective quota limit to be applied against the size of the account in bytes.
+ - Values can be entered as K, M, T or P
- If set to '' (empty string), the account is unlimited in size.
version_added: 1.11.0
type: str
@@ -48,11 +49,11 @@ options:
will still be sent if the account has a value set for I(quota_limit).
version_added: 1.11.0
type: bool
- default: false
default_quota:
description:
- The value of this field will be used to configure the I(quota_limit) field of newly created buckets
associated with this object store account, if the bucket creation does not specify its own value.
+ - Values can be entered as K, M, T or P
- If set to '' (empty string), the bucket default is unlimited in size.
version_added: 1.11.0
type: str
@@ -62,13 +63,23 @@ options:
associated with this object store account, if the bucket creation does not specify its own value.
version_added: 1.11.0
type: bool
- default: false
+ block_new_public_policies:
+ description:
+ - If set to true, adding bucket policies that grant public access to a bucket is not allowed.
+ type: bool
+ version_added: 1.15.0
+ block_public_access:
+ description:
+ - If set to true, access to a bucket with a public policy is restricted to only authenticated
+      users within the account that the bucket belongs to.
+ type: bool
+ version_added: 1.15.0
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
EXAMPLES = r"""
-- name: Crrate object store account foo (with no quotas)
+- name: Create object store account foo (with no quotas)
purestorage.flashblade.purefb_s3acc:
name: foo
fb_url: 10.10.10.2
@@ -97,11 +108,15 @@ RETURN = r"""
HAS_PURESTORAGE = True
try:
- from pypureclient.flashblade import ObjectStoreAccountPatch, BucketDefaults
+ from pypureclient.flashblade import (
+ ObjectStoreAccountPatch,
+ BucketDefaults,
+ PublicAccessConfig,
+ )
except ImportError:
HAS_PURESTORAGE = False
-from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
get_blade,
get_system,
@@ -111,6 +126,7 @@ from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb impo
MIN_REQUIRED_API_VERSION = "1.3"
QUOTA_API_VERSION = "2.1"
+PUBLIC_API_VERSION = "2.12"
def get_s3acc(module, blade):
@@ -126,16 +142,28 @@ def get_s3acc(module, blade):
def update_s3acc(module):
"""Update Object Store Account"""
changed = False
+ public = False
blade = get_system(module)
acc_settings = list(
blade.get_object_store_accounts(names=[module.params["name"]]).items
)[0]
- current_account = {
- "hard_limit": acc_settings.hard_limit_enabled,
- "default_hard_limit": acc_settings.bucket_defaults.hard_limit_enabled,
- "quota": str(acc_settings.quota_limit),
- "default_quota": str(acc_settings.bucket_defaults.quota_limit),
- }
+ if getattr(acc_settings, "public_access_config", None):
+ public = True
+ current_account = {
+ "hard_limit": acc_settings.hard_limit_enabled,
+ "default_hard_limit": acc_settings.bucket_defaults.hard_limit_enabled,
+ "quota": str(acc_settings.quota_limit),
+ "default_quota": str(acc_settings.bucket_defaults.quota_limit),
+ "block_new_public_policies": acc_settings.public_access_config.block_new_public_policies,
+ "block_public_access": acc_settings.public_access_config.block_public_access,
+ }
+ else:
+ current_account = {
+ "hard_limit": acc_settings.hard_limit_enabled,
+ "default_hard_limit": acc_settings.bucket_defaults.hard_limit_enabled,
+ "quota": str(acc_settings.quota_limit),
+ "default_quota": str(acc_settings.bucket_defaults.quota_limit),
+ }
if current_account["quota"] == "None":
current_account["quota"] = ""
if current_account["default_quota"] == "None":
@@ -144,12 +172,48 @@ def update_s3acc(module):
module.params["quota"] = current_account["quota"]
if module.params["default_quota"] is None:
module.params["default_quota"] = current_account["default_quota"]
- new_account = {
- "hard_limit": module.params["hard_limit"],
- "default_hard_limit": module.params["default_hard_limit"],
- "quota": module.params["quota"],
- "default_quota": module.params["default_quota"],
- }
+ if not module.params["default_quota"]:
+ module.params["default_quota"] = ""
+ if not module.params["quota"]:
+ quota = ""
+ else:
+ quota = str(human_to_bytes(module.params["quota"]))
+ if not module.params["default_quota"]:
+ default_quota = ""
+ else:
+ default_quota = str(human_to_bytes(module.params["default_quota"]))
+ if module.params["hard_limit"] is None:
+ hard_limit = current_account["hard_limit"]
+ else:
+ hard_limit = module.params["hard_limit"]
+ if module.params["default_hard_limit"] is None:
+ default_hard_limit = current_account["default_hard_limit"]
+ else:
+ default_hard_limit = module.params["default_hard_limit"]
+ if public:
+ if module.params["block_new_public_policies"] is None:
+ new_public_policies = current_account["block_new_public_policies"]
+ else:
+ new_public_policies = module.params["block_new_public_policies"]
+ if module.params["block_public_access"] is None:
+ public_access = current_account["block_public_access"]
+ else:
+ public_access = module.params["block_public_access"]
+ new_account = {
+ "hard_limit": hard_limit,
+ "default_hard_limit": default_hard_limit,
+ "quota": quota,
+ "default_quota": default_quota,
+ "block_new_public_policies": new_public_policies,
+ "block_public_access": public_access,
+ }
+ else:
+ new_account = {
+ "hard_limit": module.params["hard_limit"],
+ "default_hard_limit": module.params["default_hard_limit"],
+ "quota": quota,
+ "default_quota": default_quota,
+ }
if new_account != current_account:
changed = True
if not module.check_mode:
@@ -169,12 +233,14 @@ def update_s3acc(module):
msg="Failed to update account {0}. "
"Error: {1}".format(module.params["name"], res.errors[0].message)
)
+
module.exit_json(changed=changed)
def create_s3acc(module, blade):
"""Create Object Store Account"""
changed = True
+ versions = blade.api_version.list_versions().versions
if not module.check_mode:
try:
blade.object_store_accounts.create_object_store_accounts(
@@ -188,27 +254,26 @@ def create_s3acc(module, blade):
)
if module.params["quota"] or module.params["default_quota"]:
blade2 = get_system(module)
- if module.params["quota"] and not module.params["default_quota"]:
- osa = ObjectStoreAccountPatch(
- hard_limit_enabled=module.params["hard_limit"],
- quota_limit=module.params["quota"],
- )
- if not module.params["quota"] and module.params["default_quota"]:
- osa = ObjectStoreAccountPatch(
- bucket_defaults=BucketDefaults(
- hard_limit_enabled=module.params["default_hard_limit"],
- quota_limit=module.params["default_quota"],
- )
- )
+ if not module.params["default_quota"]:
+ default_quota = ""
else:
- osa = ObjectStoreAccountPatch(
- hard_limit_enabled=module.params["hard_limit"],
- quota_limit=module.params["quota"],
- bucket_defaults=BucketDefaults(
- hard_limit_enabled=module.params["default_hard_limit"],
- quota_limit=module.params["default_quota"],
- ),
- )
+ default_quota = str(human_to_bytes(module.params["default_quota"]))
+ if not module.params["quota"]:
+ quota = ""
+ else:
+ quota = str(human_to_bytes(module.params["quota"]))
+ if not module.params["hard_limit"]:
+ module.params["hard_limit"] = False
+ if not module.params["default_hard_limit"]:
+ module.params["default_hard_limit"] = False
+ osa = ObjectStoreAccountPatch(
+ hard_limit_enabled=module.params["hard_limit"],
+ quota_limit=quota,
+ bucket_defaults=BucketDefaults(
+ hard_limit_enabled=module.params["default_hard_limit"],
+ quota_limit=default_quota,
+ ),
+ )
res = blade2.patch_object_store_accounts(
object_store_account=osa, names=[module.params["name"]]
)
@@ -220,6 +285,28 @@ def create_s3acc(module, blade):
msg="Failed to set quotas correctly for account {0}. "
"Error: {1}".format(module.params["name"], res.errors[0].message)
)
+ if PUBLIC_API_VERSION in versions:
+ if not module.params["block_new_public_policies"]:
+ module.params["block_new_public_policies"] = False
+ if not module.params["block_public_access"]:
+ module.params["block_public_access"] = False
+ osa = ObjectStoreAccountPatch(
+ public_access_config=PublicAccessConfig(
+ block_new_public_policies=module.params[
+ "block_new_public_policies"
+ ],
+ block_public_access=module.params["block_public_access"],
+ )
+ )
+ res = blade2.patch_object_store_accounts(
+ object_store_account=osa, names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to Public Access config correctly for account {0}. "
+ "Error: {1}".format(module.params["name"], res.errors[0].message)
+ )
+
module.exit_json(changed=changed)
@@ -258,8 +345,10 @@ def main():
argument_spec.update(
dict(
name=dict(required=True, type="str"),
- hard_limit=dict(type="bool", default=False),
- default_hard_limit=dict(type="bool", default=False),
+ hard_limit=dict(type="bool"),
+ default_hard_limit=dict(type="bool"),
+ block_new_public_policies=dict(type="bool"),
+ block_public_access=dict(type="bool"),
quota=dict(type="str"),
default_quota=dict(type="str"),
state=dict(default="present", choices=["present", "absent"]),
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
index 55bc05c3f..1905184b1 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
@@ -48,6 +48,12 @@ options:
- If enabled this will override I(imported_key)
type: bool
default: false
+ multiple_keys:
+ description:
+ - Allow multiple access keys to be created for the user.
+ type: bool
+ default: false
+ version_added: "1.12.0"
remove_key:
description:
- Access key to be removed from user
@@ -181,27 +187,29 @@ def update_s3user(module, blade):
key_count += 1
if not exists:
if key_count < 2:
- changed = True
- if not module.check_mode:
- try:
- if (
- module.params["access_key"]
- and module.params["imported_key"]
+ try:
+ if module.params["access_key"] and module.params["imported_key"]:
+ module.warn("'access_key: true' overrides imported keys")
+ if module.params["access_key"]:
+ if key_count == 0 or (
+ key_count >= 1 and module.params["multiple_keys"]
):
- module.warn("'access_key: true' overrides imported keys")
- if module.params["access_key"]:
- result = blade.object_store_access_keys.create_object_store_access_keys(
- object_store_access_key=ObjectStoreAccessKey(
- user={"name": user}
+ changed = True
+ if not module.check_mode:
+ result = blade.object_store_access_keys.create_object_store_access_keys(
+ object_store_access_key=ObjectStoreAccessKey(
+ user={"name": user}
+ )
)
- )
- s3user_facts["fb_s3user"] = {
- "user": user,
- "access_key": result.items[0].secret_access_key,
- "access_id": result.items[0].name,
- }
- else:
- if IMPORT_KEY_API_VERSION in versions:
+ s3user_facts["fb_s3user"] = {
+ "user": user,
+ "access_key": result.items[0].secret_access_key,
+ "access_id": result.items[0].name,
+ }
+ else:
+ if IMPORT_KEY_API_VERSION in versions:
+ changed = True
+ if not module.check_mode:
blade.object_store_access_keys.create_object_store_access_keys(
names=[module.params["imported_key"]],
object_store_access_key=ObjectStoreAccessKeyPost(
@@ -211,19 +219,19 @@ def update_s3user(module, blade):
],
),
)
- except Exception:
- if module.params["imported_key"]:
- module.fail_json(
- msg="Object Store User {0}: Access Key import failed".format(
- user
- )
+ except Exception:
+ if module.params["imported_key"]:
+ module.fail_json(
+ msg="Object Store User {0}: Access Key import failed".format(
+ user
)
- else:
- module.fail_json(
- msg="Object Store User {0}: Access Key creation failed".format(
- user
- )
+ )
+ else:
+ module.fail_json(
+ msg="Object Store User {0}: Access Key creation failed".format(
+ user
)
+ )
else:
module.warn(
"Object Store User {0}: Maximum Access Key count reached".format(
@@ -370,6 +378,7 @@ def main():
name=dict(required=True, type="str"),
account=dict(required=True, type="str"),
access_key=dict(default="false", type="bool"),
+ multiple_keys=dict(default="false", type="bool"),
imported_key=dict(type="str", no_log=False),
remove_key=dict(type="str", no_log=False),
imported_secret=dict(type="str", no_log=True),
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py
index 21e83c002..79f53adc2 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py
@@ -123,7 +123,7 @@ def main():
if 5 < module.params["timeout"] > 180 and module.params["timeout"] != 0:
module.fail_json(msg="Timeout value must be between 5 and 180 minutes")
blade = get_system(module)
- current_timeout = list(blade.get_arrays().items)[0].idle_timeout * 60000
+ current_timeout = list(blade.get_arrays().items)[0].idle_timeout / 60000
if state == "present" and current_timeout != module.params["timeout"]:
set_timeout(module, blade)
elif state == "absent" and current_timeout != 0:
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py
index 6e7dbe49d..be8716454 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py
@@ -249,6 +249,7 @@ def main():
names=[module.params["account"] + "/" + module.params["name"]]
).status_code
!= 200
+ and state != "show"
):
module.fail_json(
msg="Account User {0}/{1} does not exist".format(