Diffstat (limited to 'ansible_collections/purestorage/flashblade')
-rw-r--r--  ansible_collections/purestorage/flashblade/.git-blame-ignore-revs  2
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/CONTRIBUTING.md  19
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/bug_report.md  38
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/feature_request.md  20
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/pull_request_template.md  25
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml  10
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/workflows/black.yaml  11
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/workflows/main.yml  62
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/workflows/stale.yml  19
-rw-r--r--  ansible_collections/purestorage/flashblade/.gitignore  4
-rw-r--r--  ansible_collections/purestorage/flashblade/.pylintrc  587
-rw-r--r--  ansible_collections/purestorage/flashblade/.yamllint  7
-rw-r--r--  ansible_collections/purestorage/flashblade/CHANGELOG.rst  263
-rw-r--r--  ansible_collections/purestorage/flashblade/COPYING.GPLv3  674
-rw-r--r--  ansible_collections/purestorage/flashblade/FILES.json  1279
-rw-r--r--  ansible_collections/purestorage/flashblade/LICENSE  674
-rw-r--r--  ansible_collections/purestorage/flashblade/MANIFEST.json  37
-rw-r--r--  ansible_collections/purestorage/flashblade/README.md  98
-rw-r--r--  ansible_collections/purestorage/flashblade/README.rst  19
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml  254
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/changelog.yaml  329
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/config.yaml  31
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/105_max_access_key.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/107_add_remove_s3user_key.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/108_dns_search_fix.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/109_update_info.yaml  3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/111_dsrole_update_idempotency.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/112_fix_check_mode.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/113_policy_cleanup.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/114_certificate_update.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/115_multiprotocol.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/121_replication_perf.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/123_lifecycle_rule_fix.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/127_add_eula.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/128_add_32_to_info.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/129-virtualhost.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/132_add_timeout.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/135_add_user_policies.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/136_add_s3user_policy.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/138_add_ad_module.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/139_add_keytabs.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/140_more_32_info.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/147_no_gateway.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/150_fix_joint_nfs_version_change.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/152_s3acc_lowercase.yaml  3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/153_add_quota.yaml  3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/154_add_snap_now.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/158_support_lags.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/159_add_lag.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/161_add_lifecycle_info.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/162_new_lifecycle.yaml  3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/163_admin_key.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/164_add_admin.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/166_lag_mac_note.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/167_fix_logins.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/169_pypureclient_fix.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/174_access_policies.yaml  3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/175_throttle_support.yaml  3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/176_nfs_export_policies.yaml  4
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/179_fqcn.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/183_v2_connections.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/184_certificate_typos.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/185_nfs_export_rule.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/186_add_tz.yaml  3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/187_rename_nfs_policy.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/188_bucket_type.yaml  3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/191_add_quota_info.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/194_lists_for_service.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/200_proxy.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/202_multiple_snap_rules.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/211_change_booleans.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/212_object_account_quota.yaml  3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/213_sec_update.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/215_encrypt_sec_info.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/216_extra_bucket_info.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/217_inventory.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/218_object_account_info.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/220_s3user_key_fix.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml  3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml  33
-rw-r--r--  ansible_collections/purestorage/flashblade/meta/runtime.yml  2
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/.keep  0
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/files/.keep  0
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/roles/.keep  0
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/tasks/.keep  0
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/templates/.keep  0
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/vars/.keep  0
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py  42
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py  148
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_ad.py  404
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_admin.py  137
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py  245
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_apiclient.py  250
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py  143
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py  115
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py  398
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py  313
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py  249
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py  198
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py  574
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py  175
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py  470
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py  213
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_eula.py  131
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py  944
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py  308
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_groupquota.py  321
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py  1548
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py  279
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_keytabs.py  254
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py  315
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py  490
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_messages.py  193
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py  224
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py  158
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py  124
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_pingtrace.py  277
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py  2079
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py  155
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py  126
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py  243
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py  314
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py  436
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py  124
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py  379
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py  210
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py  357
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py  347
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py  198
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py  201
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py  136
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_tz.py  206
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py  211
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py  269
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_userquota.py  315
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_virtualhost.py  142
-rw-r--r--  ansible_collections/purestorage/flashblade/requirements.txt  5
-rw-r--r--  ansible_collections/purestorage/flashblade/roles/.keep  0
-rw-r--r--  ansible_collections/purestorage/flashblade/settings.json  8
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt  2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt  2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt  2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt  2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt  2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt  2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt  2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt  2
165 files changed, 20776 insertions, 0 deletions
diff --git a/ansible_collections/purestorage/flashblade/.git-blame-ignore-revs b/ansible_collections/purestorage/flashblade/.git-blame-ignore-revs
new file mode 100644
index 000000000..6d4d90c52
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.git-blame-ignore-revs
@@ -0,0 +1,2 @@
+# Migrate code style to Black
+6c785d5453095cd0dfa4088f28b3fd4feeaafc6a
diff --git a/ansible_collections/purestorage/flashblade/.github/CONTRIBUTING.md b/ansible_collections/purestorage/flashblade/.github/CONTRIBUTING.md
new file mode 100644
index 000000000..48dc0d566
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/CONTRIBUTING.md
@@ -0,0 +1,19 @@
+# WELCOME TO PURE STORAGE FLASHBLADE ANSIBLE COLLECTION GITHUB
+
+Hi! Nice to see you here!
+
+## QUESTIONS ?
+
+The GitHub issue tracker is not the best place for questions for various reasons, but the [mailing list](mailto:pure-ansible-team@purestorage.com) is a very helpful place for them.
+
+## CONTRIBUTING ?
+
+By contributing you agree that these contributions are your own (or approved by your employer) and you grant a full, complete, irrevocable copyright license to all users and developers of the project, present and future, pursuant to the license of the project.
+
+## BUG TO REPORT ?
+
+You can report bugs or make enhancement requests at the [Ansible GitHub issue page](http://github.com/Pure-Storage-Ansible/FlashBlade-Collection/issues/new/choose) by filling out the issue template that will be presented.
+
+Also please make sure you are testing on the latest released version of Ansible or the development branch; see the [Installation Guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for details.
+
+Thanks!
diff --git a/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/bug_report.md b/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 000000000..dd84ea782
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,38 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Browser [e.g. chrome, safari]
+ - Version [e.g. 22]
+
+**Smartphone (please complete the following information):**
+ - Device: [e.g. iPhone6]
+ - OS: [e.g. iOS8.1]
+ - Browser [e.g. stock browser, safari]
+ - Version [e.g. 22]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/feature_request.md b/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 000000000..bbcbbe7d6
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/ansible_collections/purestorage/flashblade/.github/pull_request_template.md b/ansible_collections/purestorage/flashblade/.github/pull_request_template.md
new file mode 100644
index 000000000..27079cb18
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/pull_request_template.md
@@ -0,0 +1,25 @@
+##### SUMMARY
+<!--- Describe the change below, including rationale and design decisions -->
+
+<!--- HINT: Include "Fixes #nnn" if you are fixing an existing issue -->
+
+##### ISSUE TYPE
+<!--- Pick one below and delete the rest -->
+- Bugfix Pull Request
+- Docs Pull Request
+- Feature Pull Request
+- New Module Pull Request
+- New Role Pull Request
+
+##### COMPONENT NAME
+<!--- Write the short name of the module, plugin, task or feature below -->
+
+##### ADDITIONAL INFORMATION
+<!--- Include additional information to help people understand the change here -->
+<!--- A step-by-step reproduction of the problem is helpful if there is no related issue -->
+- All new PRs must include a changelog fragment
+- Details of naming convention and format can be found [here](https://docs.ansible.com/ansible/latest/community/development_process.html#creating-a-changelog-fragment)
+<!--- Paste verbatim command output below, e.g. before and after your change -->
+```paste below
+
+```
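A changelog fragment, as required by the PR template above, is a small YAML file under `changelogs/fragments/` (many such fragments appear later in this diff). A minimal sketch, using a hypothetical file name and module name; the section keys follow the standard Ansible changelog fragment format:

```yaml
# changelogs/fragments/999_example_fix.yaml  (hypothetical file name)
bugfixes:
  - purefb_example - illustrative one-line description of the bug fixed
minor_changes:
  - purefb_example - illustrative one-line description of a minor change
```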
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml b/ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml
new file mode 100644
index 000000000..0b2102184
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml
@@ -0,0 +1,10 @@
+name: Ansible Lint # feel free to pick your own name
+on: [push, pull_request]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Run ansible-lint
+        uses: ansible-community/ansible-lint-action@main
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/black.yaml b/ansible_collections/purestorage/flashblade/.github/workflows/black.yaml
new file mode 100644
index 000000000..e5f9711f6
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/black.yaml
@@ -0,0 +1,11 @@
+name: Lint
+
+on: [push, pull_request]
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v3
+      - uses: psf/black@stable
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/main.yml b/ansible_collections/purestorage/flashblade/.github/workflows/main.yml
new file mode 100644
index 000000000..e66ce2991
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/main.yml
@@ -0,0 +1,62 @@
+name: Pure Storage Ansible CI
+
+on:
+  pull_request:
+  push:
+  schedule:
+    - cron: '25 10 * * *'
+
+jobs:
+  build:
+    name: Build flashblade on Ansible ${{ matrix.ansible }} (Python ${{ matrix.python-version }})
+    runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        ansible:
+          - stable-2.11
+          - stable-2.12
+          - stable-2.13
+          - stable-2.14
+          - stable-2.15
+          - devel
+        python-version:
+          - 3.8
+          - 3.9
+          - "3.10"
+          - "3.11"
+        exclude:
+          - python-version: "3.11"
+            ansible: stable-2.11
+          - python-version: "3.11"
+            ansible: stable-2.12
+          - python-version: "3.11"
+            ansible: stable-2.13
+          - python-version: "3.10"
+            ansible: stable-2.11
+          - python-version: 3.8
+            ansible: stable-2.14
+          - python-version: 3.8
+            ansible: stable-2.15
+          - python-version: 3.8
+            ansible: devel
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v3
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v3
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          python${{ matrix.python-version }} -m pip install --upgrade pip
+          python${{ matrix.python-version }} -m pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
+
+      - name: Run sanity tests
+        run: |
+          pwd
+          mkdir -p ansible_collections/purestorage/flashblade
+          rsync -av . ansible_collections/purestorage/flashblade --exclude ansible_collections/purestorage/flashblade
+          cd ansible_collections/purestorage/flashblade
+          ansible-test sanity -v --color --python ${{ matrix.python-version }} --docker
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/stale.yml b/ansible_collections/purestorage/flashblade/.github/workflows/stale.yml
new file mode 100644
index 000000000..7bbc0505b
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/stale.yml
@@ -0,0 +1,19 @@
+name: Mark stale issues and pull requests
+
+on:
+  schedule:
+    - cron: "0 0 * * *"
+
+jobs:
+  stale:
+
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/stale@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          stale-issue-message: 'Stale issue message'
+          stale-pr-message: 'Stale pull request message'
+          stale-issue-label: 'no-issue-activity'
+          stale-pr-label: 'no-pr-activity'
diff --git a/ansible_collections/purestorage/flashblade/.gitignore b/ansible_collections/purestorage/flashblade/.gitignore
new file mode 100644
index 000000000..c53f26278
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.gitignore
@@ -0,0 +1,4 @@
+*.tar.gz
+*.pyc
+.pylintrc
+collections/ansible_collections/purestorage/flashblade/tests/output/*
diff --git a/ansible_collections/purestorage/flashblade/.pylintrc b/ansible_collections/purestorage/flashblade/.pylintrc
new file mode 100644
index 000000000..9570a2b59
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.pylintrc
@@ -0,0 +1,587 @@
+[MASTER]
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Specify a configuration file.
+#rcfile=
+
+# When enabled, pylint would attempt to guess common misconfiguration and emit
+# user-friendly hints instead of false-positive error messages
+suggestion-mode=yes
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=
+ abstract-method,
+ access-member-before-definition,
+ ansible-deprecated-version,
+ arguments-differ,
+ assignment-from-no-return,
+ assignment-from-none,
+ attribute-defined-outside-init,
+ bad-continuation,
+ bad-indentation,
+ bad-mcs-classmethod-argument,
+ broad-except,
+ c-extension-no-member,
+ cell-var-from-loop,
+ chained-comparison,
+ comparison-with-callable,
+ consider-iterating-dictionary,
+ consider-merging-isinstance,
+ consider-using-dict-comprehension,
+ consider-using-enumerate,
+ consider-using-get,
+ consider-using-in,
+ consider-using-set-comprehension,
+ consider-using-ternary,
+ deprecated-lambda,
+ deprecated-method,
+ deprecated-module,
+ eval-used,
+ exec-used,
+ expression-not-assigned,
+ fixme,
+ function-redefined,
+ global-statement,
+ global-variable-undefined,
+ import-error,
+ import-self,
+ inconsistent-return-statements,
+ invalid-envvar-default,
+ invalid-name,
+ invalid-sequence-index,
+ keyword-arg-before-vararg,
+ len-as-condition,
+ line-too-long,
+ literal-comparison,
+ locally-disabled,
+ method-hidden,
+ misplaced-comparison-constant,
+ missing-docstring,
+ no-else-raise,
+ no-else-return,
+ no-init,
+ no-member,
+ no-name-in-module,
+ no-self-use,
+ no-value-for-parameter,
+ non-iterator-returned,
+ not-a-mapping,
+ not-an-iterable,
+ not-callable,
+ old-style-class,
+ pointless-statement,
+ pointless-string-statement,
+ possibly-unused-variable,
+ protected-access,
+ redefined-argument-from-local,
+ redefined-builtin,
+ redefined-outer-name,
+ redefined-variable-type,
+ reimported,
+ relative-import,
+ signature-differs,
+ simplifiable-if-expression,
+ simplifiable-if-statement,
+ subprocess-popen-preexec-fn,
+ super-init-not-called,
+ superfluous-parens,
+ too-few-public-methods,
+ too-many-ancestors,
+ too-many-arguments,
+ too-many-boolean-expressions,
+ too-many-branches,
+ too-many-function-args,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-public-methods,
+ too-many-return-statements,
+ too-many-statements,
+ trailing-comma-tuple,
+ trailing-comma-tuple,
+ try-except-raise,
+ unbalanced-tuple-unpacking,
+ undefined-loop-variable,
+ unexpected-keyword-arg,
+ ungrouped-imports,
+ unidiomatic-typecheck,
+ unnecessary-pass,
+ unsubscriptable-object,
+ unsupported-assignment-operation,
+ unsupported-delete-operation,
+ unsupported-membership-test,
+ unused-argument,
+ unused-import,
+ unused-variable,
+ used-before-assignment,
+ useless-object-inheritance,
+ useless-return,
+ useless-super-delegation,
+ wrong-import-order,
+ wrong-import-position,
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=c-extension-no-member
+
+
+[REPORTS]
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio).You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Activate the evaluation score.
+score=yes
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+# Complete name of functions that never returns. When checking for
+# inconsistent-return-statements if a never returning function is called then
+# it will be considered as an explicit return statement and no message will be
+# printed.
+never-returning-functions=optparse.Values,sys.exit
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,
+ _cb
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,io,builtins
+
+
+[BASIC]
+
+# Naming style matching correct argument names
+argument-naming-style=snake_case
+
+# Regular expression matching correct argument names. Overrides argument-
+# naming-style
+#argument-rgx=
+
+# Naming style matching correct attribute names
+attr-naming-style=snake_case
+
+# Regular expression matching correct attribute names. Overrides attr-naming-
+# style
+#attr-rgx=
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata,
+ _,
+
+# Naming style matching correct class attribute names
+class-attribute-naming-style=any
+
+# Regular expression matching correct class attribute names. Overrides class-
+# attribute-naming-style
+#class-attribute-rgx=
+
+# Naming style matching correct class names
+class-naming-style=PascalCase
+
+# Regular expression matching correct class names. Overrides class-naming-style
+#class-rgx=
+
+# Naming style matching correct constant names
+const-naming-style=UPPER_CASE
+
+# Regular expression matching correct constant names. Overrides const-naming-
+# style
+#const-rgx=
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+# Naming style matching correct function names
+function-naming-style=snake_case
+
+# Regular expression matching correct function names. Overrides function-
+# naming-style
+#function-rgx=
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,
+ j,
+ k,
+ f,
+ e,
+ ex,
+ Run,
+ C,
+ __metaclass__,
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Naming style matching correct inline iteration names
+inlinevar-naming-style=any
+
+# Regular expression matching correct inline iteration names. Overrides
+# inlinevar-naming-style
+#inlinevar-rgx=
+
+# Naming style matching correct method names
+method-naming-style=snake_case
+
+# Regular expression matching correct method names. Overrides method-naming-
+# style
+#method-rgx=
+
+# Naming style matching correct module names
+module-naming-style=snake_case
+
+# Regular expression matching correct module names. Overrides module-naming-
+# style
+#module-rgx=
+module-rgx=[a-z_][a-z0-9_-]{2,40}$
+method-rgx=[a-z_][a-z0-9_]{2,40}$
+function-rgx=[a-z_][a-z0-9_]{2,40}$
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+property-classes=abc.abstractproperty
+
+# Naming style matching correct variable names
+variable-naming-style=snake_case
+
+# Regular expression matching correct variable names. Overrides variable-
+# naming-style
+#variable-rgx=
+
+
+[SPELLING]
+
+# Limits count of emitted suggestions for spelling mistakes
+max-spelling-suggestions=4
+
+# Spelling dictionary name. Available dictionaries: none. To make it working
+# install python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Maximum number of characters on a single line.
+max-line-length=160
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,
+ dict-separator
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+#generated-members=PurityFb.*
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference
+# can return multiple potential results while evaluating a Python object, but
+# some branches might not be evaluated, which results in partial inference. In
+# that case, it might be useful to still emit no-member and other checks for
+# the rest of the inferred objects.
+ignore-on-opaque-inference=yes
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis. It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+ _MovedItems,
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+
+[SIMILARITIES]
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,
+ XXX,
+ TODO
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,
+ __new__,
+ setUp
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,
+ _fields,
+ _replace,
+ _source,
+ _make
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[IMPORTS]
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,
+ TERMIOS,
+ Bastion,
+ rexec
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/ansible_collections/purestorage/flashblade/.yamllint b/ansible_collections/purestorage/flashblade/.yamllint
new file mode 100644
index 000000000..6c19f43f7
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.yamllint
@@ -0,0 +1,7 @@
+extends: default
+
+rules:
+  document-start: disable
+  indentation: disable
+  line-length:
+    max: 200
diff --git a/ansible_collections/purestorage/flashblade/CHANGELOG.rst b/ansible_collections/purestorage/flashblade/CHANGELOG.rst
new file mode 100644
index 000000000..c252af127
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/CHANGELOG.rst
@@ -0,0 +1,263 @@
+====================================
+Purestorage.Flashblade Release Notes
+====================================
+
+.. contents:: Topics
+
+
+v1.11.0
+=======
+
+Minor Changes
+-------------
+
+- purefb_info - Added `encryption` and `support_keys` information.
+- purefb_info - Added bucket quota and safemode information per bucket
+- purefb_info - Added security update version for Purity//FB 4.0.2, or higher
+- purefb_info - Updated object store account information
+- purefb_inventory - Added `part_number` to hardware item information.
+- purefb_policy - Added support for multiple rules in snapshot policies
+- purefb_proxy - Added new boolean parameter `secure`. Default of true (for backwards compatibility) sets the protocol to be `https://`. False sets `http://`
+- purefb_s3acc - Added support for default bucket quotas and hard limits
+- purefb_s3acc - Added support for object account quota and hard limit
+
+Bugfixes
+--------
+
+- purefb_info - Fixed issue when more than 10 buckets have lifecycle rules.
+- purefb_s3user - Fix incorrect response when bad key/secret pair provided for new user
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_pingtrace - Employ the internal FlashBlade ping and trace mechanisms
+
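A minimal task sketch for the new `secure` option noted above; the `host` and `port` parameters are assumed from the existing purefb_proxy options, and the `fb_url`/`api_token` values are placeholders:

```yaml
- name: Point phone home at a plain HTTP proxy
  purestorage.flashblade.purefb_proxy:
    host: proxy.example.com        # placeholder proxy address
    port: 8080
    secure: false                  # new in 1.11.0; default true keeps https://
    fb_url: 10.10.10.2             # placeholder FlashBlade management address
    api_token: T-xxxxxxxx          # placeholder API token
```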
+v1.10.0
+=======
+
+Minor Changes
+-------------
+
+- All - Update documentation examples with FQCNs
+- purefb_ad - Allow service to be a list
+- purefb_bucket - Allow setting of bucket type to support VSO - requires Purity//FB 3.3.3 or higher
+- purefb_certs - Fix several misspellings of certificate
+- purefb_info - Added filesystem default, user and group quotas where available
+- purefb_info - Expose object store bucket type from Purity//FB 3.3.3
+- purefb_info - Show information for current timezone
+- purefb_policy - Allow rename of NFS Export Policies from Purity//FB 3.3.3
+- purefb_tz - Add support for FlashBlade timezone management
+
+Bugfixes
+--------
+
+- purefb_connect - Resolve connection issues between two FBs that are throttling capable
+- purefb_policy - Fix incorrect API call for NFS export policy rule creation
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_messages - List FlashBlade Alert Messages
+- purestorage.flashblade.purefb_tz - Configure Pure Storage FlashBlade timezone
+
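A sketch of the new timezone module listed above, assuming a `timezone` parameter taking an IANA zone name and the usual connection options; values are placeholders:

```yaml
- name: Set the FlashBlade timezone
  purestorage.flashblade.purefb_tz:
    timezone: America/New_York     # assumed parameter name
    fb_url: 10.10.10.2
    api_token: T-xxxxxxxx
```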
+v1.9.0
+======
+
+Minor Changes
+-------------
+
+- purefb_admin - New module to manage global admin settings
+- purefb_connect - Add support for array connections to have bandwidth throttling defined
+- purefb_fs - Add support for NFS export policies
+- purefb_info - Add NFS export policies and rules
+- purefb_info - Show array connections bandwidth throttle information
+- purefb_policy - Add NFS export policies, with rules, as a new policy type
+- purefb_policy - Add support for Object Store Access Policies, associated rules and user grants
+- purefb_policy - New parameter `policy_type` added. For backwards compatibility, default to `snapshot` if not provided.
+
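A hedged sketch of the `policy_type` parameter described above; the NFS rule option shown (`client`) is an assumption based on typical export rules, not confirmed by this diff:

```yaml
- name: Create an NFS export policy with one rule
  purestorage.flashblade.purefb_policy:
    name: nfs-exports
    policy_type: nfs               # new in 1.9.0; defaults to snapshot
    client: 10.0.0.0/24            # assumed rule option
    fb_url: 10.10.10.2
    api_token: T-xxxxxxxx
```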
+v1.8.1
+======
+
+Minor Changes
+-------------
+
+- purefb.py - Use latest `pypureclient` SDK with fix for "best fit". No longer requires double login to negotiate best API version.
+
+v1.8.0
+======
+
+Minor Changes
+-------------
+
+- purefb.py - Add check to ensure FlashBlade uses the latest REST version possible for Purity version installed
+- purefb_info - Add object lifecycles rules to bucket subset
+- purefb_lifecycle - Add support for updated object lifecycle rules. See documentation for details of new parameters.
+- purefb_lifecycle - Change `keep_for` parameter to be `keep_previous_for`. `keep_for` is deprecated and will be removed in a later version.
+- purefb_user - Add support for managing user public key and user unlock
+
+Known Issues
+------------
+
+- purefb_lag - The mac_address field in the response is not populated. This will be fixed in a future FlashBlade update.
+
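A sketch of the renamed lifecycle option above; the `name`/`bucket` parameter names and the duration format are assumptions:

```yaml
- name: Keep previous object versions for 30 days
  purestorage.flashblade.purefb_lifecycle:
    name: expire-old-versions      # assumed rule name parameter
    bucket: mybucket               # assumed bucket parameter
    keep_previous_for: 30d         # replaces the deprecated keep_for
    fb_url: 10.10.10.2
    api_token: T-xxxxxxxx
```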
+v1.7.0
+======
+
+Minor Changes
+-------------
+
+- purefb_groupquota - New module to manage individual filesystem group quotas
+- purefb_lag - Add support for LAG management
+- purefb_snap - Add support for immediate snapshot to remote connected FlashBlade
+- purefb_subnet - Add support for multiple LAGs.
+- purefb_userquota - New module to manage individual filesystem user quotas
+
+Bugfixes
+--------
+
+- purefb_fs - Fix bug where changing the state of both NFS v3 and v4.1 at the same time ignored one of these.
+- purefb_s3acc - Ensure S3 Account Name is always lowercase
+- purefb_s3user - Ensure S3 Account Name is always lowercase
+- purefb_subnet - Allow subnet creation with no gateway
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_groupquota - Manage filesystem group quotas
+- purestorage.flashblade.purefb_lag - Manage FlashBlade Link Aggregation Groups
+- purestorage.flashblade.purefb_userquota - Manage filesystem user quotas
+
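A sketch of the new group quota module above; the parameter names (`name` for the filesystem, `gname`, `quota`) are assumptions and the values are placeholders:

```yaml
- name: Set a 10G quota for a group on a filesystem
  purestorage.flashblade.purefb_groupquota:
    name: myfs                     # assumed: target filesystem
    gname: developers              # assumed: group name
    quota: 10G
    fb_url: 10.10.10.2
    api_token: T-xxxxxxxx
```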
+v1.6.0
+======
+
+Minor Changes
+-------------
+
+- purefb_apiclient - New module to manage API Clients
+- purefb_ad - New module to manage Active Directory Account
+- purefb_eula - New module to sign EULA
+- purefb_info - Add Active Directory, Kerberos and Object Store Account information
+- purefb_info - Add extra info for Purity//FB 3.2+ systems
+- purefb_keytabs - New module to manage Kerberos Keytabs
+- purefb_s3user - Add access policy option to user creation
+- purefb_timeout - Add module to set GUI idle timeout
+- purefb_userpolicy - New module to manage object store user access policies
+- purefb_virtualhost - New module to manage Object Store Virtual Hosts
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_ad - Manage FlashBlade Active Directory Account
+- purestorage.flashblade.purefb_apiclient - Manage FlashBlade API Clients
+- purestorage.flashblade.purefb_eula - Sign Pure Storage FlashBlade EULA
+- purestorage.flashblade.purefb_keytabs - Manage FlashBlade Kerberos Keytabs
+- purestorage.flashblade.purefb_timeout - Configure Pure Storage FlashBlade GUI idle timeout
+- purestorage.flashblade.purefb_userpolicy - Manage FlashBlade Object Store User Access Policies
+- purestorage.flashblade.purefb_virtualhost - Manage FlashBlade Object Store Virtual Hosts
+
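A sketch of the GUI idle timeout module above, assuming a `timeout` parameter expressed in minutes; values are placeholders:

```yaml
- name: Set the GUI idle timeout to 30 minutes
  purestorage.flashblade.purefb_timeout:
    timeout: 30                    # assumed to be minutes
    fb_url: 10.10.10.2
    api_token: T-xxxxxxxx
```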
+v1.5.0
+======
+
+Minor Changes
+-------------
+
+- purefb_certs - Add update functionality for array cert
+- purefb_fs - Add multiprotocol ACL support
+- purefb_info - Add information regarding filesystem multiprotocol (where available)
+- purefb_info - Add new parameter to provide details on admin users
+- purefb_info - Add replication performance statistics
+- purefb_s3user - Add ability to remove an S3 user's existing access key
+
+Bugfixes
+--------
+
+- purefb_* - Return a correct value for `changed` in all modules when in check mode
+- purefb_dns - Deprecate search parameter
+- purefb_dsrole - Resolve idempotency issue
+- purefb_lifecycle - Fix error when creating new bucket lifecycle rule.
+- purefb_policy - Ensure undeclared variables are set correctly
+- purefb_s3user - Fix maximum access_key count logic
+
+v1.4.0
+======
+
+Minor Changes
+-------------
+
+- purefb_banner - Module to manage the GUI and SSH login message
+- purefb_certgrp - Module to manage FlashBlade Certificate Groups
+- purefb_certs - Module to create and delete SSL certificates
+- purefb_connect - Support idempotency when existing connection is incoming
+- purefb_fs - Add new options for filesystem control (https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/pull/81)
+- purefb_fs - Default filesystem size on creation changes from 32G to ``unlimited``
+- purefb_fs - Fix error in deletion and eradication of filesystem
+- purefb_fs_replica - Remove condition to attach/detach policies on unhealthy replica-link
+- purefb_info - Add support to list filesystem policies
+- purefb_lifecycle - Module to manage FlashBlade Bucket Lifecycle Rules
+- purefb_s3user - Add support for imported user access keys
+- purefb_syslog - Module to manage syslog server configuration
+
+Bugfixes
+--------
+
+- purefb_policy - Resolve multiple issues related to incorrect use of timezones
+- purefb_connect - Ensure changing encryption status on array connection is performed correctly
+- purefb_connect - Fix breaking change created in purity_fb SDK 1.9.2 for deletion of array connections
+- purefb_connect - Hide target array API token
+- purefb_ds - Ensure updating directory service configurations completes correctly
+- purefb_info - Fix issue getting array info when encrypted connection exists
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_banner - Configure Pure Storage FlashBlade GUI and SSH MOTD message
+- purestorage.flashblade.purefb_certgrp - Manage FlashBlade Certificate Groups
+- purestorage.flashblade.purefb_certs - Manage FlashBlade SSL Certificates
+- purestorage.flashblade.purefb_lifecycle - Manage FlashBlade object lifecycles
+- purestorage.flashblade.purefb_syslog - Configure Pure Storage FlashBlade syslog settings
+
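The default-size and eradication changes above imply a two-stage delete for filesystems; a sketch assuming the documented `state`/`eradicate` options, with placeholder values:

```yaml
- name: Destroy and immediately eradicate a filesystem
  purestorage.flashblade.purefb_fs:
    name: myfs
    state: absent
    eradicate: true                # without this the filesystem is only destroyed
    fb_url: 10.10.10.2
    api_token: T-xxxxxxxx
```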
+v1.3.0
+======
+
+Release Summary
+---------------
+
+| Release Date: 2020-08-08
+| This changelog describes all changes made to the modules and plugins included in this collection since Ansible 2.9.0
+
+
+Major Changes
+-------------
+
+- purefb_alert - manage alert email settings on a FlashBlade
+- purefb_bladename - manage FlashBlade name
+- purefb_bucket_replica - manage bucket replica links on a FlashBlade
+- purefb_connect - manage connections between FlashBlades
+- purefb_dns - manage DNS settings on a FlashBlade
+- purefb_fs_replica - manage filesystem replica links on a FlashBlade
+- purefb_inventory - get information about the hardware inventory of a FlashBlade
+- purefb_ntp - manage the NTP settings for a FlashBlade
+- purefb_phonehome - manage the phone home settings for a FlashBlade
+- purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+- purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+- purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+- purefb_snmp_agent - modify the FlashBlade SNMP Agent
+- purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+- purefb_target - manage remote S3-capable targets for a FlashBlade
+- purefb_user - manage local ``pureuser`` account password on a FlashBlade
+
+Minor Changes
+-------------
+
+- purefb_bucket - Versioning support added
+- purefb_info - new options added for information collection
+- purefb_network - Add replication service type
+- purefb_s3user - Limit ``access_key`` recreation to 3 times
+- purefb_s3user - return dict changed from ``ansible_facts`` to ``s3user_info``
+
+Bugfixes
+--------
+
+- purefb_bucket - Add warning message if ``state`` is ``absent`` without ``eradicate``
+- purefb_fs - Add graceful exit when ``state`` is ``absent`` and filesystem not eradicated
+- purefb_fs - Add warning message if ``state`` is ``absent`` without ``eradicate``
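The ``s3user_info`` return-value change noted above can be consumed as in this sketch; the `access_key: true` switch for generating a key is an assumption, and values are placeholders:

```yaml
- name: Create an object store user and capture its keys
  purestorage.flashblade.purefb_s3user:
    name: anon
    account: myaccount
    access_key: true               # assumed switch to generate a key
    fb_url: 10.10.10.2
    api_token: T-xxxxxxxx
  register: user

- name: Show the returned s3user_info dict
  ansible.builtin.debug:
    var: user.s3user_info
```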
diff --git a/ansible_collections/purestorage/flashblade/COPYING.GPLv3 b/ansible_collections/purestorage/flashblade/COPYING.GPLv3
new file mode 100644
index 000000000..94a9ed024
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/COPYING.GPLv3
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/ansible_collections/purestorage/flashblade/FILES.json b/ansible_collections/purestorage/flashblade/FILES.json
new file mode 100644
index 000000000..5cfa68659
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/FILES.json
@@ -0,0 +1,1279 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.9.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.13.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.16.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.14.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.11.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.12.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.15.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.10.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c429527b799623f57e6363e14ff8a319844c9120f4dfa18bcea3849cdc07128",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/black.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fb3e0af2e41fb0618586a2990e6645fb9b29d1a7b64b7168c5d27af320569c8",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/ansible-lint.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c85688d98b71e3a6594530a362cd5d2cf83842ceaccd0e0fc76e233777c1cef",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/stale.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bdef4889afabcd627fc30711a0809c7468b8c9e64cbcebe1334f794a41e7bd9",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f7d9b7fc9ac71a4ff36243422b04f4cf163a254c52e8ab647fb84807bc3ea21",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/feature_request.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f48c52f209a971b8e7eae4120144d28fcf8ee38a7778a7b4d8cf1ab356617d2",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/bug_report.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c8d64f29fb4536513653bf8c97da30f3340e2041b91c8952db1515d6b23a7b3",
+ "format": 1
+ },
+ {
+ "name": ".github/pull_request_template.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "565ead1b588caaa10cd6f2ed1bb6c809eb2ad93bf75da3a198690cac778432d6",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df18179bb2f5447a56ac92261a911649b96821c0b2c08eea62d5cc6b0195203f",
+ "format": 1
+ },
+ {
+ "name": ".yamllint",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2970fa4875092f99825ac0da3c82d2413ce973087b9945e68fdfa7b3b1e2012e",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c4d2257a4a25daf934a2b149aaa3397371d32f99f0b7042ca51a1a5fe981917",
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87cb6471722fa1096099f228091480939c5b7c3ac39c2819543324a7701e66a3",
+ "format": 1
+ },
+ {
+ "name": "playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/templates/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/files/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/vars/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f92bbcfdf90122b0ffdbe430cd0ff9b2a3b1e3cd1c099e0436b251de8674d74",
+ "format": 1
+ },
+ {
+ "name": "settings.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f64528ffd800423e1d49a3c79cdd3892548a57177ea1a1caacbbcd275390b792",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3e019033a4ff6d651103704d47629e6d911cb949652bd5e6121d7a918dbc480",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1b77eeb2d9f7242075e746537713be29e397fe6954f13a1caf8b10695434b9b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/.plugin-cache.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b528379cbf853914f8e8192b15e34bad21ea8c2b4de7faaab4f045fe1921fa4b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "728d1a92a9effec8bd73c032a3bd53fc8eb4d9029c824a2b6e1179b6922bf488",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/186_add_tz.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44209d75080c5e4f437f409bb37e0f16c662658a6243fa890339fc076dfa7cd3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/81_purefb_fs_new_options.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "abb817b52fdfa70b538ca9efce8d642282383b6961c47bde20ce0a023d2b941d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/101_fix_policy_and_timezone_error.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e9c5c95b8333fee22646f4e83e9034172182b1e99c084725f08df48e45d3d47",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/163_admin_key.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd290345ed66c0809e6be94cabb6f1823b7e0b3f61d6a88a13f16ae849ce4399",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/220_s3user_key_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae00607f47b12b62456cb037b31474be8b7de0820b46ced24fc4a96b43f0eb76",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/211_change_booleans.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f04fd18a42e321cb3818a579e14cc50a6d27935196ff04632e2db44f7b807322",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/217_inventory.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4832bed915e1a18327ab9d7c15c65f55094f08215a26028d426ca694a90c2ae7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/174_access_policies.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25f5a86a2a977555359c8088fab65902f1ee2b0cc3bc417a7383d5d5176d4802",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/152_s3acc_lowercase.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a02995d6eeb1ac3968e952c61a552e5fc2feeef62ef7642d5f8714157da7d41",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/76_default_fs_size.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d8689e8f46ab7d3286b7d3ee46dfa13a8bf0585cc9b197a5ca8271c9dd9590e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/111_dsrole_update_idempotency.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "632e24ce20734ad2ee8d7938aaff910a2073fcd7f4fc633d27009ee7a16eff33",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/90_imported_keys.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad1078e90875745edce8071846183eed39c3878156d14f96b5db78ab1c5be973",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/150_fix_joint_nfs_version_change.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e1a7b9242317cf785fa07608c5a661bad07fc79e8fd187264d9263dc0609479",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/164_add_admin.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53b89a2de09c79fcb3fdbdf82917985124d53f793046f1164c04a8578adb7df9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/129-virtualhost.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0af56f02e1b7ad1ea585b3bbce897022faf28b448b69ea755951be3b5da40f7e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/105_max_access_key.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb9f5707e7466fe7c94479891f218bacd04ae45a37c2f207dcf51ac756fb7259",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/169_pypureclient_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb6e7bfc1c816ec77dadf6bd4ab040a8089b98a1c9c75ec15603d407c27ce3f2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/183_v2_connections.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "700e1509315604807c70d5b186542e74e058e4f912b1fe796df41c3d8a125d57",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/96_fix_update_connection.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "828cc0c94acf44d1d373402a0cc657527d9fce8ac744319fbe0d8035684932b4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/200_proxy.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26631d7434c86b739bcd75c8905f8f668555217610cafb47f11a6e24937c7eb8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/159_add_lag.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b1d95e41e550ed7b8bdda62f09e9ae883915afd1b547d5f5bb863b21b803df3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/115_multiprotocol.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51375d2aac996039ee4d338cbb7cc8f9d77f423f8f519ab6f84012ff021812ae",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/85_add_banner.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee600c3bcae632d7450ff3447192f8ca2d1622eecd67bc87c59fdd3dd8326bc6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/185_nfs_export_rule.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f53ac3485ed3849ca99fee6015e2767f636c1186a368b3d4e91ba6076afd7d4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/128_add_32_to_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b18c7cf868d5699e4ad67e2d924c7a6323353147f8850757f7f2c4c7dda877c8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/161_add_lifecycle_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8c87e250274f2b5007ce0898c9bb6d79129faedaa8427a52377f86c24c6e90f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/138_add_ad_module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "972d7c56c40a909882eeb3c199f4b7dfd05b080d8b159d2f4915c3d86beb055d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/202_multiple_snap_rules.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ed9e6c99d409df00b7cd2cb4a60bee536b9e0608c107a0944fb3a738ec0bd9f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/213_sec_update.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b71174c00e982cada0d051fae5e28c853207ec6d0f42a783db35a9519733769",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/218_object_account_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef0f569461747bfcb2f294a8317d113b829323f9e6994e652d4344b2590099fa",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/80_support_reverse_replica_link.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3504f5e1acadaf52bd9d420373b7edce2015435232e5fa53282455361bcd440",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/84_add_cert.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d286bf0fe3301a898bcdcad0bf70955732608eb51468097ca6d70ae269654d8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/123_lifecycle_rule_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87a3f72b0ac11e72103dfb4766faecdd2b0c1fe5fad379e322c910c5134f7025",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/83_add_certgrp.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7513178564ee1707090e4b3df65af56f28a71119e0ebf73b074dc9d2c0e1d65",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e140fbfc3ac4eaab3dd9c482e3beb37efd98ad4c3892b36f93ffb00d89c9283f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/136_add_s3user_policy.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b97c8a102be108e8d74c9ec6d9aa73ec151fe7a77c676452d7b96cf75a4ecf6b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/114_certificate_update.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce77387c64b0714a4abe011d4eabc7b1a803058c1e7b407646ceb8249545e8aa",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/162_new_lifecycle.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd6214f7380736e34ed7a21396f1842c6796afba6c3b7413536522d4b6d0b531",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/132_add_timeout.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8aea8125471f4717c0efa211756fb2086542362d9bee50295686dbce9ba86db7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/154_add_snap_now.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bde815114a219fd03941a080c2e6acebd5ef748e7f67503e8c3ef5f81decd54",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/158_support_lags.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68b3e104addfa10fb7f2f974bff2e5dad2c950e261c603f37409f42ab7afed02",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/92_fix_ds_update.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8befcbbddf6fc2db62ff48b4f3a1030fe115fb7ababfc9b03c8e693628087337",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/86_add_syslog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e42ee9ea2a2bffa465347a52a3fcf4bfaa51f377e7f33bf4a405eb46ae507442",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/191_add_quota_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58ae5507364e9af847ac1806d27d6497bd36967ef3bdb34e3716cc294c178440",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/188_bucket_type.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c8485b792ba73283807489b10a7b6df8298c5f932aaeec7b6b841b2f504464a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/109_update_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "857bb23faa48e2d894f432cca4219681d7b3dab68473b3502dfe9319d751a3e1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/121_replication_perf.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "372e2b49c1b2fb2f637e01023dd3a5146ee61171adbf619062ceb5e53a5d3e96",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/153_add_quota.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b2517ea362d7128333d6fab7f99f6b70c4253d2807eae3ec417aa4451b3ae6c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/v1.3.0_summary.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64bd3d32085373ce61a414518c2ed87bfd003d163d3002d087f41f4a54b0b1a0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/113_policy_cleanup.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11023f4d159bc146016fe9e9f40d18edb659518cb9dbc733750146e00de2b05c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/216_extra_bucket_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf88a27b9c51eefd78e80b587012be110c967d0185597cac22cf5de86b73b053",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/212_object_account_quota.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d9dd6bbb0f690de495ad9416117baf213d1d60f164fbcaedafa5f941ebeba28",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/215_encrypt_sec_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6915aa0ddabb1f73dbced52d0114b84317958f29a2ef7ea4dcd72a10952f8017",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/131-apiclient.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92dd9507a2a0476d24f0c1e7a5342925be49b4a84142fe8e33f4a76f422283c3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/107_add_remove_s3user_key.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a2bb28b43962c08ea8916db02a401f8bd7b4989bd1aa04f201ed8c602d94124",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/88_add_lifecycle.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fdc6c425f03ffc0b4a008230f290f6ef37874a270909cb2ee311843dc08909f6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/147_no_gateway.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ca2ad2e1c1d60b110b87b2b37013bae6ee9daff64056f1dea691f2376cb8448",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/78_update_filesystem_replica_link.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57a7b5ed892c4ea2f5149023b2bdde9481eb8c0a7593e4e76a4603e706971100",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/176_nfs_export_policies.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36fc1c990afd6fb48068d113d6e4a6846368ad32523554acc9b9d9e5ba861161",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/112_fix_check_mode.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11f8266ad857ed327ddbe8ef65f810a54e6c57df7ef24d1ec1d4c132abaa23a7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/79_hide_connect_api.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4cd3cbdb65de6b71cfbe179d56a42be2afbf6486e1ce0df9fdd3a7042bd57b0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/167_fix_logins.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "426451dd9cb0925943b74eae2fe37702574efc7974f630a049737bfa74991ff3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/194_lists_for_service.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e139b9ea88f7700071e57500cff497a6be300d8425b4a4ddaba77c36a8dc128",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/175_throttle_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "738e0e9c2f7789b1c931b5563416ca436fd0e04401232a502e6ce59fd03da28f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/139_add_keytabs.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4d64b50797e36e3861e530b3e7c080277ebceb17ac5f58d4a08b8ac59c14d10",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/135_add_user_policies.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0b78f5b1a5be3bfb87a00a4e638fad67600b0bab4cfddd72b3bfa4d2e217e3f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/77_filesystem_policies_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c7090d551cb59c49622a89c0ed25f12ad89104a9e2ab6708a01fc01fce9e049",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/127_add_eula.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b092f3766cf4309ac60ab77c2e51142ffbc81eb4bfa4da581d531ee2de633ac",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/187_rename_nfs_policy.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8b9f4112fea72954805eca3c01cf04524d5bd02a5b2559cdfef68c09d616e49",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/140_more_32_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e57a10a71ab3dd1c151a6867c0da118a21e13df2ef8b9d2fbb799108ddebcd4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/205_fix_multi_lifecycle.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4080535eeb4ad5e56715dc1dd7683679072d027a65bce93a49adb4b56b68618",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/166_lag_mac_note.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b639987ccd53708ee210a1812bd8c6af30292a3a1b6b42c7b839dd7120967e13",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/90_delete_conn_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "787138033d123fa59a9d3cdb424dc093183a020eebf1e76b46cbf059006e18e5",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/108_dns_search_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "056e8181176826dc43b62100b6c50c8770680f0fcc37cf73737848233382b2e8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/184_certificate_typos.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "827c27fb0d7c31d13e89e829db35890c97a16cf437149264074c1c6fa52be9be",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/179_fqcn.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4c60f377dd4cd40de9c777a7d54f6d185afa785fdc45a751d67f2baccf9efdf",
+ "format": 1
+ },
+ {
+ "name": "LICENSE",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "format": 1
+ },
+ {
+ "name": "COPYING.GPLv3",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_bladename.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b21f650ae77744ba23b47de5b5bcf220ee68c77b127f569908c48eba08a8f24",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_proxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42514c4241a3e3f254d0cd0fd8a27f394a417990aed0dcc4888efc93fb2a2b7c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_admin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76c2ce2781241b7338e05f4d443090cb5fd5e7cb6fc1845ae5f78e9a0f9f5002",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f00d3920d4dadb950764884c97c5ff3b64f8cc0fb7760d04f328843658a33cc1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ebd127691bb88001865cba5e1813f0895111b8806c3c5fbfef5a21c24954bdb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_tz.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d08c8115e92f613d74e1bbf53a59a379f95513e3a7d231a9f745a9dfe1d23d5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_alert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80d6d4747cf607c7f73ac70a70a7c5f71c527c628f928e49b21de377f5cdbc25",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_remote_cred.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51baa72db5641ac2a00f98b07cc626fc65d11412ae11c24e7c5f2a381d2b63df",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_ad.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40baf6272707344af09ee6c329457532462df5fedf087fc58662e295847444df",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_connect.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78d93cd41217bfcca2d6cc900b560fb0a03d16e502162e52eb89c0e432b08820",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_fs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dfee64d096d76c62d7b9081845b29b4f924bc2d6e6e699c3ff2b0ceb1b3c5714",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21398dfcfc59ad0c094ea608027bd44c121ecffc8fbff9ae96fde4f61ba65774",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_target.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47eea0605e82c442152c801f95a3f55e31f816720bde09b7153caa4d9c58228f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_certgrp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "117c7e4ad499d72b90e13ee5dd72788e78f266832b670b4830154030e0b69e5d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_ds.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "756950f76e59b5099a8a331bb9afa80976cd7e37c605791f517af6442b9040b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_userpolicy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e9fe1856db864f057d4eb3bafb1107dce0d7c429acc4deeb25dfba991e510f0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_certs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b79151ea9333e6bde34361ab8a8e18b8d961ed6ed18c601c0b574d12020fa35f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_snmp_agent.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2da4ecae583c8c94c55046e4a72a9437ac1f01aefa83e77d315e02792edf4a2c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_subnet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ff34ed58891cf1dcca1757f2d2a2d79a21f40e61195cc2d509fc56108560409",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_apiclient.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2cc1381512d001748885bd41104f8215397c74f464b696c216368de7598e47bb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_inventory.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "391dedb1a7265a3f57b2193ee5efa254e981d3f4be1c6425adb036c6ddb7cf6b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_syslog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe39dc9131937befc223fd3efd96a369238fa320618e77323fedaa8c7f2e7621",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_lag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "911181fd37fedbb616cb2d2cc6b94c070a04ca56f4a69b97299ccff40be2c803",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_messages.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a0bcd83ebb86063ed9fb3db1bacbda9a89d4d82f11590b1d2cbfd978cd1c198",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_banner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5daf1a121e8086c3ce3b510c9a52119ba256e49591932f4a575484fc7230b1f9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_snmp_mgr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ff095c16f369a129dff76ab9c2660ba2f45d0bc62b2c07bcbf58d62067addfd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_ra.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a9172183c8afdd07d3eb854f466a6c687ea881f6978053909ad9908f76db71b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_groupquota.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb933221f221bc66e49534594bd0ed6c06f3d83fe57b1ec45bfda80ec593becd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_s3acc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff4391301e7e1a21329460afd11d73b60fec6dbab050bea8ab0d8c740f571218",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45900eaeaafc923ea85e88c1dc87d2948e5a07f3ccb3aa2a4767c69fb2da3ac9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_s3user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e3221ed572489da65f749e185123f662047918a8f9b8b9391f665d343e6acf4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_pingtrace.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "facfd9bbb4ec84cca4c6dc3608da73a2ab8af7a9b5b1f139fbcf6f91b4f83612",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_snap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0ccbd3a590ee10c35445717c2f0378abb36078d3fbb5908e195e40022eaa802",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_smtp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76d37be7050f2e57b7fa09cae4b7555fe8b644c031ae7b93a3de5af2cbe19781",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_keytabs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e68ef5023904b2b70f95567ef69356b43ed4324ab18fd080cc054c217326445",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_dsrole.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d625a7248695e857cc0eaf32beb340de4772c406278de8b3c81b1ce2740854c3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_bucket.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b7e76b4c8be29be79485ec99cf01ce365e725801f7467931d6eb656c5f64120",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_ntp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3df2990a95399fb343b3d9733534ffe3cef10b5546b939924aa17d04fb10fdd2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6cbb2b5f7a2bbbebefc28ab19d06344fdf43f316a31839a440f2f29b652d130b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_bucket_replica.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8ad0c4a4506527009dbb28920c81b8cef6dddde65382af33e47c22522d27332",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_virtualhost.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "37d614801411069d3c3aab20c018daf17496832bc73e59976b5bc25f8f5cddc2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_userquota.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf1a39e2b307e395b54c2a6ced7335971cf127f03ca6f1bd8af17a2aff28b9c2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_fs_replica.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ef60aaaa8d397ecbef11da23f16d707829db7613811a3142f426076b2e8d577",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_timeout.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c25d12eff522c44580b77e457c0496368e877bfe72cb41f1a9402a96ad18418",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_phonehome.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53bcb5901f85f1938f06ef36f36ed37537b5ec2997b596c3906971ee016a3b9f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_eula.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d06a41aeae5febbc2d1fecd64b888e5947f14b0944f473c3c5d1d46e50acfc4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_lifecycle.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "beff3e20624460b82775e554a8c27cfd6b345d3a5a787f96df582a7026e23449",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/purestorage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb96797756b79883247778bbf7c9ed0c9a34e3e6f14d97b753e3d6401ec25f0f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/purefb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a7a9657951dec2667ad720e965452a0003924cd36fe260527c01f83948d0473",
+ "format": 1
+ },
+ {
+ "name": "README.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9eca16f5db9ebc48387f94f50a9762c57fcb6a6eb4cd6c258f13b0a9a371be8e",
+ "format": 1
+ },
+ {
+ "name": ".pylintrc",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75d8dc97586bc956a906be2aa0b86ec465eb78ce48d3d651ea1ddad3935d27cf",
+ "format": 1
+ },
+ {
+ "name": ".git-blame-ignore-revs",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "272d9a8e8654881cd42bb4108716e720bc634065d74064fb09f29d0e6e817e21",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/purestorage/flashblade/LICENSE b/ansible_collections/purestorage/flashblade/LICENSE
new file mode 100644
index 000000000..f288702d2
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/purestorage/flashblade/MANIFEST.json b/ansible_collections/purestorage/flashblade/MANIFEST.json
new file mode 100644
index 000000000..c111f1bf6
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/MANIFEST.json
@@ -0,0 +1,37 @@
+{
+ "collection_info": {
+ "namespace": "purestorage",
+ "name": "flashblade",
+ "version": "1.11.0",
+ "authors": [
+ "Pure Storage Ansible Team <pure-ansible-team@purestorage.com>"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "purestorage",
+ "flashblade",
+ "storage",
+ "object",
+ "nfs"
+ ],
+ "description": "Collection of modules to manage Pure Storage FlashBlades",
+ "license": [
+ "GPL-3.0-or-later",
+ "BSD-2-Clause"
+ ],
+ "license_file": null,
+ "dependencies": {},
+ "repository": "https://github.com/Pure-Storage-Ansible/FlashBlade-Collection",
+ "documentation": "https://docs.ansible.com/ansible/latest/collections/purestorage/flashblade/index.html#plugins-in-purestorage-flashblade",
+ "homepage": null,
+ "issues": "https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bb9f78982cdd6334e7f063927e0a32f11b5d6c6940b0cd253d3311be4717cda",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/purestorage/flashblade/README.md b/ansible_collections/purestorage/flashblade/README.md
new file mode 100644
index 000000000..7972158bc
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/README.md
@@ -0,0 +1,98 @@
+<a href="https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/releases/latest"><img src="https://img.shields.io/github/v/tag/Pure-Storage-Ansible/FlashBlade-Collection?label=release">
+<a href="COPYING.GPLv3"><img src="https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg"></a>
+<img src="https://cla-assistant.io/readme/badge/Pure-Storage-Ansible/FlashBlade-Collection">
+<img src="https://github.com/Pure-Storage-Ansible/FLashBlade-Collection/workflows/Pure%20Storage%20Ansible%20CI/badge.svg">
+<a href="https://github.com/psf/black"><img src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
+
+# Pure Storage FlashBlade Collection
+
+The Pure Storage FlashBlade collection consists of the latest versions of the FlashBlade modules.
+
+## Supported Platforms
+
+- Pure Storage FlashBlade with Purity 2.1.2 or later
+- Certain modules and functionality require higher versions of Purity. A module will report an error if your Purity version is too low to support it.
+
+## Prerequisites
+
+- Ansible 2.9 or later
+- Pure Storage FlashBlade system running Purity//FB 2.1.2 or later
+ - some modules require higher versions of Purity//FB
+- purity_fb >=v1.12.2
+- py-pure-client >=v1.27.0
+- python >=3.6
+- netaddr
+- datetime
+- pytz
+
+## Idempotency
+
+All modules are idempotent with the exception of modules that change or set passwords. Due to security requirements, existing passwords cannot be validated against, so the password is always reset and the task always reports a change, even when the value is unchanged.
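+
+For playbooks that need to keep change reporting clean, the perpetual change can be handled with standard task keywords. The sketch below is illustrative only: the `purefb_user` parameter names are assumptions, so check the module documentation for the exact options, and the address and token are placeholders.
+
+```yaml
+- name: Set the pureuser password (reports changed on every run by design)
+  purestorage.flashblade.purefb_user:
+    name: pureuser                      # assumed parameter name
+    password: "{{ pureuser_password }}"
+    fb_url: 10.10.10.2                  # placeholder management address
+    api_token: T-00000000-0000-0000-0000-000000000000   # placeholder API token
+  no_log: true                          # keep the password out of logs
+  changed_when: false                   # optional: suppress the always-changed report
+```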
+
+## Available Modules
+
+- purefb_ad - manage Active Directory account on FlashBlade
+- purefb_alert - manage alert email settings on a FlashBlade
+- purefb_apiclient - manage API clients for FlashBlade
+- purefb_banner - manage FlashBlade login banner
+- purefb_bladename - manage FlashBlade name
+- purefb_bucket - manage S3 buckets on a FlashBlade
+- purefb_bucket_replica - manage bucket replica links on a FlashBlade
+- purefb_certgrp - manage FlashBlade certificate groups
+- purefb_certs - manage FlashBlade SSL certificates
+- purefb_connect - manage connections between FlashBlades
+- purefb_dns - manage DNS settings on a FlashBlade
+- purefb_ds - manage Directory Services settings on a FlashBlade
+- purefb_dsrole - manage Directory Service Roles on a FlashBlade
+- purefb_eula - manage EULA on FlashBlade
+- purefb_fs - manage filesystems on a FlashBlade
+- purefb_fs_replica - manage filesystem replica links on a FlashBlade
+- purefb_groupquota - manage individual group quotas on FlashBlade filesystems
+- purefb_info - get information about the configuration of a FlashBlade
+- purefb_inventory - get information about the hardware inventory of a FlashBlade
+- purefb_keytabs - manage FlashBlade Kerberos keytabs
+- purefb_lag - manage FlashBlade Link Aggregation Groups
+- purefb_lifecycle - manage FlashBlade Bucket Lifecycle Rules
+- purefb_messages - list FlashBlade alert messages
+- purefb_network - manage the network settings for a FlashBlade
+- purefb_ntp - manage the NTP settings for a FlashBlade
+- purefb_phonehome - manage the phone home settings for a FlashBlade
+- purefb_pingtrace - perform FlashBlade network diagnostics
+- purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+- purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+- purefb_ra - manage the Remote Assist connections on a FlashBlade
+- purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+- purefb_s3acc - manage the object store accounts on a FlashBlade
+- purefb_s3user - manage the object store users on a FlashBlade
+- purefb_smtp - manage SMTP settings on a FlashBlade
+- purefb_snap - manage filesystem snapshots on a FlashBlade
+- purefb_snmp_agent - modify the FlashBlade SNMP Agent
+- purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+- purefb_subnet - manage network subnets on a FlashBlade
+- purefb_syslog - manage FlashBlade syslog server configuration
+- purefb_target - manage remote S3-capable targets for a FlashBlade
+- purefb_timeout - manage FlashBlade GUI timeout
+- purefb_user - manage local *pureuser* account password on a FlashBlade
+- purefb_userpolicy - manage FlashBlade Object Store User Access Policies
+- purefb_userquota - manage individual user quotas on FlashBlade filesystems
+- purefb_virtualhost - manage FlashBlade Object Store Virtual Hosts
+
+## Instructions
+
+Install the Pure Storage FlashBlade collection on your Ansible management host.
+
+- Using ansible-galaxy (Ansible 2.9 or later):
+```
+ansible-galaxy collection install purestorage.flashblade -p ~/.ansible/collections
+```
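+
+With the collection installed, modules are referenced by their fully qualified collection name. The short playbook below is purely illustrative; the management address and API token are placeholders for your own environment.
+
+```yaml
+- name: Collect basic FlashBlade configuration
+  hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: Gather minimum information from the FlashBlade
+      purestorage.flashblade.purefb_info:
+        gather_subset: minimum
+        fb_url: 10.10.10.2                                 # placeholder management address
+        api_token: T-00000000-0000-0000-0000-000000000000  # placeholder API token
+      register: blade_info
+
+    - name: Show what was collected
+      ansible.builtin.debug:
+        var: blade_info
+```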
+
+All hosts that execute the modules must have the appropriate Pure Storage Python SDKs installed.
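+
+For example, a minimal sketch using the `ansible.builtin.pip` module, assuming the SDKs are published on PyPI as `purity-fb` and `py-pure-client`:
+
+```yaml
+- name: Install the Pure Storage Python SDKs and helper libraries
+  hosts: all
+  tasks:
+    - name: Install required Python packages
+      ansible.builtin.pip:
+        name:
+          - purity-fb>=1.12.2        # purity_fb SDK
+          - py-pure-client>=1.27.0
+          - netaddr
+          - pytz
+```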
+
+## License
+
+[BSD-2-Clause](https://directory.fsf.org/wiki?title=License:FreeBSD)
+[GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0.en.html)
+
+## Author
+
+This collection was created in 2019 by [Simon Dodsley](@sdodsley) for, and on behalf of, the [Pure Storage Ansible Team](pure-ansible-team@purestorage.com)
diff --git a/ansible_collections/purestorage/flashblade/README.rst b/ansible_collections/purestorage/flashblade/README.rst
new file mode 100644
index 000000000..a7d062a8a
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/README.rst
@@ -0,0 +1,19 @@
+|License| |CLA-Assistant| |Pure-Storage-Ansible-CI| |Code-style-black|
+
+|Build history for master branch|
+
+=====================
+FlashBlade-Collection
+=====================
+
+Ansible Collection for Pure Storage FlashBlade.
+
+.. |License| image:: https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg
+ :target: COPYING.GPLv3
+ :alt: Repository License
+.. |CLA-Assistant| image:: https://cla-assistant.io/readme/badge/Pure-Storage-Ansible/FlashBlade-Collection
+.. |Pure-Storage-Ansible-CI| image:: https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/workflows/Pure%20Storage%20Ansible%20CI/badge.svg
+.. |Code-style-black| image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+.. |Build history for master branch| image:: https://buildstats.info/github/chart/Pure-Storage-Ansible/FlashBlade-Collection?branch=master&buildCount=50&includeBuildsFromPullRequest=false&showstats=false
+ :target: https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/actions?query=branch%3Amaster
diff --git a/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml b/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml
new file mode 100644
index 000000000..9834bdfed
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml
@@ -0,0 +1,254 @@
+objects:
+ role: {}
+plugins:
+ become: {}
+ cache: {}
+ callback: {}
+ cliconf: {}
+ connection: {}
+ filter: {}
+ httpapi: {}
+ inventory: {}
+ lookup: {}
+ module:
+ purefb_ad:
+ description: Manage FlashBlade Active Directory Account
+ name: purefb_ad
+ namespace: ''
+ version_added: 1.6.0
+ purefb_admin:
+ description: Configure Pure Storage FlashBlade Global Admin settings
+ name: purefb_admin
+ namespace: ''
+ version_added: 1.8.0
+ purefb_alert:
+ description: Configure Pure Storage FlashBlade alert email settings
+ name: purefb_alert
+ namespace: ''
+ version_added: 1.0.0
+ purefb_apiclient:
+ description: Manage FlashBlade API Clients
+ name: purefb_apiclient
+ namespace: ''
+ version_added: 1.6.0
+ purefb_banner:
+ description: Configure Pure Storage FlashBlade GUI and SSH MOTD message
+ name: purefb_banner
+ namespace: ''
+ version_added: 1.4.0
+ purefb_bladename:
+ description: Configure Pure Storage FlashBlade name
+ name: purefb_bladename
+ namespace: ''
+ version_added: 1.0.0
+ purefb_bucket:
+ description: Manage Object Store Buckets on a Pure Storage FlashBlade.
+ name: purefb_bucket
+ namespace: ''
+ version_added: 1.0.0
+ purefb_bucket_replica:
+ description: Manage bucket replica links between Pure Storage FlashBlades
+ name: purefb_bucket_replica
+ namespace: ''
+ version_added: 1.0.0
+ purefb_certgrp:
+ description: Manage FlashBlade Certificate Groups
+ name: purefb_certgrp
+ namespace: ''
+ version_added: 1.4.0
+ purefb_certs:
+ description: Manage FlashBlade SSL Certificates
+ name: purefb_certs
+ namespace: ''
+ version_added: 1.4.0
+ purefb_connect:
+ description: Manage replication connections between two FlashBlades
+ name: purefb_connect
+ namespace: ''
+ version_added: 1.0.0
+ purefb_dns:
+ description: Configure Pure Storage FlashBlade DNS settings
+ name: purefb_dns
+ namespace: ''
+ version_added: 1.0.0
+ purefb_ds:
+ description: Configure FlashBlade Directory Service
+ name: purefb_ds
+ namespace: ''
+ version_added: 1.0.0
+ purefb_dsrole:
+ description: Configure FlashBlade Management Directory Service Roles
+ name: purefb_dsrole
+ namespace: ''
+ version_added: 1.0.0
+ purefb_eula:
+ description: Sign Pure Storage FlashBlade EULA
+ name: purefb_eula
+ namespace: ''
+ version_added: 1.6.0
+ purefb_fs:
+      description: Manage filesystems on Pure Storage FlashBlade
+ name: purefb_fs
+ namespace: ''
+ version_added: 1.0.0
+ purefb_fs_replica:
+ description: Manage filesystem replica links between Pure Storage FlashBlades
+ name: purefb_fs_replica
+ namespace: ''
+ version_added: 1.0.0
+ purefb_groupquota:
+ description: Manage filesystem group quotas
+ name: purefb_groupquota
+ namespace: ''
+ version_added: 1.7.0
+ purefb_info:
+ description: Collect information from Pure Storage FlashBlade
+ name: purefb_info
+ namespace: ''
+ version_added: 1.0.0
+ purefb_inventory:
+ description: Collect information from Pure Storage FlashBlade
+ name: purefb_inventory
+ namespace: ''
+ version_added: 1.0.0
+ purefb_keytabs:
+ description: Manage FlashBlade Kerberos Keytabs
+ name: purefb_keytabs
+ namespace: ''
+ version_added: 1.6.0
+ purefb_lag:
+ description: Manage FlashBlade Link Aggregation Groups
+ name: purefb_lag
+ namespace: ''
+ version_added: 1.7.0
+ purefb_lifecycle:
+ description: Manage FlashBlade object lifecycles
+ name: purefb_lifecycle
+ namespace: ''
+ version_added: 1.4.0
+ purefb_messages:
+ description: List FlashBlade Alert Messages
+ name: purefb_messages
+ namespace: ''
+ version_added: 1.10.0
+ purefb_network:
+ description: Manage network interfaces in a Pure Storage FlashBlade
+ name: purefb_network
+ namespace: ''
+ version_added: 1.0.0
+ purefb_ntp:
+ description: Configure Pure Storage FlashBlade NTP settings
+ name: purefb_ntp
+ namespace: ''
+ version_added: 1.0.0
+ purefb_phonehome:
+ description: Enable or Disable Pure Storage FlashBlade Phone Home
+ name: purefb_phonehome
+ namespace: ''
+ version_added: 1.0.0
+ purefb_pingtrace:
+ description: Employ the internal FlashBlade ping and trace mechanisms
+ name: purefb_pingtrace
+ namespace: ''
+ version_added: 1.11.0
+ purefb_policy:
+ description: Manage FlashBlade policies
+ name: purefb_policy
+ namespace: ''
+ version_added: 1.0.0
+ purefb_proxy:
+ description: Configure FlashBlade phonehome HTTPs proxy settings
+ name: purefb_proxy
+ namespace: ''
+ version_added: 1.0.0
+ purefb_ra:
+ description: Enable or Disable Pure Storage FlashBlade Remote Assist
+ name: purefb_ra
+ namespace: ''
+ version_added: 1.0.0
+ purefb_remote_cred:
+ description: Create, modify and delete FlashBlade object store remote credentials
+ name: purefb_remote_cred
+ namespace: ''
+ version_added: 1.0.0
+ purefb_s3acc:
+ description: Create or delete FlashBlade Object Store accounts
+ name: purefb_s3acc
+ namespace: ''
+ version_added: 1.0.0
+ purefb_s3user:
+ description: Create or delete FlashBlade Object Store account users
+ name: purefb_s3user
+ namespace: ''
+ version_added: 1.0.0
+ purefb_smtp:
+ description: Configure SMTP for Pure Storage FlashBlade
+ name: purefb_smtp
+ namespace: ''
+ version_added: 1.0.0
+ purefb_snap:
+ description: Manage filesystem snapshots on Pure Storage FlashBlades
+ name: purefb_snap
+ namespace: ''
+ version_added: 1.0.0
+ purefb_snmp_agent:
+ description: Configure the FlashBlade SNMP Agent
+ name: purefb_snmp_agent
+ namespace: ''
+ version_added: 1.0.0
+ purefb_snmp_mgr:
+ description: Configure FlashBlade SNMP Managers
+ name: purefb_snmp_mgr
+ namespace: ''
+ version_added: 1.0.0
+ purefb_subnet:
+ description: Manage network subnets in a Pure Storage FlashBlade
+ name: purefb_subnet
+ namespace: ''
+ version_added: 1.0.0
+ purefb_syslog:
+ description: Configure Pure Storage FlashBlade syslog settings
+ name: purefb_syslog
+ namespace: ''
+ version_added: 1.4.0
+ purefb_target:
+ description: Manage remote S3-capable targets for a FlashBlade
+ name: purefb_target
+ namespace: ''
+ version_added: 1.0.0
+ purefb_timeout:
+ description: Configure Pure Storage FlashBlade GUI idle timeout
+ name: purefb_timeout
+ namespace: ''
+ version_added: 1.6.0
+ purefb_tz:
+ description: Configure Pure Storage FlashBlade timezone
+ name: purefb_tz
+ namespace: ''
+ version_added: 1.10.0
+ purefb_user:
+ description: Modify FlashBlade user accounts
+ name: purefb_user
+ namespace: ''
+ version_added: 1.0.0
+ purefb_userpolicy:
+ description: Manage FlashBlade Object Store User Access Policies
+ name: purefb_userpolicy
+ namespace: ''
+ version_added: 1.6.0
+ purefb_userquota:
+ description: Manage filesystem user quotas
+ name: purefb_userquota
+ namespace: ''
+ version_added: 1.7.0
+ purefb_virtualhost:
+ description: Manage FlashBlade Object Store Virtual Hosts
+ name: purefb_virtualhost
+ namespace: ''
+ version_added: 1.6.0
+ netconf: {}
+ shell: {}
+ strategy: {}
+ test: {}
+ vars: {}
+version: 1.11.0
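The plugin cache above is generated metadata listing every module shipped in the collection; in playbooks these modules are addressed by their fully qualified collection names. A minimal, illustrative task for purefb_info is sketched below, using the fb_url and api_token connection arguments documented in the collection's doc fragment later in this diff; the address and token values are placeholders:

    # Illustrative sketch only; fb_url and api_token values are placeholders
    - name: Collect basic FlashBlade information
      purestorage.flashblade.purefb_info:
        gather_subset:
          - minimum
        fb_url: 10.10.10.2
        api_token: T-00000000-0000-0000-0000-000000000000
      register: blade_info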
diff --git a/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml b/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml
new file mode 100644
index 000000000..9995182fa
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml
@@ -0,0 +1,329 @@
+ancestor: null
+releases:
+ 1.10.0:
+ changes:
+ bugfixes:
+ - purefb_connect - Resolve connection issues between two FBs that are throttling
+ capable
+ - purefb_policy - Fix incorrect API call for NFS export policy rule creation
+ minor_changes:
+ - All - Update documentation examples with FQCNs
+ - purefb_ad - Allow service to be a list
+ - purefb_bucket - Allow setting of bucket type to support VSO - requires Purity//FB
+ 3.3.3 or higher
+ - purefb_certs - Fix several misspellings of certificate
+ - purefb_info - Added filesystem default, user and group quotas where available
+ - purefb_info - Expose object store bucket type from Purity//FB 3.3.3
+ - purefb_info - Show information for current timezone
+ - purefb_policy - Allow rename of NFS Export Policies from Purity//FB 3.3.3
+ - purefb_tz - Add support for FlashBlade timezone management
+ fragments:
+ - 179_fqcn.yaml
+ - 183_v2_connections.yaml
+ - 184_certificate_typos.yaml
+ - 185_nfs_export_rule.yaml
+ - 186_add_tz.yaml
+ - 187_rename_nfs_policy.yaml
+ - 188_bucket_type.yaml
+ - 191_add_quota_info.yaml
+ - 194_lists_for_service.yaml
+ modules:
+ - description: List FlashBlade Alert Messages
+ name: purefb_messages
+ namespace: ''
+ - description: Configure Pure Storage FlashBlade timezone
+ name: purefb_tz
+ namespace: ''
+ release_date: '2022-09-12'
+ 1.11.0:
+ changes:
+ bugfixes:
+      - purefb_info - Fixed issue when more than 10 buckets have lifecycle rules.
+ - purefb_s3user - Fix incorrect response when bad key/secret pair provided for
+ new user
+ minor_changes:
+ - purefb_info - Added `encryption` and `support_keys` information.
+ - purefb_info - Added bucket quota and safemode information per bucket
+ - purefb_info - Added security update version for Purity//FB 4.0.2, or higher
+ - purefb_info - Updated object store account information
+ - purefb_inventory - Added `part_number` to hardware item information.
+ - purefb_policy - Added support for multiple rules in snapshot policies
+ - purefb_proxy - Added new boolean parameter `secure`. Default of true (for
+        backwards compatibility) sets the protocol to be `https://`. False sets `http://`
+ - purefb_s3acc - Added support for default bucket quotas and hard limits
+ - purefb_s3acc - Added support for object account quota and hard limit
+ fragments:
+ - 200_proxy.yaml
+ - 202_multiple_snap_rules.yaml
+ - 205_fix_multi_lifecycle.yaml
+ - 211_change_booleans.yaml
+ - 212_object_account_quota.yaml
+ - 213_sec_update.yaml
+ - 215_encrypt_sec_info.yaml
+ - 216_extra_bucket_info.yaml
+ - 217_inventory.yaml
+ - 218_object_account_info.yaml
+ - 220_s3user_key_fix.yaml
+ modules:
+ - description: Employ the internal FlashBlade ping and trace mechanisms
+ name: purefb_pingtrace
+ namespace: ''
+ release_date: '2023-04-13'
+ 1.3.0:
+ changes:
+ bugfixes:
+ - purefb_bucket - Add warning message if ``state`` is ``absent`` without ``eradicate:``
+      - purefb_fs - Add graceful exit when ``state`` is ``absent`` and filesystem
+ not eradicated
+ - purefb_fs - Add warning message if ``state`` is ``absent`` without ``eradicate``
+ major_changes:
+ - purefb_alert - manage alert email settings on a FlashBlade
+ - purefb_bladename - manage FlashBlade name
+ - purefb_bucket_replica - manage bucket replica links on a FlashBlade
+ - purefb_connect - manage connections between FlashBlades
+ - purefb_dns - manage DNS settings on a FlashBlade
+ - purefb_fs_replica - manage filesystem replica links on a FlashBlade
+ - purefb_inventory - get information about the hardware inventory of a FlashBlade
+ - purefb_ntp - manage the NTP settings for a FlashBlade
+ - purefb_phonehome - manage the phone home settings for a FlashBlade
+ - purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+ - purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+ - purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+ - purefb_snmp_agent - modify the FlashBlade SNMP Agent
+ - purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+ - purefb_target - manage remote S3-capable targets for a FlashBlade
+ - purefb_user - manage local ``pureuser`` account password on a FlashBlade
+ minor_changes:
+ - purefb_bucket - Versioning support added
+ - purefb_info - new options added for information collection
+ - purefb_network - Add replication service type
+ - purefb_s3user - Limit ``access_key`` recreation to 3 times
+ - purefb_s3user - return dict changed from ``ansible_facts`` to ``s3user_info``
+ release_summary: '| Release Date: 2020-08-08
+
+      | This changelog describes all changes made to the modules and plugins included
+ in this collection since Ansible 2.9.0
+
+ '
+ fragments:
+ - v1.3.0_summary.yaml
+ release_date: '2020-08-06'
+ 1.4.0:
+ changes:
+ bugfixes:
+      - purefb_policy - Resolve multiple issues related to incorrect use of timezones
+ - purefb_connect - Ensure changing encryption status on array connection is
+ performed correctly
+ - purefb_connect - Fix breaking change created in purity_fb SDK 1.9.2 for deletion
+ of array connections
+ - purefb_connect - Hide target array API token
+ - purefb_ds - Ensure updating directory service configurations completes correctly
+ - purefb_info - Fix issue getting array info when encrypted connection exists
+ minor_changes:
+ - purefb_banner - Module to manage the GUI and SSH login message
+ - purefb_certgrp - Module to manage FlashBlade Certificate Groups
+ - purefb_certs - Module to create and delete SSL certificates
+      - purefb_connect - Support idempotency when existing connection is incoming
+ - purefb_fs - Add new options for filesystem control (https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/pull/81)
+ - purefb_fs - Default filesystem size on creation changes from 32G to ``unlimited``
+ - purefb_fs - Fix error in deletion and eradication of filesystem
+ - purefb_fs_replica - Remove condition to attach/detach policies on unhealthy
+ replica-link
+ - purefb_info - Add support to list filesystem policies
+ - purefb_lifecycle - Module to manage FlashBlade Bucket Lifecycle Rules
+ - purefb_s3user - Add support for imported user access keys
+ - purefb_syslog - Module to manage syslog server configuration
+ fragments:
+ - 101_fix_policy_and_timezone_error.yaml
+ - 76_default_fs_size.yaml
+ - 77_filesystem_policies_info.yaml
+ - 78_update_filesystem_replica_link.yaml
+ - 79_hide_connect_api.yaml
+ - 80_support_reverse_replica_link.yaml
+ - 81_purefb_fs_new_options.yaml
+ - 83_add_certgrp.yml
+ - 84_add_cert.yaml
+ - 85_add_banner.yaml
+ - 86_add_syslog.yaml
+ - 88_add_lifecycle.yml
+ - 90_delete_conn_fix.yaml
+ - 90_imported_keys.yaml
+ - 92_fix_ds_update.yaml
+ - 96_fix_update_connection.yaml
+ - 97_fix_encrpyted_array_connection_info.yaml
+ modules:
+ - description: Configure Pure Storage FlashBlade GUI and SSH MOTD message
+ name: purefb_banner
+ namespace: ''
+    - description: Manage FlashBlade Certificate Groups
+ name: purefb_certgrp
+ namespace: ''
+    - description: Manage FlashBlade SSL Certificates
+ name: purefb_certs
+ namespace: ''
+ - description: Manage FlashBlade object lifecycles
+ name: purefb_lifecycle
+ namespace: ''
+ - description: Configure Pure Storage FlashBlade syslog settings
+ name: purefb_syslog
+ namespace: ''
+ release_date: '2020-10-14'
+ 1.5.0:
+ changes:
+ bugfixes:
+ - purefb_* - Return a correct value for `changed` in all modules when in check
+ mode
+      - purefb_dns - Deprecate search parameter
+ - purefb_dsrole - Resolve idempotency issue
+ - purefb_lifecycle - Fix error when creating new bucket lifecycle rule.
+ - purefb_policy - Ensure undeclared variables are set correctly
+ - purefb_s3user - Fix maximum access_key count logic
+ minor_changes:
+ - purefb_certs - Add update functionality for array cert
+ - purefb_fs - Add multiprotocol ACL support
+ - purefb_info - Add information regarding filesystem multiprotocol (where available)
+ - purefb_info - Add new parameter to provide details on admin users
+      - purefb_info - Add replication performance statistics
+      - purefb_s3user - Add ability to remove an S3 user's existing access key
+ fragments:
+ - 105_max_access_key.yaml
+ - 107_add_remove_s3user_key.yaml
+ - 108_dns_search_fix.yaml
+ - 109_update_info.yaml
+ - 111_dsrole_update_idempotency.yaml
+ - 112_fix_check_mode.yaml
+ - 113_policy_cleanup.yaml
+ - 114_certificate_update.yaml
+ - 115_multiprotocol.yaml
+ - 121_replication_perf.yaml
+ - 123_lifecycle_rule_fix.yaml
+ release_date: '2021-03-30'
+ 1.6.0:
+ changes:
+ minor_changes:
+      - purefb_apiclient - New module to manage API Clients
+ - purefb_ad - New module to manage Active Directory Account
+ - purefb_eula - New module to sign EULA
+ - purefb_info - Add Active Directory, Kerberos and Object Store Account information
+ - purefb_info - Add extra info for Purity//FB 3.2+ systems
+ - purefb_keytabs - New module to manage Kerberos Keytabs
+ - purefb_s3user - Add access policy option to user creation
+ - purefb_timeout - Add module to set GUI idle timeout
+ - purefb_userpolicy - New module to manage object store user access policies
+ - purefb_virtualhost - New module to manage Object Store Virtual Hosts
+ fragments:
+ - 127_add_eula.yaml
+ - 128_add_32_to_info.yaml
+ - 129-virtualhost.yaml
+ - 131-apiclient.yaml
+ - 132_add_timeout.yaml
+ - 135_add_user_policies.yaml
+ - 136_add_s3user_policy.yaml
+ - 138_add_ad_module.yaml
+ - 139_add_keytabs.yaml
+ - 140_more_32_info.yaml
+ modules:
+ - description: Manage FlashBlade Active Directory Account
+ name: purefb_ad
+ namespace: ''
+ - description: Manage FlashBlade API Clients
+ name: purefb_apiclient
+ namespace: ''
+ - description: Sign Pure Storage FlashBlade EULA
+ name: purefb_eula
+ namespace: ''
+ - description: Manage FlashBlade Kerberos Keytabs
+ name: purefb_keytabs
+ namespace: ''
+ - description: Configure Pure Storage FlashBlade GUI idle timeout
+ name: purefb_timeout
+ namespace: ''
+ - description: Manage FlashBlade Object Store User Access Policies
+ name: purefb_userpolicy
+ namespace: ''
+ - description: Manage FlashBlade Object Store Virtual Hosts
+ name: purefb_virtualhost
+ namespace: ''
+ release_date: '2021-04-21'
+ 1.7.0:
+ changes:
+ bugfixes:
+ - purefb_fs - Fix bug where changing the state of both NFS v3 and v4.1 at the
+ same time ignored one of these.
+ - purefb_s3acc - Ensure S3 Account Name is always lowercase
+ - purefb_s3user - Ensure S3 Account Name is always lowercase
+ - purefb_subnet - Allow subnet creation with no gateway
+ minor_changes:
+      - purefb_groupquota - New module to manage individual filesystem group quotas
+ - purefb_lag - Add support for LAG management
+      - purefb_snap - Add support for immediate snapshot to remote connected FlashBlade
+ - purefb_subnet - Add support for multiple LAGs.
+      - purefb_userquota - New module to manage individual filesystem user quotas
+ fragments:
+ - 147_no_gateway.yaml
+ - 150_fix_joint_nfs_version_change.yaml
+ - 152_s3acc_lowercase.yaml
+ - 153_add_quota.yaml
+ - 154_add_snap_now.yaml
+ - 158_support_lags.yaml
+ - 159_add_lag.yaml
+ modules:
+ - description: Manage filesystem group quotas
+ name: purefb_groupquota
+ namespace: ''
+ - description: Manage FlashBlade Link Aggregation Groups
+ name: purefb_lag
+ namespace: ''
+ - description: Manage filesystem user quotas
+ name: purefb_userquota
+ namespace: ''
+ release_date: '2021-09-27'
+ 1.8.0:
+ changes:
+ known_issues:
+ - purefb_lag - The mac_address field in the response is not populated. This
+ will be fixed in a future FlashBlade update.
+ minor_changes:
+ - purefb.py - Add check to ensure FlashBlade uses the latest REST version possible
+        for the Purity version installed
+ - purefb_info - Add object lifecycles rules to bucket subset
+ - purefb_lifecycle - Add support for updated object lifecycle rules. See documentation
+ for details of new parameters.
+ - purefb_lifecycle - Change `keep_for` parameter to be `keep_previous_for`.
+ `keep_for` is deprecated and will be removed in a later version.
+ - purefb_user - Add support for managing user public key and user unlock
+ fragments:
+ - 161_add_lifecycle_info.yaml
+ - 162_new_lifecycle.yaml
+ - 163_admin_key.yaml
+ - 166_lag_mac_note.yaml
+ - 167_fix_logins.yaml
+ release_date: '2021-11-08'
+ 1.8.1:
+ changes:
+ minor_changes:
+ - purefb.py - Use latest `pypureclient` SDK with fix for "best fit". No longer
+ requires double login to negotiate best API version.
+ fragments:
+ - 169_pypureclient_fix.yaml
+ release_date: '2021-11-11'
+ 1.9.0:
+ changes:
+ minor_changes:
+ - purefb_admin - New module to manage global admin settings
+ - purefb_connect - Add support for array connections to have bandwidth throttling
+ defined
+ - purefb_fs - Add support for NFS export policies
+ - purefb_info - Add NFS export policies and rules
+ - purefb_info - Show array connections bandwidth throttle information
+ - purefb_policy - Add NFS export policies, with rules, as a new policy type
+ - purefb_policy - Add support for Object Store Access Policies, associated rules
+ and user grants
+      - purefb_policy - New parameter `policy_type` added. For backwards compatibility,
+ default to `snapshot` if not provided.
+ fragments:
+ - 164_add_admin.yaml
+ - 174_access_policies.yaml
+ - 175_throttle_support.yaml
+ - 176_nfs_export_policies.yaml
+ release_date: '2021-12-17'
diff --git a/ansible_collections/purestorage/flashblade/changelogs/config.yaml b/ansible_collections/purestorage/flashblade/changelogs/config.yaml
new file mode 100644
index 000000000..f5466368f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/config.yaml
@@ -0,0 +1,31 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: true
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+ - - major_changes
+ - Major Changes
+ - - minor_changes
+ - Minor Changes
+ - - breaking_changes
+ - Breaking Changes / Porting Guide
+ - - deprecated_features
+ - Deprecated Features
+ - - removed_features
+ - Removed Features (previously deprecated)
+ - - security_fixes
+ - Security Fixes
+ - - bugfixes
+ - Bugfixes
+ - - known_issues
+ - Known Issues
+title: Purestorage.Flashblade
+trivial_section_name: trivial
+use_fqcn: true
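This config.yaml is the configuration read by the antsibull-changelog tool: release notes are collected from the YAML fragment files under fragments/ (the notesdir setting), merged into the changelog.yaml shown above, and rendered to ../CHANGELOG.rst via changelog_filename_template. The usual workflow for this layout is antsibull-changelog lint followed by antsibull-changelog release at release time, although the exact commands this project runs are not shown in the diff.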
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml
new file mode 100644
index 000000000..e6c1ea64d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+  - purefb_policy - Resolve multiple issues related to incorrect use of timezones
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/105_max_access_key.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/105_max_access_key.yaml
new file mode 100644
index 000000000..8e673dad8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/105_max_access_key.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_s3user - Fix maximum access_key count logic
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/107_add_remove_s3user_key.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/107_add_remove_s3user_key.yaml
new file mode 100644
index 000000000..4567aedcd
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/107_add_remove_s3user_key.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_s3user - Add ability to remove an S3 user's existing access key
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/108_dns_search_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/108_dns_search_fix.yaml
new file mode 100644
index 000000000..974bf0e8d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/108_dns_search_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+  - purefb_dns - Deprecate search parameter
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/109_update_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/109_update_info.yaml
new file mode 100644
index 000000000..83f2c1923
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/109_update_info.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_info - Add new parameter to provide details on admin users
+ - purefb_info - Add information regarding filesystem multiprotocol (where available)
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/111_dsrole_update_idempotency.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/111_dsrole_update_idempotency.yaml
new file mode 100644
index 000000000..f136b3617
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/111_dsrole_update_idempotency.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_dsrole - Resolve idempotency issue
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/112_fix_check_mode.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/112_fix_check_mode.yaml
new file mode 100644
index 000000000..321ba1bea
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/112_fix_check_mode.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_* - Return a correct value for `changed` in all modules when in check mode
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/113_policy_cleanup.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/113_policy_cleanup.yaml
new file mode 100644
index 000000000..7e075ea02
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/113_policy_cleanup.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_policy - Ensure undeclared variables are set correctly
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/114_certificate_update.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/114_certificate_update.yaml
new file mode 100644
index 000000000..27b27bb64
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/114_certificate_update.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_certs - Add update functionality for array cert
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/115_multiprotocol.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/115_multiprotocol.yaml
new file mode 100644
index 000000000..f261d9899
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/115_multiprotocol.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_fs - Add multiprotocol ACL support
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/121_replication_perf.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/121_replication_perf.yaml
new file mode 100644
index 000000000..475a4ba15
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/121_replication_perf.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_info - Add replication performance statistics
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/123_lifecycle_rule_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/123_lifecycle_rule_fix.yaml
new file mode 100644
index 000000000..db689a69e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/123_lifecycle_rule_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_lifecycle - Fix error when creating new bucket lifecycle rule.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/127_add_eula.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/127_add_eula.yaml
new file mode 100644
index 000000000..0b5ca328d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/127_add_eula.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_eula - New module to sign EULA
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/128_add_32_to_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/128_add_32_to_info.yaml
new file mode 100644
index 000000000..9b4c9f480
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/128_add_32_to_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Add extra info for Purity//FB 3.2+ systems
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/129-virtualhost.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/129-virtualhost.yaml
new file mode 100644
index 000000000..da52304ea
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/129-virtualhost.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_virtualhost - New module to manage Object Store Virtual Hosts
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml
new file mode 100644
index 000000000..7a3f021b5
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_apiclient - New module to manage API Clients
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/132_add_timeout.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/132_add_timeout.yaml
new file mode 100644
index 000000000..83bde3906
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/132_add_timeout.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_timeout - Add module to set GUI idle timeout
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/135_add_user_policies.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/135_add_user_policies.yaml
new file mode 100644
index 000000000..8716047c1
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/135_add_user_policies.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_userpolicy - New module to manage object store user access policies
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/136_add_s3user_policy.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/136_add_s3user_policy.yaml
new file mode 100644
index 000000000..b2351a885
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/136_add_s3user_policy.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_s3user - Add access policy option to user creation
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/138_add_ad_module.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/138_add_ad_module.yaml
new file mode 100644
index 000000000..fb2e2277e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/138_add_ad_module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_ad - New module to manage Active Directory Account
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/139_add_keytabs.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/139_add_keytabs.yaml
new file mode 100644
index 000000000..98e3e75ca
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/139_add_keytabs.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_keytabs - New module to manage Kerberos Keytabs
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/140_more_32_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/140_more_32_info.yaml
new file mode 100644
index 000000000..ac0687e29
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/140_more_32_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Add Active Directory, Kerberos and Object Store Account information
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/147_no_gateway.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/147_no_gateway.yaml
new file mode 100644
index 000000000..00fe81efe
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/147_no_gateway.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_subnet - Allow subnet creation with no gateway
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/150_fix_joint_nfs_version_change.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/150_fix_joint_nfs_version_change.yaml
new file mode 100644
index 000000000..818b49f2c
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/150_fix_joint_nfs_version_change.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_fs - Fix bug where changing the state of both NFS v3 and v4.1 at the same time ignored one of these.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/152_s3acc_lowercase.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/152_s3acc_lowercase.yaml
new file mode 100644
index 000000000..a2214ca56
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/152_s3acc_lowercase.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefb_s3acc - Ensure S3 Account Name is always lowercase
+ - purefb_s3user - Ensure S3 Account Name is always lowercase
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/153_add_quota.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/153_add_quota.yaml
new file mode 100644
index 000000000..e415fd23b
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/153_add_quota.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+  - purefb_groupquota - New module to manage individual filesystem group quotas
+  - purefb_userquota - New module to manage individual filesystem user quotas
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/154_add_snap_now.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/154_add_snap_now.yaml
new file mode 100644
index 000000000..c037d06bd
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/154_add_snap_now.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_snap - Add support for immediate snapshot to remote connected FlashBlade
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/158_support_lags.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/158_support_lags.yaml
new file mode 100644
index 000000000..c2c2b3a97
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/158_support_lags.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_subnet - Add support for multiple LAGs.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/159_add_lag.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/159_add_lag.yaml
new file mode 100644
index 000000000..7d10b895c
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/159_add_lag.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_lag - Add support for LAG management
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/161_add_lifecycle_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/161_add_lifecycle_info.yaml
new file mode 100644
index 000000000..1d85adae0
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/161_add_lifecycle_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Add object lifecycles rules to bucket subset
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/162_new_lifecycle.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/162_new_lifecycle.yaml
new file mode 100644
index 000000000..4584f521f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/162_new_lifecycle.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_lifecycle - Add support for updated object lifecycle rules. See documentation for details of new parameters.
+ - purefb_lifecycle - Change `keep_for` parameter to be `keep_previous_for`. `keep_for` is deprecated and will be removed in a later version.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/163_admin_key.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/163_admin_key.yaml
new file mode 100644
index 000000000..c4c785737
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/163_admin_key.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_user - Add support for managing user public key and user unlock
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/164_add_admin.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/164_add_admin.yaml
new file mode 100644
index 000000000..6f6432a86
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/164_add_admin.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_admin - New module to manage global admin settings
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/166_lag_mac_note.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/166_lag_mac_note.yaml
new file mode 100644
index 000000000..49b1ddac7
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/166_lag_mac_note.yaml
@@ -0,0 +1,2 @@
+known_issues:
+ - purefb_lag - The mac_address field in the response is not populated. This will be fixed in a future FlashBlade update.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/167_fix_logins.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/167_fix_logins.yaml
new file mode 100644
index 000000000..ff2b70056
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/167_fix_logins.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb.py - Add check to ensure FlashBlade uses the latest REST version possible for the Purity version installed
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/169_pypureclient_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/169_pypureclient_fix.yaml
new file mode 100644
index 000000000..671c0d3ff
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/169_pypureclient_fix.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb.py - Use latest `pypureclient` SDK with fix for "best fit". No longer requires double login to negotiate best API version.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/174_access_policies.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/174_access_policies.yaml
new file mode 100644
index 000000000..ab241fe39
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/174_access_policies.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_policy - Add support for Object Store Access Policies, associated rules and user grants
+  - purefb_policy - New parameter `policy_type` added. For backwards compatibility, default to `snapshot` if not provided.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/175_throttle_support.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/175_throttle_support.yaml
new file mode 100644
index 000000000..e075475cf
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/175_throttle_support.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_connect - Add support for array connections to have bandwidth throttling defined
+ - purefb_info - Show array connections bandwidth throttle information
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/176_nfs_export_policies.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/176_nfs_export_policies.yaml
new file mode 100644
index 000000000..2332d1c7f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/176_nfs_export_policies.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - purefb_policy - Add NFS export policies, with rules, as a new policy type
+ - purefb_info - Add NFS export policies and rules
+ - purefb_fs - Add support for NFS export policies
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/179_fqcn.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/179_fqcn.yaml
new file mode 100644
index 000000000..5d3ba4592
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/179_fqcn.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - All - Update documentation examples with FQCNs
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/183_v2_connections.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/183_v2_connections.yaml
new file mode 100644
index 000000000..267a19f10
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/183_v2_connections.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_connect - Resolve connection issues between two FBs that are throttling capable
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/184_certificate_typos.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/184_certificate_typos.yaml
new file mode 100644
index 000000000..a416d6a2e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/184_certificate_typos.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_certs - Fix several misspellings of certificate
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/185_nfs_export_rule.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/185_nfs_export_rule.yaml
new file mode 100644
index 000000000..d7dc7fa59
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/185_nfs_export_rule.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_policy - Fix incorrect API call for NFS export policy rule creation
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/186_add_tz.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/186_add_tz.yaml
new file mode 100644
index 000000000..074428b9e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/186_add_tz.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_tz - Add support for FlashBlade timezone management
+ - purefb_info - Show information for current timezone
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/187_rename_nfs_policy.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/187_rename_nfs_policy.yaml
new file mode 100644
index 000000000..d0caf092a
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/187_rename_nfs_policy.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_policy - Allow rename of NFS Export Policies from Purity//FB 3.3.3
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/188_bucket_type.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/188_bucket_type.yaml
new file mode 100644
index 000000000..ee681e33d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/188_bucket_type.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_info - Expose object store bucket type from Purity//FB 3.3.3
+ - purefb_bucket - Allow setting of bucket type to support VSO - requires Purity//FB 3.3.3 or higher
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/191_add_quota_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/191_add_quota_info.yaml
new file mode 100644
index 000000000..604296c5f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/191_add_quota_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Added filesystem default, user and group quotas where available
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/194_lists_for_service.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/194_lists_for_service.yaml
new file mode 100644
index 000000000..031a9bd9e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/194_lists_for_service.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_ad - Allow service to be a list
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/200_proxy.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/200_proxy.yaml
new file mode 100644
index 000000000..612534990
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/200_proxy.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_proxy - Added new boolean parameter `secure`. Default of true (for backwards compatibility) sets the protocol to be `https://`. False sets `http://`
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/202_multiple_snap_rules.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/202_multiple_snap_rules.yaml
new file mode 100644
index 000000000..aca61dee8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/202_multiple_snap_rules.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_policy - Added support for multiple rules in snapshot policies
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml
new file mode 100644
index 000000000..b6810884b
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+  - purefb_info - Fixed issue when more than 10 buckets have lifecycle rules.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/211_change_booleans.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/211_change_booleans.yaml
new file mode 100644
index 000000000..84c3cb521
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/211_change_booleans.yaml
@@ -0,0 +1,2 @@
+trivial:
+ - various modules - Adjust booleans from ``yes``/``no`` to ``true``/``false`` in docs
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/212_object_account_quota.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/212_object_account_quota.yaml
new file mode 100644
index 000000000..7922ea515
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/212_object_account_quota.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_s3acc - Added support for object account quota and hard limit
+ - purefb_s3acc - Added support for default bucket quotas and hard limits
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/213_sec_update.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/213_sec_update.yaml
new file mode 100644
index 000000000..eaa250f7e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/213_sec_update.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Added security update version for Purity//FB 4.0.2, or higher
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/215_encrypt_sec_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/215_encrypt_sec_info.yaml
new file mode 100644
index 000000000..778b39f3e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/215_encrypt_sec_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Added `encryption` and `support_keys` information.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/216_extra_bucket_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/216_extra_bucket_info.yaml
new file mode 100644
index 000000000..acebab758
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/216_extra_bucket_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Added bucket quota and safemode information per bucket
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/217_inventory.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/217_inventory.yaml
new file mode 100644
index 000000000..a95b5c44f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/217_inventory.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_inventory - Added `part_number` to hardware item information.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/218_object_account_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/218_object_account_info.yaml
new file mode 100644
index 000000000..54dac9dac
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/218_object_account_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Updated object store account information
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/220_s3user_key_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/220_s3user_key_fix.yaml
new file mode 100644
index 000000000..636cfadf2
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/220_s3user_key_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_s3user - Fix incorrect response when bad key/secret pair provided for new user
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml
new file mode 100644
index 000000000..b899c31f3
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_fs - Default filesystem size on creation changes from 32G to ``unlimited``
+ - purefb_fs - Fix error in deletion and eradication of filesystem
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml
new file mode 100644
index 000000000..c4d84070f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Add support to list filesystem policies
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml
new file mode 100644
index 000000000..09bc6c3a2
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_fs_replica - Remove condition to attach/detach policies on unhealthy replica-link
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml
new file mode 100644
index 000000000..d6dcb9fe8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_connect - Hide target array API token
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml
new file mode 100644
index 000000000..42d8f1fe3
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_connect - Support idempotency when existing connection is incoming
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml
new file mode 100644
index 000000000..a6eb75c04
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_fs - Add new options for filesystem control (https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/pull/81)
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml b/ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml
new file mode 100644
index 000000000..4f87b305e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_certgrp - Module to manage FlashBlade Certificate Groups
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml
new file mode 100644
index 000000000..1470d302e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_certs - Module to create and delete SSL certificates
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml
new file mode 100644
index 000000000..279173ccc
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_banner - Module to manage the GUI and SSH login message
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml
new file mode 100644
index 000000000..0cde34ca5
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_syslog - Module to manage syslog server configuration
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml b/ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml
new file mode 100644
index 000000000..3caa436a5
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_lifecycle - Module to manage FlashBlade Bucket Lifecycle Rules
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml
new file mode 100644
index 000000000..93876fede
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_connect - Fix breaking change created in purity_fb SDK 1.9.2 for deletion of array connections
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml
new file mode 100644
index 000000000..af012f746
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_s3user - Add support for imported user access keys
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml
new file mode 100644
index 000000000..c4d52cab4
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_ds - Ensure updating directory service configurations completes correctly
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml
new file mode 100644
index 000000000..87bfbeeef
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_connect - Ensure changing encryption status on array connection is performed correctly
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml
new file mode 100644
index 000000000..5019c18e2
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_info - Fix issue getting array info when encrypted connection exists
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml
new file mode 100644
index 000000000..35cff95f9
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml
@@ -0,0 +1,33 @@
+release_summary: |
+ | Release Date: 2020-08-08
+  | This changelog describes all changes made to the modules and plugins included in this collection since Ansible 2.9.0
+
+major_changes:
+ - purefb_alert - manage alert email settings on a FlashBlade
+ - purefb_bladename - manage FlashBlade name
+ - purefb_bucket_replica - manage bucket replica links on a FlashBlade
+ - purefb_connect - manage connections between FlashBlades
+ - purefb_dns - manage DNS settings on a FlashBlade
+ - purefb_fs_replica - manage filesystem replica links on a FlashBlade
+ - purefb_inventory - get information about the hardware inventory of a FlashBlade
+ - purefb_ntp - manage the NTP settings for a FlashBlade
+ - purefb_phonehome - manage the phone home settings for a FlashBlade
+ - purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+ - purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+ - purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+ - purefb_snmp_agent - modify the FlashBlade SNMP Agent
+ - purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+ - purefb_target - manage remote S3-capable targets for a FlashBlade
+ - purefb_user - manage local ``pureuser`` account password on a FlashBlade
+
+minor_changes:
+ - purefb_s3user - return dict changed from ``ansible_facts`` to ``s3user_info``
+ - purefb_s3user - Limit ``access_key`` recreation to 3 times
+ - purefb_info - new options added for information collection
+ - purefb_bucket - Versioning support added
+ - purefb_network - Add replication service type
+
+bugfixes:
+  - purefb_fs - Add graceful exit when ``state`` is ``absent`` and filesystem not eradicated
+ - purefb_fs - Add warning message if ``state`` is ``absent`` without ``eradicate``
+ - purefb_bucket - Add warning message if ``state`` is ``absent`` without ``eradicate``
diff --git a/ansible_collections/purestorage/flashblade/meta/runtime.yml b/ansible_collections/purestorage/flashblade/meta/runtime.yml
new file mode 100644
index 000000000..2ee3c9fa9
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/meta/runtime.yml
@@ -0,0 +1,2 @@
+---
+requires_ansible: '>=2.9.10'
diff --git a/ansible_collections/purestorage/flashblade/playbooks/.keep b/ansible_collections/purestorage/flashblade/playbooks/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/.keep
diff --git a/ansible_collections/purestorage/flashblade/playbooks/files/.keep b/ansible_collections/purestorage/flashblade/playbooks/files/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/files/.keep
diff --git a/ansible_collections/purestorage/flashblade/playbooks/roles/.keep b/ansible_collections/purestorage/flashblade/playbooks/roles/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/roles/.keep
diff --git a/ansible_collections/purestorage/flashblade/playbooks/tasks/.keep b/ansible_collections/purestorage/flashblade/playbooks/tasks/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/tasks/.keep
diff --git a/ansible_collections/purestorage/flashblade/playbooks/templates/.keep b/ansible_collections/purestorage/flashblade/playbooks/templates/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/templates/.keep
diff --git a/ansible_collections/purestorage/flashblade/playbooks/vars/.keep b/ansible_collections/purestorage/flashblade/playbooks/vars/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/vars/.keep
diff --git a/ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py b/ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py
new file mode 100644
index 000000000..a3e5c735a
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley <simon@purestorage.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard Pure Storage documentation fragment
+ DOCUMENTATION = r"""
+options:
+ - See separate platform section for more details
+requirements:
+ - See separate platform section for more details
+notes:
+ - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade
+"""
+
+ # Documentation fragment for FlashBlade
+ FB = r"""
+options:
+ fb_url:
+ description:
+ - FlashBlade management IP address or Hostname.
+ type: str
+ api_token:
+ description:
+ - FlashBlade API token for admin privileged user.
+ type: str
+notes:
+ - This module requires the C(purity_fb) Python library
+ - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables
+ if I(fb_url) and I(api_token) arguments are not passed to the module directly
+requirements:
+ - python >= 2.7
+ - purity_fb >= 1.9
+ - netaddr
+ - pytz
+"""
diff --git a/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py b/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
new file mode 100644
index 000000000..cf987a3e5
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PurityFb
+except ImportError:
+ HAS_PURITY_FB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+from os import environ
+import platform
+
+VERSION = "1.4"
+USER_AGENT_BASE = "Ansible"
+API_AGENT_VERSION = "1.5"
+
+
+def get_blade(module):
+ """Return System Object or Fail"""
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
+ blade_name = module.params["fb_url"]
+ api = module.params["api_token"]
+
+ if HAS_PURITY_FB:
+ if blade_name and api:
+ blade = PurityFb(blade_name)
+ blade.disable_verify_ssl()
+ try:
+ blade.login(api)
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except Exception:
+ module.fail_json(
+ msg="Pure Storage FlashBlade authentication failed. Check your credentials"
+ )
+ elif environ.get("PUREFB_URL") and environ.get("PUREFB_API"):
+ blade = PurityFb(environ.get("PUREFB_URL"))
+ blade.disable_verify_ssl()
+ try:
+ blade.login(environ.get("PUREFB_API"))
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except Exception:
+ module.fail_json(
+ msg="Pure Storage FlashBlade authentication failed. Check your credentials"
+ )
+ else:
+ module.fail_json(
+ msg="You must set PUREFB_URL and PUREFB_API environment variables "
+ "or the fb_url and api_token module arguments"
+ )
+ else:
+ module.fail_json(msg="purity_fb SDK not installed.")
+ return blade
+
+
+def get_system(module):
+ """Return System Object or Fail"""
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
+ blade_name = module.params["fb_url"]
+ api = module.params["api_token"]
+
+ if HAS_PYPURECLIENT:
+ if blade_name and api:
+ system = flashblade.Client(
+ target=blade_name,
+ api_token=api,
+ user_agent=user_agent,
+ )
+ elif environ.get("PUREFB_URL") and environ.get("PUREFB_API"):
+ system = flashblade.Client(
+ target=(environ.get("PUREFB_URL")),
+ api_token=(environ.get("PUREFB_API")),
+ user_agent=user_agent,
+ )
+ else:
+ module.fail_json(
+ msg="You must set PUREFB_URL and PUREFB_API environment variables "
+ "or the fb_url and api_token module arguments"
+ )
+ res = system.get_hardware()
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Pure Storage FlashBlade authentication failed. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(msg="pypureclient SDK not installed.")
+ return system
+
+
+def purefb_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return dict(
+ fb_url=dict(),
+ api_token=dict(no_log=True),
+ )
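A minimal sketch of the pattern the modules added below follow when consuming these helpers; all names are taken from this file and from ansible-core, and this is illustrative rather than a definitive implementation:

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
        get_system,
        purefb_argument_spec,
    )


    def main():
        # purefb_argument_spec() supplies the common fb_url and api_token options
        argument_spec = purefb_argument_spec()
        module = AnsibleModule(argument_spec, supports_check_mode=True)
        # get_system() returns an authenticated pypureclient flashblade.Client,
        # or calls fail_json() if credentials are missing or invalid
        blade = get_system(module)
        module.exit_json(changed=False)


    if __name__ == "__main__":
        main()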
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ad.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ad.py
new file mode 100644
index 000000000..ccd1f5d92
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ad.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_ad
+version_added: '1.6.0'
+short_description: Manage FlashBlade Active Directory Account
+description:
+- Add or delete FlashBlade Active Directory Account
+- FlashBlade allows the creation of one AD computer account, or joining of an
+ existing AD computer account.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the AD account
+ type: str
+ required: true
+ existing:
+ description:
+ - Does the account I(name) already exist in the AD environment
+ type: bool
+ default: false
+ state:
+ description:
+ - Define whether the AD account is deleted or not
+ default: present
+ choices: [ absent, present ]
+ type: str
+ computer:
+ description:
+ - The common name of the computer account to be created in the Active Directory domain.
+ - If not specified, defaults to the name of the Active Directory configuration.
+ type: str
+ domain:
+ description:
+ - The Active Directory domain to join
+ type: str
+ username:
+ description:
+ - A user capable of creating a computer account within the domain
+ type: str
+ password:
+ description:
+ - Password string for I(username)
+ type: str
+ encryption:
+ description:
+ - The encryption types that will be supported for use by clients for Kerberos authentication
+ type: list
+ elements: str
+ choices: [ aes256-sha1, aes128-sha1, arcfour-hmac]
+ default: aes256-sha1
+ join_ou:
+ description:
+ - Location where the Computer account will be created. e.g. OU=Arrays,OU=Storage.
+ - If left empty, defaults to B(CN=Computers).
+ type: str
+ directory_servers:
+ description:
+ - A list of directory servers that will be used for lookups related to user authorization
+ - Accepted server formats are IP address and DNS name
+ - All specified servers must be registered to the domain appropriately in the array's
+ configured DNS and will only be communicated with over the secure LDAP (LDAPS) protocol.
+ If not specified, servers are resolved for the domain in DNS.
+ - The specified list can have a maximum length of 5. If more are provided only the first
+ 5 are used.
+ type: list
+ elements: str
+ kerberos_servers:
+ description:
+ - A list of key distribution servers to use for Kerberos protocol
+ - Accepted server formats are IP address and DNS name
+ - All specified servers must be registered to the domain appropriately in the array's
+ configured DNS. If not specified, servers are resolved for the domain in DNS.
+ - The specified list can have a maximum length of 5. If more are provided only the first
+ 5 are used.
+ type: list
+ elements: str
+ service_principals:
+ description:
+ - A list of either FQDNs or SPNs for registering services with the domain.
+ - If not specified B(Computer Name.Domain) is used
+ type: list
+ elements: str
+ service:
+ description:
+ - Service protocol for Active Directory principals
+ - Refer to FlashBlade User Guide for more details
+ type: list
+ elements: str
+ choices: ['nfs', 'cifs', 'HOST']
+ default: nfs
+ local_only:
+ description:
+ - Do a local-only delete of an active directory account
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create new AD account
+ purestorage.flashblade.purefb_ad:
+ name: ad_account
+ computer: FLASHBLADE
+ domain: acme.com
+ username: Administrator
+ password: Password
+ join_ou: "CN=FakeOU"
+ encryption:
+ - aes128-sha1
+ - aes256-sha1
+ kerberos_servers:
+ - kdc.acme.com
+ directory_servers:
+ - ldap.acme.com
+ service_principals:
+ - vip1.flashblade.acme.com
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Connect to existing AD account
+ purestorage.flashblade.purefb_ad:
+ name: ad_account
+ computer: FLASHBLADE
+ domain: acme.com
+ username: Administrator
+ password: Password
+ existing: true
+ kerberos_servers:
+ - kdc.acme.com
+ directory_servers:
+ - ldap.acme.com
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Update existing AD account
+ purestorage.flashblade.purefb_ad:
+ name: ad_account
+ encryption:
+ - aes256-sha1
+ kerberos_servers:
+ - kdc.acme.com
+ directory_servers:
+ - ldap.acme.com
+ service_principals:
+ - vip1.flashblade.acme.com
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete local AD account
+ purestorage.flashblade.purefb_ad:
+ name: ad_account
+ local_only: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Fully delete AD account
+ purestorage.flashblade.purefb_ad:
+ name: ad_account
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flashblade import ActiveDirectoryPost, ActiveDirectoryPatch
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def delete_account(module, blade):
+ """Delete Active directory Account"""
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_active_directory(
+ names=[module.params["name"]], local_only=module.params["local_only"]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete AD Account {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_account(module, blade):
+ """Create Active Directory Account"""
+ changed = True
+ if not module.params["existing"]:
+ ad_config = ActiveDirectoryPost(
+ computer_name=module.params["computer"],
+ directory_servers=module.params["directory_servers"],
+ kerberos_servers=module.params["kerberos_servers"],
+ domain=module.params["domain"],
+ encryption_types=module.params["encryption"],
+ fqdns=module.params["service_principals"],
+ join_ou=module.params["join_ou"],
+ user=module.params["username"],
+ password=module.params["password"],
+ )
+ if not module.check_mode:
+ res = blade.post_active_directory(
+ names=[module.params["name"]], active_directory=ad_config
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add Active Directory Account {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ ad_config = ActiveDirectoryPost(
+ computer_name=module.params["computer"],
+ directory_servers=module.params["directory_servers"],
+ kerberos_servers=module.params["kerberos_servers"],
+ domain=module.params["domain"],
+ encryption_types=module.params["encryption"],
+ user=module.params["username"],
+ password=module.params["password"],
+ )
+ if not module.check_mode:
+ res = blade.post_active_directory(
+ names=[module.params["name"]],
+ active_directory=ad_config,
+ join_existing_account=True,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add Active Directory Account {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_account(module, blade):
+ """Update Active Directory Account"""
+ changed = False
+ mod_ad = False
+ current_ad = list(blade.get_active_directory(names=[module.params["name"]]).items)[
+ 0
+ ]
+ attr = {}
+ if (
+ module.params["join_ou"] != current_ad.join_ou
+ and module.params["encryption"].sort() != current_ad.encryption_types.sort()
+ ):
+ module.fail_json(msg="Cannot make changes to OU when changing encryption types")
+ if module.params["directory_servers"]:
+ if current_ad.directory_servers:
+ if set(module.params["directory_servers"]) != set(
+ current_ad.directory_servers
+ ):
+ attr["directory_servers"] = module.params["directory_servers"]
+ mod_ad = True
+ if module.params["kerberos_servers"]:
+ if current_ad.kerberos_servers:
+ if set(module.params["kerberos_servers"]) != set(
+ current_ad.kerberos_servers
+ ):
+ attr["kerberos_servers"] = module.params["kerberos_servers"]
+ mod_ad = True
+ if module.params["join_ou"] != current_ad.join_ou:
+ attr["join_ou"] = module.params["join_ou"]
+ mod_ad = True
+ if set(module.params["encryption"]) != set(current_ad.encryption_types):
+ attr["encryption_types"] = module.params["encryption"]
+ mod_ad = True
+ if module.params["service_principals"]:
+ if current_ad.service_principal_names:
+ full_spns = []
+ for spn in range(0, len(module.params["service_principals"])):
+ for service in range(0, len(module.params["service"])):
+ full_spns.append(
+ module.params["service"][service]
+ + "/"
+ + module.params["service_principals"][spn]
+ )
+ if set(current_ad.service_principal_names) != set(full_spns):
+ attr["service_principal_names"] = full_spns
+ mod_ad = True
+ if mod_ad:
+ changed = True
+ if not module.check_mode:
+ ad_attr = ActiveDirectoryPatch(**attr)
+ res = blade.patch_active_directory(
+ names=[module.params["name"]], active_directory=ad_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update Active Directory Account {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ username=dict(type="str"),
+ password=dict(type="str", no_log=True),
+ name=dict(type="str", required=True),
+ service=dict(
+ type="list",
+ elements="str",
+ default="nfs",
+ choices=["nfs", "cifs", "HOST"],
+ ),
+ computer=dict(type="str"),
+ existing=dict(type="bool", default=False),
+ local_only=dict(type="bool", default=False),
+ domain=dict(type="str"),
+ join_ou=dict(type="str"),
+ directory_servers=dict(type="list", elements="str"),
+ kerberos_servers=dict(type="list", elements="str"),
+ service_principals=dict(type="list", elements="str"),
+ encryption=dict(
+ type="list",
+ elements="str",
+ choices=["aes256-sha1", "aes128-sha1", "arcfour-hmac"],
+ default=["aes256-sha1"],
+ ),
+ )
+ )
+
+ required_if = [["state", "present", ["username", "password", "domain"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
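+ # Map the short encryption aliases accepted by the module to the full Kerberos encryption type names expected by the API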
+ module.params["encryption"] = [
+ crypt.replace("aes256-sha1", "aes256-cts-hmac-sha1-96").replace(
+ "aes128-sha1", "aes128-cts-hmac-sha1-96"
+ )
+ for crypt in module.params["encryption"]
+ ]
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ state = module.params["state"]
+ exists = bool(blade.get_active_directory().total_item_count == 1)
+
+ # TODO: Check SMB mode.
+ # If SMB mode is adapter, only allow nfs
+ # Only allow cifs or HOST if SMB mode is native
+
+ if not module.params["computer"]:
+ module.params["computer"] = module.params["name"].replace("_", "-")
+ if module.params["kerberos_servers"]:
+ module.params["kerberos_servers"] = module.params["kerberos_servers"][0:5]
+ if module.params["directory_servers"]:
+ module.params["directory_servers"] = module.params["directory_servers"][0:5]
+
+ if not exists and state == "present":
+ create_account(module, blade)
+ elif exists and state == "present":
+ update_account(module, blade)
+ elif exists and state == "absent":
+ delete_account(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_admin.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_admin.py
new file mode 100644
index 000000000..3ee87bca1
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_admin.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_admin
+version_added: '1.8.0'
+short_description: Configure Pure Storage FlashBlade Global Admin settings
+description:
+- Set global admin settings for the FlashBlade
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ max_login:
+ description:
+ - Maximum number of failed logins before account is locked
+ type: int
+ min_password:
+ description:
+ - Minimum user password length
+ - Range between 1 and 100
+ default: 1
+ type: int
+ lockout:
+ description:
+ - Account lockout duration, in seconds, after max_login exceeded
+ - Range between 1 second and 90 days (7776000 seconds)
+ type: int
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set global login parameters
+ purestorage.flashblade.purefb_admin:
+ max_login: 5
+ min_password: 10
+ lockout: 300
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flashblade import AdminSetting
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_API_VERSION = "2.3"
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ max_login=dict(type="int"),
+ min_password=dict(type="int", default=1, no_log=False),
+ lockout=dict(type="int"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ if module.params["lockout"] and not 1 <= module.params["lockout"] <= 7776000:
+ module.fail_json(msg="Lockout must be between 1 and 7776000 seconds")
+ if not 1 <= module.params["min_password"] <= 100:
+ module.fail_json(msg="Minimum password length must be between 1 and 100")
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+ changed = False
+ if MIN_API_VERSION in api_version:
+ current_settings = list(blade.get_admins_settings().items)[0]
+ lockout = getattr(current_settings, "lockout_duration", None)
+ max_login = getattr(current_settings, "max_login_attempts", None)
+ min_password = getattr(current_settings, "min_password_length", 1)
+ if min_password != module.params["min_password"]:
+ changed = True
+ min_password = module.params["min_password"]
+ if lockout and lockout != module.params["lockout"] * 1000:
+ changed = True
+ lockout = module.params["lockout"] * 1000
+ elif not lockout and module.params["lockout"]:
+ changed = True
+ lockout = module.params["lockout"] * 1000
+ if max_login and max_login != module.params["max_login"]:
+ changed = True
+ max_login = module.params["max_login"]
+ elif not max_login and module.params["max_login"]:
+ changed = True
+ max_login = module.params["max_login"]
+
+ if changed and not module.check_mode:
+ admin = AdminSetting(
+ min_password_length=min_password,
+ max_login_attempts=max_login,
+ lockout_duration=lockout,
+ )
+
+ res = blade.patch_admins_settings(admin_setting=admin)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change Global Admin settings. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(msg="Purity version does not support Global Admin settings")
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py
new file mode 100644
index 000000000..406fe1c39
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_alert
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade alert email settings
+description:
+- Configure alert email settings for Pure Storage FlashBlades.
+- Add or delete an individual alert email address from the existing
+ list of alert watchers.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ type: str
+ description:
+ - Create or delete alert email
+ default: present
+ choices: [ absent, present ]
+ address:
+ type: str
+ description:
+ - Email address (valid format required)
+ required: true
+ enabled:
+ type: bool
+ default: true
+ description:
+ - Set specified email address to be enabled or disabled
+ severity:
+ type: str
+ description:
+ - The minimum severity that an alert must have in order for
+ emails to be sent to the array's alert watchers
+ default: info
+ choices: [ info, warning, critical ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Add new email recipient and enable, or enable existing email
+ purestorage.flashblade.purefb_alert:
+ address: "user@domain.com"
+ enabled: true
+ state: present
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Delete existing email recipient
+ purestorage.flashblade.purefb_alert:
+ state: absent
+ address: "user@domain.com"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import AlertWatcher
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def create_alert(module, blade):
+ """Create Alert Email"""
+ changed = True
+ if not module.check_mode:
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION in api_version:
+ watcher_settings = AlertWatcher(
+ minimum_notification_severity=module.params["severity"]
+ )
+ try:
+ blade.alert_watchers.create_alert_watchers(
+ names=[module.params["address"]], watcher_settings=watcher_settings
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+ else:
+ try:
+ blade.alert_watchers.create_alert_watchers(
+ names=[module.params["address"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+ if not module.params["enabled"]:
+ watcher_settings = AlertWatcher(enabled=module.params["enabled"])
+ try:
+ blade.alert_watchers.update_alert_watchers(
+ names=[module.params["address"]], watcher_settings=watcher_settings
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to disable during create alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_alert(module, blade):
+ """Update alert Watcher"""
+ api_version = blade.api_version.list_versions().versions
+ mod_alert = False
+ try:
+ alert = blade.alert_watchers.list_alert_watchers(
+ names=[module.params["address"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to get information for alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+ current_state = {
+ "enabled": alert.items[0].enabled,
+ "severity": alert.items[0].minimum_notification_severity,
+ }
+ if current_state["enabled"] != module.params["enabled"]:
+ mod_alert = True
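+ # Severity is only comparable and settable on REST API 1.9 or later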
+ if MIN_REQUIRED_API_VERSION in api_version:
+ if current_state["severity"] != module.params["severity"]:
+ mod_alert = True
+ if mod_alert:
+ changed = True
+ if not module.check_mode:
+ if MIN_REQUIRED_API_VERSION in api_version:
+ watcher_settings = AlertWatcher(
+ enabled=module.params["enabled"],
+ minimum_notification_severity=module.params["severity"],
+ )
+ else:
+ watcher_settings = AlertWatcher(enabled=module.params["enabled"])
+ try:
+ blade.alert_watchers.update_alert_watchers(
+ names=[module.params["address"]], watcher_settings=watcher_settings
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def delete_alert(module, blade):
+ """Delete Alert Email"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.alert_watchers.delete_alert_watchers(names=[module.params["address"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete alert email: {0}".format(module.params["address"])
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(type="str", required=True),
+ enabled=dict(type="bool", default=True),
+ severity=dict(
+ type="str", default="info", choices=["info", "warning", "critical"]
+ ),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
+ pattern = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$")
+ if not pattern.match(module.params["address"]):
+ module.fail_json(msg="Valid email address not provided.")
+
+ blade = get_blade(module)
+
+ exists = False
+ try:
+ emails = blade.alert_watchers.list_alert_watchers()
+ except Exception:
+ module.fail_json(msg="Failed to get exisitng email list")
+ for email in range(0, len(emails.items)):
+ if emails.items[email].name == module.params["address"]:
+ exists = True
+ break
+ if module.params["state"] == "present" and not exists:
+ create_alert(module, blade)
+ elif module.params["state"] == "present" and exists:
+ update_alert(module, blade)
+ elif module.params["state"] == "absent" and exists:
+ delete_alert(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_apiclient.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_apiclient.py
new file mode 100644
index 000000000..6a4755a95
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_apiclient.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_apiclient
+version_added: '1.6.0'
+short_description: Manage FlashBlade API Clients
+description:
+- Enable or disable FlashBlade API Clients
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the API Client
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the API client should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ role:
+ description:
+ - The maximum role allowed for ID Tokens issued by this API client
+ type: str
+ choices: [readonly, ops_admin, storage_admin, array_admin]
+ issuer:
+ description:
+ - The name of the identity provider that will be issuing ID Tokens for this API client
+ - If not specified, defaults to the API client name, I(name).
+ type: str
+ public_key:
+ description:
+ - The API client's PEM formatted (Base64 encoded) RSA public key.
+ - Include the I(-----BEGIN PUBLIC KEY-----) and I(-----END PUBLIC KEY-----) lines
+ type: str
+ token_ttl:
+ description:
+ - Time To Live length in seconds for the exchanged access token
+ - Range is 1 second to 1 day (86400 seconds)
+ type: int
+ default: 86400
+ enabled:
+ description:
+ - State of the API Client Key
+ type: bool
+ default: true
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create API token ansible-token
+ purestorage.flashblade.purefb_apiclient:
+ name: ansible_token
+ issuer: "Pure_Storage"
+ token_ttl: 3000
+ role: array_admin
+ public_key: "{{lookup('file', 'public_pem_file') }}"
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Disable API Client
+ purestorage.flashblade.purefb_apiclient:
+ name: ansible_token
+ enabled: false
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Enable API Client
+ purestorage.flashblade.purefb_apiclient:
+ name: ansible_token
+ enabled: true
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Delete API Client
+ purestorage.flashblade.purefb_apiclient:
+ state: absent
+ name: ansible_token
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def delete_client(module, blade):
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.delete_api_clients(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete API Client {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_client(module, blade, client):
+ """Update API Client"""
+ changed = False
+ if client.enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_api_clients(
+ names=[module.params["name"]],
+ api_clients=flashblade.ApiClient(enabled=module.params["enabled"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update API Client {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_client(module, blade):
+ """Create API Client"""
+ changed = True
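+ # token_ttl is supplied in seconds (1-86400); the API expects access_token_ttl_in_ms in milliseconds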
+ if not 1 <= module.params["token_ttl"] <= 86400:
+ module.fail_json(msg="token_ttl parameter is out of range (1 to 86400)")
+ else:
+ token_ttl = module.params["token_ttl"] * 1000
+ if not module.params["issuer"]:
+ module.params["issuer"] = module.params["name"]
+ if not module.check_mode:
+ api_client = flashblade.ApiClientsPost(
+ max_role={"name": module.params["role"]},
+ issuer=module.params["issuer"],
+ access_token_ttl_in_ms=token_ttl,
+ public_key=module.params["public_key"],
+ )
+ res = blade.post_api_clients(
+ names=[module.params["name"]], api_client=api_client
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create API Client {0}. Error message: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
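+ # Newly created clients are enabled with a follow-up PATCH when requested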
+ if module.params["enabled"]:
+ attr = flashblade.ApiClient(enabled=True)
+ res = blade.patch_api_clients(
+ api_clients=attr, names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ module.warn(
+ "API Client {0} created by enable failed. Please investigate.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ enabled=dict(type="bool", default=True),
+ name=dict(type="str", required=True),
+ role=dict(
+ type="str",
+ choices=["readonly", "ops_admin", "storage_admin", "array_admin"],
+ ),
+ public_key=dict(type="str", no_log=True),
+ token_ttl=dict(type="int", default=86400, no_log=False),
+ issuer=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9_]{0,54}[a-zA-Z0-9])?$")
+ if module.params["issuer"]:
+ if not pattern.match(module.params["issuer"]):
+ module.fail_json(
+ msg="API Client Issuer name {0} does not conform to required naming convention".format(
+ module.params["issuer"]
+ )
+ )
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="Object Store Virtual Host name {0} does not conform to required naming convention".format(
+ module.params["name"]
+ )
+ )
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ state = module.params["state"]
+
+ exists = bool(
+ blade.get_api_clients(names=[module.params["name"]]).status_code == 200
+ )
+ if exists:
+ client = list(blade.get_api_clients(names=[module.params["name"]]).items)[0]
+
+ if not exists and state == "present":
+ create_client(module, blade)
+ elif exists and state == "present":
+ update_client(module, blade, client)
+ elif exists and state == "absent":
+ delete_client(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py
new file mode 100644
index 000000000..739c2ab9a
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_banner
+version_added: '1.4.0'
+short_description: Configure Pure Storage FlashBlade GUI and SSH MOTD message
+description:
+- Configure MOTD for Pure Storage FlashBlades.
+- This will be shown during an SSH or GUI login to the system.
+- Multiple line messages can be achieved using \\n.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set or delete the MOTD
+ default: present
+ type: str
+ choices: [ present, absent ]
+ banner:
+ description:
+ - Banner text, or MOTD, to use
+ type: str
+ default: "Welcome to the machine..."
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set new banner text
+ purestorage.flashblade.purefb_banner:
+ banner: "Banner over\ntwo lines"
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete banner text
+ purestorage.flashblade.purefb_banner:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PureArray
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.10"
+
+
+def set_banner(module, blade):
+ """Set MOTD banner text"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if not module.params["banner"]:
+ module.fail_json(msg="Invalid MOTD banner given")
+ blade_settings = PureArray(banner=module.params["banner"])
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(msg="Failed to set MOTD banner text")
+
+ module.exit_json(changed=changed)
+
+
+def delete_banner(module, blade):
+ """Delete MOTD banner text"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade_settings = PureArray(banner="")
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(msg="Failed to delete current MOTD banner text")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ banner=dict(type="str", default="Welcome to the machine..."),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ required_if = [("state", "present", ["banner"])]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+ current_banner = blade.login_banner.list_login_banner().login_banner
+
+ # set banner if empty value or value differs
+ if state == "present" and (
+ not current_banner or current_banner != module.params["banner"]
+ ):
+ set_banner(module, blade)
+ # clear banner if it has a value
+ elif state == "absent" and current_banner:
+ delete_banner(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py
new file mode 100644
index 000000000..0e0b5c755
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_bladename
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade name
+description:
+- Configure name of Pure Storage FlashBlades.
+- Ideal for Day 0 initial configuration.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set the FlashBlade name
+ type: str
+ default: present
+ choices: [ present ]
+ name:
+ description:
+ - Name of the FlashBlade. Must conform to correct naming schema.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set new FlashBlade name
+ purestorage.flashblade.purefb_bladename:
+ name: new-flashblade-name
+ state: present
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PureArray
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def update_name(module, blade):
+ """Change aray name"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade_settings = PureArray(name=module.params["name"])
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(
+ msg="Failed to change array name to {0}".format(module.params["name"])
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
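+ # Array names are 1-56 characters, alphanumeric and hyphen, and must start and end with an alphanumeric character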
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,54}[a-zA-Z0-9])?$")
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="FlashBlade name {0} does not conform to array name rules".format(
+ module.params["name"]
+ )
+ )
+ if module.params["name"] != blade.arrays.list_arrays().items[0].name:
+ update_name(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
new file mode 100644
index 000000000..67b6b1545
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
@@ -0,0 +1,398 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_bucket
+version_added: "1.0.0"
+short_description: Manage Object Store Buckets on a Pure Storage FlashBlade.
+description:
+ - This module manages object store (S3) buckets on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Bucket Name.
+ required: true
+ type: str
+ account:
+ description:
+ - Object Store Account for Bucket.
+ required: true
+ type: str
+ versioning:
+ description:
+ - State of S3 bucket versioning
+ required: false
+ default: absent
+ type: str
+ choices: [ "enabled", "suspended", "absent" ]
+ state:
+ description:
+ - Create, delete or modifies a bucket.
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ eradicate:
+ description:
+ - Define whether to eradicate the bucket on delete or leave in trash.
+ required: false
+ type: bool
+ default: false
+ mode:
+ description:
+ - The type of bucket to be created. Also referred to as VSO Mode.
+ - Requires Purity//FB 3.3.3 or higher
+ - I(multi-site) type can only be used after feature is
+ enabled by Pure Technical Support
+ type: str
+ choices: [ "classic", "multi-site" ]
+ version_added: '1.10.0'
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new bucket named foo in account bar
+ purestorage.flashblade.purefb_bucket:
+ name: foo
+ account: bar
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete bucket named foo in account bar
+ purestorage.flashblade.purefb_bucket:
+ name: foo
+ account: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Change bucket versioning state
+ purestorage.flashblade.purefb_bucket:
+ name: foo
+ account: bar
+ versioning: enabled
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Recover deleted bucket named foo in account bar
+ purestorage.flashblade.purefb_bucket:
+ name: foo
+ account: bar
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Eradicate bucket named foo in account bar
+ purestorage.flashblade.purefb_bucket:
+ name: foo
+ account: bar
+ state: absent
+ eradicate: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Bucket, Reference, BucketPatch, BucketPost
+except ImportError:
+ HAS_PURITY_FB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.5"
+VERSIONING_VERSION = "1.9"
+VSO_VERSION = "2.4"
+
+
+def get_s3acc(module, blade):
+ """Return Object Store Account or None"""
+ s3acc = None
+ accts = blade.object_store_accounts.list_object_store_accounts()
+ for acct in range(0, len(accts.items)):
+ if accts.items[acct].name == module.params["account"]:
+ s3acc = accts.items[acct]
+ return s3acc
+
+
+def get_bucket(module, blade):
+ """Return Bucket or None"""
+ s3bucket = None
+ buckets = blade.buckets.list_buckets()
+ for bucket in range(0, len(buckets.items)):
+ if buckets.items[bucket].name == module.params["name"]:
+ s3bucket = buckets.items[bucket]
+ return s3bucket
+
+
+def create_bucket(module, blade):
+ """Create bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = blade.api_version.list_versions().versions
+ if VSO_VERSION in api_version and module.params["mode"]:
+ bladev2 = get_system(module)
+ res = bladev2.post_buckets(
+ names=[module.params["name"]],
+ bucket=flashblade.BucketPost(
+ account=flashblade.Reference(name=module.params["account"]),
+ bucket_type=module.params["mode"],
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Object Store Bucket {0} creation failed. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ elif VERSIONING_VERSION in api_version:
+ attr = BucketPost()
+ attr.account = Reference(name=module.params["account"])
+ blade.buckets.create_buckets(names=[module.params["name"]], bucket=attr)
+ else:
+ attr = Bucket()
+ attr.account = Reference(name=module.params["account"])
+ blade.buckets.create_buckets(
+ names=[module.params["name"]], account=attr
+ )
+ if (
+ module.params["versioning"] != "absent"
+ and VERSIONING_VERSION in api_version
+ ):
+ try:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]],
+ bucket=BucketPatch(versioning=module.params["versioning"]),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0} Created but versioning state failed".format(
+ module.params["name"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Creation failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_bucket(module, blade):
+ """Delete Bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = blade.api_version.list_versions().versions
+ if VERSIONING_VERSION in api_version:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]], bucket=BucketPatch(destroyed=True)
+ )
+ else:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]], destroyed=Bucket(destroyed=True)
+ )
+ if module.params["eradicate"]:
+ try:
+ blade.buckets.delete_buckets(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Eradication failed".format(
+ module.params["name"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Deletion failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def recover_bucket(module, blade):
+ """Recover Bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = blade.api_version.list_versions().versions
+ if VERSIONING_VERSION in api_version:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]], bucket=BucketPatch(destroyed=False)
+ )
+ else:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]], destroyed=Bucket(destroyed=False)
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Recovery failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_bucket(module, blade, bucket):
+ """Update Bucket"""
+ changed = False
+ api_version = blade.api_version.list_versions().versions
+ if VSO_VERSION in api_version:
+ if module.params["mode"]:
+ bladev2 = get_system(module)
+ bucket_detail = bladev2.get_buckets(names=[module.params["name"]])
+ if list(bucket_detail.items)[0].bucket_type != module.params["mode"]:
+ module.warn("Changing bucket type is not permitted.")
+
+ if VERSIONING_VERSION in api_version:
+ if bucket.versioning != "none":
+ if module.params["versioning"] == "absent":
+ versioning = "suspended"
+ else:
+ versioning = module.params["versioning"]
+ if bucket.versioning != versioning:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]],
+ bucket=BucketPatch(versioning=versioning),
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Versioning change failed".format(
+ module.params["name"]
+ )
+ )
+ elif module.params["versioning"] != "absent":
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]],
+ bucket=BucketPatch(versioning=module.params["versioning"]),
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Versioning change failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def eradicate_bucket(module, blade):
+ """Eradicate Bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.buckets.delete_buckets(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Eradication failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ account=dict(required=True),
+ eradicate=dict(default="false", type="bool"),
+ mode=dict(type="str", choices=["classic", "multi-site"]),
+ versioning=dict(
+ default="absent", choices=["enabled", "suspended", "absent"]
+ ),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+ if module.params["mode"]:
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required to support VSO mode")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+ if module.params["mode"] and VSO_VERSION not in api_version:
+ module.fail_json(msg="VSO mode requires Purity//FB 3.3.3 or higher.")
+
+ bucket = get_bucket(module, blade)
+ if not get_s3acc(module, blade):
+ module.fail_json(
+ msg="Object Store Account {0} does not exist.".format(
+ module.params["account"]
+ )
+ )
+
+ if module.params["eradicate"] and state == "present":
+ module.warn("Eradicate flag ignored without state=absent")
+
+ if state == "present" and not bucket:
+ create_bucket(module, blade)
+ elif state == "present" and bucket and bucket.destroyed:
+ recover_bucket(module, blade)
+ elif state == "absent" and bucket and not bucket.destroyed:
+ delete_bucket(module, blade)
+ elif state == "present" and bucket:
+ update_bucket(module, blade, bucket)
+ elif (
+ state == "absent" and bucket and bucket.destroyed and module.params["eradicate"]
+ ):
+ eradicate_bucket(module, blade)
+ elif state == "absent" and not bucket:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
new file mode 100644
index 000000000..6ac3775ae
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_bucket_replica
+version_added: '1.0.0'
+short_description: Manage bucket replica links between Pure Storage FlashBlades
+description:
+ - This module manages bucket replica links between Pure Storage FlashBlades.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Local Bucket Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or modifies a bucket replica link
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ target:
+ description:
+ - Remote array or target name to create replica on.
+ required: false
+ type: str
+ target_bucket:
+ description:
+ - Name of the target bucket.
+ - If not supplied, will default to I(name).
+ type: str
+ required: false
+ paused:
+ description:
+ - Whether the bucket replica link is paused.
+ type: bool
+ default: false
+ credential:
+ description:
+ - Name of the remote credential to use.
+ required: false
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new bucket replica from foo to bar on arrayB
+ purestorage.flashblade.purefb_bucket_replica:
+ name: foo
+ target: arrayB
+ target_bucket: bar
+ credential: cred_1
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Pause existing bucket replica link
+ purestorage.flashblade.purefb_bucket_replica:
+ name: foo
+ paused: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete bucket replica link foo
+ purestorage.flashblade.purefb_bucket_replica:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import BucketReplicaLink, ObjectStoreRemoteCredentials
+except ImportError:
+ HAS_PURITY_FB = False
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def get_local_bucket(module, blade):
+ """Return Bucket or None"""
+ try:
+ res = blade.buckets.list_buckets(names=[module.params["name"]])
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_remote_cred(module, blade, target):
+ """Return Remote Credential or None"""
+ try:
+ res = (
+ blade.object_store_remote_credentials.list_object_store_remote_credentials(
+ names=[target + "/" + module.params["credential"]]
+ )
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_local_rl(module, blade):
+ """Return Bucket Replica Link or None"""
+ try:
+ res = blade.bucket_replica_links.list_bucket_replica_links(
+ local_bucket_names=[module.params["name"]]
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_connected(module, blade):
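+ """Return the name of the connected array or target, or None"""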
+ connected_blades = blade.array_connections.list_array_connections()
+ for target in range(0, len(connected_blades.items)):
+ if (
+ connected_blades.items[target].remote.name == module.params["target"]
+ or connected_blades.items[target].management_address
+ == module.params["target"]
+ ) and connected_blades.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_blades.items[target].remote.name
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].name == module.params[
+ "target"
+ ] and connected_targets.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_targets.items[target].name
+ return None
+
+
+def create_rl(module, blade, remote_cred):
+ """Create Bucket Replica Link"""
+ changed = True
+ if not module.check_mode:
+ try:
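+ # Default the remote bucket name to the local bucket name; bucket
+ # names are always lower case.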
+ if not module.params["target_bucket"]:
+ module.params["target_bucket"] = module.params["name"]
+ else:
+ module.params["target_bucket"] = module.params["target_bucket"].lower()
+ blade.bucket_replica_links.create_bucket_replica_links(
+ local_bucket_names=[module.params["name"]],
+ remote_bucket_names=[module.params["target_bucket"]],
+ remote_credentials_names=[remote_cred.name],
+ bucket_replica_link=BucketReplicaLink(paused=module.params["paused"]),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create bucket replica link {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_rl_policy(module, blade, local_replica_link):
+ """Update Bucket Replica Link"""
+ changed = False
+ new_cred = local_replica_link.remote.name + "/" + module.params["credential"]
+ if (
+ local_replica_link.paused != module.params["paused"]
+ or local_replica_link.remote_credentials.name != new_cred
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ module.warn("{0}".format(local_replica_link))
+ blade.bucket_replica_links.update_bucket_replica_links(
+ local_bucket_names=[module.params["name"]],
+ remote_bucket_names=[local_replica_link.remote_bucket.name],
+ remote_names=[local_replica_link.remote.name],
+ bucket_replica_link=BucketReplicaLink(
+ paused=module.params["paused"],
+ remote_credentials=ObjectStoreRemoteCredentials(name=new_cred),
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update bucket replica link {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_rl_policy(module, blade, local_replica_link):
+ """Delete Bucket Replica Link"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.bucket_replica_links.delete_bucket_replica_links(
+ remote_names=[local_replica_link.remote.name],
+ local_bucket_names=[module.params["name"]],
+ remote_bucket_names=[local_replica_link.remote_bucket.name],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete bucket replica link {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ target=dict(type="str"),
+ target_bucket=dict(type="str"),
+ paused=dict(type="bool", default=False),
+ credential=dict(type="str"),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ module.params["name"] = module.params["name"].lower()
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ local_bucket = get_local_bucket(module, blade)
+ local_replica_link = get_local_rl(module, blade)
+ target = get_connected(module, blade)
+
+ if not target:
+ module.fail_json(
+ msg="Selected target {0} is not connected.".format(module.params["target"])
+ )
+
+ if local_replica_link and not module.params["credential"]:
+ module.params["credential"] = local_replica_link.remote_credentials.name.split(
+ "/"
+ )[1]
+ remote_cred = get_remote_cred(module, blade, target)
+ if not remote_cred:
+ module.fail_json(
+ msg="Selected remote credential {0} does not exist for target {1}.".format(
+ module.params["credential"], module.params["target"]
+ )
+ )
+
+ if not local_bucket:
+ module.fail_json(
+ msg="Selected local bucket {0} does not exist.".format(
+ module.params["name"]
+ )
+ )
+
+ if local_replica_link:
+ if local_replica_link.status == "unhealthy":
+ module.fail_json(msg="Replica Link unhealthy - please check target")
+
+ if state == "present" and not local_replica_link:
+ create_rl(module, blade, remote_cred)
+ elif state == "present" and local_replica_link:
+ update_rl_policy(module, blade, local_replica_link)
+ elif state == "absent" and local_replica_link:
+ delete_rl_policy(module, blade, local_replica_link)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py
new file mode 100644
index 000000000..2308b6f16
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_certgrp
+version_added: '1.4.0'
+short_description: Manage FlashBlade Certificate Groups
+description:
+- Manage certificate groups for FlashBlades
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete certificate group
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - Name of the certificate group
+ type: str
+ certificates:
+ description:
+ - List of certificates to add to the group on creation
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create a certificate group
+ purestorage.flashblade.purefb_certgrp:
+ name: test_grp
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a certificate group and add existing certificates
+ purestorage.flashblade.purefb_certgrp:
+ name: test_grp
+ certificates:
+ - cert1
+ - cert2
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a certificate from a group
+ purestorage.flashblade.purefb_certgrp:
+ name: test_grp
+ certificates:
+ - cert2
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a certificate group
+ purestorage.flashblade.purefb_certgrp:
+ name: test_grp
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def delete_certgrp(module, blade):
+ """Delete certifcate group"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.delete_certificate_groups(
+ names=[module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete certifcate group {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_certgrp(module, blade):
+ """Create certifcate group"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.create_certificate_groups(
+ names=[module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create certificate group {0}.".format(
+ module.params["name"]
+ )
+ )
+ if module.params["certificates"]:
+ try:
+ blade.certificate_groups.add_certificate_group_certificates(
+ certificate_names=module.params["certificates"],
+ certificate_group_names=[module.params["name"]],
+ )
+ except Exception:
+ blade.certificate_groups.delete_certificate_groups(
+ names=[module.params["name"]]
+ )
+ module.fail_json(
+ msg="Failed to add certifcates {0}. "
+ "Please check they all exist".format(module.params["certificates"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_certgrp(module, blade):
+ """Update certificate group"""
+ changed = False
+ try:
+ certs = blade.certificate_groups.list_certificate_group_certificates(
+ certificate_group_names=[module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to get certifates list for group {0}.".format(
+ module.params["name"]
+ )
+ )
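+ # Compare the group's current certificate membership with the requested
+ # list and add or remove certificates according to 'state'.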
+ if not certs:
+ if module.params["state"] == "present":
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.add_certificate_group_certificates(
+ certificate_names=module.params["certificates"],
+ certificate_group_names=[module.params["name"]],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to add certifcates {0}. "
+ "Please check they all exist".format(
+ module.params["certificates"]
+ )
+ )
+ else:
+ current = []
+ for cert in range(0, len(certs.items)):
+ current.append(certs.items[cert].member.name)
+ for new_cert in range(0, len(module.params["certificates"])):
+ certificate = module.params["certificates"][new_cert]
+ if certificate in current:
+ if module.params["state"] == "absent":
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.remove_certificate_group_certificates(
+ certificate_names=[certificate],
+ certificate_group_names=[module.params["name"]],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete certifcate {0} from group {1}.".format(
+ certificate, module.params["name"]
+ )
+ )
+ else:
+ if module.params["state"] == "present":
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.add_certificate_group_certificates(
+ certificate_names=[certificate],
+ certificate_group_names=[module.params["name"]],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to add certifcate {0} to group {1}".format(
+ certificate, module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str"),
+ certificates=dict(type="list", elements="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ try:
+ certgrp = blade.certificate_groups.list_certificate_groups(
+ names=[module.params["name"]]
+ ).items[0]
+ except Exception:
+ certgrp = None
+
+ if certgrp and state == "present" and module.params["certificates"]:
+ update_certgrp(module, blade)
+ elif state == "present" and not certgrp:
+ create_certgrp(module, blade)
+ elif state == "absent" and certgrp:
+ if module.params["certificates"]:
+ update_certgrp(module, blade)
+ else:
+ delete_certgrp(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py
new file mode 100644
index 000000000..b9a2c76f7
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_certs
+version_added: '1.4.0'
+short_description: Manage FlashBlade SSL Certificates
+description:
+- Manage SSL certificates for FlashBlades
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete certificate
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - Name of the certificate
+ type: str
+ contents:
+ description:
+ - SSL certificate text
+ type: str
+ private_key:
+ description:
+ - SSL certificate private key text
+ type: str
+ passphrase:
+ description:
+ - Passphrase for the private_key
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create an SSL certificate
+ purestorage.flashblade.purefb_certs:
+ name: test_cert
+ contents: "{{lookup('file', 'certificate_file_name') }}"
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete an SSL certificate
+ purestorage.flashblade.purefb_certs:
+ name: test_cert
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Update SSL certificate
+ purestorage.flashblade.purefb_certs:
+ name: global
+ contents: "{{ lookup('file', 'certificate_file_name') }}"
+ private_key: "{{ lookup('file', 'certificate_key_file_name') }}"
+ passphrase: 'mypassword'
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import Certificate, CertificatePost
+except ImportError:
+ HAS_PURITYFB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def delete_cert(module, blade):
+ """Delete certificate"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificates.delete_certificates(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete certificate {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_cert(module, blade):
+ """Create certificate"""
+ changed = True
+ if not module.check_mode:
+ try:
+ body = CertificatePost(
+ certificate=module.params["contents"], certificate_type="external"
+ )
+ blade.certificates.create_certificates(
+ names=[module.params["name"]], certificate=body
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create certificate {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_cert(module, blade, cert):
+ """Update certificate"""
+ changed = False
+ if cert.certificate_type == "external":
+ module.fail_json(msg="External certificates cannot be modified")
+
+ if not module.params["private_key"]:
+ module.fail_json(msg="private_key must be specified for the global certificate")
+
+ if cert.certificate.strip() != module.params["contents"].strip():
+ changed = True
+ if not module.check_mode:
+ try:
+ body = Certificate(
+ certificate=module.params["contents"],
+ private_key=module.params["private_key"],
+ )
+ if module.params["passphrase"]:
+ body.passphrase = module.params["passphrase"]
+ blade.certificates.update_certificates(
+ names=[module.params["name"]], certificate=body
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create certificate {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str"),
+ contents=dict(type="str", no_log=True),
+ private_key=dict(type="str", no_log=True),
+ passphrase=dict(type="str", no_log=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ try:
+ cert = blade.certificates.list_certificates(names=[module.params["name"]])
+ except Exception:
+ cert = None
+
+ if not cert and state == "present":
+ create_cert(module, blade)
+ elif state == "present":
+ update_cert(module, blade, cert.items[0])
+ elif state == "absent" and cert:
+ delete_cert(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
new file mode 100644
index 000000000..508c6a322
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
@@ -0,0 +1,574 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_connect
+version_added: '1.0.0'
+short_description: Manage replication connections between two FlashBlades
+description:
+- Manage replication connections to specified remote FlashBlade system
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete replication connection
+ default: present
+ type: str
+ choices: [ absent, present ]
+ encrypted:
+ description:
+ - Define if replication connection is encrypted
+ type: bool
+ default: false
+ target_url:
+ description:
+ - Management IP address of target FlashBlade system
+ type: str
+ required: true
+ target_api:
+ description:
+ - API token for target FlashBlade system
+ type: str
+ target_repl:
+ description:
+ - Replication IP address of target FlashBlade system
+ - If not set at time of connection creation, will default to
+ all the replication addresses available on the target array
+ at the time of connection creation.
+ type: list
+ elements: str
+ version_added: "1.9.0"
+ default_limit:
+ description:
+ - Default maximum bandwidth threshold for outbound traffic in bytes.
+ - B, K, M, or G units. See examples.
+ - Must be 0 or between 5MB and 28GB
+ - Once exceeded, bandwidth throttling occurs
+ type: str
+ version_added: "1.9.0"
+ window_limit:
+ description:
+ - Maximum bandwidth threshold for outbound traffic during the specified
+ time range in bytes.
+ - B, K, M, or G units. See examples.
+ - Must be 0 or between 5MB and 28GB
+ - Once exceeded, bandwidth throttling occurs
+ type: str
+ version_added: "1.9.0"
+ window_start:
+ description:
+ - The window start time.
+ - The time must be set to the hour.
+ type: str
+ version_added: "1.9.0"
+ window_end:
+ description:
+ - The window end time.
+ - The time must be set to the hour.
+ type: str
+ version_added: "1.9.0"
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create a connection to remote FlashBlade system
+ purestorage.flashblade.purefb_connect:
+ target_url: 10.10.10.20
+ target_api: T-b3275b1c-8958-4190-9052-eb46b0bd09f8
+ fb_url: 10.10.10.2
+ api_token: T-91528421-fe42-47ee-bcb1-47eefb0a9220
+- name: Create a connection to remote FlashBlade system with bandwidth limits
+ purestorage.flashblade.purefb_connect:
+ target_url: 10.10.10.20
+ target_api: T-b3275b1c-8958-4190-9052-eb46b0bd09f8
+ window_limit: 28G
+ window_start: 1AM
+ window_end: 7AM
+ default_limit: 5M
+ fb_url: 10.10.10.2
+ api_token: T-91528421-fe42-47ee-bcb1-47eefb0a9220
+- name: Delete connection to target FlashBlade system
+ purestorage.flashblade.purefb_connect:
+ state: absent
+ target_url: 10.10.10.20
+ target_api: T-b3275b1c-8958-4190-9052-eb46b0bd09f8
+ fb_url: 10.10.10.2
+ api_token: T-91528421-fe42-47ee-bcb1-47eefb0a9220
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import PurityFb, ArrayConnection, ArrayConnectionPost
+except ImportError:
+ HAS_PURITYFB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flashblade
+ from pypureclient.flashblade import ArrayConnection, ArrayConnectionPost
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+FAN_IN_MAXIMUM = 1
+FAN_OUT_MAXIMUM = 3
+MIN_REQUIRED_API_VERSION = "1.9"
+THROTTLE_API_VERSION = "2.3"
+
+
+def _convert_to_millisecs(hour):
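+ # Convert a 12-hour clock value such as "1AM" or "7PM" into milliseconds
+ # since midnight, the unit used for the throttle time window.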
+ if hour[-2:] == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:] == "AM":
+ return int(hour[:-2]) * 3600000
+ elif hour[-2:] == "PM" and hour[:2] == "12":
+ return 43200000
+ return (int(hour[:-2]) + 12) * 3600000
+
+
+def _check_connected(module, blade):
+ connected_blades = blade.array_connections.list_array_connections()
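+ # A connection with no management address was formed from the remote side,
+ # so log in to the target and compare array names instead.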
+ for target in range(0, len(connected_blades.items)):
+ if connected_blades.items[target].management_address is None:
+ try:
+ remote_system = PurityFb(module.params["target_url"])
+ remote_system.login(module.params["target_api"])
+ remote_array = remote_system.arrays.list_arrays().items[0].name
+ if connected_blades.items[target].remote.name == remote_array:
+ return connected_blades.items[target]
+ except Exception:
+ module.fail_json(
+ msg="Failed to connect to remote array {0}.".format(
+ module.params["target_url"]
+ )
+ )
+ if connected_blades.items[target].management_address == module.params[
+ "target_url"
+ ] and connected_blades.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_blades.items[target]
+ return None
+
+
+def break_connection(module, blade, target_blade):
+ """Break connection between arrays"""
+ changed = True
+ if not module.check_mode:
+ source_blade = blade.arrays.list_arrays().items[0].name
+ try:
+ if target_blade.management_address is None:
+ module.fail_json(
+ msg="Disconnect can only happen from the array that formed the connection"
+ )
+ blade.array_connections.delete_array_connections(
+ remote_names=[target_blade.remote.name]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to disconnect {0} from {1}.".format(
+ target_blade.remote.name, source_blade
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_connection(module, blade):
+ """Create connection between arrays"""
+ changed = True
+ if not module.check_mode:
+ remote_array = module.params["target_url"]
+ try:
+ remote_system = PurityFb(module.params["target_url"])
+ remote_system.login(module.params["target_api"])
+ remote_array = remote_system.arrays.list_arrays().items[0].name
+ remote_conn_cnt = (
+ remote_system.array_connections.list_array_connections().pagination_info.total_item_count
+ )
+ if remote_conn_cnt == FAN_IN_MAXIMUM:
+ module.fail_json(
+ msg="Remote array {0} already connected to {1} other array. Fan-In not supported".format(
+ remote_array, remote_conn_cnt
+ )
+ )
+ connection_key = (
+ remote_system.array_connections.create_array_connections_connection_keys()
+ .items[0]
+ .connection_key
+ )
+ connection_info = ArrayConnectionPost(
+ management_address=module.params["target_url"],
+ encrypted=module.params["encrypted"],
+ connection_key=connection_key,
+ )
+ blade.array_connections.create_array_connections(
+ array_connection=connection_info
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to connect to remote array {0}.".format(remote_array)
+ )
+ module.exit_json(changed=changed)
+
+
+def create_v2_connection(module, blade):
+ """Create connection between REST 2 capable arrays"""
+ changed = True
+ if blade.get_array_connections().total_item_count == FAN_OUT_MAXIMUM:
+ module.fail_json(
+ msg="FlashBlade fan-out maximum of {0} already reached".format(
+ FAN_OUT_MAXIMUM
+ )
+ )
+ try:
+ remote_system = flashblade.Client(
+ target=module.params["target_url"], api_token=module.params["target_api"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to connect to remote array {0}.".format(
+ module.params["target_url"]
+ )
+ )
+ remote_array = list(remote_system.get_arrays().items)[0].name
+ remote_conn_cnt = remote_system.get_array_connections().total_item_count
+ if remote_conn_cnt == FAN_IN_MAXIMUM:
+ module.fail_json(
+ msg="Remote array {0} already connected to {1} other array. Fan-In not supported".format(
+ remote_array, remote_conn_cnt
+ )
+ )
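+ # A connection key is generated on the remote system and included in the
+ # connection request issued from the local array.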
+ connection_key = list(remote_system.post_array_connections_connection_key().items)[
+ 0
+ ].connection_key
+
+ if module.params["default_limit"] or module.params["window_limit"]:
+ if THROTTLE_API_VERSION in list(blade.get_versions().items):
+ if THROTTLE_API_VERSION not in list(remote_system.get_versions().items):
+ module.fail_json(msg="Remote array does not support throttling")
+ if module.params["window_limit"]:
+ if not module.params["window_start"]:
+ module.params["window_start"] = "12AM"
+ if not module.params["window_end"]:
+ module.params["window_end"] = "12AM"
+ window = flashblade.TimeWindow(
+ start=_convert_to_millisecs(module.params["window_start"]),
+ end=_convert_to_millisecs(module.params["window_end"]),
+ )
+ if module.params["window_limit"] and module.params["default_limit"]:
+ throttle = flashblade.Throttle(
+ default_limit=human_to_bytes(module.params["default_limit"]),
+ window_limit=human_to_bytes(module.params["window_limit"]),
+ window=window,
+ )
+ elif module.params["window_limit"] and not module.params["default_limit"]:
+ throttle = flashblade.Throttle(
+ window_limit=human_to_bytes(module.params["window_limit"]),
+ window=window,
+ )
+ else:
+ throttle = flashblade.Throttle(
+ default_limit=human_to_bytes(module.params["default_limit"]),
+ )
+ connection_info = ArrayConnectionPost(
+ management_address=module.params["target_url"],
+ replication_addresses=module.params["target_repl"],
+ encrypted=module.params["encrypted"],
+ connection_key=connection_key,
+ throttle=throttle,
+ )
+ else:
+ connection_info = ArrayConnectionPost(
+ management_address=module.params["target_url"],
+ replication_addresses=module.params["target_repl"],
+ encrypted=module.params["encrypted"],
+ connection_key=connection_key,
+ )
+ else:
+ connection_info = ArrayConnectionPost(
+ management_address=module.params["target_url"],
+ replication_addresses=module.params["target_repl"],
+ encrypted=module.params["encrypted"],
+ connection_key=connection_key,
+ )
+ if not module.check_mode:
+ res = blade.post_array_connections(array_connection=connection_info)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to connect to remote array {0}. Error: {1}".format(
+ remote_array, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_connection(module, blade, target_blade):
+ """Update array connection - only encryption currently"""
+ changed = False
+ if target_blade.management_address is None:
+ module.fail_json(
+ msg="Update can only happen from the array that formed the connection"
+ )
+ if module.params["encrypted"] != target_blade.encrypted:
+ if (
+ module.params["encrypted"]
+ and blade.file_system_replica_links.list_file_system_replica_links().pagination_info.total_item_count
+ != 0
+ ):
+ module.fail_json(
+ msg="Cannot turn array connection encryption on if file system replica links exist"
+ )
+ new_attr = ArrayConnection(encrypted=module.params["encrypted"])
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.array_connections.update_array_connections(
+ remote_names=[target_blade.remote.name],
+ array_connection=new_attr,
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change encryption setting for array connection."
+ )
+ module.exit_json(changed=changed)
+
+
+def update_v2_connection(module, blade):
+ """Update REST 2 based array connection"""
+ changed = False
+ versions = list(blade.get_versions().items)
+ remote_blade = flashblade.Client(
+ target=module.params["target_url"], api_token=module.params["target_api"]
+ )
+ remote_name = list(remote_blade.get_arrays().items)[0].name
+ remote_connection = list(
+ blade.get_array_connections(filter="remote.name='" + remote_name + "'").items
+ )[0]
+ if remote_connection.management_address is None:
+ module.fail_json(
+ msg="Update can only happen from the array that formed the connection"
+ )
+ if module.params["encrypted"] != remote_connection.encrypted:
+ if (
+ module.params["encrypted"]
+ and blade.get_file_system_replica_links().total_item_count != 0
+ ):
+ module.fail_json(
+ msg="Cannot turn array connection encryption on if file system replica links exist"
+ )
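+ # Build comparable views of the current and requested connection settings
+ # so a PATCH is only sent when something actually changes.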
+ current_connection = {
+ "encrypted": remote_connection.encrypted,
+ "replication_addresses": sorted(remote_connection.replication_addresses),
+ "throttle": [],
+ }
+ if (
+ not remote_connection.throttle.default_limit
+ and not remote_connection.throttle.window_limit
+ ):
+ if (
+ module.params["default_limit"] or module.params["window_limit"]
+ ) and blade.get_bucket_replica_links().total_item_count != 0:
+ module.fail_json(
+ msg="Cannot set throttle when bucket replica links already exist"
+ )
+ if THROTTLE_API_VERSION in versions:
+ current_connection["throttle"] = {
+ "default_limit": remote_connection.throttle.default_limit,
+ "window_limit": remote_connection.throttle.window_limit,
+ "start": remote_connection.throttle.window.start,
+ "end": remote_connection.throttle.window.end,
+ }
+ if module.params["encrypted"]:
+ encryption = module.params["encrypted"]
+ else:
+ encryption = remote_connection.encrypted
+ if module.params["target_repl"]:
+ target_repl = sorted(module.params["target_repl"])
+ else:
+ target_repl = remote_connection.replication_addresses
+ if module.params["default_limit"]:
+ default_limit = human_to_bytes(module.params["default_limit"])
+ if default_limit == 0:
+ default_limit = None
+ else:
+ default_limit = remote_connection.throttle.default_limit
+ if module.params["window_limit"]:
+ window_limit = human_to_bytes(module.params["window_limit"])
+ else:
+ window_limit = remote_connection.throttle.window_limit
+ if module.params["window_start"]:
+ start = _convert_to_millisecs(module.params["window_start"])
+ else:
+ start = remote_connection.throttle.window.start
+ if module.params["window_end"]:
+ end = _convert_to_millisecs(module.params["window_end"])
+ else:
+ end = remote_connection.throttle.window.end
+
+ new_connection = {
+ "encrypted": encryption,
+ "replication_addresses": target_repl,
+ "throttle": [],
+ }
+ if THROTTLE_API_VERSION in versions:
+ new_connection["throttle"] = {
+ "default_limit": default_limit,
+ "window_limit": window_limit,
+ "start": start,
+ "end": end,
+ }
+ if new_connection != current_connection:
+ changed = True
+ if not module.check_mode:
+ if THROTTLE_API_VERSION in versions:
+ window = flashblade.TimeWindow(
+ start=new_connection["throttle"]["start"],
+ end=new_connection["throttle"]["end"],
+ )
+ throttle = flashblade.Throttle(
+ default_limit=new_connection["throttle"]["default_limit"],
+ window_limit=new_connection["throttle"]["window_limit"],
+ window=window,
+ )
+ connection_info = ArrayConnectionPost(
+ replication_addresses=new_connection["replication_addresses"],
+ encrypted=new_connection["encrypted"],
+ throttle=throttle,
+ )
+ else:
+ connection_info = ArrayConnection(
+ replication_addresses=new_connection["replication_addresses"],
+ encrypted=new_connection["encrypted"],
+ )
+ res = blade.patch_array_connections(
+ remote_names=[remote_name], array_connection=connection_info
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update connection to remote array {0}. Error: {1}".format(
+ remote_name, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ encrypted=dict(type="bool", default=False),
+ target_url=dict(type="str", required=True),
+ target_api=dict(type="str", no_log=True),
+ target_repl=dict(type="list", elements="str"),
+ default_limit=dict(type="str"),
+ window_limit=dict(type="str"),
+ window_start=dict(type="str"),
+ window_end=dict(type="str"),
+ )
+ )
+
+ required_if = [("state", "present", ["target_api"])]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITYFB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+ if "2.0" in versions:
+ bladev2 = get_system(module)
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ v2_connection = True
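+ # Throttle limits must be 0 or between 5MB (5242880 bytes) and
+ # 28GB (30064771072 bytes).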
+ if module.params["default_limit"]:
+ if (
+ human_to_bytes(module.params["default_limit"]) != 0
+ and not 5242880
+ <= human_to_bytes(module.params["default_limit"])
+ <= 30064771072
+ ):
+ module.fail_json(msg="Default Bandwidth must be between 5MB and 28GB")
+ if module.params["window_limit"]:
+ if (
+ human_to_bytes(module.params["window_limit"]) != 0
+ and not 5242880
+ <= human_to_bytes(module.params["window_limit"])
+ <= 30064771072
+ ):
+ module.fail_json(msg="Window Bandwidth must be between 5MB and 28GB")
+ else:
+ if module.params["target_repl"]:
+ module.warn(
+ "Target Replication addresses can only be set for systems"
+ " that support REST 2.0 and higher"
+ )
+ v2_connection = False
+
+ target_blade = _check_connected(module, blade)
+ if state == "present" and not target_blade:
+ # REST 1 does not support fan-out for replication
+ # REST 2 has a limit which we can check
+ if v2_connection:
+ create_v2_connection(module, bladev2)
+ else:
+ if (
+ blade.array_connections.list_array_connections().pagination_info.total_item_count
+ == 1
+ ):
+ module.fail_json(
+ msg="Source FlashBlade already connected to another array. Fan-Out not supported"
+ )
+ create_connection(module, blade)
+ elif state == "present" and target_blade:
+ if v2_connection:
+ update_v2_connection(module, bladev2)
+ else:
+ update_connection(module, blade, target_blade)
+ elif state == "absent" and target_blade:
+ break_connection(module, blade, target_blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py
new file mode 100644
index 000000000..b5abd9289
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_dns
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade DNS settings
+description:
+- Set or erase DNS configuration for Pure Storage FlashBlades.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete DNS servers configuration
+ type: str
+ default: present
+ choices: [ absent, present ]
+ domain:
+ description:
+ - Domain suffix to be appended when performing DNS lookups.
+ type: str
+ nameservers:
+ description:
+ - List of up to 3 unique DNS server IP addresses. These can be
+ IPv4 or IPv6. No validation of the addresses is performed.
+ type: list
+ elements: str
+ search:
+ description:
+ - Ordered list of domain names to search
+ - Deprecated option. Will be removed in Collection v1.6.0. There is no replacement for this.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing DNS settings
+ purestorage.flashblade.purefb_dns:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Set DNS settings
+ purestorage.flashblade.purefb_dns:
+ domain: purestorage.com
+ nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+ search:
+ - purestorage.com
+ - acme.com
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Dns
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def remove(duplicate):
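+ """Return the list with duplicate entries removed, preserving order"""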
+ final_list = []
+ for num in duplicate:
+ if num not in final_list:
+ final_list.append(num)
+ return final_list
+
+
+def delete_dns(module, blade):
+ """Delete DNS Settings"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ current_dns = blade.dns.list_dns()
+ if current_dns.items[0].domain or current_dns.items[0].nameservers != []:
+ try:
+ blade.dns.update_dns(dns_settings=Dns(domain="", nameservers=[]))
+ changed = True
+ except Exception:
+ module.fail_json(msg="Deletion of DNS settings failed")
+ module.exit_json(changed=changed)
+
+
+def update_dns(module, blade):
+ """Set DNS Settings"""
+ changed = False
+ current_dns = blade.dns.list_dns()
+ if module.params["domain"]:
+ if current_dns.items[0].domain != module.params["domain"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.dns.update_dns(
+ dns_settings=Dns(domain=module.params["domain"])
+ )
+ except Exception:
+ module.fail_json(msg="Update of DNS domain failed")
+ if module.params["nameservers"]:
+ if sorted(module.params["nameservers"]) != sorted(
+ current_dns.items[0].nameservers
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.dns.update_dns(
+ dns_settings=Dns(nameservers=module.params["nameservers"])
+ )
+ except Exception:
+ module.fail_json(msg="Update of DNS nameservers failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ nameservers=dict(type="list", elements="str"),
+ search=dict(type="list", elements="str"),
+ domain=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
+
+ if module.params["state"] == "absent":
+ delete_dns(module, blade)
+ elif module.params["state"] == "present":
+ if module.params["nameservers"]:
+ module.params["nameservers"] = remove(module.params["nameservers"])
+ if module.params["search"]:
+ module.warn(
+ "'search' parameter is deprecated and will be removed in Collection v1.6.0"
+ )
+ update_dns(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
new file mode 100644
index 000000000..6433d3d9d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
@@ -0,0 +1,470 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_ds
+version_added: '1.0.0'
+short_description: Configure FlashBlade Directory Service
+description:
+- Create, modify or erase directory services configurations. There is no
+ facility to manage SSL certificates at this time. Use the FlashBlade GUI
+ for this additional configuration work.
+- If updating a directory service and I(bind_password) is provided this
+ will always cause a change, even if the password given isn't different from
+ the current one. This makes this part of the module non-idempotent.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete directory service configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ dstype:
+ description:
+ - The type of directory service to work on
+ choices: [ management, nfs, smb ]
+ type: str
+ required: true
+ enable:
+ description:
+ - Whether to enable or disable directory service support.
+ default: false
+ type: bool
+ uri:
+ description:
+ - A list of up to 30 URIs of the directory servers. Each URI must include
+ the scheme ldap:// or ldaps:// (for LDAP over SSL), a hostname, and a
+ domain name or IP address. For example, ldap://ad.company.com configures
+ the directory service with the hostname "ad" in the domain "company.com"
+ while specifying the unencrypted LDAP protocol.
+ type: list
+ elements: str
+ base_dn:
+ description:
+ - Sets the base of the Distinguished Name (DN) of the directory service
+ groups. The base should consist of only Domain Components (DCs). The
+ base_dn will populate with a default value when a URI is entered by
+ parsing domain components from the URI. The base DN should specify DC=
+ for each domain component and multiple DCs should be separated by commas.
+ type: str
+ bind_password:
+ description:
+ - Sets the password of the bind_user user name account.
+ type: str
+ bind_user:
+ description:
+ - Sets the user name that can be used to bind to and query the directory.
+ - For Active Directory, enter the username - often referred to as
+ sAMAccountName or User Logon Name - of the account that is used to
+ perform directory lookups.
+ - For OpenLDAP, enter the full DN of the user.
+ type: str
+ nis_servers:
+ description:
+ - A list of up to 30 IP addresses or FQDNs for NIS servers.
+ - This cannot be used in conjunction with LDAP configurations.
+ type: list
+ elements: str
+ nis_domain:
+ description:
+ - The NIS domain to search
+ - This cannot be used in conjunction with LDAP configurations.
+ type: str
+ join_ou:
+ description:
+ - The optional organizational unit (OU) where the machine account
+ for the directory service will be created.
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing management directory service
+ purestorage.flashblade.purefb_ds:
+ dstype: management
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create NFS directory service (disabled)
+ purestorage.flashblade.purefb_ds:
+ dstype: nfs
+ uri: "ldaps://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Enable existing SMB directory service
+ purestorage.flashblade.purefb_ds:
+ dstype: smb
+ enable: true
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable existing management directory service
+ purestorage.flashblade.purefb_ds:
+ dstype: management
+ enable: false
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create NFS directory service (enabled)
+ purestorage.flashblade.purefb_ds:
+ dstype: nfs
+ enable: true
+ uri: "ldaps://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+NIS_API_VERSION = "1.7"
+HAS_PURITY_FB = True
+try:
+ from purity_fb import DirectoryService
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def enable_ds(module, blade):
+ """Enable Directory Service"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.directory_services.update_directory_services(
+ names=[module.params["dstype"]],
+ directory_service=DirectoryService(enabled=True),
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Enable {0} Directory Service failed".format(
+ module.params["dstype"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def disable_ds(module, blade):
+ """Disable Directory Service"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.directory_services.update_directory_services(
+ names=[module.params["dstype"]],
+ directory_service=DirectoryService(enabled=False),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Disable {0} Directory Service failed".format(
+ module.params["dstype"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_ds(module, blade):
+ """Delete Directory Service"""
+ changed = True
+ if not module.check_mode:
+ dirserv = blade.directory_services.list_directory_services(
+ names=[module.params["dstype"]]
+ )
+ try:
+ if module.params["dstype"] == "management":
+ if dirserv.items[0].uris:
+ dir_service = DirectoryService(
+ uris=[""],
+ base_dn="",
+ bind_user="",
+ bind_password="",
+ enabled=False,
+ )
+ else:
+ changed = False
+ elif module.params["dstype"] == "smb":
+ if dirserv.items[0].uris:
+ smb_attrs = {"join_ou": ""}
+ dir_service = DirectoryService(
+ uris=[""],
+ base_dn="",
+ bind_user="",
+ bind_password="",
+ smb=smb_attrs,
+ enabled=False,
+ )
+ else:
+ changed = False
+ elif module.params["dstype"] == "nfs":
+ if dirserv.items[0].uris:
+ dir_service = DirectoryService(
+ uris=[""],
+ base_dn="",
+ bind_user="",
+ bind_password="",
+ enabled=False,
+ )
+ elif dirserv.items[0].nfs.nis_domains:
+ nfs_attrs = {"nis_domains": [], "nis_servers": []}
+ dir_service = DirectoryService(nfs=nfs_attrs, enabled=False)
+ else:
+ changed = False
+ if changed:
+ blade.directory_services.update_directory_services(
+ names=[module.params["dstype"]], directory_service=dir_service
+ )
+ except Exception:
+ module.fail_json(
+ msg="Delete {0} Directory Service failed".format(
+ module.params["dstype"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_ds(module, blade):
+ """Update Directory Service"""
+ changed = False
+ mod_ds = False
+ attr = {}
+ try:
+ ds_now = blade.directory_services.list_directory_services(
+ names=[module.params["dstype"]]
+ ).items[0]
+ if module.params["dstype"] == "nfs" and module.params["nis_servers"]:
+ if sorted(module.params["nis_servers"]) != sorted(
+ ds_now.nfs.nis_servers
+ ) or module.params["nis_domain"] != "".join(
+ map(str, ds_now.nfs.nis_domains)
+ ):
+ attr["nfs"] = {
+ "nis_domains": [module.params["nis_domain"]],
+ "nis_servers": module.params["nis_servers"][0:30],
+ }
+ mod_ds = True
+ else:
+ if module.params["uri"]:
+ if sorted(module.params["uri"][0:30]) != sorted(ds_now.uris):
+ attr["uris"] = module.params["uri"][0:30]
+ mod_ds = True
+ if module.params["base_dn"]:
+ if module.params["base_dn"] != ds_now.base_dn:
+ attr["base_dn"] = module.params["base_dn"]
+ mod_ds = True
+ if module.params["bind_user"]:
+ if module.params["bind_user"] != ds_now.bind_user:
+ attr["bind_user"] = module.params["bind_user"]
+ mod_ds = True
+ if module.params["enable"]:
+ if module.params["enable"] != ds_now.enabled:
+ attr["enabled"] = module.params["enable"]
+ mod_ds = True
+ if module.params["bind_password"]:
+ attr["bind_password"] = module.params["bind_password"]
+ mod_ds = True
+ if module.params["dstype"] == "smb":
+ if module.params["join_ou"] != ds_now.smb.join_ou:
+ attr["smb"] = {"join_ou": module.params["join_ou"]}
+ mod_ds = True
+ if mod_ds:
+ changed = True
+ if not module.check_mode:
+ n_attr = DirectoryService(**attr)
+ try:
+ blade.directory_services.update_directory_services(
+ names=[module.params["dstype"]], directory_service=n_attr
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change {0} directory service.".format(
+ module.params["dstype"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to get current {0} directory service.".format(
+ module.params["dstype"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_ds(module, blade):
+ """Create Directory Service"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params["dstype"] == "management":
+ if module.params["uri"]:
+ dir_service = DirectoryService(
+ uris=module.params["uri"][0:30],
+ base_dn=module.params["base_dn"],
+ bind_user=module.params["bind_user"],
+ bind_password=module.params["bind_password"],
+ enabled=module.params["enable"],
+ )
+ else:
+ module.fail_json(
+ msg="Incorrect parameters provided for dstype {0}".format(
+ module.params["dstype"]
+ )
+ )
+ elif module.params["dstype"] == "smb":
+ if module.params["uri"]:
+ smb_attrs = {"join_ou": module.params["join_ou"]}
+ dir_service = DirectoryService(
+ uris=module.params["uri"][0:30],
+ base_dn=module.params["base_dn"],
+ bind_user=module.params["bind_user"],
+ bind_password=module.params["bind_password"],
+ smb=smb_attrs,
+ enabled=module.params["enable"],
+ )
+ else:
+ module.fail_json(
+ msg="Incorrect parameters provided for dstype {0}".format(
+ module.params["dstype"]
+ )
+ )
+ elif module.params["dstype"] == "nfs":
+ if module.params["nis_domain"]:
+ nfs_attrs = {
+ "nis_domains": [module.params["nis_domain"]],
+ "nis_servers": module.params["nis_servers"][0:30],
+ }
+ dir_service = DirectoryService(
+ nfs=nfs_attrs, enabled=module.params["enable"]
+ )
+ else:
+ dir_service = DirectoryService(
+ uris=module.params["uri"][0:30],
+ base_dn=module.params["base_dn"],
+ bind_user=module.params["bind_user"],
+ bind_password=module.params["bind_password"],
+ enabled=module.params["enable"],
+ )
+ blade.directory_services.update_directory_services(
+ names=[module.params["dstype"]], directory_service=dir_service
+ )
+ except Exception:
+ module.fail_json(
+ msg="Create {0} Directory Service failed".format(
+ module.params["dstype"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ uri=dict(type="list", elements="str"),
+ dstype=dict(
+ required=True, type="str", choices=["management", "nfs", "smb"]
+ ),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ enable=dict(type="bool", default=False),
+ bind_password=dict(type="str", no_log=True),
+ bind_user=dict(type="str"),
+ base_dn=dict(type="str"),
+ join_ou=dict(type="str"),
+ nis_domain=dict(type="str"),
+ nis_servers=dict(type="list", elements="str"),
+ )
+ )
+
+ required_together = [
+ ["uri", "bind_password", "bind_user", "base_dn"],
+ ["nis_servers", "nis_domain"],
+ ]
+ mutually_exclusive = [["uri", "nis_domain"]]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ )
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ ds_configured = False
+ dirserv = blade.directory_services.list_directory_services(
+ names=[module.params["dstype"]]
+ )
+ ds_enabled = dirserv.items[0].enabled
+ if dirserv.items[0].base_dn is not None:
+ ds_configured = True
+ if (module.params["nis_domain"] or module.params["join_ou"]) and (
+ NIS_API_VERSION not in api_version
+ ):
+ module.fail_json(
+ msg="NFS or SMB directory service attributes not supported by FlashBlade Purity version"
+ )
+ ldap_uri = False
+ set_ldap = False
+ for uri in range(0, len(dirserv.items[0].uris)):
+ if "ldap" in dirserv.items[0].uris[uri].lower():
+ ldap_uri = True
+ if module.params["uri"]:
+ for uri in range(0, len(module.params["uri"])):
+ if "ldap" in module.params["uri"][uri].lower():
+ set_ldap = True
+ if (not module.params["uri"] and ldap_uri) or (module.params["uri"] and set_ldap):
+ if module.params["nis_servers"] or module.params["nis_domain"]:
+ module.fail_json(
+ msg="NIS configuration not supported in an LDAP environment"
+ )
+ if state == "absent":
+ delete_ds(module, blade)
+ elif ds_configured and module.params["enable"] and ds_enabled:
+ update_ds(module, blade)
+ elif ds_configured and not module.params["enable"] and ds_enabled:
+ disable_ds(module, blade)
+ elif ds_configured and module.params["enable"] and not ds_enabled:
+ enable_ds(module, blade)
+ # Now that the directory service is enabled, make sure there aren't any further updates to apply
+ update_ds(module, blade)
+ elif not ds_configured and state == "present":
+ create_ds(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py
new file mode 100644
index 000000000..61934cc6e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_dsrole
+version_added: '1.0.0'
+short_description: Configure FlashBlade Management Directory Service Roles
+description:
+- Set or erase directory services role configurations.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete directory service role
+ default: present
+ type: str
+ choices: [ absent, present ]
+ role:
+ description:
+ - The directory service role to work on
+ choices: [ array_admin, ops_admin, readonly, storage_admin ]
+ type: str
+ required: true
+ group_base:
+ description:
+ - Specifies where the configured group is located in the directory
+ tree. This field consists of Organizational Units (OUs) that combine
+ with the base DN attribute and the configured group CNs to complete
+ the full Distinguished Name of the groups. The group base should
+ specify OU= for each OU and multiple OUs should be separated by commas.
+ The order of OUs is important and should get larger in scope from left
+ to right.
+ - Each OU should not exceed 64 characters in length.
+ type: str
+ group:
+ description:
+ - Sets the Common Name (CN) of the configured directory service group
+ containing users for the FlashBlade. This name should be just the
+ Common Name of the group without the CN= specifier.
+ - Common Names should not exceed 64 characters in length.
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing array_admin directory service role
+ purestorage.flashblade.purefb_dsrole:
+ role: array_admin
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create array_admin directory service role
+ purestorage.flashblade.purefb_dsrole:
+ role: array_admin
+ group_base: "OU=PureGroups,OU=SANManagers"
+ group: pureadmins
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update ops_admin directory service role
+ purestorage.flashblade.purefb_dsrole:
+ role: ops_admin
+ group_base: "OU=PureGroups"
+ group: opsgroup
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import DirectoryServiceRole
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def update_role(module, blade):
+ """Update Directory Service Role"""
+ changed = False
+ role = blade.directory_services.list_directory_services_roles(
+ names=[module.params["role"]]
+ )
+ if (
+ role.items[0].group_base != module.params["group_base"]
+ or role.items[0].group != module.params["group"]
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ role = DirectoryServiceRole(
+ group_base=module.params["group_base"], group=module.params["group"]
+ )
+ blade.directory_services.update_directory_services_roles(
+ names=[module.params["role"]], directory_service_role=role
+ )
+ except Exception:
+ module.fail_json(
+ msg="Update Directory Service Role {0} failed".format(
+ module.params["role"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_role(module, blade):
+ """Delete Directory Service Role"""
+ changed = True
+ if not module.check_mode:
+ try:
+ role = DirectoryServiceRole(group_base="", group="")
+ blade.directory_services.update_directory_services_roles(
+ names=[module.params["role"]], directory_service_role=role
+ )
+ except Exception:
+ module.fail_json(
+ msg="Delete Directory Service Role {0} failed".format(
+ module.params["role"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_role(module, blade):
+ """Create Directory Service Role"""
+ changed = True
+ if not module.check_mode:
+ try:
+ role = DirectoryServiceRole(
+ group_base=module.params["group_base"], group=module.params["group"]
+ )
+ blade.directory_services.update_directory_services_roles(
+ names=[module.params["role"]], directory_service_role=role
+ )
+ except Exception:
+ module.fail_json(
+ msg="Create Directory Service Role {0} failed".format(
+ module.params["role"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ role=dict(
+ required=True,
+ type="str",
+ choices=["array_admin", "ops_admin", "readonly", "storage_admin"],
+ ),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ group_base=dict(type="str"),
+ group=dict(type="str"),
+ )
+ )
+
+ required_together = [["group", "group_base"]]
+
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ role_configured = False
+ role = blade.directory_services.list_directory_services_roles(
+ names=[module.params["role"]]
+ )
+ if role.items[0].group is not None:
+ role_configured = True
+
+ if state == "absent" and role_configured:
+ delete_role(module, blade)
+ elif role_configured and state == "present":
+ update_role(module, blade)
+ elif not role_configured and state == "present":
+ create_role(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_eula.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_eula.py
new file mode 100644
index 000000000..83b5e656a
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_eula.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_eula
+version_added: '1.6.0'
+short_description: Sign Pure Storage FlashBlade EULA
+description:
+- Sign the FlashBlade EULA for Day 0 config, or change signatory.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ company:
+ description:
+ - Full legal name of the entity.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+ name:
+ description:
+ - Full legal name of the individual at the company who has the authority to accept the terms of the agreement.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+ title:
+ description:
+ - Individual's job title at the company.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Sign EULA for FlashBlade
+ purestorage.flashblade.purefb_eula:
+ company: "ACME Storage, Inc."
+ name: "Fred Bloggs"
+ title: "Storage Manager"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
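+
+# The task below is an illustrative sketch rather than one of the original
+# examples. Per the module description the same call can also be used to
+# change the signatory; the signatory details shown here are assumptions.
+- name: Change the EULA signatory for FlashBlade
+ purestorage.flashblade.purefb_eula:
+ company: "ACME Storage, Inc."
+ name: "Jane Doe"
+ title: "VP of Infrastructure"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592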
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Eula, EulaSignature
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+EULA_API_VERSION = "2.0"
+
+
+def set_eula(module, blade):
+ """Sign EULA"""
+ changed = False
+ if not module.check_mode:
+ current_eula = list(blade.get_arrays_eula().items)[0].signature
+ if not current_eula.accepted:
+ if (
+ current_eula.company != module.params["company"]
+ or current_eula.title != module.params["title"]
+ or current_eula.name != module.params["name"]
+ ):
+ signature = EulaSignature(
+ company=module.params["company"],
+ title=module.params["title"],
+ name=module.params["name"],
+ )
+ eula_body = Eula(signature=signature)
+ if not module.check_mode:
+ changed = True
+ rc = blade.patch_arrays_eula(eula=eula_body)
+ if rc.status_code != 200:
+ module.fail_json(msg="Signing EULA failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ company=dict(type="str", required=True),
+ name=dict(type="str", required=True),
+ title=dict(type="str", required=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if EULA_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+ blade = get_system(module)
+ set_eula(module, blade)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
new file mode 100644
index 000000000..a07180793
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
@@ -0,0 +1,944 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_fs
+version_added: "1.0.0"
+short_description: Manage filesystems on Pure Storage FlashBlade
+description:
+ - This module manages filesystems on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Filesystem Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Create, delete or modify a filesystem.
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ eradicate:
+ description:
+ - Define whether to eradicate the filesystem on delete or leave in trash.
+ required: false
+ type: bool
+ default: false
+ size:
+ description:
+ - Volume size in M, G, T or P units. See examples.
+ - If size is not set at filesystem creation time the filesystem size becomes unlimited.
+ type: str
+ required: false
+ nfsv3:
+ description:
+ - Define whether the NFSv3 protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: true
+ nfsv4:
+ description:
+ - Define whether the NFSv4.1 protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: true
+ nfs_rules:
+ description:
+ - Define the NFS rules in operation.
+ - If not set at filesystem creation time it defaults to I(*(rw,no_root_squash))
+ - Supported binary options are ro/rw, secure/insecure, fileid_32bit/no_fileid_32bit,
+ root_squash/no_root_squash, all_squash/no_all_squash and atime/noatime
+ - Supported non-binary options are anonuid=#, anongid=#, sec=(sys|krb5)
+ - Superseded by I(export_policy) if provided
+ required: false
+ type: str
+ smb:
+ description:
+ - Define whether the SMB protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ smb_aclmode:
+ description:
+ - Specify the ACL mode for the SMB protocol.
+ - Deprecated from Purity//FB 3.1.1. Use I(access_control) instead.
+ required: false
+ type: str
+ default: shared
+ choices: [ "shared", "native" ]
+ http:
+ description:
+ - Define whether the HTTP/HTTPS protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ snapshot:
+ description:
+ - Define whether a snapshot directory is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ writable:
+ description:
+ - Define if a filesystem is writable.
+ required: false
+ type: bool
+ promote:
+ description:
+ - Promote/demote a filesystem.
+ - Can only demote the file-system if it is in a replica-link relationship.
+ required: false
+ type: bool
+ fastremove:
+ description:
+ - Define whether the fast remove directory is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ hard_limit:
+ description:
+ - Define whether the capacity for a filesystem is a hard limit.
+ - CAUTION This will cause the filesystem to go Read-Only if the
+ capacity has already exceeded the logical size of the filesystem.
+ required: false
+ type: bool
+ default: false
+ user_quota:
+ description:
+ - Default quota in M, G, T or P units for a user under this file system.
+ required: false
+ type: str
+ group_quota:
+ description:
+ - Default quota in M, G, T or P units for a group under this file system.
+ required: false
+ type: str
+ policy:
+ description:
+ - Filesystem policy to assign to or remove from a filesystem.
+ required: false
+ type: str
+ policy_state:
+ description:
+ - Add or delete a policy from a filesystem
+ required: false
+ default: present
+ type: str
+ choices: [ "absent", "present" ]
+ delete_link:
+ description:
+ - Define if the filesystem can be deleted even if it has a replica link
+ required: false
+ default: false
+ type: bool
+ discard_snaps:
+ description:
+ - Allow a filesystem to be demoted.
+ required: false
+ default: false
+ type: bool
+ access_control:
+ description:
+ - The access control style that is utilized for client actions such
+ as setting file and directory ACLs.
+ - Only available from Purity//FB 3.1.1
+ type: str
+ default: shared
+ choices: [ 'nfs', 'smb', 'shared', 'independent', 'mode-bits' ]
+ safeguard_acls:
+ description:
+ - Safeguards ACLs on a filesystem.
+ - Performs different roles depending on the filesystem protocol enabled.
+ - See Purity//FB documentation for detailed description.
+ - Only available from Purity//FB 3.1.1
+ type: bool
+ default: true
+ export_policy:
+ description:
+ - Name of NFS export policy to assign to filesystem
+ - Overrides I(nfs_rules)
+ - Only valid for Purity//FB 3.3.0 or higher
+ type: str
+ version_added: "1.9.0"
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new filesystem named foo
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ size: 1T
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete filesystem named foo
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Recover filesystem named foo
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Eradicate filesystem named foo
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ state: absent
+ eradicate: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Promote filesystem named foo ready for failover
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ promote: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Demote filesystem named foo after failover
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ promote: false
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Modify attributes of an existing filesystem named foo
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ size: 2T
+ nfsv3 : false
+ nfsv4 : true
+ user_quota: 10K
+ group_quota: 25M
+ nfs_rules: '10.21.200.0/24(ro)'
+ snapshot: true
+ fastremove: true
+ hard_limit: true
+ smb: true
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
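+
+# Illustrative sketch, not one of the original examples: assign an existing
+# NFS export policy to the filesystem using the export_policy option described
+# above. The policy name "export-pol" is an assumption for illustration only
+# and the option requires Purity//FB 3.3.0 or higher.
+- name: Assign NFS export policy to filesystem foo
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ export_policy: export-pol
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""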
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import (
+ FileSystem,
+ ProtocolRule,
+ NfsRule,
+ SmbRule,
+ MultiProtocolRule,
+ rest,
+ )
+except ImportError:
+ HAS_PURITY_FB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient.flashblade import (
+ FileSystemPatch,
+ NfsPatch,
+ Reference,
+ )
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+HAS_JSON = True
+try:
+ import json
+except ImportError:
+ HAS_JSON = False
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+HARD_LIMIT_API_VERSION = "1.4"
+NFSV4_API_VERSION = "1.6"
+REPLICATION_API_VERSION = "1.9"
+MULTIPROTOCOL_API_VERSION = "1.11"
+EXPORT_POLICY_API_VERSION = "2.3"
+
+
+def get_fs(module, blade):
+ """Return Filesystem or None"""
+ fsys = []
+ fsys.append(module.params["name"])
+ try:
+ res = blade.file_systems.list_file_systems(names=fsys)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_fs(module, blade):
+ """Create Filesystem"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if not module.params["nfs_rules"]:
+ module.params["nfs_rules"] = "*(rw,no_root_squash)"
+ if module.params["size"]:
+ size = human_to_bytes(module.params["size"])
+ else:
+ size = 0
+
+ if module.params["user_quota"]:
+ user_quota = human_to_bytes(module.params["user_quota"])
+ else:
+ user_quota = None
+ if module.params["group_quota"]:
+ group_quota = human_to_bytes(module.params["group_quota"])
+ else:
+ group_quota = None
+
+ api_version = blade.api_version.list_versions().versions
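+ # Build the FileSystem object appropriate to the highest API version the
+ # array reports; each nested branch adds the options introduced at that
+ # API level (hard limits, NFSv4.1, SMB ACL mode and default quotas,
+ # multi-protocol access control).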
+ if HARD_LIMIT_API_VERSION in api_version:
+ if NFSV4_API_VERSION in api_version:
+ if REPLICATION_API_VERSION in api_version:
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ if module.params["access_control"] == "nfs" and not (
+ module.params["nfsv3"] or module.params["nfsv4"]
+ ):
+ module.fail_json(
+ msg="Cannot set access_control to nfs when NFS is not enabled."
+ )
+ if (
+ module.params["access_control"]
+ in ["smb", "independent"]
+ and not module.params["smb"]
+ ):
+ module.fail_json(
+ msg="Cannot set access_control to smb or independent when SMB is not enabled."
+ )
+ if module.params["safeguard_acls"] and (
+ module.params["access_control"]
+ in ["mode-bits", "independent"]
+ or module.params["smb"]
+ ):
+ module.fail_json(
+ msg="ACL Safeguarding cannot be enabled with SMB or if access_control is mode-bits or independent."
+ )
+ fs_obj = FileSystem(
+ name=module.params["name"],
+ provisioned=size,
+ fast_remove_directory_enabled=module.params[
+ "fastremove"
+ ],
+ hard_limit_enabled=module.params["hard_limit"],
+ snapshot_directory_enabled=module.params["snapshot"],
+ nfs=NfsRule(
+ v3_enabled=module.params["nfsv3"],
+ v4_1_enabled=module.params["nfsv4"],
+ rules=module.params["nfs_rules"],
+ ),
+ smb=SmbRule(enabled=module.params["smb"]),
+ http=ProtocolRule(enabled=module.params["http"]),
+ multi_protocol=MultiProtocolRule(
+ safeguard_acls=module.params["safeguard_acls"],
+ access_control_style=module.params[
+ "access_control"
+ ],
+ ),
+ default_user_quota=user_quota,
+ default_group_quota=group_quota,
+ )
+ else:
+ fs_obj = FileSystem(
+ name=module.params["name"],
+ provisioned=size,
+ fast_remove_directory_enabled=module.params[
+ "fastremove"
+ ],
+ hard_limit_enabled=module.params["hard_limit"],
+ snapshot_directory_enabled=module.params["snapshot"],
+ nfs=NfsRule(
+ v3_enabled=module.params["nfsv3"],
+ v4_1_enabled=module.params["nfsv4"],
+ rules=module.params["nfs_rules"],
+ ),
+ smb=SmbRule(
+ enabled=module.params["smb"],
+ acl_mode=module.params["smb_aclmode"],
+ ),
+ http=ProtocolRule(enabled=module.params["http"]),
+ default_user_quota=user_quota,
+ default_group_quota=group_quota,
+ )
+ else:
+ fs_obj = FileSystem(
+ name=module.params["name"],
+ provisioned=size,
+ fast_remove_directory_enabled=module.params["fastremove"],
+ hard_limit_enabled=module.params["hard_limit"],
+ snapshot_directory_enabled=module.params["snapshot"],
+ nfs=NfsRule(
+ v3_enabled=module.params["nfsv3"],
+ v4_1_enabled=module.params["nfsv4"],
+ rules=module.params["nfs_rules"],
+ ),
+ smb=ProtocolRule(enabled=module.params["smb"]),
+ http=ProtocolRule(enabled=module.params["http"]),
+ default_user_quota=user_quota,
+ default_group_quota=group_quota,
+ )
+ else:
+ fs_obj = FileSystem(
+ name=module.params["name"],
+ provisioned=size,
+ fast_remove_directory_enabled=module.params["fastremove"],
+ hard_limit_enabled=module.params["hard_limit"],
+ snapshot_directory_enabled=module.params["snapshot"],
+ nfs=NfsRule(
+ enabled=module.params["nfsv3"],
+ rules=module.params["nfs_rules"],
+ ),
+ smb=ProtocolRule(enabled=module.params["smb"]),
+ http=ProtocolRule(enabled=module.params["http"]),
+ )
+ else:
+ fs_obj = FileSystem(
+ name=module.params["name"],
+ provisioned=size,
+ fast_remove_directory_enabled=module.params["fastremove"],
+ snapshot_directory_enabled=module.params["snapshot"],
+ nfs=NfsRule(
+ enabled=module.params["nfsv3"], rules=module.params["nfs_rules"]
+ ),
+ smb=ProtocolRule(enabled=module.params["smb"]),
+ http=ProtocolRule(enabled=module.params["http"]),
+ )
+ blade.file_systems.create_file_systems(fs_obj)
+ except rest.ApiException as err:
+ message = json.loads(err.body)["errors"][0]["message"]
+ module.fail_json(
+ msg="Failed to create filesystem {0}. Error: {1}".format(
+ module.params["name"], message
+ )
+ )
+ if REPLICATION_API_VERSION in api_version:
+ if module.params["policy"]:
+ try:
+ blade.policies.list_policies(names=[module.params["policy"]])
+ except Exception:
+ _delete_fs(module, blade)
+ module.fail_json(
+ msg="Policy {0} doesn't exist.".format(module.params["policy"])
+ )
+ try:
+ blade.policies.create_policy_filesystems(
+ policy_names=[module.params["policy"]],
+ member_names=[module.params["name"]],
+ )
+ except Exception:
+ _delete_fs(module, blade)
+ module.fail_json(
+ msg="Failed to apply policy {0} when creating filesystem {1}.".format(
+ module.params["policy"], module.params["name"]
+ )
+ )
+ if EXPORT_POLICY_API_VERSION in api_version and module.params["export_policy"]:
+ system = get_system(module)
+ export_attr = FileSystemPatch(
+ nfs=NfsPatch(
+ export_policy=Reference(name=module.params["export_policy"])
+ )
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=export_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Filesystem {0} created, but failed to assign export "
+ "policy {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["export_policy"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def modify_fs(module, blade):
+ """Modify Filesystem"""
+ changed = False
+ mod_fs = False
+ attr = {}
+ if module.params["policy"] and module.params["policy_state"] == "present":
+ try:
+ policy = blade.policies.list_policy_filesystems(
+ policy_names=[module.params["policy"]],
+ member_names=[module.params["name"]],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Policy {0} does not exist.".format(module.params["policy"])
+ )
+ if not policy.items:
+ try:
+ blade.policies.create_policy_filesystems(
+ policy_names=[module.params["policy"]],
+ member_names=[module.params["name"]],
+ )
+ mod_fs = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to add filesystem {0} to policy {1}.".format(
+ module.params["name"], module.params["policy"]
+ )
+ )
+ if module.params["policy"] and module.params["policy_state"] == "absent":
+ try:
+ policy = blade.policies.list_policy_filesystems(
+ policy_names=[module.params["policy"]],
+ member_names=[module.params["name"]],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Policy {0} does not exist.".format(module.params["policy"])
+ )
+ if len(policy.items) == 1:
+ try:
+ blade.policies.delete_policy_filesystems(
+ policy_names=[module.params["policy"]],
+ member_names=[module.params["name"]],
+ )
+ mod_fs = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to remove filesystem {0} from policy {1}.".format(
+ module.params["name"], module.params["policy"]
+ )
+ )
+ if module.params["user_quota"]:
+ user_quota = human_to_bytes(module.params["user_quota"])
+ if module.params["group_quota"]:
+ group_quota = human_to_bytes(module.params["group_quota"])
+ fsys = get_fs(module, blade)
+ if fsys.destroyed:
+ attr["destroyed"] = False
+ mod_fs = True
+ if module.params["size"]:
+ if human_to_bytes(module.params["size"]) != fsys.provisioned:
+ attr["provisioned"] = human_to_bytes(module.params["size"])
+ mod_fs = True
+ api_version = blade.api_version.list_versions().versions
+ if NFSV4_API_VERSION in api_version:
+ v3_state = v4_state = None
+ if module.params["nfsv3"] and not fsys.nfs.v3_enabled:
+ v3_state = module.params["nfsv3"]
+ if not module.params["nfsv3"] and fsys.nfs.v3_enabled:
+ v3_state = module.params["nfsv3"]
+ if module.params["nfsv4"] and not fsys.nfs.v4_1_enabled:
+ v4_state = module.params["nfsv4"]
+ if not module.params["nfsv4"] and fsys.nfs.v4_1_enabled:
+ v4_state = module.params["nfsv4"]
+ if v3_state is not None or v4_state is not None:
+ attr["nfs"] = NfsRule(v4_1_enabled=v4_state, v3_enabled=v3_state)
+ mod_fs = True
+ if (
+ module.params["nfsv3"]
+ or module.params["nfsv4"]
+ and fsys.nfs.v3_enabled
+ or fsys.nfs.v4_1_enabled
+ ):
+ if module.params["nfs_rules"] is not None:
+ if fsys.nfs.rules != module.params["nfs_rules"]:
+ attr["nfs"] = NfsRule(rules=module.params["nfs_rules"])
+ mod_fs = True
+ if module.params["user_quota"] and user_quota != fsys.default_user_quota:
+ attr["default_user_quota"] = user_quota
+ mod_fs = True
+ if module.params["group_quota"] and group_quota != fsys.default_group_quota:
+ attr["default_group_quota"] = group_quota
+ mod_fs = True
+ else:
+ if module.params["nfsv3"] and not fsys.nfs.enabled:
+ attr["nfs"] = NfsRule(enabled=module.params["nfsv3"])
+ mod_fs = True
+ if not module.params["nfsv3"] and fsys.nfs.enabled:
+ attr["nfs"] = NfsRule(enabled=module.params["nfsv3"])
+ mod_fs = True
+ if module.params["nfsv3"] and fsys.nfs.enabled:
+ if fsys.nfs.rules != module.params["nfs_rules"]:
+ attr["nfs"] = NfsRule(rules=module.params["nfs_rules"])
+ mod_fs = True
+ if REPLICATION_API_VERSION in api_version:
+ if module.params["smb"] and not fsys.smb.enabled:
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ attr["smb"] = SmbRule(enabled=module.params["smb"])
+ else:
+ attr["smb"] = SmbRule(
+ enabled=module.params["smb"], acl_mode=module.params["smb_aclmode"]
+ )
+ mod_fs = True
+ if not module.params["smb"] and fsys.smb.enabled:
+ attr["smb"] = ProtocolRule(enabled=module.params["smb"])
+ mod_fs = True
+ if (
+ module.params["smb"]
+ and fsys.smb.enabled
+ and MULTIPROTOCOL_API_VERSION not in api_version
+ ):
+ if fsys.smb.acl_mode != module.params["smb_aclmode"]:
+ attr["smb"] = SmbRule(
+ enabled=module.params["smb"], acl_mode=module.params["smb_aclmode"]
+ )
+ mod_fs = True
+ else:
+ if module.params["smb"] and not fsys.smb.enabled:
+ attr["smb"] = ProtocolRule(enabled=module.params["smb"])
+ mod_fs = True
+ if not module.params["smb"] and fsys.smb.enabled:
+ attr["smb"] = ProtocolRule(enabled=module.params["smb"])
+ mod_fs = True
+ if module.params["http"] and not fsys.http.enabled:
+ attr["http"] = ProtocolRule(enabled=module.params["http"])
+ mod_fs = True
+ if not module.params["http"] and fsys.http.enabled:
+ attr["http"] = ProtocolRule(enabled=module.params["http"])
+ mod_fs = True
+ if module.params["snapshot"] and not fsys.snapshot_directory_enabled:
+ attr["snapshot_directory_enabled"] = module.params["snapshot"]
+ mod_fs = True
+ if not module.params["snapshot"] and fsys.snapshot_directory_enabled:
+ attr["snapshot_directory_enabled"] = module.params["snapshot"]
+ mod_fs = True
+ if module.params["fastremove"] and not fsys.fast_remove_directory_enabled:
+ attr["fast_remove_directory_enabled"] = module.params["fastremove"]
+ mod_fs = True
+ if not module.params["fastremove"] and fsys.fast_remove_directory_enabled:
+ attr["fast_remove_directory_enabled"] = module.params["fastremove"]
+ mod_fs = True
+ if HARD_LIMIT_API_VERSION in api_version:
+ if not module.params["hard_limit"] and fsys.hard_limit_enabled:
+ attr["hard_limit_enabled"] = module.params["hard_limit"]
+ mod_fs = True
+ if module.params["hard_limit"] and not fsys.hard_limit_enabled:
+ attr["hard_limit_enabled"] = module.params["hard_limit"]
+ mod_fs = True
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ if module.params["safeguard_acls"] and not fsys.multi_protocol.safeguard_acls:
+ attr["multi_protocol"] = MultiProtocolRule(safeguard_acls=True)
+ mod_fs = True
+ if not module.params["safeguard_acls"] and fsys.multi_protocol.safeguard_acls:
+ attr["multi_protocol"] = MultiProtocolRule(safeguard_acls=False)
+ mod_fs = True
+ if module.params["access_control"] != fsys.multi_protocol.access_control_style:
+ attr["multi_protocol"] = MultiProtocolRule(
+ access_control_style=module.params["access_control"]
+ )
+ mod_fs = True
+ if REPLICATION_API_VERSION in api_version:
+ if module.params["writable"] is not None:
+ if not module.params["writable"] and fsys.writable:
+ attr["writable"] = module.params["writable"]
+ mod_fs = True
+ if (
+ module.params["writable"]
+ and not fsys.writable
+ and fsys.promotion_status == "promoted"
+ ):
+ attr["writable"] = module.params["writable"]
+ mod_fs = True
+ if module.params["promote"] is not None:
+ if module.params["promote"] and fsys.promotion_status != "promoted":
+ attr["requested_promotion_state"] = "promoted"
+ mod_fs = True
+ if not module.params["promote"] and fsys.promotion_status == "promoted":
+ # Demotion only allowed on filesystems in a replica-link
+ try:
+ blade.file_system_replica_links.list_file_system_replica_links(
+ local_file_system_names=[module.params["name"]]
+ ).items[0]
+ except Exception:
+ module.fail_json(
+ msg="Filesystem {0} not demoted. Not in a replica-link".format(
+ module.params["name"]
+ )
+ )
+ attr["requested_promotion_state"] = "demoted"
+ mod_fs = True
+ if mod_fs:
+ changed = True
+ if not module.check_mode:
+ n_attr = FileSystem(**attr)
+ if REPLICATION_API_VERSION in api_version:
+ try:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=n_attr,
+ discard_non_snapshotted_data=module.params["discard_snaps"],
+ )
+ except rest.ApiException as err:
+ message = json.loads(err.body)["errors"][0]["message"]
+ module.fail_json(
+ msg="Failed to update filesystem {0}. Error {1}".format(
+ module.params["name"], message
+ )
+ )
+ else:
+ try:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"], attributes=n_attr
+ )
+ except rest.ApiException as err:
+ message = json.loads(err.body)["errors"][0]["message"]
+ module.fail_json(
+ msg="Failed to update filesystem {0}. Error {1}".format(
+ module.params["name"], message
+ )
+ )
+ if EXPORT_POLICY_API_VERSION in api_version and module.params["export_policy"]:
+ system = get_system(module)
+ change_export = False
+ current_fs = list(
+ system.get_file_systems(filter="name='" + module.params["name"] + "'").items
+ )[0]
+ if (
+ current_fs.nfs.export_policy.name
+ and current_fs.nfs.export_policy.name != module.params["export_policy"]
+ ):
+ change_export = True
+ if not current_fs.nfs.export_policy.name and module.params["export_policy"]:
+ change_export = True
+ if change_export and not module.check_mode:
+ export_attr = FileSystemPatch(
+ nfs=NfsPatch(
+ export_policy=Reference(name=module.params["export_policy"])
+ )
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=export_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to modify export policy {1} for "
+ "filesystem {0}. Error: {2}".format(
+ module.params["name"],
+ module.params["export_policy"],
+ res.errors[0].message,
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def _delete_fs(module, blade):
+ """In module Delete Filesystem"""
+ api_version = blade.api_version.list_versions().versions
+ if NFSV4_API_VERSION in api_version:
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ multi_protocol=MultiProtocolRule(access_control_style="shared"),
+ destroyed=True,
+ ),
+ )
+ else:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ )
+ else:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ )
+
+ blade.file_systems.delete_file_systems(module.params["name"])
+
+
+def delete_fs(module, blade):
+ """Delete Filesystem"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = blade.api_version.list_versions().versions
+ if REPLICATION_API_VERSION in api_version:
+ if NFSV4_API_VERSION in api_version:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ delete_link_on_eradication=module.params["delete_link"],
+ )
+ else:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ delete_link_on_eradication=module.params["delete_link"],
+ )
+ else:
+ if NFSV4_API_VERSION in api_version:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ )
+ else:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ )
+ if module.params["eradicate"]:
+ try:
+ blade.file_systems.delete_file_systems(name=module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete filesystem {0}.".format(
+ module.params["name"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update filesystem {0} prior to deletion.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def eradicate_fs(module, blade):
+ """Eradicate Filesystem"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.file_systems.delete_file_systems(name=module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to eradicate filesystem {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ eradicate=dict(default=False, type="bool"),
+ nfsv3=dict(default=True, type="bool"),
+ nfsv4=dict(default=True, type="bool"),
+ nfs_rules=dict(type="str"),
+ smb=dict(default=False, type="bool"),
+ http=dict(default=False, type="bool"),
+ snapshot=dict(default=False, type="bool"),
+ writable=dict(type="bool"),
+ promote=dict(type="bool"),
+ fastremove=dict(default=False, type="bool"),
+ hard_limit=dict(default=False, type="bool"),
+ user_quota=dict(type="str"),
+ policy=dict(type="str"),
+ group_quota=dict(type="str"),
+ smb_aclmode=dict(
+ type="str", default="shared", choices=["shared", "native"]
+ ),
+ policy_state=dict(default="present", choices=["present", "absent"]),
+ state=dict(default="present", choices=["present", "absent"]),
+ delete_link=dict(default=False, type="bool"),
+ discard_snaps=dict(default=False, type="bool"),
+ safeguard_acls=dict(default=True, type="bool"),
+ access_control=dict(
+ type="str",
+ default="shared",
+ choices=["nfs", "smb", "shared", "independent", "mode-bits"],
+ ),
+ size=dict(type="str"),
+ export_policy=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_JSON:
+ module.fail_json(msg="json sdk is required for this module")
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ fsys = get_fs(module, blade)
+
+ if module.params["eradicate"] and state == "present":
+ module.warn("Eradicate flag ignored without state=absent")
+
+ if state == "present" and not fsys:
+ create_fs(module, blade)
+ elif state == "present" and fsys:
+ modify_fs(module, blade)
+ elif state == "absent" and fsys and not fsys.destroyed:
+ delete_fs(module, blade)
+ elif state == "absent" and fsys and fsys.destroyed and module.params["eradicate"]:
+ eradicate_fs(module, blade)
+ elif state == "absent" and not fsys:
+ module.exit_json(changed=False)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
new file mode 100644
index 000000000..f96903788
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_fs_replica
+version_added: '1.0.0'
+short_description: Manage filesystem replica links between Pure Storage FlashBlades
+description:
+ - This module manages filesystem replica links between Pure Storage FlashBlades.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Local Filesystem Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or modifies a filesystem replica link
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ target_array:
+ description:
+ - Remote array name to create replica on.
+ required: false
+ type: str
+ target_fs:
+ description:
+ - Name of the target filesystem.
+ - If not supplied, will default to I(name).
+ type: str
+ required: false
+ policy:
+ description:
+ - Name of filesystem snapshot policy to apply to the replica link.
+ required: false
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new filesystem replica from foo to bar on arrayB
+ purestorage.flashblade.purefb_fs_replica:
+ name: foo
+ target_array: arrayB
+ target_fs: bar
+ policy: daily
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Add new snapshot policy to existing filesystem replica link
+ purestorage.flashblade.purefb_fs_replica:
+ name: foo
+ policy: weekly
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete snapshot policy from filesystem replica foo
+ purestorage.flashblade.purefb_fs_replica:
+ name: foo
+ policy: weekly
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import FileSystemReplicaLink, LocationReference
+except ImportError:
+ HAS_PURITY_FB = False
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def get_local_fs(module, blade):
+ """Return Filesystem or None"""
+ try:
+ res = blade.file_systems.list_file_systems(names=[module.params["name"]])
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_local_rl(module, blade):
+ """Return Filesystem Replica Link or None"""
+ try:
+ res = blade.file_system_replica_links.list_file_system_replica_links(
+ local_file_system_names=[module.params["name"]]
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def _check_connected(module, blade):
+ connected_blades = blade.array_connections.list_array_connections()
+ for target in range(0, len(connected_blades.items)):
+ if (
+ connected_blades.items[target].remote.name == module.params["target_array"]
+ or connected_blades.items[target].management_address
+ == module.params["target_array"]
+ ) and connected_blades.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_blades.items[target]
+ return None
+
+
+def create_rl(module, blade):
+ """Create Filesystem Replica Link"""
+ changed = True
+ if not module.check_mode:
+ try:
+ remote_array = _check_connected(module, blade)
+ if remote_array:
+ if not module.params["target_fs"]:
+ module.params["target_fs"] = module.params["name"]
+ if not module.params["policy"]:
+ blade.file_system_replica_links.create_file_system_replica_links(
+ local_file_system_names=[module.params["name"]],
+ remote_file_system_names=[module.params["target_fs"]],
+ remote_names=[remote_array.remote.name],
+ )
+ else:
+ blade.file_system_replica_links.create_file_system_replica_links(
+ local_file_system_names=[module.params["name"]],
+ remote_file_system_names=[module.params["target_fs"]],
+ remote_names=[remote_array.remote.name],
+ file_system_replica_link=FileSystemReplicaLink(
+ policies=[LocationReference(name=module.params["policy"])]
+ ),
+ )
+ else:
+ module.fail_json(
+ msg="Target array {0} is not connected".format(
+ module.params["target_array"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create filesystem replica link for {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def add_rl_policy(module, blade):
+ """Add Policy to Filesystem Replica Link"""
+ changed = False
+ if not module.params["target_array"]:
+ module.params["target_array"] = (
+ blade.file_system_replica_links.list_file_system_replica_links(
+ local_file_system_names=[module.params["name"]]
+ )
+ .items[0]
+ .remote.name
+ )
+ remote_array = _check_connected(module, blade)
+ try:
+ already_a_policy = (
+ blade.file_system_replica_links.list_file_system_replica_link_policies(
+ local_file_system_names=[module.params["name"]],
+ policy_names=[module.params["policy"]],
+ remote_names=[remote_array.remote.name],
+ )
+ )
+ if not already_a_policy.items:
+ changed = True
+ if not module.check_mode:
+ blade.file_system_replica_links.create_file_system_replica_link_policies(
+ policy_names=[module.params["policy"]],
+ local_file_system_names=[module.params["name"]],
+ remote_names=[remote_array.remote.name],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to add policy {0} to replica link {1}.".format(
+ module.params["policy"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_rl_policy(module, blade):
+ """Delete Policy from Filesystem Replica Link"""
+ changed = True
+ if not module.check_mode:
+ current_policy = (
+ blade.file_system_replica_links.list_file_system_replica_link_policies(
+ local_file_system_names=[module.params["name"]],
+ policy_names=[module.params["policy"]],
+ )
+ )
+ if current_policy.items:
+ try:
+ blade.file_system_replica_links.delete_file_system_replica_link_policies(
+ policy_names=[module.params["policy"]],
+ local_file_system_names=[module.params["name"]],
+ remote_names=[current_policy.items[0].link.remote.name],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to remove policy {0} from replica link {1}.".format(
+ module.params["policy"], module.params["name"]
+ )
+ )
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ target_fs=dict(type="str"),
+ target_array=dict(type="str"),
+ policy=dict(type="str"),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+ )
+
+ required_if = [["state", "absent", ["policy"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ local_fs = get_local_fs(module, blade)
+ local_replica_link = get_local_rl(module, blade)
+
+ if not local_fs:
+ module.fail_json(
+ msg="Selected local filesystem {0} does not exist.".format(
+ module.params["name"]
+ )
+ )
+
+ if module.params["policy"]:
+ try:
+ policy = blade.policies.list_policies(names=[module.params["policy"]])
+ except Exception:
+ module.fail_json(
+ msg="Selected policy {0} does not exist.".format(
+ module.params["policy"]
+ )
+ )
+ else:
+ policy = None
+ if state == "present" and not local_replica_link:
+ create_rl(module, blade)
+ elif state == "present" and local_replica_link and policy:
+ add_rl_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_rl_policy(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_groupquota.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_groupquota.py
new file mode 100644
index 000000000..2ae610275
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_groupquota.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_groupquota
+version_added: "1.7.0"
+short_description: Manage filesystem group quotas
+description:
+ - This module manages group quotas for filesystems on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Filesystem Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Create, delete or modify a quota.
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ quota:
+ description:
+ - Group quota in M, G, T or P units. This cannot be 0.
+ - This value will override the file system's default group quota.
+ type: str
+ gid:
+ description:
+ - The group id on which the quota is enforced.
+ - Cannot be combined with I(gname)
+ type: int
+ gname:
+ description:
+ - The group name on which the quota is enforced.
+ - Cannot be combined with I(gid)
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new group (using GID) quota for filesystem named foo
+ purestorage.flashblade.purefb_groupquota:
+ name: foo
+ quota: 1T
+ gid: 1234
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Create new group (using groupname) quota for filesystem named foo
+ purestorage.flashblade.purefb_groupquota:
+ name: foo
+ quota: 1T
+ gname: bar
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete group quota on filesystem foo for group by GID
+ purestorage.flashblade.purefb_groupquota:
+ name: foo
+ gid: 1234
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete group quota on filesystem foo for group by groupname
+ purestorage.flashblade.purefb_groupquota:
+ name: foo
+ gname: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Update group quota on filesystem foo for group by groupname
+ purestorage.flashblade.purefb_groupquota:
+ name: foo
+ quota: 20G
+ gname: bar
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Update group quota on filesystem foo for group by GID
+ purestorage.flashblade.purefb_groupquota:
+ name: foo
+ quota: 20G
+ gid: 1234
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import QuotasGroup
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
+def get_fs(module, blade):
+ """Return Filesystem or None"""
+ fsys = []
+ fsys.append(module.params["name"])
+ try:
+ res = blade.file_systems.list_file_systems(names=fsys)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_quota(module, blade):
+ """Return Filesystem Group Quota or None"""
+ fsys = []
+ fsys.append(module.params["name"])
+ try:
+ if module.params["gid"]:
+ res = blade.quotas_groups.list_group_quotas(
+ file_system_names=fsys, filter="group.id=" + str(module.params["gid"])
+ )
+ else:
+ res = blade.quotas_groups.list_group_quotas(
+ file_system_names=fsys,
+ filter="group.name='" + module.params["gname"] + "'",
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_quota(module, blade):
+ """Create Filesystem Group Quota"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params["gid"]:
+ blade.quotas_groups.create_group_quotas(
+ file_system_names=[module.params["name"]],
+ gids=[module.params["gid"]],
+ quota=QuotasGroup(
+ quota=int(human_to_bytes(module.params["quota"]))
+ ),
+ )
+ else:
+ blade.quotas_groups.create_group_quotas(
+ file_system_names=[module.params["name"]],
+ group_names=[module.params["gname"]],
+ quota=QuotasGroup(
+ quota=int(human_to_bytes(module.params["quota"]))
+ ),
+ )
+ except Exception:
+ if module.params["gid"]:
+ module.fail_json(
+ msg="Failed to create quota for GID {0} on filesystem {1}.".format(
+ module.params["gid"], module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to create quota for groupname {0} on filesystem {1}.".format(
+ module.params["gname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_quota(module, blade):
+ """Update Filesystem Group Quota"""
+ changed = False
+ current_quota = get_quota(module, blade)
+ if current_quota.quota != human_to_bytes(module.params["quota"]):
+ changed = True
+ if not module.check_mode:
+ if module.params["gid"]:
+ try:
+ blade.quotas_groups.update_group_quotas(
+ file_system_names=[module.params["name"]],
+ gids=[module.params["gid"]],
+ quota=QuotasGroup(
+ quota=int(human_to_bytes(module.params["quota"]))
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update quota for GID {0} on filesystem {1}.".format(
+ module.params["gid"], module.params["name"]
+ )
+ )
+ else:
+ try:
+ blade.quotas_groups.update_group_quotas(
+ file_system_names=[module.params["name"]],
+ group_names=[module.params["gname"]],
+ quota=QuotasGroup(
+ quota=int(human_to_bytes(module.params["quota"]))
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update quota for groupname {0} on filesystem {1}.".format(
+ module.params["gname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_quota(module, blade):
+ """Delete Filesystem Group Quota"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params["gid"]:
+ blade.quotas_groups.delete_group_quotas(
+ file_system_names=[module.params["name"]],
+ gids=[module.params["gid"]],
+ )
+ else:
+ blade.quotas_groups.delete_group_quotas(
+ file_system_names=[module.params["name"]],
+ group_names=[module.params["gname"]],
+ )
+ except Exception:
+ if module.params["gid"]:
+ module.fail_json(
+ msg="Failed to delete quota for GID {0} on filesystem {1}.".format(
+ module.params["gid"], module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to delete quota for groupname {0} on filesystem {1}.".format(
+ module.params["gname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ gid=dict(type="int"),
+ gname=dict(type="str"),
+ state=dict(default="present", choices=["present", "absent"]),
+ quota=dict(type="str"),
+ )
+ )
+
+ mutually_exclusive = [["gid", "gname"]]
+ required_if = [["state", "present", ["quota"]]]
+ module = AnsibleModule(
+ argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+ fsys = get_fs(module, blade)
+ if not fsys:
+ module.fail_json(
+ msg="Filesystem {0} does not exist.".format(module.params["name"])
+ )
+ quota = get_quota(module, blade)
+
+ if state == "present" and not quota:
+ create_quota(module, blade)
+ elif state == "present" and quota:
+ update_quota(module, blade)
+ elif state == "absent" and quota:
+ delete_quota(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
new file mode 100644
index 000000000..8525bd8e3
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
@@ -0,0 +1,1548 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_info
+version_added: '1.0.0'
+short_description: Collect information from Pure Storage FlashBlade
+description:
+ - Collect information from a Pure Storage FlashBlade running the
+ Purity//FB operating system. By default, the module will collect basic
+    information including the array name, Purity//FB version, and counts of
+    filesystems, snapshots, buckets, blades and object store accounts. Additional
+    information can be collected based on the configured set of arguments.
+author:
+ - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the information to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnets, lags, filesystems, snapshots, buckets,
+ replication, policies, arrays, accounts, admins, ad, kerberos
+ and drives.
+ required: false
+ type: list
+ elements: str
+ default: minimum
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: collect default set of info
+ purestorage.flashblade.purefb_info:
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+ register: blade_info
+- name: show default information
+ debug:
+ msg: "{{ blade_info['purefb_info']['default'] }}"
+
+- name: collect configuration info
+ purestorage.flashblade.purefb_info:
+ gather_subset:
+ - config
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+ register: blade_info
+- name: show config information
+ debug:
+ msg: "{{ blade_info['purefb_info']['config'] }}"
+
+- name: collect all info
+ purestorage.flashblade.purefb_info:
+ gather_subset:
+ - all
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+ register: blade_info
+- name: show all information
+ debug:
+ msg: "{{ blade_info['purefb_info'] }}"
+"""
+
+RETURN = r"""
+purefb_info:
+ description: Returns the information collected from the FlashBlade
+ returned: always
+ type: complex
+ sample: {
+ "admins": {
+ "pureuser": {
+ "api_token_timeout": null,
+ "local": true,
+ "public_key": null
+ },
+ "another_user": {
+ "api_token_timeout": null,
+ "local": false,
+ "public_key": null
+ },
+ },
+ "buckets": {
+ "central": {
+ "account_name": "jake",
+ "bucket_type": "classic",
+ "created": 1628900154000,
+ "data_reduction": null,
+ "destroyed": false,
+ "id": "43758f09-9e71-7bf7-5757-2028a95a2b65",
+ "lifecycle_rules": {},
+ "object_count": 0,
+ "snapshot_space": 0,
+ "time_remaining": null,
+ "total_physical_space": 0,
+ "unique_space": 0,
+ "versioning": "none",
+ "virtual_space": 0
+ },
+ "test": {
+ "account_name": "acme",
+ "bucket_type": "classic",
+ "created": 1630591952000,
+ "data_reduction": 3.6,
+ "destroyed": false,
+ "id": "d5f6149c-fbef-f3c5-58b6-8fd143110ba9",
+ "lifecycle_rules": {
+ "test": {
+ "abort_incomplete_multipart_uploads_after (days)": 1,
+ "cleanup_expired_object_delete_marker": true,
+ "enabled": true,
+ "keep_current_version_for (days)": null,
+ "keep_current_version_until": "2023-12-21",
+ "keep_previous_version_for (days)": null,
+ "prefix": "foo"
+ }
+ },
+ },
+ },
+ "capacity": {
+ "aggregate": {
+ "data_reduction": 1.1179228,
+ "snapshots": 0,
+ "total_physical": 17519748439,
+ "unique": 17519748439,
+ "virtual": 19585726464
+ },
+ "file-system": {
+ "data_reduction": 1.3642412,
+ "snapshots": 0,
+ "total_physical": 4748219708,
+ "unique": 4748219708,
+ "virtual": 6477716992
+ },
+ "object-store": {
+ "data_reduction": 1.0263462,
+ "snapshots": 0,
+ "total_physical": 12771528731,
+ "unique": 12771528731,
+ "virtual": 6477716992
+ },
+ "total": 83359896948925
+ },
+ "config": {
+ "alert_watchers": {
+ "enabled": true,
+ "name": "notify@acmestorage.com"
+ },
+ "array_management": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "management",
+ "services": [
+ "management"
+ ],
+ "uris": []
+ },
+ "directory_service_roles": {
+ "array_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "ops_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "readonly": {
+ "group": null,
+ "group_base": null
+ },
+ "storage_admin": {
+ "group": null,
+ "group_base": null
+ }
+ },
+ "dns": {
+ "domain": "demo.acmestorage.com",
+ "name": "demo-fb-1",
+ "nameservers": [
+ "8.8.8.8"
+ ],
+ "search": [
+ "demo.acmestorage.com"
+ ]
+ },
+ "nfs_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "nfs",
+ "services": [
+ "nfs"
+ ],
+ "uris": []
+ },
+ "ntp": [
+ "0.ntp.pool.org"
+ ],
+ "smb_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "smb",
+ "services": [
+ "smb"
+ ],
+ "uris": []
+ },
+ "smtp": {
+ "name": "demo-fb-1",
+ "relay_host": null,
+ "sender_domain": "acmestorage.com"
+ },
+ "ssl_certs": {
+ "certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
+ "common_name": "Acme Storage",
+ "country": "US",
+ "email": null,
+ "intermediate_certificate": null,
+ "issued_by": "Acme Storage",
+ "issued_to": "Acme Storage",
+ "key_size": 4096,
+ "locality": null,
+ "name": "global",
+ "organization": "Acme Storage",
+ "organizational_unit": "Acme Storage",
+ "passphrase": null,
+ "private_key": null,
+ "state": null,
+ "status": "self-signed",
+ "valid_from": "1508433967000",
+ "valid_to": "2458833967000"
+ }
+ },
+ "default": {
+ "blades": 15,
+ "buckets": 7,
+ "filesystems": 2,
+ "flashblade_name": "demo-fb-1",
+ "object_store_accounts": 1,
+ "object_store_users": 1,
+ "purity_version": "2.2.0",
+ "snapshots": 1,
+ "total_capacity": 83359896948925,
+ "smb_mode": "native"
+ },
+ "filesystems": {
+ "k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
+ "default_group_quota": 0,
+ "default_user_quota": 0,
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": true,
+ "nfs_rules": "10.21.255.0/24(rw,no_root_squash)",
+ "provisioned": 21474836480,
+ "snapshot_enabled": false
+ },
+ "z": {
+ "default_group_quota": 0,
+ "default_user_quota": 0,
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": false,
+ "provisioned": 1073741824,
+ "snapshot_enabled": false
+ }
+ },
+ "lag": {
+ "uplink": {
+ "lag_speed": 0,
+ "port_speed": 40000000000,
+ "ports": [
+ {
+ "name": "CH1.FM1.ETH1.1"
+ },
+ {
+ "name": "CH1.FM1.ETH1.2"
+ },
+ ],
+ "status": "healthy"
+ }
+ },
+ "network": {
+ "fm1.admin0": {
+ "address": "10.10.100.6",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "fm2.admin0": {
+ "address": "10.10.100.7",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "nfs1": {
+ "address": "10.10.100.4",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "data"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "vir0": {
+ "address": "10.10.100.5",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ }
+ },
+ "performance": {
+ "aggregate": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "http": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "nfs": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "s3": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ }
+ },
+ "snapshots": {
+ "z.188": {
+ "destroyed": false,
+ "source": "z",
+ "source_destroyed": false,
+ "suffix": "188"
+ }
+ },
+ "subnet": {
+ "new-mgmt": {
+ "gateway": "10.10.100.1",
+ "interfaces": [
+ {
+ "name": "fm1.admin0"
+ },
+ {
+ "name": "fm2.admin0"
+ },
+ {
+ "name": "nfs1"
+ },
+ {
+ "name": "vir0"
+ }
+ ],
+ "lag": "uplink",
+ "mtu": 1500,
+ "prefix": "10.10.100.0/24",
+ "services": [
+ "data",
+ "management",
+ "support"
+ ],
+ "vlan": 2200
+ }
+ }
+ }
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+from datetime import datetime
+
+
+MIN_REQUIRED_API_VERSION = "1.3"
+HARD_LIMIT_API_VERSION = "1.4"
+POLICIES_API_VERSION = "1.5"
+CERT_GROUPS_API_VERSION = "1.8"
+REPLICATION_API_VERSION = "1.9"
+MULTIPROTOCOL_API_VERSION = "1.11"
+MIN_32_API = "2.0"
+LIFECYCLE_API_VERSION = "2.1"
+SMB_MODE_API_VERSION = "2.2"
+NFS_POLICY_API_VERSION = "2.3"
+VSO_VERSION = "2.4"
+DRIVES_API_VERSION = "2.5"
+SECURITY_API_VERSION = "2.7"
+BUCKET_API_VERSION = "2.8"
+
+
+def _millisecs_to_time(millisecs):
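+    """Convert a millisecond offset into an hour-of-day string in HH:00 format."""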
+ if millisecs:
+        return str(int(millisecs / 3600000 % 24)).zfill(2) + ":00"
+ return None
+
+
+def _bytes_to_human(bytes_number):
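+    """Convert a raw byte rate into a human-readable string (B/s to PB/s)."""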
+ if bytes_number:
+ labels = ["B/s", "KB/s", "MB/s", "GB/s", "TB/s", "PB/s"]
+ i = 0
+ double_bytes = bytes_number
+ while i < len(labels) and bytes_number >= 1024:
+ double_bytes = bytes_number / 1024.0
+ i += 1
+ bytes_number = bytes_number / 1024
+ return str(round(double_bytes, 2)) + " " + labels[i]
+ return None
+
+
+def generate_default_dict(module, blade):
+ default_info = {}
+ defaults = blade.arrays.list_arrays().items[0]
+ default_info["flashblade_name"] = defaults.name
+ default_info["purity_version"] = defaults.version
+ default_info["filesystems"] = len(blade.file_systems.list_file_systems().items)
+ default_info["snapshots"] = len(
+ blade.file_system_snapshots.list_file_system_snapshots().items
+ )
+ default_info["buckets"] = len(blade.buckets.list_buckets().items)
+ default_info["object_store_users"] = len(
+ blade.object_store_users.list_object_store_users().items
+ )
+ default_info["object_store_accounts"] = len(
+ blade.object_store_accounts.list_object_store_accounts().items
+ )
+ default_info["blades"] = len(blade.blade.list_blades().items)
+ default_info["certificates"] = len(blade.certificates.list_certificates().items)
+ default_info["total_capacity"] = blade.arrays.list_arrays_space().items[0].capacity
+ api_version = blade.api_version.list_versions().versions
+ default_info["api_versions"] = api_version
+ if POLICIES_API_VERSION in api_version:
+ default_info["policies"] = len(blade.policies.list_policies().items)
+ if CERT_GROUPS_API_VERSION in api_version:
+ default_info["certificate_groups"] = len(
+ blade.certificate_groups.list_certificate_groups().items
+ )
+ if REPLICATION_API_VERSION in api_version:
+ default_info["fs_replicas"] = len(
+ blade.file_system_replica_links.list_file_system_replica_links().items
+ )
+ default_info["remote_credentials"] = len(
+ blade.object_store_remote_credentials.list_object_store_remote_credentials().items
+ )
+ default_info["bucket_replicas"] = len(
+ blade.bucket_replica_links.list_bucket_replica_links().items
+ )
+ default_info["connected_arrays"] = len(
+ blade.array_connections.list_array_connections().items
+ )
+ default_info["targets"] = len(blade.targets.list_targets().items)
+ default_info["kerberos_keytabs"] = len(blade.keytabs.list_keytabs().items)
+ # This section is just for REST 2.x features
+ if MIN_32_API in api_version:
+ blade = get_system(module)
+ blade_info = list(blade.get_arrays().items)[0]
+ default_info["object_store_virtual_hosts"] = len(
+ blade.get_object_store_virtual_hosts().items
+ )
+ default_info["api_clients"] = len(blade.get_api_clients().items)
+ default_info["idle_timeout"] = int(blade_info.idle_timeout / 60000)
+ if list(blade.get_arrays_eula().items)[0].signature.accepted:
+ default_info["EULA"] = "Signed"
+ else:
+ default_info["EULA"] = "Not Signed"
+ if NFS_POLICY_API_VERSION in api_version:
+ admin_settings = list(blade.get_admins_settings().items)[0]
+ default_info["max_login_attempts"] = admin_settings.max_login_attempts
+ default_info["min_password_length"] = admin_settings.min_password_length
+ if admin_settings.lockout_duration:
+ default_info["lockout_duration"] = (
+ str(admin_settings.lockout_duration / 1000) + " seconds"
+ )
+ if NFS_POLICY_API_VERSION in api_version:
+ default_info["smb_mode"] = blade_info.smb_mode
+ if VSO_VERSION in api_version:
+ default_info["timezone"] = blade_info.time_zone
+ if DRIVES_API_VERSION in api_version:
+ default_info["product_type"] = getattr(
+ blade_info, "product_type", "Unknown"
+ )
+ if SECURITY_API_VERSION in api_version:
+ dar = blade_info.encryption.data_at_rest
+ default_info["encryption"] = {
+ "data_at_rest_enabled": dar.enabled,
+ "data_at_rest_algorithms": dar.algorithms,
+ "data_at_rest_entropy_source": dar.entropy_source,
+ }
+ keys = list(blade.get_support_verification_keys().items)
+ default_info["support_keys"] = {}
+ for key in range(0, len(keys)):
+ keyname = keys[key].name
+ default_info["support_keys"][keyname] = {keys[key].verification_key}
+ default_info["security_update"] = getattr(
+ blade_info, "security_update", None
+ )
+
+ return default_info
+
+
+def generate_perf_dict(blade):
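+    # Collect aggregate performance plus per-protocol (HTTP, S3, NFS) breakdowns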
+ perf_info = {}
+ total_perf = blade.arrays.list_arrays_performance()
+ http_perf = blade.arrays.list_arrays_performance(protocol="http")
+ s3_perf = blade.arrays.list_arrays_performance(protocol="s3")
+ nfs_perf = blade.arrays.list_arrays_performance(protocol="nfs")
+ perf_info["aggregate"] = {
+ "bytes_per_op": total_perf.items[0].bytes_per_op,
+ "bytes_per_read": total_perf.items[0].bytes_per_read,
+ "bytes_per_write": total_perf.items[0].bytes_per_write,
+ "read_bytes_per_sec": total_perf.items[0].read_bytes_per_sec,
+ "reads_per_sec": total_perf.items[0].reads_per_sec,
+ "usec_per_other_op": total_perf.items[0].usec_per_other_op,
+ "usec_per_read_op": total_perf.items[0].usec_per_read_op,
+ "usec_per_write_op": total_perf.items[0].usec_per_write_op,
+ "write_bytes_per_sec": total_perf.items[0].write_bytes_per_sec,
+ "writes_per_sec": total_perf.items[0].writes_per_sec,
+ }
+ perf_info["http"] = {
+ "bytes_per_op": http_perf.items[0].bytes_per_op,
+ "bytes_per_read": http_perf.items[0].bytes_per_read,
+ "bytes_per_write": http_perf.items[0].bytes_per_write,
+ "read_bytes_per_sec": http_perf.items[0].read_bytes_per_sec,
+ "reads_per_sec": http_perf.items[0].reads_per_sec,
+ "usec_per_other_op": http_perf.items[0].usec_per_other_op,
+ "usec_per_read_op": http_perf.items[0].usec_per_read_op,
+ "usec_per_write_op": http_perf.items[0].usec_per_write_op,
+ "write_bytes_per_sec": http_perf.items[0].write_bytes_per_sec,
+ "writes_per_sec": http_perf.items[0].writes_per_sec,
+ }
+ perf_info["s3"] = {
+ "bytes_per_op": s3_perf.items[0].bytes_per_op,
+ "bytes_per_read": s3_perf.items[0].bytes_per_read,
+ "bytes_per_write": s3_perf.items[0].bytes_per_write,
+ "read_bytes_per_sec": s3_perf.items[0].read_bytes_per_sec,
+ "reads_per_sec": s3_perf.items[0].reads_per_sec,
+ "usec_per_other_op": s3_perf.items[0].usec_per_other_op,
+ "usec_per_read_op": s3_perf.items[0].usec_per_read_op,
+ "usec_per_write_op": s3_perf.items[0].usec_per_write_op,
+ "write_bytes_per_sec": s3_perf.items[0].write_bytes_per_sec,
+ "writes_per_sec": s3_perf.items[0].writes_per_sec,
+ }
+ perf_info["nfs"] = {
+ "bytes_per_op": nfs_perf.items[0].bytes_per_op,
+ "bytes_per_read": nfs_perf.items[0].bytes_per_read,
+ "bytes_per_write": nfs_perf.items[0].bytes_per_write,
+ "read_bytes_per_sec": nfs_perf.items[0].read_bytes_per_sec,
+ "reads_per_sec": nfs_perf.items[0].reads_per_sec,
+ "usec_per_other_op": nfs_perf.items[0].usec_per_other_op,
+ "usec_per_read_op": nfs_perf.items[0].usec_per_read_op,
+ "usec_per_write_op": nfs_perf.items[0].usec_per_write_op,
+ "write_bytes_per_sec": nfs_perf.items[0].write_bytes_per_sec,
+ "writes_per_sec": nfs_perf.items[0].writes_per_sec,
+ }
+ api_version = blade.api_version.list_versions().versions
+ if REPLICATION_API_VERSION in api_version:
+ file_repl_perf = (
+ blade.array_connections.list_array_connections_performance_replication(
+ type="file-system"
+ )
+ )
+ obj_repl_perf = (
+ blade.array_connections.list_array_connections_performance_replication(
+ type="object-store"
+ )
+ )
+ if len(file_repl_perf.total):
+ perf_info["file_replication"] = {
+ "received_bytes_per_sec": file_repl_perf.total[
+ 0
+ ].periodic.received_bytes_per_sec,
+ "transmitted_bytes_per_sec": file_repl_perf.total[
+ 0
+ ].periodic.transmitted_bytes_per_sec,
+ }
+ if len(obj_repl_perf.total):
+ perf_info["object_replication"] = {
+ "received_bytes_per_sec": obj_repl_perf.total[
+ 0
+ ].periodic.received_bytes_per_sec,
+ "transmitted_bytes_per_sec": obj_repl_perf.total[
+ 0
+ ].periodic.transmitted_bytes_per_sec,
+ }
+ return perf_info
+
+
+def generate_config_dict(blade):
+ config_info = {}
+ config_info["dns"] = blade.dns.list_dns().items[0].to_dict()
+ config_info["smtp"] = blade.smtp.list_smtp().items[0].to_dict()
+ try:
+ config_info["alert_watchers"] = (
+ blade.alert_watchers.list_alert_watchers().items[0].to_dict()
+ )
+ except Exception:
+ config_info["alert_watchers"] = ""
+ api_version = blade.api_version.list_versions().versions
+ if HARD_LIMIT_API_VERSION in api_version:
+ config_info["array_management"] = (
+ blade.directory_services.list_directory_services(names=["management"])
+ .items[0]
+ .to_dict()
+ )
+ config_info["directory_service_roles"] = {}
+ roles = blade.directory_services.list_directory_services_roles()
+ for role in range(0, len(roles.items)):
+ role_name = roles.items[role].name
+ config_info["directory_service_roles"][role_name] = {
+ "group": roles.items[role].group,
+ "group_base": roles.items[role].group_base,
+ }
+ config_info["nfs_directory_service"] = (
+ blade.directory_services.list_directory_services(names=["nfs"])
+ .items[0]
+ .to_dict()
+ )
+ config_info["smb_directory_service"] = (
+ blade.directory_services.list_directory_services(names=["smb"])
+ .items[0]
+ .to_dict()
+ )
+ config_info["ntp"] = blade.arrays.list_arrays().items[0].ntp_servers
+ config_info["ssl_certs"] = blade.certificates.list_certificates().items[0].to_dict()
+ api_version = blade.api_version.list_versions().versions
+ if CERT_GROUPS_API_VERSION in api_version:
+ try:
+ config_info["certificate_groups"] = (
+ blade.certificate_groups.list_certificate_groups().items[0].to_dict()
+ )
+ except Exception:
+ config_info["certificate_groups"] = ""
+ if REPLICATION_API_VERSION in api_version:
+ config_info["snmp_agents"] = {}
+ snmp_agents = blade.snmp_agents.list_snmp_agents()
+ for agent in range(0, len(snmp_agents.items)):
+ agent_name = snmp_agents.items[agent].name
+ config_info["snmp_agents"][agent_name] = {
+ "version": snmp_agents.items[agent].version,
+ "engine_id": snmp_agents.items[agent].engine_id,
+ }
+ if config_info["snmp_agents"][agent_name]["version"] == "v3":
+ config_info["snmp_agents"][agent_name][
+ "auth_protocol"
+ ] = snmp_agents.items[agent].v3.auth_protocol
+ config_info["snmp_agents"][agent_name][
+ "privacy_protocol"
+ ] = snmp_agents.items[agent].v3.privacy_protocol
+ config_info["snmp_agents"][agent_name]["user"] = snmp_agents.items[
+ agent
+ ].v3.user
+ config_info["snmp_managers"] = {}
+ snmp_managers = blade.snmp_managers.list_snmp_managers()
+ for manager in range(0, len(snmp_managers.items)):
+ mgr_name = snmp_managers.items[manager].name
+ config_info["snmp_managers"][mgr_name] = {
+ "version": snmp_managers.items[manager].version,
+ "host": snmp_managers.items[manager].host,
+ "notification": snmp_managers.items[manager].notification,
+ }
+ if config_info["snmp_managers"][mgr_name]["version"] == "v3":
+ config_info["snmp_managers"][mgr_name][
+ "auth_protocol"
+ ] = snmp_managers.items[manager].v3.auth_protocol
+ config_info["snmp_managers"][mgr_name][
+ "privacy_protocol"
+ ] = snmp_managers.items[manager].v3.privacy_protocol
+ config_info["snmp_managers"][mgr_name]["user"] = snmp_managers.items[
+ manager
+ ].v3.user
+ return config_info
+
+
+def generate_subnet_dict(blade):
+ sub_info = {}
+ subnets = blade.subnets.list_subnets()
+ for sub in range(0, len(subnets.items)):
+ sub_name = subnets.items[sub].name
+ if subnets.items[sub].enabled:
+ sub_info[sub_name] = {
+ "gateway": subnets.items[sub].gateway,
+ "mtu": subnets.items[sub].mtu,
+ "vlan": subnets.items[sub].vlan,
+ "prefix": subnets.items[sub].prefix,
+ "services": subnets.items[sub].services,
+ }
+ sub_info[sub_name]["lag"] = subnets.items[sub].link_aggregation_group.name
+ sub_info[sub_name]["interfaces"] = []
+ for iface in range(0, len(subnets.items[sub].interfaces)):
+ sub_info[sub_name]["interfaces"].append(
+ {"name": subnets.items[sub].interfaces[iface].name}
+ )
+ return sub_info
+
+
+def generate_lag_dict(blade):
+ lag_info = {}
+ groups = blade.link_aggregation_groups.list_link_aggregation_groups()
+ for groupcnt in range(0, len(groups.items)):
+ lag_name = groups.items[groupcnt].name
+ lag_info[lag_name] = {
+ "lag_speed": groups.items[groupcnt].lag_speed,
+ "port_speed": groups.items[groupcnt].port_speed,
+ "status": groups.items[groupcnt].status,
+ }
+ lag_info[lag_name]["ports"] = []
+ for port in range(0, len(groups.items[groupcnt].ports)):
+ lag_info[lag_name]["ports"].append(
+ {"name": groups.items[groupcnt].ports[port].name}
+ )
+ return lag_info
+
+
+def generate_admin_dict(module, blade):
+ admin_info = {}
+ api_version = blade.api_version.list_versions().versions
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ admins = blade.admins.list_admins()
+ for admin in range(0, len(admins.items)):
+ admin_name = admins.items[admin].name
+ admin_info[admin_name] = {
+ "api_token_timeout": admins.items[admin].api_token_timeout,
+ "public_key": admins.items[admin].public_key,
+ "local": admins.items[admin].is_local,
+ }
+
+ if MIN_32_API in api_version:
+ bladev2 = get_system(module)
+ admins = list(bladev2.get_admins().items)
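+        # REST 2.x adds API token created/expiry times, role and lockout details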
+ for admin in range(0, len(admins)):
+ admin_name = admins[admin].name
+ if admins[admin].api_token.expires_at:
+ admin_info[admin_name]["token_expires"] = datetime.fromtimestamp(
+ admins[admin].api_token.expires_at / 1000
+ ).strftime("%Y-%m-%d %H:%M:%S")
+ else:
+ admin_info[admin_name]["token_expires"] = None
+ admin_info[admin_name]["token_created"] = datetime.fromtimestamp(
+ admins[admin].api_token.created_at / 1000
+ ).strftime("%Y-%m-%d %H:%M:%S")
+ admin_info[admin_name]["role"] = admins[admin].role.name
+ if NFS_POLICY_API_VERSION in api_version:
+ admin_info[admin_name]["locked"] = admins[admin].locked
+ admin_info[admin_name]["lockout_remaining"] = admins[
+ admin
+ ].lockout_remaining
+ return admin_info
+
+
+def generate_targets_dict(blade):
+ targets_info = {}
+ targets = blade.targets.list_targets()
+ for target in range(0, len(targets.items)):
+ target_name = targets.items[target].name
+ targets_info[target_name] = {
+ "address": targets.items[target].address,
+ "status": targets.items[target].status,
+ "status_details": targets.items[target].status_details,
+ }
+ return targets_info
+
+
+def generate_remote_creds_dict(blade):
+ remote_creds_info = {}
+ remote_creds = (
+ blade.object_store_remote_credentials.list_object_store_remote_credentials()
+ )
+ for cred_cnt in range(0, len(remote_creds.items)):
+ cred_name = remote_creds.items[cred_cnt].name
+ remote_creds_info[cred_name] = {
+ "access_key": remote_creds.items[cred_cnt].access_key_id,
+ "remote_array": remote_creds.items[cred_cnt].remote.name,
+ }
+ return remote_creds_info
+
+
+def generate_file_repl_dict(blade):
+ file_repl_info = {}
+ file_links = blade.file_system_replica_links.list_file_system_replica_links()
+ for linkcnt in range(0, len(file_links.items)):
+ fs_name = file_links.items[linkcnt].local_file_system.name
+ file_repl_info[fs_name] = {
+ "direction": file_links.items[linkcnt].direction,
+ "lag": file_links.items[linkcnt].lag,
+ "status": file_links.items[linkcnt].status,
+ "remote_fs": file_links.items[linkcnt].remote.name
+ + ":"
+ + file_links.items[linkcnt].remote_file_system.name,
+ "recovery_point": file_links.items[linkcnt].recovery_point,
+ }
+ file_repl_info[fs_name]["policies"] = []
+ for policy_cnt in range(0, len(file_links.items[linkcnt].policies)):
+ file_repl_info[fs_name]["policies"].append(
+ file_links.items[linkcnt].policies[policy_cnt].display_name
+ )
+ return file_repl_info
+
+
+def generate_bucket_repl_dict(module, blade):
+ bucket_repl_info = {}
+ bucket_links = blade.bucket_replica_links.list_bucket_replica_links()
+ for linkcnt in range(0, len(bucket_links.items)):
+ bucket_name = bucket_links.items[linkcnt].local_bucket.name
+ bucket_repl_info[bucket_name] = {
+ "direction": bucket_links.items[linkcnt].direction,
+ "lag": bucket_links.items[linkcnt].lag,
+ "paused": bucket_links.items[linkcnt].paused,
+ "status": bucket_links.items[linkcnt].status,
+ "remote_bucket": bucket_links.items[linkcnt].remote_bucket.name,
+ "remote_credentials": bucket_links.items[linkcnt].remote_credentials.name,
+ "recovery_point": bucket_links.items[linkcnt].recovery_point,
+ "object_backlog": {},
+ }
+ api_version = blade.api_version.list_versions().versions
+ if SMB_MODE_API_VERSION in api_version:
+ blade = get_system(module)
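+        # Newer REST versions also expose the object backlog and cascading state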
+ bucket_links = list(blade.get_bucket_replica_links().items)
+ for linkcnt in range(0, len(bucket_links)):
+ bucket_name = bucket_links[linkcnt].local_bucket.name
+ bucket_repl_info[bucket_name]["object_backlog"] = {
+ "bytes_count": bucket_links[linkcnt].object_backlog.bytes_count,
+ "delete_ops_count": bucket_links[
+ linkcnt
+ ].object_backlog.delete_ops_count,
+ "other_ops_count": bucket_links[linkcnt].object_backlog.other_ops_count,
+ "put_ops_count": bucket_links[linkcnt].object_backlog.put_ops_count,
+ }
+ bucket_repl_info[bucket_name]["cascading_enabled"] = bucket_links[
+ linkcnt
+ ].cascading_enabled
+ return bucket_repl_info
+
+
+def generate_network_dict(blade):
+ net_info = {}
+ ports = blade.network_interfaces.list_network_interfaces()
+ for portcnt in range(0, len(ports.items)):
+ int_name = ports.items[portcnt].name
+ if ports.items[portcnt].enabled:
+ net_info[int_name] = {
+ "type": ports.items[portcnt].type,
+ "mtu": ports.items[portcnt].mtu,
+ "vlan": ports.items[portcnt].vlan,
+ "address": ports.items[portcnt].address,
+ "services": ports.items[portcnt].services,
+ "gateway": ports.items[portcnt].gateway,
+ "netmask": ports.items[portcnt].netmask,
+ }
+ return net_info
+
+
+def generate_capacity_dict(blade):
+ capacity_info = {}
+ total_cap = blade.arrays.list_arrays_space()
+ file_cap = blade.arrays.list_arrays_space(type="file-system")
+ object_cap = blade.arrays.list_arrays_space(type="object-store")
+ capacity_info["total"] = total_cap.items[0].capacity
+ capacity_info["aggregate"] = {
+ "data_reduction": total_cap.items[0].space.data_reduction,
+ "snapshots": total_cap.items[0].space.snapshots,
+ "total_physical": total_cap.items[0].space.total_physical,
+ "unique": total_cap.items[0].space.unique,
+ "virtual": total_cap.items[0].space.virtual,
+ }
+ capacity_info["file-system"] = {
+ "data_reduction": file_cap.items[0].space.data_reduction,
+ "snapshots": file_cap.items[0].space.snapshots,
+ "total_physical": file_cap.items[0].space.total_physical,
+ "unique": file_cap.items[0].space.unique,
+ "virtual": file_cap.items[0].space.virtual,
+ }
+ capacity_info["object-store"] = {
+ "data_reduction": object_cap.items[0].space.data_reduction,
+ "snapshots": object_cap.items[0].space.snapshots,
+ "total_physical": object_cap.items[0].space.total_physical,
+ "unique": object_cap.items[0].space.unique,
+ "virtual": file_cap.items[0].space.virtual,
+ }
+
+ return capacity_info
+
+
+def generate_snap_dict(blade):
+ snap_info = {}
+ snaps = blade.file_system_snapshots.list_file_system_snapshots()
+ api_version = blade.api_version.list_versions().versions
+ for snap in range(0, len(snaps.items)):
+ snapshot = snaps.items[snap].name
+ snap_info[snapshot] = {
+ "destroyed": snaps.items[snap].destroyed,
+ "source": snaps.items[snap].source,
+ "suffix": snaps.items[snap].suffix,
+ "source_destroyed": snaps.items[snap].source_destroyed,
+ }
+ if REPLICATION_API_VERSION in api_version:
+ snap_info[snapshot]["owner"] = snaps.items[snap].owner.name
+ snap_info[snapshot]["owner_destroyed"] = snaps.items[snap].owner_destroyed
+ snap_info[snapshot]["source_display_name"] = snaps.items[
+ snap
+ ].source_display_name
+ snap_info[snapshot]["source_is_local"] = snaps.items[snap].source_is_local
+ snap_info[snapshot]["source_location"] = snaps.items[
+ snap
+ ].source_location.name
+ return snap_info
+
+
+def generate_snap_transfer_dict(blade):
+ snap_transfer_info = {}
+ snap_transfers = blade.file_system_snapshots.list_file_system_snapshots_transfer()
+ for snap_transfer in range(0, len(snap_transfers.items)):
+ transfer = snap_transfers.items[snap_transfer].name
+ snap_transfer_info[transfer] = {
+ "completed": snap_transfers.items[snap_transfer].completed,
+ "data_transferred": snap_transfers.items[snap_transfer].data_transferred,
+ "progress": snap_transfers.items[snap_transfer].progress,
+ "direction": snap_transfers.items[snap_transfer].direction,
+ "remote": snap_transfers.items[snap_transfer].remote.name,
+ "remote_snapshot": snap_transfers.items[snap_transfer].remote_snapshot.name,
+ "started": snap_transfers.items[snap_transfer].started,
+ "status": snap_transfers.items[snap_transfer].status,
+ }
+ return snap_transfer_info
+
+
+def generate_array_conn_dict(module, blade):
+ array_conn_info = {}
+ arraysv2 = {}
+ api_version = blade.api_version.list_versions().versions
+ arrays = blade.array_connections.list_array_connections()
+ if NFS_POLICY_API_VERSION in api_version:
+ bladev2 = get_system(module)
+ arraysv2 = list(bladev2.get_array_connections().items)
+ for arraycnt in range(0, len(arrays.items)):
+ array = arrays.items[arraycnt].remote.name
+ array_conn_info[array] = {
+ "encrypted": arrays.items[arraycnt].encrypted,
+ "replication_addresses": arrays.items[arraycnt].replication_addresses,
+ "management_address": arrays.items[arraycnt].management_address,
+ "status": arrays.items[arraycnt].status,
+ "version": arrays.items[arraycnt].version,
+ "throttle": [],
+ }
+ if arrays.items[arraycnt].encrypted:
+ array_conn_info[array]["ca_certificate_group"] = arrays.items[
+ arraycnt
+ ].ca_certificate_group.name
+ for v2array in range(0, len(arraysv2)):
+ if arraysv2[v2array].remote.name == array:
+ array_conn_info[array]["throttle"] = {
+ "default_limit": _bytes_to_human(
+ arraysv2[v2array].throttle.default_limit
+ ),
+ "window_limit": _bytes_to_human(
+ arraysv2[v2array].throttle.window_limit
+ ),
+ "window_start": _millisecs_to_time(
+ arraysv2[v2array].throttle.window.start
+ ),
+ "window_end": _millisecs_to_time(
+ arraysv2[v2array].throttle.window.end
+ ),
+ }
+ return array_conn_info
+
+
+def generate_policies_dict(blade):
+ policies_info = {}
+ policies = blade.policies.list_policies()
+ for policycnt in range(0, len(policies.items)):
+ policy = policies.items[policycnt].name
+ policies_info[policy] = {}
+ policies_info[policy]["enabled"] = policies.items[policycnt].enabled
+ if policies.items[policycnt].rules:
+ policies_info[policy]["rules"] = (
+ policies.items[policycnt].rules[0].to_dict()
+ )
+ return policies_info
+
+
+def generate_bucket_dict(module, blade):
+ bucket_info = {}
+ buckets = blade.buckets.list_buckets()
+ for bckt in range(0, len(buckets.items)):
+ bucket = buckets.items[bckt].name
+ bucket_info[bucket] = {
+ "versioning": buckets.items[bckt].versioning,
+ "bucket_type": getattr(buckets.items[bckt], "bucket_type", None),
+ "object_count": buckets.items[bckt].object_count,
+ "id": buckets.items[bckt].id,
+ "account_name": buckets.items[bckt].account.name,
+ "data_reduction": buckets.items[bckt].space.data_reduction,
+ "snapshot_space": buckets.items[bckt].space.snapshots,
+ "total_physical_space": buckets.items[bckt].space.total_physical,
+ "unique_space": buckets.items[bckt].space.unique,
+ "virtual_space": buckets.items[bckt].space.virtual,
+ "created": buckets.items[bckt].created,
+ "destroyed": buckets.items[bckt].destroyed,
+ "time_remaining": buckets.items[bckt].time_remaining,
+ "lifecycle_rules": {},
+ }
+ api_version = blade.api_version.list_versions().versions
+ if LIFECYCLE_API_VERSION in api_version:
+ blade = get_system(module)
+ for bckt in range(0, len(buckets.items)):
+ if buckets.items[bckt].destroyed:
+ # skip processing buckets marked as destroyed
+ continue
+ all_rules = list(
+ blade.get_lifecycle_rules(bucket_ids=[buckets.items[bckt].id]).items
+ )
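+            # Retention periods are returned in milliseconds; convert to whole days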
+ for rule in range(0, len(all_rules)):
+ bucket_name = all_rules[rule].bucket.name
+ rule_id = all_rules[rule].rule_id
+ if all_rules[rule].keep_previous_version_for:
+ keep_previous_version_for = int(
+ all_rules[rule].keep_previous_version_for / 86400000
+ )
+ else:
+ keep_previous_version_for = None
+ if all_rules[rule].keep_current_version_for:
+ keep_current_version_for = int(
+ all_rules[rule].keep_current_version_for / 86400000
+ )
+ else:
+ keep_current_version_for = None
+ if all_rules[rule].abort_incomplete_multipart_uploads_after:
+ abort_incomplete_multipart_uploads_after = int(
+ all_rules[rule].abort_incomplete_multipart_uploads_after
+ / 86400000
+ )
+ else:
+ abort_incomplete_multipart_uploads_after = None
+ if all_rules[rule].keep_current_version_until:
+ keep_current_version_until = datetime.fromtimestamp(
+ all_rules[rule].keep_current_version_until / 1000
+ ).strftime("%Y-%m-%d")
+ else:
+ keep_current_version_until = None
+ bucket_info[bucket_name]["lifecycle_rules"][rule_id] = {
+ "keep_previous_version_for (days)": keep_previous_version_for,
+ "keep_current_version_for (days)": keep_current_version_for,
+ "keep_current_version_until": keep_current_version_until,
+ "prefix": all_rules[rule].prefix,
+ "enabled": all_rules[rule].enabled,
+ "abort_incomplete_multipart_uploads_after (days)": abort_incomplete_multipart_uploads_after,
+ "cleanup_expired_object_delete_marker": all_rules[
+ rule
+ ].cleanup_expired_object_delete_marker,
+ }
+ if VSO_VERSION in api_version:
+ buckets = list(blade.get_buckets().items)
+ for bucket in range(0, len(buckets)):
+ bucket_info[buckets[bucket].name]["bucket_type"] = buckets[
+ bucket
+ ].bucket_type
+ if BUCKET_API_VERSION in api_version:
+ for bucket in range(0, len(buckets)):
+ bucket_info[buckets[bucket].name]["retention_lock"] = buckets[
+ bucket
+ ].retention_lock
+ bucket_info[buckets[bucket].name]["quota_limit"] = buckets[
+ bucket
+ ].quota_limit
+ bucket_info[buckets[bucket].name]["object_lock_config"] = {
+ "enabled": buckets[bucket].object_lock_config.enabled,
+ "freeze_locked_objects": buckets[
+ bucket
+ ].object_lock_config.freeze_locked_objects,
+ }
+ bucket_info[buckets[bucket].name]["eradication_config"] = {
+ "eradication_delay": buckets[
+ bucket
+ ].eradication_config.eradication_delay,
+ "manual_eradication": buckets[
+ bucket
+ ].eradication_config.manual_eradication,
+ }
+ return bucket_info
+
+
+def generate_kerb_dict(blade):
+ kerb_info = {}
+ keytabs = list(blade.get_keytabs().items)
+ for ktab in range(0, len(keytabs)):
+ keytab_name = keytabs[ktab].prefix
+ kerb_info[keytab_name] = {}
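+        # Group every keytab slot sharing this prefix, keyed by slot suffix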
+ for key in range(0, len(keytabs)):
+ if keytabs[key].prefix == keytab_name:
+ kerb_info[keytab_name][keytabs[key].suffix] = {
+ "fqdn": keytabs[key].fqdn,
+ "kvno": keytabs[key].kvno,
+ "principal": keytabs[key].principal,
+ "realm": keytabs[key].realm,
+ "encryption_type": keytabs[key].encryption_type,
+ }
+ return kerb_info
+
+
+def generate_ad_dict(blade):
+ ad_info = {}
+ active_directory = blade.get_active_directory()
+ if active_directory.total_item_count != 0:
+ ad_account = list(active_directory.items)[0]
+ ad_info[ad_account.name] = {
+ "computer": ad_account.computer_name,
+ "domain": ad_account.domain,
+ "directory_servers": ad_account.directory_servers,
+ "kerberos_servers": ad_account.kerberos_servers,
+ "service_principals": ad_account.service_principal_names,
+ "join_ou": ad_account.join_ou,
+ "encryption_types": ad_account.encryption_types,
+ }
+ return ad_info
+
+
+def generate_object_store_access_policies_dict(blade):
+ policies_info = {}
+ policies = list(blade.get_object_store_access_policies().items)
+ for policy in range(0, len(policies)):
+ policy_name = policies[policy].name
+ policies_info[policy_name] = {
+ "ARN": policies[policy].arn,
+ "description": policies[policy].description,
+ "enabled": policies[policy].enabled,
+ "local": policies[policy].is_local,
+ "rules": [],
+ }
+ for rule in range(0, len(policies[policy].rules)):
+ policies_info[policy_name]["rules"].append(
+ {
+ "actions": policies[policy].rules[rule].actions,
+ "conditions": {
+ "source_ips": policies[policy]
+ .rules[rule]
+ .conditions.source_ips,
+ "s3_delimiters": policies[policy]
+ .rules[rule]
+ .conditions.s3_delimiters,
+ "s3_prefixes": policies[policy]
+ .rules[rule]
+ .conditions.s3_prefixes,
+ },
+ "effect": policies[policy].rules[rule].effect,
+ "name": policies[policy].rules[rule].name,
+ }
+ )
+ return policies_info
+
+
+def generate_nfs_export_policies_dict(blade):
+ policies_info = {}
+ policies = list(blade.get_nfs_export_policies().items)
+ for policy in range(0, len(policies)):
+ policy_name = policies[policy].name
+ policies_info[policy_name] = {
+ "local": policies[policy].is_local,
+ "enabled": policies[policy].enabled,
+ "rules": [],
+ }
+ for rule in range(0, len(policies[policy].rules)):
+ policies_info[policy_name]["rules"].append(
+ {
+ "access": policies[policy].rules[rule].access,
+ "anongid": policies[policy].rules[rule].anongid,
+ "anonuid": policies[policy].rules[rule].anonuid,
+ "atime": policies[policy].rules[rule].atime,
+ "client": policies[policy].rules[rule].client,
+ "fileid_32bit": policies[policy].rules[rule].fileid_32bit,
+ "permission": policies[policy].rules[rule].permission,
+ "secure": policies[policy].rules[rule].secure,
+ "security": policies[policy].rules[rule].security,
+ "index": policies[policy].rules[rule].index,
+ }
+ )
+ return policies_info
+
+
+def generate_object_store_accounts_dict(blade):
+ account_info = {}
+ accounts = list(blade.get_object_store_accounts().items)
+ for account in range(0, len(accounts)):
+ acc_name = accounts[account].name
+ account_info[acc_name] = {
+ "object_count": accounts[account].object_count,
+ "data_reduction": accounts[account].space.data_reduction,
+ "snapshots_space": accounts[account].space.snapshots,
+ "total_physical_space": accounts[account].space.total_physical,
+ "unique_space": accounts[account].space.unique,
+ "virtual_space": accounts[account].space.virtual,
+ "quota_limit": getattr(accounts[account], "quota_limit", None),
+ "hard_limit_enabled": getattr(
+ accounts[account], "hard_limit_enabled", None
+ ),
+ "total_provisioned": getattr(
+ accounts[account].space, "total_provisioned", None
+ ),
+ "users": {},
+ }
+ try:
+ account_info[acc_name]["bucket_defaults"] = {
+ "hard_limit_enabled": accounts[
+ account
+ ].bucket_defaults.hard_limit_enabled,
+ "quota_limit": accounts[account].bucket_defaults.quota_limit,
+ }
+ except AttributeError:
+ pass
+ acc_users = list(
+ blade.get_object_store_users(filter='name="' + acc_name + '/*"').items
+ )
+ for acc_user in range(0, len(acc_users)):
+ user_name = acc_users[acc_user].name.split("/")[1]
+ account_info[acc_name]["users"][user_name] = {"keys": [], "policies": []}
+ if (
+ blade.get_object_store_access_keys(
+ filter='user.name="' + acc_users[acc_user].name + '"'
+ ).total_item_count
+ != 0
+ ):
+ access_keys = list(
+ blade.get_object_store_access_keys(
+ filter='user.name="' + acc_users[acc_user].name + '"'
+ ).items
+ )
+ for key in range(0, len(access_keys)):
+ account_info[acc_name]["users"][user_name]["keys"].append(
+ {
+ "name": access_keys[key].name,
+ "enabled": bool(access_keys[key].enabled),
+ }
+ )
+ if (
+ blade.get_object_store_access_policies_object_store_users(
+ member_names=[acc_users[acc_user].name]
+ ).total_item_count
+ != 0
+ ):
+ policies = list(
+ blade.get_object_store_access_policies_object_store_users(
+ member_names=[acc_users[acc_user].name]
+ ).items
+ )
+ for policy in range(0, len(policies)):
+ account_info[acc_name]["users"][user_name]["policies"].append(
+ policies[policy].policy.name
+ )
+ return account_info
+
+
+def generate_fs_dict(module, blade):
+ api_version = blade.api_version.list_versions().versions
+ if SMB_MODE_API_VERSION in api_version:
+ bladev2 = get_system(module)
+ fsys_v2 = list(bladev2.get_file_systems().items)
+ fs_info = {}
+ fsys = blade.file_systems.list_file_systems()
+ for fsystem in range(0, len(fsys.items)):
+ share = fsys.items[fsystem].name
+ fs_info[share] = {
+ "fast_remove": fsys.items[fsystem].fast_remove_directory_enabled,
+ "snapshot_enabled": fsys.items[fsystem].snapshot_directory_enabled,
+ "provisioned": fsys.items[fsystem].provisioned,
+ "destroyed": fsys.items[fsystem].destroyed,
+ "nfs_rules": fsys.items[fsystem].nfs.rules,
+ "nfs_v3": getattr(fsys.items[fsystem].nfs, "v3_enabled", False),
+ "nfs_v4_1": getattr(fsys.items[fsystem].nfs, "v4_1_enabled", False),
+ "user_quotas": {},
+ "group_quotas": {},
+ }
+ if fsys.items[fsystem].http.enabled:
+ fs_info[share]["http"] = fsys.items[fsystem].http.enabled
+ if fsys.items[fsystem].smb.enabled:
+ fs_info[share]["smb_mode"] = fsys.items[fsystem].smb.acl_mode
+ api_version = blade.api_version.list_versions().versions
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ fs_info[share]["multi_protocol"] = {
+ "safegaurd_acls": fsys.items[fsystem].multi_protocol.safeguard_acls,
+ "access_control_style": fsys.items[
+ fsystem
+ ].multi_protocol.access_control_style,
+ }
+ if HARD_LIMIT_API_VERSION in api_version:
+ fs_info[share]["hard_limit"] = fsys.items[fsystem].hard_limit_enabled
+ if REPLICATION_API_VERSION in api_version:
+ fs_info[share]["promotion_status"] = fsys.items[fsystem].promotion_status
+ fs_info[share]["requested_promotion_state"] = fsys.items[
+ fsystem
+ ].requested_promotion_state
+ fs_info[share]["writable"] = fsys.items[fsystem].writable
+ fs_info[share]["source"] = {
+ "is_local": fsys.items[fsystem].source.is_local,
+ "name": fsys.items[fsystem].source.name,
+ }
+ if SMB_MODE_API_VERSION in api_version:
+ for v2fs in range(0, len(fsys_v2)):
+ if fsys_v2[v2fs].name == share:
+ fs_info[share]["default_group_quota"] = fsys_v2[
+ v2fs
+ ].default_group_quota
+ fs_info[share]["default_user_quota"] = fsys_v2[
+ v2fs
+ ].default_user_quota
+ if NFS_POLICY_API_VERSION in api_version:
+ fs_info[share]["export_policy"] = fsys_v2[
+ v2fs
+ ].nfs.export_policy.name
+ if VSO_VERSION in api_version:
+ for v2fs in range(0, len(fsys_v2)):
+ if fsys_v2[v2fs].name == share:
+ try:
+ fs_groups = True
+ fs_group_quotas = list(
+ bladev2.get_quotas_groups(file_system_names=[share]).items
+ )
+ except Exception:
+ fs_groups = False
+ try:
+ fs_users = True
+ fs_user_quotas = list(
+ bladev2.get_quotas_users(file_system_names=[share]).items
+ )
+ except Exception:
+ fs_users = False
+ if fs_groups:
+ for group_quota in range(0, len(fs_group_quotas)):
+ group_name = fs_group_quotas[group_quota].name.rsplit("/")[
+ 1
+ ]
+ fs_info[share]["group_quotas"][group_name] = {
+ "group_id": fs_group_quotas[group_quota].group.id,
+ "group_name": fs_group_quotas[group_quota].group.name,
+ "quota": fs_group_quotas[group_quota].quota,
+ "usage": fs_group_quotas[group_quota].usage,
+ }
+ if fs_users:
+ for user_quota in range(0, len(fs_user_quotas)):
+ user_name = fs_user_quotas[user_quota].name.rsplit("/")[1]
+ fs_info[share]["user_quotas"][user_name] = {
+ "user_id": fs_user_quotas[user_quota].user.id,
+ "user_name": fs_user_quotas[user_quota].user.name,
+ "quota": fs_user_quotas[user_quota].quota,
+ "usage": fs_user_quotas[user_quota].usage,
+ }
+
+ return fs_info
+
+
+def generate_drives_dict(blade):
+ """
+    Drive information is only available for the Legend chassis.
+    The Legend chassis product_type contains "//", so drives are only
+    reported when that is the case.
+ """
+ drives_info = {}
+ drives = list(blade.get_drives().items)
+ if "//" in list(blade.get_arrays().items)[0].product_type:
+ for drive in range(0, len(drives)):
+ name = drives[drive].name
+ drives_info[name] = {
+ "progress": getattr(drives[drive], "progress", None),
+ "raw_capacity": getattr(drives[drive], "raw_capacity", None),
+ "status": getattr(drives[drive], "status", None),
+ "details": getattr(drives[drive], "details", None),
+ }
+ return drives_info
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(gather_subset=dict(default="minimum", type="list", elements="str"))
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+ if not module.params["gather_subset"]:
+ module.params["gather_subset"] = ["minimum"]
+ subset = [test.lower() for test in module.params["gather_subset"]]
+ valid_subsets = (
+ "all",
+ "minimum",
+ "config",
+ "performance",
+ "capacity",
+ "network",
+ "subnets",
+ "lags",
+ "filesystems",
+ "snapshots",
+ "buckets",
+ "arrays",
+ "replication",
+ "policies",
+ "accounts",
+ "admins",
+ "ad",
+ "kerberos",
+ "drives",
+ )
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+ module.fail_json(
+ msg="value must gather_subset must be one or more of: %s, got: %s"
+ % (",".join(valid_subsets), ",".join(subset))
+ )
+
+ info = {}
+
+ api_version = blade.api_version.list_versions().versions
+ if "minimum" in subset or "all" in subset:
+ info["default"] = generate_default_dict(module, blade)
+ if "performance" in subset or "all" in subset:
+ info["performance"] = generate_perf_dict(blade)
+ if "config" in subset or "all" in subset:
+ info["config"] = generate_config_dict(blade)
+ if "capacity" in subset or "all" in subset:
+ info["capacity"] = generate_capacity_dict(blade)
+ if "lags" in subset or "all" in subset:
+ info["lag"] = generate_lag_dict(blade)
+ if "network" in subset or "all" in subset:
+ info["network"] = generate_network_dict(blade)
+ if "subnets" in subset or "all" in subset:
+ info["subnet"] = generate_subnet_dict(blade)
+ if "filesystems" in subset or "all" in subset:
+ info["filesystems"] = generate_fs_dict(module, blade)
+ if "admins" in subset or "all" in subset:
+ info["admins"] = generate_admin_dict(module, blade)
+ if "snapshots" in subset or "all" in subset:
+ info["snapshots"] = generate_snap_dict(blade)
+ if "buckets" in subset or "all" in subset:
+ info["buckets"] = generate_bucket_dict(module, blade)
+ if POLICIES_API_VERSION in api_version:
+ if "policies" in subset or "all" in subset:
+ info["policies"] = generate_policies_dict(blade)
+ info["snapshot_policies"] = generate_policies_dict(blade)
+ if REPLICATION_API_VERSION in api_version:
+ if "arrays" in subset or "all" in subset:
+ info["arrays"] = generate_array_conn_dict(module, blade)
+ if "replication" in subset or "all" in subset:
+ info["file_replication"] = generate_file_repl_dict(blade)
+ info["bucket_replication"] = generate_bucket_repl_dict(module, blade)
+ info["snap_transfers"] = generate_snap_transfer_dict(blade)
+ info["remote_credentials"] = generate_remote_creds_dict(blade)
+ info["targets"] = generate_targets_dict(blade)
+ if MIN_32_API in api_version:
+ # Calls for data only available from Purity//FB 3.2 and higher
+ blade = get_system(module)
+ if "accounts" in subset or "all" in subset:
+ info["accounts"] = generate_object_store_accounts_dict(blade)
+ if "ad" in subset or "all" in subset:
+ info["active_directory"] = generate_ad_dict(blade)
+ if "kerberos" in subset or "all" in subset:
+ info["kerberos"] = generate_kerb_dict(blade)
+ if "policies" in subset or "all" in subset:
+ if SMB_MODE_API_VERSION in api_version:
+ info["access_policies"] = generate_object_store_access_policies_dict(
+ blade
+ )
+ if NFS_POLICY_API_VERSION in api_version:
+ info["export_policies"] = generate_nfs_export_policies_dict(blade)
+ if "drives" in subset or "all" in subset and DRIVES_API_VERSION in api_version:
+ info["drives"] = generate_drives_dict(blade)
+ module.exit_json(changed=False, purefb_info=info)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
new file mode 100644
index 000000000..b17bc3f9e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_inventory
+version_added: '1.0.0'
+short_description: Collect information from Pure Storage FlashBlade
+description:
+  - Collect hardware inventory information from a Pure Storage FlashBlade
+    running the Purity//FB operating system, including chassis, fabric
+    modules, blades, Ethernet and management ports, fans, drive bays,
+    power supplies and switches.
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: collect FlashBlade inventory
+ purestorage.flashblade.purefb_inventory:
+    fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: blade_info
+- name: show inventory information
+ debug:
+ msg: "{{ blade_info['purefb_info'] }}"
+
+"""
+
+RETURN = r"""
+purefb_inventory:
+ description: Returns the inventory information for the FlashBlade
+ returned: always
+ type: complex
+ sample: {
+ "blades": {
+ "CH1.FB1": {
+ "model": "FB-17TB",
+ "serial": "PPCXA1942AFF5",
+ "slot": 1,
+ "status": "healthy"
+ }
+ },
+ "chassis": {
+ "CH1": {
+ "index": 1,
+ "model": null,
+ "serial": "PMPAM163402AE",
+ "slot": null,
+ "status": "healthy"
+ }
+ },
+ "controllers": {},
+ "ethernet": {
+ "CH1.FM1.ETH1": {
+ "model": "624410002",
+ "serial": "APF16360021PRV",
+ "slot": 1,
+ "speed": 40000000000,
+ "status": "healthy"
+ }
+ },
+ "fans": {
+ "CH1.FM1.FAN1": {
+ "slot": 1,
+ "status": "healthy"
+ }
+ },
+ "modules": {
+ "CH1.FM1": {
+ "model": "EFM-110",
+ "serial": "PSUFS1640002C",
+ "slot": 1,
+ "status": "healthy"
+ },
+ "CH1.FM2": {
+ "model": "EFM-110",
+ "serial": "PSUFS1640004A",
+ "slot": 2,
+ "status": "healthy"
+ }
+ },
+ "power": {
+ "CH1.PWR1": {
+ "model": "DS1600SPE-3",
+ "serial": "M0500E00D8AJZ",
+ "slot": 1,
+ "status": "healthy"
+ }
+ },
+ "switch": {}
+ }
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_API_VERSION = "2.1"
+PART_NUMBER_API_VERSION = "2.8"
+
+
+def generate_hardware_dict(module, blade, api_version):
+ hw_info = {
+ "modules": {},
+ "ethernet": {},
+ "mgmt_ports": {},
+ "fans": {},
+ "bays": {},
+ "controllers": {},
+ "blades": {},
+ "chassis": {},
+ "power": {},
+ "switch": {},
+ }
+ blade = get_system(module)
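+    # Query each hardware component type with a separate filtered REST 2.x call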
+ components = list(blade.get_hardware(filter="type='fm'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["modules"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "identify": components[component].identify_enabled,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["modules"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='eth'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["ethernet"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "speed": components[component].speed,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["ethernet"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='mgmt_port'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["mgmt_ports"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "speed": components[component].speed,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["mgmt_ports"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='fan'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["fans"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "identify": components[component].identify_enabled,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["fans"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='fb'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["blades"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "temperature": components[component].temperature,
+ "identify": components[component].identify_enabled,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["blades"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='pwr'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["power"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["power"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='xfm'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["switch"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["switch"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='ch'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["chassis"][component_name] = {
+ "slot": components[component].slot,
+ "index": components[component].index,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["chassis"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='bay'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["bays"][component_name] = {
+ "slot": components[component].slot,
+ "index": components[component].index,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "identify": components[component].identify_enabled,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+            hw_info["bays"][component_name]["part_number"] = components[
+ component
+ ].part_number
+
+ return hw_info
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+
+ module.exit_json(
+ changed=False, purefb_info=generate_hardware_dict(module, blade, api_version)
+ )
+
+
+if __name__ == "__main__":
+ main()
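
The hardware enumeration above repeats the same loop for every component type. As an illustration only (not part of the patch), here is a table-driven sketch of the same idea, assuming the pypureclient get_hardware(filter=...) call and attribute names used in the module and an already-authenticated blade client:

    # Hypothetical helper: map hardware type -> (result section, attributes to copy).
    HW_SECTIONS = {
        "eth": ("ethernet", ("slot", "status", "serial", "model", "speed")),
        "fan": ("fans", ("slot", "status", "identify_enabled")),
        "pwr": ("power", ("slot", "status", "serial", "model")),
        "xfm": ("switch", ("slot", "status", "serial", "model")),
    }

    def collect_hardware(blade, include_part_number=False):
        hw_info = {}
        for hw_type, (section, attrs) in HW_SECTIONS.items():
            hw_info[section] = {}
            for component in blade.get_hardware(filter="type='{0}'".format(hw_type)).items:
                entry = {attr: getattr(component, attr) for attr in attrs}
                if include_part_number:
                    entry["part_number"] = component.part_number
                hw_info[section][component.name] = entry
        return hw_info
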
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_keytabs.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_keytabs.py
new file mode 100644
index 000000000..7268bc01c
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_keytabs.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: purefb_keytabs
+version_added: '1.6.0'
+short_description: Manage FlashBlade Kerberos Keytabs
+description:
+- Manage Kerberos Keytabs for FlashBlades
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Manage Kerberos Keytabs
+ default: import
+ type: str
+ choices: [ absent, import, export, rotate ]
+ name:
+ description:
+ - Name of the Keytab
+ - Must include prefix and suffix
+ type: str
+ prefix:
+ description:
+ - Only required for I(import) or I(rotate)
+    - Prefix to use for naming the file slots
+ - Specifying a file entry prefix is required because a single keytab file can contain
+ multiple keytab entries in multiple slots.
+ - If not provided for I(import) the current AD Account name will be used.
+ type: str
+ keytab_file:
+ description:
+ - Name of file holding Keytab
+ type: str
+ filetype:
+ description:
+ - Format of the keytab file
+ type: str
+ choices: [ binary, base64 ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Import a binary keytab
+ purestorage.flashblade.purefb_keytabs:
+ state: import
+ prefix: example
+ keytab_file: pure_krb.keytab
+ filetype: binary
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Import a base64 keytab
+ purestorage.flashblade.purefb_keytabs:
+ state: import
+ prefix: example
+ keytab_file: pure_krb.keytab.mime
+ filetype: base64
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Export a keytab
+ purestorage.flashblade.purefb_keytabs:
+ state: export
+ name: example.3
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+ register: download_file
+
+- name: Delete a keytab
+ purestorage.flashblade.purefb_keytabs:
+ state: absent
+ name: example.3
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Rotate current AD account keytabs
+ purestorage.flashblade.purefb_keytabs:
+ state: rotate
+ fb_url: 10.10.10.2
+
+- name: Rotate AD account keytabs by creating new series
+ purestorage.flashblade.purefb_keytabs:
+ state: rotate
+    prefix: next_prefix
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+download_file:
+ description:
+ - Name of file containing exported keytab
+ returned: When using I(export) option
+ type: str
+ sample: "/tmp/pure_krb8939478070214877726.keytab"
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flashblade import KeytabPost, Reference
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def rotate_keytab(module, blade):
+ """Rotate keytab"""
+ changed = True
+ account = Reference(
+ name=list(blade.get_active_directory().items)[0].name,
+ resource_type="active-directory",
+ )
+ keytab = KeytabPost(source=account)
+ if not module.check_mode:
+ res = blade.post_keytabs(keytab=keytab, name_prefixes=module.params["prefix"])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rotate AD account keytabs, prefix {0}.".format(
+ module.params["prefix"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_keytab(module, blade):
+ """Delete keytab"""
+ changed = False
+ if blade.get_keytabs(names=[module.params["name"]]).status_code == 200:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_keytabs(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete keytab {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def import_keytab(module, blade):
+ """Import keytab"""
+ changed = True
+ if not module.check_mode:
+ if module.params["filetype"] == "binary":
+ readtype = "rb"
+ else:
+ readtype = "r"
+ with open(module.params["keytab_file"], readtype) as keytab_file:
+ keytab_data = keytab_file.read()
+ short_name = module.params["keytab_file"].split("/")[-1]
+ res = blade.post_keytabs_upload(
+ name_prefixes=module.params["prefix"], keytab_file=(short_name, keytab_data)
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to import keytab file {0}. Error: {1}".format(
+ module.params["keytab_file"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def export_keytab(module, blade):
+ """Export keytab"""
+ changed = False
+ download_file = ""
+ if blade.get_keytabs(names=[module.params["name"]]).status_code == 200:
+ changed = True
+ if not module.check_mode:
+ res = blade.get_keytabs_download(keytab_names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to export keytab {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ download_file = list(res.items)[0]
+ module.exit_json(changed=changed, download_file=download_file)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type="str",
+ default="import",
+ choices=["absent", "rotate", "import", "export"],
+ ),
+ name=dict(type="str"),
+ prefix=dict(type="str"),
+ keytab_file=dict(type="str"),
+ filetype=dict(type="str", choices=["binary", "base64"]),
+ )
+ )
+
+ required_if = [["state", "import", ["prefix"]]]
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ if not module.params["prefix"]:
+ module.params["prefix"] = list(blade.get_active_directory().items)[0].name
+
+ if state == "import":
+ import_keytab(module, blade)
+ elif state == "export":
+ export_keytab(module, blade)
+ elif state == "rotate":
+ rotate_keytab(module, blade)
+ elif state == "absent":
+ delete_keytab(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
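
import_keytab() above reads binary keytabs in 'rb' mode and base64 (MIME) exports in text mode, then hands the upload a (filename, data) tuple. A minimal standalone sketch of that file handling; the path and filetype below are illustrative only:

    def read_keytab(path, filetype="binary"):
        # Binary keytabs must be read as bytes; base64/MIME exports are plain text.
        mode = "rb" if filetype == "binary" else "r"
        with open(path, mode) as keytab_file:
            data = keytab_file.read()
        # The upload call in the module expects a (short_name, data) tuple.
        return path.split("/")[-1], data

    short_name, keytab_data = read_keytab("pure_krb.keytab", "binary")
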
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py
new file mode 100644
index 000000000..e5c46e730
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_lag
+version_added: '1.7.0'
+short_description: Manage FlashBlade Link Aggregation Groups
+description:
+- Maintain FlashBlade Link Aggregation Groups
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the Link Aggregation Group
+ type: str
+ default: uplink
+ state:
+ description:
+ - Define whether the LAG should be added or deleted
+ default: present
+ choices: [ absent, present ]
+ type: str
+ ports:
+ description:
+ - Name of network ports assigned to the LAG
+ - Format should be CHx.ETHy, where CHx is the chassis number and
+ ETHy is the ethernet port number.
+ - Matched port pairs from each Fabric Module in the Chassis will
+ be used.
+ - To modify required ports for a LAG specify only the ports required
+ by the LAG. Any ports currently used by the LAG not specified will be
+ disconnected from the LAG.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Add LAG
+ purestorage.flashblade.purefb_lag:
+ name: lag2
+ ports:
+ - ch1.eth2
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Update LAG
+ purestorage.flashblade.purefb_lag:
+ name: lag2
+ ports:
+ - ch1.eth2
+ - ch1.eth4
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Delete LAG
+ purestorage.flashblade.purefb_lag:
+ name: lag2
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+lag:
+ description: A dictionary describing the LAG.
+ type: dict
+ returned: success
+ contains:
+ lag_speed:
+ description: Combined speed of all ports in the LAG in Gb/s
+ type: str
+ port_speed:
+ description: Configured speed of each port in the LAG in Gb/s
+ type: str
+ mac_address:
+ description: Unique MAC address assigned to the LAG
+ type: str
+ status:
+ description: Health status of the LAG.
+ type: str
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def delete_lag(module, blade):
+ """Delete Link Aggregation Group"""
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_link_aggregation_groups(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete LAG {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_lag(module, blade):
+ """Update Link Aggregation Group"""
+ changed = False
+ used_ports = []
+ current_ports = []
+    lagfact = {}
+ current_lag = list(
+ blade.get_link_aggregation_groups(names=[module.params["name"]]).items
+ )[0]
+ for port in range(0, len(current_lag.ports)):
+ used_ports.append(current_lag.ports[port].name)
+ for lag_port in range(0, len(module.params["ports"]), 2):
+ if (
+ not (
+ module.params["ports"][lag_port].split(".")[0].upper()
+ + ".FM1."
+ + module.params["ports"][lag_port].split(".")[1].upper()
+ )
+ in used_ports
+ ):
+ current_lags = list(blade.get_link_aggregation_groups().items)
+ for lag in range(0, len(current_lags)):
+ for port in range(0, len(current_lags[lag].ports)):
+ current_ports.append(current_lags[lag].ports[port].name)
+ for current_lag_port in range(0, len(current_ports)):
+ if (
+ module.params["ports"][lag_port].split(".")[0].upper()
+ + ".FM1."
+ + module.params["ports"][lag_port].split(".")[1].upper()
+ ) in current_ports:
+ module.fail_json(
+ msg="Selected port {0} is currently in use by another LAG.".format(
+ module.params["ports"][lag_port].upper()
+ )
+ )
+ new_ports = []
+ for port in range(0, len(module.params["ports"])):
+ new_ports.append(
+ module.params["ports"][port].split(".")[0].upper()
+ + ".FM1."
+ + module.params["ports"][port].split(".")[1].upper()
+ )
+ new_ports.append(
+ module.params["ports"][port].split(".")[0].upper()
+ + ".FM2."
+ + module.params["ports"][port].split(".")[1].upper()
+ )
+ ports = []
+ for final_port in range(0, len(new_ports)):
+ ports.append(flashblade.FixedReference(name=new_ports[final_port]))
+    link_aggregation_group = flashblade.LinkAggregationGroup(ports=ports)
+ if sorted(used_ports) != sorted(new_ports):
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_link_aggregation_groups(
+ names=[module.params["name"]],
+ link_aggregation_group=link_aggregation_group,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update LAG {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ response = list(res.items)[0]
+ lagfact = {
+ "mac_address": response.mac_address,
+ "port_speed": str(response.port_speed / 1000000000) + "Gb/s",
+ "lag_speed": str(response.lag_speed / 1000000000) + "Gb/s",
+ "status": response.status,
+ }
+ module.exit_json(changed=changed, lag=lagfact)
+
+
+def create_lag(module, blade):
+ """Create Link Aggregation Group"""
+ changed = True
+ used_ports = []
+    lagfact = {}
+ current_lags = list(blade.get_link_aggregation_groups().items)
+ for lag in range(0, len(current_lags)):
+ for port in range(0, len(current_lags[lag].ports)):
+ used_ports.append(current_lags[lag].ports[port].name)
+ for lag_port in range(0, len(module.params["ports"])):
+ if (
+ module.params["ports"][lag_port].split(".")[0].upper()
+ + ".FM1."
+            + module.params["ports"][lag_port].split(".")[1].upper()
+ ) in used_ports:
+ module.fail_json(
+ msg="Selected port {0} is currently in use by another LAG.".format(
+ module.params["ports"][lag_port].upper()
+ )
+ )
+ new_ports = []
+ for new_port in range(0, len(module.params["ports"])):
+ new_ports.append(
+ module.params["ports"][new_port].split(".")[0].upper()
+ + ".FM1."
+ + module.params["ports"][new_port].split(".")[1].upper()
+ )
+ new_ports.append(
+ module.params["ports"][new_port].split(".")[0].upper()
+ + ".FM2."
+ + module.params["ports"][new_port].split(".")[1].upper()
+ )
+ ports = []
+ for final_port in range(0, len(new_ports)):
+ ports.append(flashblade.FixedReference(name=new_ports[final_port]))
+ link_aggregation_group = flashblade.LinkAggregationGroup(ports=ports)
+ if not module.check_mode:
+ res = blade.post_link_aggregation_groups(
+ names=[module.params["name"]], link_aggregation_group=link_aggregation_group
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create LAG {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ response = list(res.items)[0]
+ lagfact = {
+ "mac_address": response.mac_address,
+ "port_speed": str(response.port_speed / 1000000000) + "Gb/s",
+ "lag_speed": str(response.lag_speed / 1000000000) + "Gb/s",
+ "status": response.status,
+ }
+ module.exit_json(changed=changed, lag=lagfact)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str", default="uplink"),
+ ports=dict(type="list", elements="str"),
+ )
+ )
+
+ required_if = [["state", "present", ["ports"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ state = module.params["state"]
+
+ exists = bool(
+ blade.get_link_aggregation_groups(names=[module.params["name"]]).status_code
+ == 200
+ )
+ if module.params["ports"]:
+ # Remove duplicates
+ module.params["ports"] = list(dict.fromkeys(module.params["ports"]))
+ if not exists and state == "present":
+ create_lag(module, blade)
+ elif exists and state == "present":
+ update_lag(module, blade)
+ elif exists and state == "absent":
+ if module.params["name"].lower() == "uplink":
+ module.fail_json(
+ msg="Preconfigured Link Aggregation Group cannot be deleted"
+ )
+ else:
+ delete_lag(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
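
create_lag() and update_lag() above expand each supplied CHx.ETHy port into its matched pair of fabric-module ports before calling the API. A standalone sketch of that expansion:

    def expand_ports(ports):
        """Expand CHx.ETHy names into the CHx.FM1.ETHy / CHx.FM2.ETHy pair."""
        expanded = []
        for port in ports:
            chassis, eth = port.upper().split(".")
            expanded.append("{0}.FM1.{1}".format(chassis, eth))
            expanded.append("{0}.FM2.{1}".format(chassis, eth))
        return expanded

    print(expand_ports(["ch1.eth2", "ch1.eth4"]))
    # ['CH1.FM1.ETH2', 'CH1.FM2.ETH2', 'CH1.FM1.ETH4', 'CH1.FM2.ETH4']
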
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py
new file mode 100644
index 000000000..0403aedcb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py
@@ -0,0 +1,490 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_lifecycle
+version_added: '1.4.0'
+short_description: Manage FlashBlade object lifecycles
+description:
+- Manage lifecycles for object buckets
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete lifecycle rule
+ default: present
+ type: str
+ choices: [ absent, present ]
+ bucket:
+ description:
+ - Bucket the lifecycle rule applies to
+ type: str
+ required: true
+ name:
+ description:
+ - Name of the lifecycle rule
+ type: str
+ required: true
+ enabled:
+ description:
+ - State of lifecycle rule
+ type: bool
+ default: true
+ keep_previous_for:
+ aliases: [ keep_for ]
+ description:
+ - Time after which previous versions will be marked expired.
+ - Enter as days (d) or weeks (w). Range is 1 - 2147483647 days.
+ type: str
+ keep_current_for:
+ description:
+ - Time after which current versions will be marked expired.
+ - Enter as days (d) or weeks (w). Range is 1 - 2147483647 days.
+ version_added: "1.8.0"
+ type: str
+ keep_current_until:
+ description:
+ - Date after which current versions will be marked expired.
+ - Enter as date in form YYYY-MM-DD.
+ - B(Note:) setting a date in the past will delete ALL objects with
+ the value of I(prefix) as they are created.
+ version_added: "1.8.0"
+ type: str
+ abort_uploads_after:
+ description:
+ - Duration of time after which incomplete multipart uploads will be aborted.
+ - Enter as days (d) or weeks (w). Range is 1 - 2147483647 days.
+ version_added: "1.8.0"
+ type: str
+ prefix:
+ description:
+ - Object key prefix identifying one or more objects in the bucket
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create a lifecycle rule called bar for bucket foo (pre-Purity//FB 3.2.3)
+ purestorage.flashblade.purefb_lifecycle:
+ name: bar
+ bucket: foo
+ keep_previous_for: 2d
+ prefix: test
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a lifecycle rule called bar for bucket foo (post-Purity//FB 3.2.3)
+ purestorage.flashblade.purefb_lifecycle:
+ name: bar
+ bucket: foo
+ keep_previous_for: 2d
+ keep_current_for: 1w
+ abort_uploads_after: 1d
+ keep_current_until: 2020-11-23
+ prefix: test
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Modify a lifecycle rule (post-Purity//FB 3.2.3)
+ purestorage.flashblade.purefb_lifecycle:
+ name: bar
+ bucket: foo
+ keep_previous_for: 10d
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete lifecycle rule foo from bucket foo
+ purestorage.flashblade.purefb_lifecycle:
+ name: foo
+ bucket: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import LifecycleRulePost, LifecycleRulePatch, Reference
+except ImportError:
+ HAS_PURITYFB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+from datetime import datetime
+
+
+MIN_REQUIRED_API_VERSION = "1.10"
+LIFECYCLE_API_VERSION = "2.1"
+
+
+def _get_bucket(module, blade):
+ s3bucket = None
+ buckets = blade.buckets.list_buckets()
+ for bucket in range(0, len(buckets.items)):
+ if buckets.items[bucket].name == module.params["bucket"]:
+ s3bucket = buckets.items[bucket]
+ return s3bucket
+
+
+def _convert_date_to_epoch(module):
+ try:
+ unix_date = datetime.strptime(module.params["keep_current_until"], "%Y-%m-%d")
+ except ValueError:
+ module.fail_json(msg="Incorrect data format, should be YYYY-MM-DD")
+ if unix_date < datetime.utcnow():
+ module.warn(
+ "This value of `keep_current_until` will permanently delete objects "
+ "as they are created. Using this date is not recommended"
+ )
+ epoch_milliseconds = int((unix_date - datetime(1970, 1, 1)).total_seconds() * 1000)
+ return epoch_milliseconds
+
+
+def _convert_to_millisecs(day):
+ try:
+ if day[-1:].lower() == "w":
+ return int(day[:-1]) * 7 * 86400000
+ elif day[-1:].lower() == "d":
+ return int(day[:-1]) * 86400000
+ except Exception:
+ return 0
+ return 0
+
+
+def _findstr(text, match):
+ for line in text.splitlines():
+ if match in line:
+ found = line
+ return found
+
+
+def delete_rule(module, blade):
+ """Delete lifecycle rule"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.lifecycle_rules.delete_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete lifecycle rule {0} for bucket {1}.".format(
+ module.params["name"], module.params["bucket"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_rule(module, blade, bladev2=None):
+ """Create lifecycle policy"""
+ changed = True
+ if bladev2:
+ if (
+ not module.params["keep_previous_for"]
+ and not module.params["keep_current_until"]
+ and not module.params["keep_current_for"]
+ ):
+ module.fail_json(
+ msg="At least one `keep...` parameter is required to create a new lifecycle rule"
+ )
+
+ else:
+ if not module.params["keep_previous_for"] and not bladev2:
+ module.fail_json(
+ msg="'keep_previous_for' is required to create a new lifecycle rule"
+ )
+ if not module.check_mode:
+ if not bladev2:
+ try:
+ attr = LifecycleRulePost(
+ bucket=Reference(name=module.params["bucket"]),
+ rule_id=module.params["name"],
+ keep_previous_version_for=_convert_to_millisecs(
+ module.params["keep_previous_for"]
+ ),
+ prefix=module.params["prefix"],
+ )
+ blade.lifecycle_rules.create_lifecycle_rules(
+ rule=attr, confirm_date=True
+ )
+ if not module.params["enabled"]:
+ attr = LifecycleRulePatch()
+ attr.enabled = False
+ blade.lifecycle_rules.update_lifecycle_rules(
+                        names=[module.params["bucket"] + "/" + module.params["name"]],
+ rule=attr,
+ confirm_date=True,
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create lifecycle rule {0} for bucket {1}.".format(
+ module.params["name"], module.params["bucket"]
+ )
+ )
+ else:
+ attr = flashblade.LifecycleRulePost(
+ bucket=flashblade.Reference(name=module.params["bucket"]),
+ rule_id=module.params["name"],
+ keep_previous_version_for=_convert_to_millisecs(
+ module.params["keep_previous_for"]
+ ),
+ keep_current_version_until=module.params["keep_current_until"],
+ keep_current_version_for=_convert_to_millisecs(
+ module.params["keep_current_for"]
+ ),
+ abort_incomplete_multipart_uploads_after=_convert_to_millisecs(
+ module.params["abort_uploads_after"]
+ ),
+ prefix=module.params["prefix"],
+ )
+ if attr.keep_current_version_until:
+ res = bladev2.post_lifecycle_rules(rule=attr, confirm_date=True)
+ else:
+ res = bladev2.post_lifecycle_rules(rule=attr)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create lifecycle rule {0} for bucket {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["bucket"],
+ res.errors[0].message,
+ )
+ )
+ if not module.params["enabled"]:
+ attr = flashblade.LifecycleRulePatch(enabled=module.params["enabled"])
+ res = bladev2.patch_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]],
+ lifecycle=attr,
+ )
+ if res.status_code != 200:
+ module.warn(
+                        "Lifecycle Rule {0} was not disabled correctly. "
+                        "Please check your FlashBlade".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_rule(module, blade, rule, bladev2=None):
+    """Update lifecycle rule"""
+ changed = False
+ if not bladev2:
+ current_rule = {
+ "prefix": rule.prefix,
+ "keep_previous_version_for": rule.keep_previous_version_for,
+ "enabled": rule.enabled,
+ }
+ else:
+ current_rule = {
+ "prefix": rule.prefix,
+ "abort_incomplete_multipart_uploads_after": rule.abort_incomplete_multipart_uploads_after,
+ "keep_current_version_for": rule.keep_current_version_for,
+ "keep_previous_version_for": rule.keep_previous_version_for,
+ "keep_current_version_until": rule.keep_current_version_until,
+ "enabled": rule.enabled,
+ }
+ if not module.params["prefix"]:
+ prefix = current_rule["prefix"]
+ else:
+ prefix = module.params["prefix"]
+ if not module.params["keep_previous_for"]:
+ keep_previous_for = current_rule["keep_previous_version_for"]
+ else:
+ keep_previous_for = _convert_to_millisecs(module.params["keep_previous_for"])
+ if bladev2:
+ if not module.params["keep_current_for"]:
+ keep_current_for = current_rule["keep_current_version_for"]
+ else:
+ keep_current_for = _convert_to_millisecs(module.params["keep_current_for"])
+ if not module.params["abort_uploads_after"]:
+ abort_uploads_after = current_rule[
+ "abort_incomplete_multipart_uploads_after"
+ ]
+ else:
+ abort_uploads_after = _convert_to_millisecs(
+ module.params["abort_uploads_after"]
+ )
+ if not module.params["keep_current_until"]:
+ keep_current_until = current_rule["keep_current_version_until"]
+ else:
+ keep_current_until = module.params["keep_current_until"]
+ new_rule = {
+ "prefix": prefix,
+ "abort_incomplete_multipart_uploads_after": abort_uploads_after,
+ "keep_current_version_for": keep_current_for,
+ "keep_previous_version_for": keep_previous_for,
+ "keep_current_version_until": keep_current_until,
+ "enabled": module.params["enabled"],
+ }
+ else:
+ new_rule = {
+ "prefix": prefix,
+ "keep_previous_version_for": keep_previous_for,
+ "enabled": module.params["enabled"],
+ }
+ if current_rule != new_rule:
+ changed = True
+ if not module.check_mode:
+ if not bladev2:
+ try:
+ attr = LifecycleRulePatch(
+ keep_previous_version_for=new_rule["keep_previous_version_for"],
+ prefix=new_rule["prefix"],
+ )
+ attr.enabled = module.params["enabled"]
+ blade.lifecycle_rules.update_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]],
+ rule=attr,
+ confirm_date=True,
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update lifecycle rule {0} for bucket {1}.".format(
+ module.params["name"], module.params["bucket"]
+ )
+ )
+ else:
+ attr = flashblade.LifecycleRulePatch(
+ keep_previous_version_for=new_rule["keep_previous_version_for"],
+ keep_current_version_for=new_rule["keep_current_version_for"],
+ keep_current_version_until=new_rule["keep_current_version_until"],
+ abort_incomplete_multipart_uploads_after=new_rule[
+ "abort_incomplete_multipart_uploads_after"
+ ],
+ prefix=new_rule["prefix"],
+ enabled=new_rule["enabled"],
+ )
+ if attr.keep_current_version_until:
+ res = bladev2.patch_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]],
+ lifecycle=attr,
+ confirm_date=True,
+ )
+ else:
+ res = bladev2.patch_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]],
+ lifecycle=attr,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update lifecycle rule {0} for bucket {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["bucket"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ enabled=dict(type="bool", default=True),
+ bucket=dict(type="str", required=True),
+ name=dict(type="str", required=True),
+ prefix=dict(
+ type="str",
+ ),
+ keep_previous_for=dict(type="str", aliases=["keep_for"]),
+ keep_current_for=dict(type="str"),
+ keep_current_until=dict(type="str"),
+ abort_uploads_after=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITYFB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ bladev2 = get_system(module)
+ versions = blade.api_version.list_versions().versions
+
+ if module.params["keep_previous_for"] and not module.params["keep_previous_for"][
+ -1:
+ ].lower() in ["w", "d"]:
+ module.fail_json(
+ msg="'keep_previous_for' format incorrect - specify as 'd' or 'w'"
+ )
+ if module.params["keep_current_for"] and not module.params["keep_current_for"][
+ -1:
+ ].lower() in ["w", "d"]:
+ module.fail_json(
+ msg="'keep_current_for' format incorrect - specify as 'd' or 'w'"
+ )
+ if module.params["abort_uploads_after"] and not module.params[
+ "abort_uploads_after"
+ ][-1:].lower() in ["w", "d"]:
+ module.fail_json(
+ msg="'abort_uploads_after' format incorrect - specify as 'd' or 'w'"
+ )
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ if not _get_bucket(module, blade):
+ module.fail_json(
+ msg="Specified bucket {0} does not exist".format(module.params["bucket"])
+ )
+
+ try:
+ if LIFECYCLE_API_VERSION not in versions:
+ rule = blade.lifecycle_rules.list_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]]
+ ).items[0]
+ else:
+ if module.params["keep_current_until"]:
+ module.params["keep_current_until"] = _convert_date_to_epoch(module)
+ bladev2 = get_system(module)
+ rule = list(
+ bladev2.get_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]]
+ ).items
+ )[0]
+ except Exception:
+ rule = None
+
+ if rule and state == "present":
+ update_rule(module, blade, rule, bladev2)
+ elif state == "present" and not rule:
+ create_rule(module, blade, bladev2)
+ elif state == "absent" and rule:
+ delete_rule(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
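
The lifecycle module converts its human-friendly inputs before calling the REST API: the keep_* durations become milliseconds and keep_current_until becomes an epoch-millisecond timestamp. A quick self-contained illustration, with the logic adapted from the helpers above:

    from datetime import datetime

    def to_millisecs(value):
        # '2d' -> days, '1w' -> weeks, anything else -> 0
        if value[-1:].lower() == "w":
            return int(value[:-1]) * 7 * 86400000
        if value[-1:].lower() == "d":
            return int(value[:-1]) * 86400000
        return 0

    print(to_millisecs("2d"))   # 172800000
    print(to_millisecs("1w"))   # 604800000
    date = datetime.strptime("2020-11-23", "%Y-%m-%d")
    print(int((date - datetime(1970, 1, 1)).total_seconds() * 1000))  # 1606089600000
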
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_messages.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_messages.py
new file mode 100644
index 000000000..bbfe57f95
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_messages.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_messages
+version_added: '1.10.0'
+short_description: List FlashBlade Alert Messages
+description:
+- List Alert messages based on filters provided
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ severity:
+ description:
+    - Severity of the alerts to show
+ type: list
+ elements: str
+ choices: [ all, critical, warning, info ]
+ default: [ all ]
+ state:
+ description:
+ - State of alerts to show
+ default: open
+ choices: [ all, open, closed ]
+ type: str
+ flagged:
+ description:
+ - Show alerts that have been acknowledged or not
+ default: false
+ type: bool
+ history:
+ description:
+ - Historical time period to show alerts for, from present time
+    - Allowed time periods are hour(h), day(d), week(w) and year(y)
+ type: str
+ default: 1w
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Show critical alerts from past 4 weeks that haven't been acknowledged
+  purestorage.flashblade.purefb_messages:
+    history: 4w
+    flagged: false
+ severity:
+ - critical
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+ALLOWED_PERIODS = ["h", "d", "w", "y"]
+# Time periods in milliseconds
+HOUR = 3600000
+DAY = HOUR * 24
+WEEK = DAY * 7
+YEAR = WEEK * 52
+
+
+def _create_time_window(window):
+ period = window[-1].lower()
+ multiple = int(window[0:-1])
+ if period == "h":
+ return HOUR * multiple
+ if period == "d":
+ return DAY * multiple
+ if period == "w":
+ return WEEK * multiple
+ if period == "y":
+ return YEAR * multiple
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="open", choices=["all", "open", "closed"]),
+ history=dict(type="str", default="1w"),
+ flagged=dict(type="bool", default=False),
+ severity=dict(
+ type="list",
+ elements="str",
+ default=["all"],
+ choices=["all", "critical", "warning", "info"],
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ time_now = int(time.time() * 1000)
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ if module.params["history"][-1].lower() not in ALLOWED_PERIODS:
+        module.fail_json(msg="history value is not an allowed time period")
+ since_time = str(time_now - _create_time_window(module.params["history"].lower()))
+ if module.params["flagged"]:
+ flagged = " and flagged='True'"
+ else:
+ flagged = " and flagged='False'"
+
+ multi_sev = False
+ if len(module.params["severity"]) > 1:
+ if "all" in module.params["severity"]:
+ module.params["severity"] = ["*"]
+ else:
+ multi_sev = True
+ if multi_sev:
+ severity = " and ("
+ for level in range(0, len(module.params["severity"])):
+ severity += "severity='" + str(module.params["severity"][level]) + "' or "
+ severity = severity[0:-4] + ")"
+ else:
+ if module.params["severity"] == ["all"]:
+ severity = " and severity='*'"
+ else:
+ severity = " and severity='" + str(module.params["severity"][0]) + "'"
+ messages = {}
+ if module.params["state"] == "all":
+ state = " and state='*'"
+ else:
+ state = " and state='" + module.params["state"] + "'"
+ filter_string = "notified>" + since_time + state + flagged + severity
+    res = blade.get_alerts(filter=filter_string)
+    if res.status_code != 200:
+        module.fail_json(
+            msg="Failed to get alert messages. Error: {0}".format(res.errors[0].message)
+        )
+    alerts = list(res.items)
+ for message in range(0, len(alerts)):
+ name = alerts[message].name
+ messages[name] = {
+ "summary": alerts[message].summary,
+ "component_type": alerts[message].component_type,
+ "component_name": alerts[message].component_name,
+ "description": alerts[message].description,
+ "code": alerts[message].code,
+ "severity": alerts[message].severity,
+ "state": alerts[message].state,
+ "flagged": alerts[message].flagged,
+ "created": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(alerts[message].created / 1000),
+ )
+ + " UTC",
+ "notified": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(alerts[message].notified / 1000),
+ )
+ + " UTC",
+ "updated": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(alerts[message].updated / 1000),
+ )
+ + " UTC",
+ }
+ module.exit_json(changed=False, purefb_messages=messages)
+
+
+if __name__ == "__main__":
+ main()
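
The module collapses its parameters into a single REST filter string. For example, with history=4w, state=open, flagged=false and severity=[critical], the assembled filter looks like this (the timestamp is a hypothetical epoch-millisecond value for now minus four weeks):

    since_time = "1650000000000"  # hypothetical epoch milliseconds
    filter_string = (
        "notified>" + since_time
        + " and state='open'"
        + " and flagged='False'"
        + " and severity='critical'"
    )
    print(filter_string)
    # notified>1650000000000 and state='open' and flagged='False' and severity='critical'
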
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py
new file mode 100644
index 000000000..27693e32c
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_network
+version_added: "1.0.0"
+short_description: Manage network interfaces in a Pure Storage FlashBlade
+description:
+ - This module manages network interfaces on Pure Storage FlashBlade.
+ - When creating a network interface a subnet must already exist with
+ a network prefix that covers the IP address of the interface being
+ created.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Interface Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Create, delete or modifies a network interface.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ address:
+ description:
+ - IP address of interface.
+ required: false
+ type: str
+ services:
+ description:
+ - Define which services are configured for the interfaces.
+ required: false
+ choices: [ "data", "replication" ]
+ default: data
+ type: str
+ itype:
+ description:
+ - Type of interface.
+ required: false
+ choices: [ "vip" ]
+ default: vip
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new network interface named foo
+ purestorage.flashblade.purefb_network:
+ name: foo
+ address: 10.21.200.23
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Change IP address of network interface named foo
+ purestorage.flashblade.purefb_network:
+ name: foo
+ state: present
+ address: 10.21.200.123
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete network interface named foo
+ purestorage.flashblade.purefb_network:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import NetworkInterface
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MINIMUM_API_VERSION = "1.3"
+
+
+def get_iface(module, blade):
+ """Return Filesystem or None"""
+ iface = []
+ iface.append(module.params["name"])
+ try:
+ res = blade.network_interfaces.list_network_interfaces(names=iface)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_iface(module, blade):
+ """Create Network Interface"""
+ changed = True
+ if not module.check_mode:
+ iface = []
+ services = []
+ iface.append(module.params["name"])
+ services.append(module.params["services"])
+ try:
+ blade.network_interfaces.create_network_interfaces(
+ names=iface,
+ network_interface=NetworkInterface(
+ address=module.params["address"],
+ services=services,
+ type=module.params["itype"],
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Interface creation failed. Check subnet exists for {0}".format(
+ module.params["address"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def modify_iface(module, blade):
+ """Modify Network Interface IP address"""
+ changed = False
+ iface = get_iface(module, blade)
+ iface_new = []
+ iface_new.append(module.params["name"])
+ if module.params["address"] != iface.address:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.network_interfaces.update_network_interfaces(
+ names=iface_new,
+ network_interface=NetworkInterface(
+ address=module.params["address"]
+ ),
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to modify Interface {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_iface(module, blade):
+ """Delete Network Interface"""
+ changed = True
+ if not module.check_mode:
+ iface = []
+ iface.append(module.params["name"])
+ try:
+ blade.network_interfaces.delete_network_interfaces(names=iface)
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete network {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=["present", "absent"]),
+ address=dict(),
+ services=dict(default="data", choices=["data", "replication"]),
+ itype=dict(default="vip", choices=["vip"]),
+ )
+ )
+
+ required_if = [["state", "present", ["address"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MINIMUM_API_VERSION not in api_version:
+ module.fail_json(msg="Upgrade Purity//FB to enable this module")
+ iface = get_iface(module, blade)
+
+ if state == "present" and not iface:
+ create_iface(module, blade)
+ elif state == "present" and iface:
+ modify_iface(module, blade)
+ elif state == "absent" and iface:
+ delete_iface(module, blade)
+ elif state == "absent" and not iface:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
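
For reference, a minimal sketch of the underlying purity_fb call made by create_iface() above, assuming the same NetworkInterface model and create_network_interfaces() signature used in the module; blade is an already-connected client and the interface name and address are placeholders:

    from purity_fb import NetworkInterface  # same import used by the module

    def create_data_vip(blade, name, address):
        # Creates a 'data' VIP; a subnet covering `address` must already exist.
        blade.network_interfaces.create_network_interfaces(
            names=[name],
            network_interface=NetworkInterface(
                address=address,
                services=["data"],
                type="vip",
            ),
        )
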
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py
new file mode 100644
index 000000000..7b18442c9
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_ntp
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade NTP settings
+description:
+- Set or erase NTP configuration for Pure Storage FlashBlades.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete NTP servers configuration
+ type: str
+ default: present
+ choices: [ absent, present ]
+ ntp_servers:
+ type: list
+ elements: str
+ description:
+ - A list of up to 4 alternate NTP servers. These may include IPv4,
+ IPv6 or FQDNs. Invalid IP addresses will cause the module to fail.
+ No validation is performed for FQDNs.
+    - If more than 4 servers are provided, only the first 4 unique
+      servers will be used.
+    - If no servers are given, a default of I(0.pool.ntp.org) will be used.
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing NTP server entries
+ purestorage.flashblade.purefb_ntp:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set array NTP servers
+ purestorage.flashblade.purefb_ntp:
+ state: present
+ ntp_servers:
+ - "0.pool.ntp.org"
+ - "1.pool.ntp.org"
+ - "2.pool.ntp.org"
+ - "3.pool.ntp.org"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PureArray
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.3"
+
+
+def remove(duplicate):
+ final_list = []
+ for num in duplicate:
+ if num not in final_list:
+ final_list.append(num)
+ return final_list
+
+
+def delete_ntp(module, blade):
+ """Delete NTP Servers"""
+ changed = True
+ if not module.check_mode:
+ if blade.arrays.list_arrays().items[0].ntp_servers != []:
+ try:
+ blade_settings = PureArray(ntp_servers=[])
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(msg="Deletion of NTP servers failed")
+ module.exit_json(changed=changed)
+
+
+def create_ntp(module, blade):
+ """Set NTP Servers"""
+ changed = True
+ if not module.check_mode:
+ if not module.params["ntp_servers"]:
+ module.params["ntp_servers"] = ["0.pool.ntp.org"]
+ try:
+ blade_settings = PureArray(ntp_servers=module.params["ntp_servers"][0:4])
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(msg="Update of NTP servers failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ ntp_servers=dict(type="list", elements="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ required_if = [["state", "present", ["ntp_servers"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
+
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if module.params["state"] == "absent":
+ delete_ntp(module, blade)
+ else:
+ module.params["ntp_servers"] = remove(module.params["ntp_servers"])
+ if sorted(blade.arrays.list_arrays().items[0].ntp_servers) != sorted(
+ module.params["ntp_servers"][0:4]
+ ):
+ create_ntp(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
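
Before comparing against the current array settings, the module de-duplicates the supplied servers (order preserved) and applies at most the first four. A standalone sketch of that normalisation, using the documented default:

    def normalise_ntp(servers):
        # Order-preserving de-duplication, then cap at four servers.
        unique = []
        for server in servers or ["0.pool.ntp.org"]:
            if server not in unique:
                unique.append(server)
        return unique[0:4]

    print(normalise_ntp(["0.pool.ntp.org", "1.pool.ntp.org", "0.pool.ntp.org"]))
    # ['0.pool.ntp.org', '1.pool.ntp.org']
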
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py
new file mode 100644
index 000000000..20b99e8a0
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_phonehome
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashBlade Phone Home
+description:
+- Enable or Disable Remote Phone Home for a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of phone home
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Enable Remote Phone Home
+ purestorage.flashblade.purefb_phonehome:
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Disable Remote Phone Home
+ purestorage.flashblade.purefb_phonehome:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Support
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
+def enable_ph(module, blade):
+    """Enable Phone Home"""
+ changed = True
+ if not module.check_mode:
+ ph_settings = Support(phonehome_enabled=True)
+ try:
+ blade.support.update_support(support=ph_settings)
+ except Exception:
+ module.fail_json(msg="Enabling Phone Home failed")
+ module.exit_json(changed=changed)
+
+
+def disable_ph(module, blade):
+ """Disable Phone Home"""
+ changed = True
+ if not module.check_mode:
+ ph_settings = Support(phonehome_enabled=False)
+ try:
+ blade.support.update_support(support=ph_settings)
+ except Exception:
+ module.fail_json(msg="Disabling Phone Home failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    if not HAS_PURITY_FB:
+        module.fail_json(msg="purity_fb SDK is required for this module")
+
+    blade = get_blade(module)
+    api_version = blade.api_version.list_versions().versions
+    if MIN_REQUIRED_API_VERSION not in api_version:
+        module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if (
+ module.params["state"] == "present"
+ and not blade.support.list_support().items[0].phonehome_enabled
+ ):
+ enable_ph(module, blade)
+ elif (
+ module.params["state"] == "absent"
+ and blade.support.list_support().items[0].phonehome_enabled
+ ):
+ disable_ph(module, blade)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
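
The module only toggles a single Support flag. A minimal sketch of that call, assuming the purity_fb Support model and update_support() method shown above:

    from purity_fb import Support  # same import used by the module

    def set_phonehome(blade, enabled):
        # Enable (True) or disable (False) remote phone home.
        blade.support.update_support(support=Support(phonehome_enabled=enabled))
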
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_pingtrace.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_pingtrace.py
new file mode 100644
index 000000000..e9f20a158
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_pingtrace.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_pingtrace
+version_added: '1.11.0'
+short_description: Employ the internal FlashBlade ping and trace mechanisms
+description:
+- Ping or trace a destination
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ action:
+ description:
+ - Which action is required, ping or trace
+ type: str
+ choices: [ ping, trace ]
+ default: ping
+ count:
+ description:
+ - Used by ping to specify the number of packets to send
+ type: int
+ default: 1
+ resolve:
+ description:
+ - Specify whether or not to map IP addresses to host names
+ type: bool
+    default: true
+ latency:
+ description:
+ - Specify whether or not to print the full user-to-user latency
+ type: bool
+    default: false
+ packet_size:
+ description:
+ - Used by ping to specify the number of data bytes to send per packet
+ type: int
+ default: 56
+ destination:
+ description:
+    - IP address or hostname to run ping or trace against.
+ type: str
+ required: true
+ method:
+ description:
+ - Used by trace to specify the method to use for operations
+ type: str
+ choices: [ icmp, tcp, udp ]
+ default: udp
+ fragment:
+ description:
+ - Used by trace to specify whether or not to fragment packets
+ type: bool
+ default: true
+ discover_mtu:
+ description:
+ - Used by trace to specify whether or not to discover the MTU
+ along the path being traced
+ type: bool
+ default: false
+ port:
+ description:
+ - Used by trace to specify a destination port
+ type: str
+ source:
+ description:
+ - IP address or hostname used by ping and trace to specify where
+ to start to run the specified operation
+ - If not specified will use all available sources
+ type: str
+ component:
+ description:
+ - Used by ping and trace to specify where to run the operation.
+ - Valid values are controllers and blades from hardware list.
+ - If not specified defaults to all available controllers and selected blades
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: ping Google DNS server
+ purestorage.flashblade.purefb_pingtrace:
+ destination: 8.8.8.8
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: trace to Google DNS server from CH1.FM0
+ purestorage.flashblade.purefb_pingtrace:
+ action: trace
+ destination: 8.8.8.8
+    fragment: true
+ source: CH1.FM0
+ discover_mtu: true
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.6"
+
+
+def run_ping(module, blade):
+ """Run network ping"""
+ ping_fact = {}
+ if module.params["source"] and module.params["component"]:
+ res = blade.get_network_interfaces_ping(
+ destination=module.params["destination"],
+ component=module.params["component"],
+ source=module.params["source"],
+ packet_size=module.params["packet_size"],
+ count=module.params["count"],
+ print_latency=module.params["latency"],
+ resolve_hostname=module.params["resolve"],
+ )
+ elif module.params["source"] and not module.params["component"]:
+ res = blade.get_network_interfaces_ping(
+ destination=module.params["destination"],
+ source=module.params["source"],
+ packet_size=module.params["packet_size"],
+ count=module.params["count"],
+ print_latency=module.params["latency"],
+ resolve_hostname=module.params["resolve"],
+ )
+ elif not module.params["source"] and module.params["component"]:
+ res = blade.get_network_interfaces_ping(
+ destination=module.params["destination"],
+ component=module.params["component"],
+ packet_size=module.params["packet_size"],
+ count=module.params["count"],
+ print_latency=module.params["latency"],
+ resolve_hostname=module.params["resolve"],
+ )
+ else:
+ res = blade.get_network_interfaces_ping(
+ destination=module.params["destination"],
+ packet_size=module.params["packet_size"],
+ count=module.params["count"],
+ print_latency=module.params["latency"],
+ resolve_hostname=module.params["resolve"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to run ping. Error: {0}".format(res.errors[0].message)
+ )
+ else:
+ responses = list(res.items)
+ for resp in range(0, len(responses)):
+ comp_name = responses[resp].component_name.replace(".", "_")
+ ping_fact[comp_name] = {
+ "details": responses[resp].details,
+ }
+
+ module.exit_json(changed=False, pingfact=ping_fact)
+
+
+def run_trace(module, blade):
+ """Run network trace"""
+ trace_fact = {}
+ if module.params["source"] and module.params["component"]:
+ res = blade.get_network_interfaces_trace(
+ port=module.params["port"],
+ destination=module.params["destination"],
+ component=module.params["component"],
+ discover_mtu=module.params["discover_mtu"],
+ source=module.params["source"],
+ fragment_packet=module.params["fragment"],
+ method=module.params["method"],
+ resolve_hostname=module.params["resolve"],
+ )
+ elif module.params["source"] and not module.params["component"]:
+ res = blade.get_network_interfaces_trace(
+ port=module.params["port"],
+ destination=module.params["destination"],
+ discover_mtu=module.params["discover_mtu"],
+ source=module.params["source"],
+ fragment_packet=module.params["fragment"],
+ method=module.params["method"],
+ resolve_hostname=module.params["resolve"],
+ )
+ elif not module.params["source"] and module.params["component"]:
+ res = blade.get_network_interfaces_trace(
+ port=module.params["port"],
+ destination=module.params["destination"],
+ discover_mtu=module.params["discover_mtu"],
+ component=module.params["component"],
+ fragment_packet=module.params["fragment"],
+ method=module.params["method"],
+ resolve_hostname=module.params["resolve"],
+ )
+ else:
+ res = blade.get_network_interfaces_trace(
+ port=module.params["port"],
+ destination=module.params["destination"],
+ discover_mtu=module.params["discover_mtu"],
+ fragment_packet=module.params["fragment"],
+ method=module.params["method"],
+ resolve_hostname=module.params["resolve"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to run trace. Error: {0}".format(res.errors[0].message)
+ )
+ else:
+ responses = list(res.items)
+ for resp in range(0, len(responses)):
+ comp_name = responses[resp].component_name.replace(".", "_")
+ trace_fact[comp_name] = {
+ "details": responses[resp].details,
+ }
+
+ module.exit_json(changed=False, tracefact=trace_fact)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ action=dict(type="str", choices=["ping", "trace"], default="ping"),
+ method=dict(type="str", choices=["icmp", "tcp", "udp"], default="udp"),
+ destination=dict(type="str", required=True),
+ source=dict(type="str"),
+ component=dict(type="str"),
+ port=dict(type="str"),
+ count=dict(type="int", default=1),
+ packet_size=dict(type="int", default=56),
+ resolve=dict(type="bool", default=True),
+ fragment=dict(type="bool", default=True),
+ latency=dict(type="bool", default=False),
+ discover_mtu=dict(type="bool", default=False),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ blade = get_system(module)
+ if module.params["action"] == "ping":
+ run_ping(module, blade)
+ else:
+ run_trace(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
new file mode 100644
index 000000000..273166de8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
@@ -0,0 +1,2079 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_policy
+version_added: '1.0.0'
+short_description: Manage FlashBlade policies
+description:
+- Manage policies for filesystem, file replica links and object store access.
+- To update an existing snapshot policy rule, you must first delete the
+ original rule and then add the new rule to replace it. Purity's best-fit
+ will try to ensure that any required snapshots deleted on the deletion of
+ the first rule will be recovered, as long as the replacement rule is added
+ before the snapshot eradication period is exceeded (usually 24 hours).
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete policy.
+ - Copy is applicable only to Object Store Access Policies Rules
+ default: present
+ type: str
+ choices: [ absent, present, copy ]
+ target:
+ description:
+ - Name of policy to copy rule to
+ type: str
+ version_added: "1.9.0"
+ target_rule:
+ description:
+ - Name of the rule to copy the existing rule to.
+ - If not defined the existing rule name is used.
+ type: str
+ version_added: "1.9.0"
+ policy_type:
+ description:
+ - Type of policy
+ default: snapshot
+ type: str
+ choices: [ snapshot, access, nfs ]
+ version_added: "1.9.0"
+ account:
+ description:
+ - Name of Object Store account policy applies to.
+ - B(Special Case) I(pure:policy) is used for the system-wide S3 policies
+ type: str
+ version_added: "1.9.0"
+ rule:
+ description:
+ - Name of the rule for the Object Store Access Policy
+ - Rules in system wide policies cannot be deleted or modified
+ type: str
+ version_added: "1.9.0"
+ effect:
+ description:
+ - Allow S3 requests that match all of the I(actions) item selected.
+ Rules are additive.
+ type: str
+ default: allow
+ choices: [ allow ]
+ version_added: "1.9.0"
+ actions:
+ description:
+ - List of permissions to grant.
+ - System-wide policy rules cannot be deleted or modified
+ type: list
+ elements: str
+ choices:
+ - s3:*
+ - s3:AbortMultipartUpload
+ - s3:CreateBucket
+ - s3:DeleteBucket
+ - s3:DeleteObject
+ - s3:DeleteObjectVersion
+ - s3:ExtendSafemodeRetentionPeriod
+ - s3:GetBucketAcl
+ - s3:GetBucketLocation
+ - s3:GetBucketVersioning
+ - s3:GetLifecycleConfiguration
+ - s3:GetObject
+ - s3:GetObjectAcl
+ - s3:GetObjectVersion
+ - s3:ListAllMyBuckets
+ - s3:ListBucket
+ - s3:ListBucketMultipartUploads
+ - s3:ListBucketVersions
+ - s3:ListMultipartUploadParts
+ - s3:PutBucketVersioning
+ - s3:PutLifecycleConfiguration
+ - s3:PutObject
+ version_added: "1.9.0"
+ object_resources:
+ description:
+ - List of bucket names and object paths, with a wildcard (*) to
+ specify objects in a bucket; e.g., bucket1, bucket1/*, bucket2,
+ bucket2/*.
+ - System-wide policy rules cannot be deleted or modified
+ type: list
+ elements: str
+ version_added: "1.9.0"
+ source_ips:
+ description:
+ - List of IPs and subnets from which this rule should allow requests;
+ e.g., 10.20.30.40, 10.20.30.0/24, 2001:DB8:1234:5678::/64.
+ - System-wide policy rules cannot be deleted or modified
+ type: list
+ elements: str
+ version_added: "1.9.0"
+ s3_prefixes:
+ description:
+ - List of 'folders' (object key prefixes) for which object listings
+ may be requested.
+ - System-wide policy rules cannot be deleted or modified
+ type: list
+ elements: str
+ version_added: "1.9.0"
+ s3_delimiters:
+ description:
+ - List of delimiter characters allowed in object list requests.
+ - Grants permissions to list 'folder names' (prefixes ending in a
+ delimiter) instead of object keys.
+ - System-wide policy rules cannot be deleted or modified
+ type: list
+ elements: str
+ version_added: "1.9.0"
+ ignore_enforcement:
+ description:
+ - Certain combinations of actions and other rule elements are inherently
+ ignored if specified together in a rule.
+ - If set to true, operations which attempt to set these combinations will fail.
+ - If set to false, such operations will instead be allowed.
+ type: bool
+ default: true
+ version_added: "1.9.0"
+ user:
+ description:
+ - User in the I(account) that the policy is granted to.
+ type: str
+ version_added: "1.9.0"
+ force_delete:
+ description:
+ - Force the deletion of an Object Store Access Policy if it
+ has attached users.
+ - WARNING This can have undesired side-effects.
+ - System-wide policies cannot be deleted
+ type: bool
+ default: false
+ version_added: "1.9.0"
+ name:
+ description:
+ - Name of the policy
+ type: str
+ enabled:
+ description:
+ - State of policy
+ type: bool
+ default: true
+ every:
+ description:
+ - Interval between snapshots in seconds
+ - Range available 300 - 31536000 (equates to 5m to 365d)
+ type: int
+ keep_for:
+ description:
+ - How long to keep snapshots for
+ - Range available 300 - 31536000 (equates to 5m to 365d)
+ - Must not be set less than I(every)
+ type: int
+ at:
+ description:
+ - Provide a time in 12-hour AM/PM format, eg. 11AM
+ type: str
+ timezone:
+ description:
+ - Time Zone used for the I(at) parameter
+ - If not provided, the module will attempt to get the current local timezone from the server
+ type: str
+ filesystem:
+ description:
+ - List of filesystems to add to a policy on creation
+ - To amend policy members use the I(purestorage.flashblade.purefb_fs) module
+ type: list
+ elements: str
+ replica_link:
+ description:
+ - List of filesystem replica links to add to a policy on creation
+ - To amend policy members use the I(purestorage.flashblade.purefb_fs_replica) module
+ type: list
+ elements: str
+ access:
+ description:
+ - Specifies access control for the export policy rule
+ type: str
+ choices: [ root-squash, all-squash, no-squash ]
+ default: root-squash
+ version_added: "1.9.0"
+ anonuid:
+ description:
+ - Any user whose UID is affected by an I(access) of `root-squash` or `all-squash`
+ will have their UID mapped to anonuid.
+ The default anonuid is null, which means 65534.
+ Use "" to clear.
+ type: str
+ version_added: "1.9.0"
+ anongid:
+ description:
+ - Any user whose GID is affected by an I(access) of `root-squash` or `all-squash`
+ will have their GID mapped to anongid.
+ The default anongid is null, which means 65534.
+ Use "" to clear.
+ type: str
+ version_added: "1.9.0"
+ atime:
+ description:
+ - After a read operation has occurred, the inode access time is updated only if any
+ of the following conditions is true; the previous access time is less than the
+ inode modify time, the previous access time is less than the inode change time,
+ or the previous access time is more than 24 hours ago.
+ - If set to false, disables the update of inode access times after read operations.
+ type: bool
+ default: true
+ version_added: "1.9.0"
+ client:
+ description:
+ - Specifies the clients that will be permitted to access the export.
+ - Accepted notation is a single IP address, subnet in CIDR notation, netgroup, or
+ anonymous (*).
+ type: str
+ default: "*"
+ version_added: "1.9.0"
+ fileid_32bit:
+ description:
+ - Whether the file id is 32 bits or not.
+ type: bool
+ default: false
+ version_added: "1.9.0"
+ permission:
+ description:
+ - Specifies which read-write client access permissions are allowed for the export.
+ type: str
+ choices: [ rw, ro ]
+ default: ro
+ version_added: "1.9.0"
+ secure:
+ description:
+ - If true, this prevents NFS access to client connections coming from non-reserved ports.
+ - If false, allows NFS access to client connections coming from non-reserved ports.
+ - Applies to NFSv3, NFSv4.1, and auxiliary protocols MOUNT and NLM.
+ type: bool
+ default: false
+ version_added: "1.9.0"
+ security:
+ description:
+ - The security flavors to use for accessing files on this mount point.
+ - If the server does not support the requested flavor, the mount operation fails.
+ - I(sys) trusts the client to specify the user's identity.
+ - I(krb5) provides cryptographic proof of a user's identity in each RPC request.
+ - I(krb5i) adds integrity checking to krb5, to ensure the data has not been tampered with.
+ - I(krb5p) adds integrity checking and encryption to krb5.
+ type: list
+ elements: str
+ choices: [ sys, krb5, krb5i, krb5p ]
+ default: sys
+ version_added: "1.9.0"
+ before_rule:
+ description:
+ - The index of the client rule to insert or move a client rule before.
+ type: int
+ version_added: "1.9.0"
+ rename:
+ description:
+ - New name for export policy
+ - Only applies to NFS export policies
+ type: str
+ version_added: "1.10.0"
+ destroy_snapshots:
+ description:
+ - This parameter must be set to true in order to modify a policy such that local or remote snapshots would be destroyed.
+ type: bool
+ version_added: '1.11.0'
+ default: false
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create a simple snapshot policy with no rules
+ purestorage.flashblade.purefb_policy:
+ name: test_policy
+ policy_type: snapshot
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a snapshot policy and connect to existing filesystems and filesystem replica links
+ purestorage.flashblade.purefb_policy:
+ name: test_policy_with_members
+ policy_type: snapshot
+ filesystem:
+ - fs1
+ - fs2
+ replica_link:
+ - rl1
+ - rl2
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a snapshot policy with rules
+ purestorage.flashblade.purefb_policy:
+ name: test_policy2
+ policy_type: snapshot
+ at: 11AM
+ keep_for: 86400
+ every: 86400
+ timezone: Asia/Shanghai
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a snapshot policy
+ purestorage.flashblade.purefb_policy:
+ name: test_policy
+ policy_type: snapshot
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an empty object store access policy
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy
+ account: test
+ policy_type: access
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an empty object store access policy and assign user
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy
+ account: test
+ policy_type: access
+ user: fred
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a object store access policy with simple rule
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ policy_type: access
+ account: test
+ rule: rule1
+ actions: "s3:*"
+ object_resources: "*"
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an empty NFS export policy
+ purestorage.flashblade.purefb_policy:
+ name: test_nfs_export
+ policy_type: nfs
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an NFS export policy with a client rule
+ purestorage.flashblade.purefb_policy:
+ name: test_nfs_export
+ policy_type: nfs
+ atime: true
+ client: "10.0.1.0/24"
+ secure: true
+ security: [sys, krb5]
+ permission: rw
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a new rule for an existing NFS export policy
+ purestorage.flashblade.purefb_policy:
+ name: test_nfs_export
+ policy_type: nfs
+ atime: true
+ client: "10.0.2.0/24"
+ security: sys
+ permission: ro
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a client rule from an NFS export policy
+ purestorage.flashblade.purefb_policy:
+ name: test_nfs_export
+ client: "10.0.1.0/24"
+ policy_type: nfs
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete an NFS export policy and all associated rules
+ purestorage.flashblade.purefb_policy:
+ name: test_nfs_export
+ state: absent
+ policy_type: nfs
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a rule from an object store access policy
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ account: test
+ policy_type: access
+ rule: rule1
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a user from an object store access policy
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ account: test
+ user: fred
+ policy_type: access
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete an object store access policy with attached users (USE WITH CAUTION)
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ account: test
+ policy_type: access
+ force_delete: true
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete an object store access policy with no attached users
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ account: test
+ policy_type: access
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Copy an object store access policy rule to another existing policy
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ policy_type: access
+ account: test
+ target: "account2/anotherpolicy"
+ target_rule: new_rule1
+ state: copy
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Rename an NFS Export Policy
+ purestorage.flashblade.purefb_policy:
+ name: old_name
+ policy_type: nfs
+ rename: new_name
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
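+# A hedged illustration (not part of the original examples): replacing an existing
+# snapshot rule by deleting it and then adding its replacement, as described in the
+# module notes above. All values are placeholders.
+- name: Remove the old snapshot rule
+ purestorage.flashblade.purefb_policy:
+ name: test_policy2
+ policy_type: snapshot
+ at: 11AM
+ keep_for: 86400
+ every: 86400
+ timezone: Asia/Shanghai
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Add the replacement snapshot rule
+ purestorage.flashblade.purefb_policy:
+ name: test_policy2
+ policy_type: snapshot
+ at: 1PM
+ keep_for: 172800
+ every: 86400
+ timezone: Asia/Shanghai
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6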
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import Policy, PolicyRule, PolicyPatch
+except ImportError:
+ HAS_PURITYFB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient.flashblade import (
+ PolicyRuleObjectAccessCondition,
+ PolicyRuleObjectAccessPost,
+ PolicyRuleObjectAccess,
+ NfsExportPolicy,
+ NfsExportPolicyRule,
+ Policy,
+ PolicyRule,
+ )
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+HAS_PYTZ = True
+try:
+ import pytz
+except ImportError:
+ HAS_PYTZ = False
+
+import os
+import re
+import platform
+
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+SNAPSHOT_POLICY_API_VERSION = "2.1"
+ACCESS_POLICY_API_VERSION = "2.2"
+NFS_POLICY_API_VERSION = "2.3"
+NFS_RENAME_API_VERSION = "2.4"
+
+
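+# Worked example of the conversion below (descriptive comment only): the policy rules
+# express the 'at' time as milliseconds since midnight, so "11AM" becomes
+# 11 * 3600000 = 39600000 and "1PM" becomes (1 + 12) * 3600000 = 46800000.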
+def _convert_to_millisecs(hour):
+ if hour[-2:] == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:] == "AM":
+ return int(hour[:-2]) * 3600000
+ elif hour[-2:] == "PM" and hour[:2] == "12":
+ return 43200000
+ return (int(hour[:-2]) + 12) * 3600000
+
+
+def _findstr(text, match):
+ found = None
+ for line in text.splitlines():
+ if match in line:
+ found = line
+ return found
+
+
+def _get_local_tz(module, timezone="UTC"):
+ """
+ We will attempt to get the local timezone of the server running the module and use that.
+ If we can't get the timezone then we will set the default to be UTC
+
+ Linux has been tested and other operating systems should be OK.
+ Failures cause assumption of UTC.
+
+ Windows is not supported and will assume UTC
+ """
+ if platform.system() == "Linux":
+ timedatectl = get_bin_path("timedatectl")
+ if timedatectl is not None:
+ rcode, stdout, stderr = module.run_command(timedatectl)
+ if rcode == 0 and stdout:
+ line = _findstr(stdout, "Time zone")
+ full_tz = line.split(":", 1)[1].rstrip()
+ timezone = full_tz.split()[0]
+ return timezone
+ else:
+ module.warn("Incorrect timedatectl output. Timezone will be set to UTC")
+ else:
+ if os.path.exists("/etc/timezone"):
+ timezone = get_file_content("/etc/timezone")
+ else:
+ module.warn("Could not find /etc/timezone. Assuming UTC")
+
+ elif platform.system() == "SunOS":
+ if os.path.exists("/etc/default/init"):
+ for line in get_file_content("/etc/default/init", "").splitlines():
+ if line.startswith("TZ="):
+ timezone = line.split("=", 1)[1]
+ return timezone
+ else:
+ module.warn("Could not find /etc/default/init. Assuming UTC")
+
+ elif re.match("^Darwin", platform.platform()):
+ systemsetup = get_bin_path("systemsetup")
+ if systemsetup is not None:
+ rcode, stdout, stderr = module.run_command([systemsetup, "-gettimezone"])
+ if rcode == 0 and stdout:
+ timezone = stdout.split(":", 1)[1].lstrip()
+ else:
+ module.warn("Could not run systemsetup. Assuming UTC")
+ else:
+ module.warn("Could not find systemsetup. Assuming UTC")
+
+ elif re.match("^(Free|Net|Open)BSD", platform.platform()):
+ if os.path.exists("/etc/timezone"):
+ timezone = get_file_content("/etc/timezone")
+ else:
+ module.warn("Could not find /etc/timezone. Assuming UTC")
+
+ elif platform.system() == "AIX":
+ aix_oslevel = int(platform.version() + platform.release())
+ if aix_oslevel >= 61:
+ if os.path.exists("/etc/environment"):
+ for line in get_file_content("/etc/environment", "").splitlines():
+ if line.startswith("TZ="):
+ timezone = line.split("=", 1)[1]
+ return timezone
+ else:
+ module.warn("Could not find /etc/environment. Assuming UTC")
+ else:
+ module.warn(
+ "Cannot determine timezone when AIX os level < 61. Assuming UTC"
+ )
+
+ else:
+ module.warn("Could not find /etc/timezone. Assuming UTC")
+
+ return timezone
+
+
+def delete_nfs_policy(module, blade):
+ """Delete NFS Export Policy, or Rule
+
+ If client is provided then delete the client rule if it exists.
+ """
+
+ changed = False
+ policy_delete = True
+ if module.params["client"]:
+ policy_delete = False
+ res = blade.get_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="client='" + module.params["client"] + "'",
+ )
+ if res.status_code == 200:
+ if res.total_item_count == 0:
+ pass
+ elif res.total_item_count == 1:
+ rule = list(res.items)[0]
+ if module.params["client"] == rule.client:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_nfs_export_policies_rules(names=[rule.name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule for client {0} in policy {1}. "
+ "Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ rules = list(res.items)
+ for cli in range(0, len(rules)):
+ if rules[cli].client == "*":
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_nfs_export_policies_rules(
+ names=[rules[cli].name]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule for client {0} in policy {1}. "
+ "Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ if policy_delete:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_nfs_export_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete export policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def rename_nfs_policy(module, blade):
+ """Rename NFS Export Policy"""
+
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_nfs_export_policies(
+ names=[module.params["name"]],
+ policy=NfsExportPolicy(name=module.params["rename"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename NFS export policy {0} to {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["rename"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_nfs_policy(module, blade):
+ """Update NFS Export Policy Rule"""
+
+ changed = False
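+ # If no rule matches the requested client a new rule is created; otherwise any
+ # changed attributes are patched in place, the rule is optionally moved before
+ # another rule, and finally the policy enabled state is reconciled.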
+ if module.params["client"]:
+ current_policy_rule = blade.get_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="client='" + module.params["client"] + "'",
+ )
+ if (
+ current_policy_rule.status_code == 200
+ and current_policy_rule.total_item_count == 0
+ ):
+ rule = NfsExportPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ anonuid=module.params["anonuid"],
+ anongid=module.params["anongid"],
+ fileid_32bit=module.params["fileid_32bit"],
+ atime=module.params["atime"],
+ secure=module.params["secure"],
+ security=module.params["security"],
+ )
+ changed = True
+ if not module.check_mode:
+ if module.params["before_rule"]:
+ before_name = (
+ module.params["name"] + "." + str(module.params["before_rule"])
+ )
+ res = blade.post_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ before_rule_name=before_name,
+ )
+ else:
+ res = blade.post_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for client {0} "
+ "in export policy {1}. Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ rules = list(current_policy_rule.items)
+ cli_count = None
+ done = False
+ if module.params["client"] == "*":
+ for cli in range(0, len(rules)):
+ if rules[cli].client == "*":
+ cli_count = cli
+ if cli_count is None:
+ rule = NfsExportPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ anonuid=module.params["anonuid"],
+ anongid=module.params["anongid"],
+ fileid_32bit=module.params["fileid_32bit"],
+ atime=module.params["atime"],
+ secure=module.params["secure"],
+ security=module.params["security"],
+ )
+ done = True
+ changed = True
+ if not module.check_mode:
+ if module.params["before_rule"]:
+ res = blade.post_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ before_rule_name=(
+ module.params["name"]
+ + "."
+ + str(module.params["before_rule"]),
+ ),
+ )
+ else:
+ res = blade.post_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for "
+ "client {0} in export policy {1}. Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ if not done:
+ old_policy_rule = rules[0]
+ current_rule = {
+ "anongid": getattr(old_policy_rule, "anongid", None),
+ "anonuid": getattr(old_policy_rule, "anonuid", None),
+ "atime": old_policy_rule.atime,
+ "client": sorted(old_policy_rule.client),
+ "fileid_32bit": old_policy_rule.fileid_32bit,
+ "permission": sorted(old_policy_rule.permission),
+ "secure": old_policy_rule.secure,
+ "security": sorted(old_policy_rule.security),
+ }
+ if module.params["permission"]:
+ new_permission = sorted(module.params["permission"])
+ else:
+ new_permission = sorted(current_rule["permission"])
+ if module.params["client"]:
+ new_client = sorted(module.params["client"])
+ else:
+ new_client = sorted(current_rule["client"])
+ if module.params["security"]:
+ new_security = sorted(module.params["security"])
+ else:
+ new_security = sorted(current_rule["security"])
+ if module.params["anongid"]:
+ new_anongid = module.params["anongid"]
+ else:
+ new_anongid = current_rule["anongid"]
+ if module.params["anonuid"]:
+ new_anonuid = module.params["anonuid"]
+ else:
+ new_anonuid = current_rule["anonuid"]
+ if module.params["atime"] != current_rule["atime"]:
+ new_atime = module.params["atime"]
+ else:
+ new_atime = current_rule["atime"]
+ if module.params["secure"] != current_rule["secure"]:
+ new_secure = module.params["secure"]
+ else:
+ new_secure = current_rule["secure"]
+ if module.params["fileid_32bit"] != current_rule["fileid_32bit"]:
+ new_fileid_32bit = module.params["fileid_32bit"]
+ else:
+ new_fileid_32bit = current_rule["fileid_32bit"]
+ new_rule = {
+ "anongid": new_anongid,
+ "anonuid": new_anonuid,
+ "atime": new_atime,
+ "client": new_client,
+ "fileid_32bit": new_fileid_32bit,
+ "permission": new_permission,
+ "secure": new_secure,
+ "security": new_security,
+ }
+ if current_rule != new_rule:
+ changed = True
+ if not module.check_mode:
+ rule = NfsExportPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ anonuid=module.params["anonuid"],
+ anongid=module.params["anongid"],
+ fileid_32bit=module.params["fileid_32bit"],
+ atime=module.params["atime"],
+ secure=module.params["secure"],
+ security=module.params["security"],
+ )
+ res = blade.patch_nfs_export_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update NFS export rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ if (
+ module.params["before_rule"]
+ and module.params["before_rule"] != old_policy_rule.index
+ ):
+ changed = True
+ if not module.check_mode:
+ before_name = (
+ module.params["name"]
+ + "."
+ + str(module.params["before_rule"])
+ )
+ res = blade.patch_nfs_export_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=NfsExportPolicyRule(),
+ before_rule_name=before_name,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to move NFS export rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ current_policy = list(
+ blade.get_nfs_export_policies(names=[module.params["name"]]).items
+ )[0]
+ if current_policy.enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_nfs_export_policies(
+ policy=NfsExportPolicy(enabled=module.params["enabled"]),
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change state of nfs export policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_nfs_policy(module, blade):
+ """Create NFS Export Policy"""
+ changed = True
+ if not module.check_mode:
+ res = blade.post_nfs_export_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create nfs export policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["enabled"]:
+ res = blade.patch_nfs_export_policies(
+ policy=NfsExportPolicy(enabled=False), names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ blade.delete_nfs_export_policies(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to create nfs export policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["client"]:
+ module.fail_json(msg="client is required to create a new rule")
+ else:
+ rule = NfsExportPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ anonuid=module.params["anonuid"],
+ anongid=module.params["anongid"],
+ fileid_32bit=module.params["fileid_32bit"],
+ atime=module.params["atime"],
+ secure=module.params["secure"],
+ security=module.params["security"],
+ )
+ res = blade.post_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rule for policy {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_os_policy(module, blade):
+ """Delete Object Store Access Policy, Rule, or User
+
+ If rule is provided then delete the rule if it exists.
+ If user is provided then remove grant from user if granted.
+ If no user or rule provided delete the whole policy.
+ Cannot delete a policy with attached users, so delete all users
+ if the force_delete option is selected.
+ """
+
+ changed = False
+ policy_name = module.params["account"] + "/" + module.params["name"]
+ policy_delete = True
+ if module.params["rule"]:
+ policy_delete = False
+ res = blade.get_object_store_access_policies_rules(
+ policy_names=[policy_name], names=[module.params["rule"]]
+ )
+ if res.status_code == 200 and res.total_item_count != 0:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_object_store_access_policies_object_store_rules(
+ policy_names=[policy_name], names=[module.params["rule"]]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete users from policy {0}. Error: {1} - {2}".format(
+ policy_name, res.errors[0].context, res.errors[0].message
+ )
+ )
+
+ if module.params["user"]:
+ member_name = module.params["account"] + "/" + module.params["user"]
+ policy_delete = False
+ res = blade.get_object_store_access_policies_object_store_users(
+ policy_names=[policy_name], member_names=[member_name]
+ )
+ if res.status_code == 200 and res.total_item_count != 0:
+ changed = True
+ if not module.check_mode:
+ member_name = module.params["account"] + "/" + module.params["user"]
+ res = blade.delete_object_store_access_policies_object_store_users(
+ policy_names=[policy_name], member_names=[member_name]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete users from policy {0}. Error: {1} - {2}".format(
+ policy_name, res.errors[0].context, res.errors[0].message
+ )
+ )
+
+ if policy_delete:
+ if module.params["account"].lower() == "pure:policy":
+ module.fail_json(msg="System-Wide policies cannot be deleted.")
+ policy_users = list(
+ blade.get_object_store_access_policies_object_store_users(
+ policy_names=[policy_name]
+ ).items
+ )
+ if len(policy_users) == 0:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_object_store_access_policies(names=[policy_name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete policy {0}. Error: {1}".format(
+ policy_name, res.errors[0].message
+ )
+ )
+ else:
+ if module.params["force_delete"]:
+ changed = True
+ if not module.check_mode:
+ for user in range(0, len(policy_users)):
+ res = blade.delete_object_store_access_policies_object_store_users(
+ member_names=[policy_users[user].member.name],
+ policy_names=[policy_name],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete user {0} from policy {1}, "
+ "Error: {2}".format(
+ policy_users[user].member,
+ policy_name,
+ res.errors[0].message,
+ )
+ )
+ res = blade.delete_object_store_access_policies(names=[policy_name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete policy {0}. Error: {1}".format(
+ policy_name, res.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Policy {0} cannot be deleted with connected users".format(
+ policy_name
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_os_policy(module, blade):
+ """Create Object Store Access Policy"""
+ changed = True
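+ # Object store access policies are namespaced by account and addressed as '<account>/<policy-name>'.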
+ policy_name = module.params["account"] + "/" + module.params["name"]
+ if not module.check_mode:
+ res = blade.post_object_store_access_policies(names=[policy_name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create access policy {0}.".format(policy_name)
+ )
+ if module.params["rule"]:
+ if not module.params["actions"] or not module.params["object_resources"]:
+ module.fail_json(
+ msg="Parameters `actions` and `object_resources` "
+ "are required to create a new rule"
+ )
+ conditions = PolicyRuleObjectAccessCondition(
+ source_ips=module.params["source_ips"],
+ s3_delimiters=module.params["s3_delimiters"],
+ s3_prefixes=module.params["s3_prefixes"],
+ )
+ rule = PolicyRuleObjectAccessPost(
+ actions=module.params["actions"],
+ resources=module.params["object_resources"],
+ conditions=conditions,
+ )
+ res = blade.post_object_store_access_policies_rules(
+ policy_names=policy_name,
+ names=[module.params["rule"]],
+ enforce_action_restrictions=module.params["ignore_enforcement"],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule {0} to policy {1}. Error: {2}".format(
+ module.params["rule"], policy_name, res.errors[0].message
+ )
+ )
+ if module.params["user"]:
+ member_name = module.params["account"] + "/" + module.params["user"]
+ res = blade.post_object_store_access_policies_object_store_users(
+ member_names=[member_name], policy_names=[policy_name]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add users to policy {0}. Error: {1} - {2}".format(
+ policy_name, res.errors[0].context, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_os_policy(module, blade):
+ """Update Object Store Access Policy"""
+ changed = False
+ policy_name = module.params["account"] + "/" + module.params["name"]
+ if module.params["rule"]:
+ current_policy_rule = blade.get_object_store_access_policies_rules(
+ policy_names=[policy_name], names=[module.params["rule"]]
+ )
+ if current_policy_rule.status_code != 200:
+ conditions = PolicyRuleObjectAccessCondition(
+ source_ips=module.params["source_ips"],
+ s3_delimiters=module.params["s3_delimiters"],
+ s3_prefixes=module.params["s3_prefixes"],
+ )
+ rule = PolicyRuleObjectAccessPost(
+ actions=module.params["actions"],
+ resources=module.params["object_resources"],
+ conditions=conditions,
+ )
+ res = blade.post_object_store_access_policies_rules(
+ policy_names=policy_name,
+ names=[module.params["rule"]],
+ enforce_action_restrictions=module.params["ignore_enforcement"],
+ rule=rule,
+ )
+ else:
+ old_policy_rule = list(current_policy_rule.items)[0]
+ current_rule = {
+ "actions": old_policy_rule.actions,
+ "resources": old_policy_rule.resources,
+ "ips": getattr(old_policy_rule.conditions, "source_ips", None),
+ "prefixes": getattr(old_policy_rule.conditions, "s3_prefixes", None),
+ "delimiters": getattr(
+ old_policy_rule.conditions, "s3_delimiters", None
+ ),
+ }
+ if module.params["actions"]:
+ new_actions = sorted(module.params["actions"])
+ else:
+ new_actions = sorted(current_rule["actions"])
+ if module.params["object_resources"]:
+ new_resources = sorted(module.params["object_resources"])
+ else:
+ new_resources = sorted(current_rule["resources"])
+ if module.params["s3_prefixes"]:
+ new_prefixes = sorted(module.params["s3_prefixes"])
+ elif current_rule["prefixes"]:
+ new_prefixes = sorted(current_rule["prefixes"])
+ else:
+ new_prefixes = None
+ if module.params["s3_delimiters"]:
+ new_delimiters = sorted(module.params["s3_delimiters"])
+ elif current_rule["delimiters"]:
+ new_delimiters = sorted(current_rule["delimiters"])
+ else:
+ new_delimiters = None
+ if module.params["source_ips"]:
+ new_ips = sorted(module.params["source_ips"])
+ elif current_rule["ips"]:
+ new_ips = sorted(current_rule["ips"])
+ else:
+ new_ips = None
+ new_rule = {
+ "actions": new_actions,
+ "resources": new_resources,
+ "ips": new_ips,
+ "prefixes": new_prefixes,
+ "delimiters": new_delimiters,
+ }
+ if current_rule != new_rule:
+ changed = True
+ if not module.check_mode:
+ conditions = PolicyRuleObjectAccessCondition(
+ source_ips=new_rule["ips"],
+ s3_prefixes=new_rule["prefixes"],
+ s3_delimiters=new_rule["delimiters"],
+ )
+ rule = PolicyRuleObjectAccess(
+ actions=new_rule["actions"],
+ resources=new_rule["resources"],
+ conditions=conditions,
+ )
+ res = blade.patch_object_store_access_policies_rules(
+ policy_names=[policy_name],
+ names=[module.params["rule"]],
+ rule=rule,
+ enforce_action_restrictions=module.params["ignore_enforcement"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update rule {0} in policy {1}. Error: {2}".format(
+ module.params["rule"], policy_name, res.errors[0].message
+ )
+ )
+ if module.params["user"]:
+ member_name = module.params["account"] + "/" + module.params["user"]
+ res = blade.get_object_store_access_policies_object_store_users(
+ policy_names=[policy_name], member_names=[member_name]
+ )
+ if res.status_code != 200 or (
+ res.status_code == 200 and res.total_item_count == 0
+ ):
+ changed = True
+ if not module.check_mode:
+ res = blade.post_object_store_access_policies_object_store_users(
+ member_names=[member_name], policy_names=[policy_name]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add user {0} to policy {1}. Error: {2}".format(
+ member_name, policy_name, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def copy_os_policy_rule(module, blade):
+ """Copy an existing policy rule to a new policy"""
+ changed = True
+ policy_name = module.params["account"] + "/" + module.params["name"]
+ if not module.params["target_rule"]:
+ module.params["target_rule"] = module.params["rule"]
+ if (
+ blade.get_object_store_access_policies_rules(
+ policy_names=[module.params["target"]], names=[module.params["target_rule"]]
+ ).status_code
+ == 200
+ ):
+ module.fail_json(
+ msg="Target rule {0} already exists in policy {1}".format(
+ module.params["target_rule"], policy_name
+ )
+ )
+ current_rule = list(
+ blade.get_object_store_access_policies_rules(
+ policy_names=[policy_name], names=[module.params["rule"]]
+ ).items
+ )[0]
+ if not module.check_mode:
+ conditions = PolicyRuleObjectAccessCondition(
+ source_ips=current_rule.conditions.source_ips,
+ s3_delimiters=current_rule.conditions.s3_delimiters,
+ s3_prefixes=current_rule.conditions.s3_prefixes,
+ )
+ rule = PolicyRuleObjectAccessPost(
+ actions=current_rule.actions,
+ resources=current_rule.resources,
+ conditions=conditions,
+ )
+ res = blade.post_object_store_access_policies_rules(
+ policy_names=module.params["target"],
+ names=[module.params["target_rule"]],
+ enforce_action_restrictions=module.params["ignore_enforcement"],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to copy rule {0} from policy {1} to policy {2}. "
+ "Error: {3}".format(
+ module.params["rule"],
+ policy_name,
+ module.params["target"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_policy(module, blade):
+ """Delete policy"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.policies.delete_policies(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete policy {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_snap_policy(module, blade):
+ """Delete REST 2 snapshot policy
+
+ If any rule parameters are provided then delete any rules that match
+ all of the parameters provided.
+ If no rule parameters are provided delete the entire policy
+ """
+
+ changed = False
+ rule_delete = False
+ if (
+ module.params["at"]
+ or module.params["every"]
+ or module.params["timezone"]
+ or module.params["keep_for"]
+ ):
+ rule_delete = True
+ if rule_delete:
+ current_rules = list(blade.get_policies(names=[module.params["name"]]).items)[
+ 0
+ ].rules
+ for rule in range(0, len(current_rules)):
+ current_rule = {
+ "at": current_rules[rule].at,
+ "every": current_rules[rule].every,
+ "keep_for": current_rules[rule].keep_for,
+ "time_zone": current_rules[rule].time_zone,
+ }
+ if not module.params["at"]:
+ delete_at = current_rules[rule].at
+ else:
+ delete_at = _convert_to_millisecs(module.params["at"])
+ if module.params["keep_for"]:
+ delete_keep_for = module.params["keep_for"]
+ else:
+ delete_keep_for = int(current_rules[rule].keep_for / 1000)
+ if module.params["every"]:
+ delete_every = module.params["every"]
+ else:
+ delete_every = int(current_rules[rule].every / 1000)
+ if not module.params["timezone"]:
+ delete_tz = current_rules[rule].time_zone
+ else:
+ delete_tz = module.params["timezone"]
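+ # Rules returned by the API express every/keep_for in milliseconds, so the
+ # second-based module values are scaled by 1000 before comparison.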
+ delete_rule = {
+ "at": delete_at,
+ "every": delete_every * 1000,
+ "keep_for": delete_keep_for * 1000,
+ "time_zone": delete_tz,
+ }
+ if current_rule == delete_rule:
+ changed = True
+ attr = PolicyPatch(remove_rules=[delete_rule])
+ if not module.check_mode:
+ res = blade.patch_policies(
+ destroy_snapshots=module.params["destroy_snapshots"],
+ names=[module.params["name"]],
+ policy=attr,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete policy rule {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_snap_policy(module, blade):
+ """Create REST 2 snapshot policy"""
+ changed = True
+ if (
+ module.params["keep_for"]
+ and not module.params["every"]
+ or module.params["every"]
+ and not module.params["keep_for"]
+ ):
+ module.fail_json(msg="`keep_for` and `every` are required.")
+ if module.params["timezone"] and not module.params["at"]:
+ module.fail_json(msg="`timezone` requires `at` to be provided.")
+ if module.params["at"] and not module.params["every"]:
+ module.fail_json(msg="`at` requires `every` to be provided.")
+
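+ # Note (descriptive comment): keep_for/every are supplied in seconds and converted to
+ # milliseconds for the API; 'at' additionally requires 'every' to be a whole number of
+ # days (a multiple of 86400 seconds).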
+ if not module.check_mode:
+ if module.params["at"] and module.params["every"]:
+ if not module.params["every"] % 86400 == 0:
+ module.fail_json(
+ msg="At time can only be set if every value is a multiple of 86400"
+ )
+ if not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in pytz.all_timezones_set:
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(
+ module.params["timezone"]
+ )
+ )
+ if not module.params["keep_for"]:
+ module.params["keep_for"] = 0
+ if not module.params["every"]:
+ module.params["every"] = 0
+ if module.params["keep_for"] < module.params["every"]:
+ module.fail_json(
+ msg="Retention period cannot be less than snapshot interval."
+ )
+ if module.params["at"] and not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in set(pytz.all_timezones_set):
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(module.params["timezone"])
+ )
+
+ if module.params["keep_for"]:
+ if not 300 <= module.params["keep_for"] <= 34560000:
+ module.fail_json(
+ msg="keep_for parameter is out of range (300 to 34560000)"
+ )
+ if not 300 <= module.params["every"] <= 34560000:
+ module.fail_json(
+ msg="every parameter is out of range (300 to 34560000)"
+ )
+ if module.params["at"]:
+ attr = Policy(
+ enabled=module.params["enabled"],
+ rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ at=_convert_to_millisecs(module.params["at"]),
+ time_zone=module.params["timezone"],
+ )
+ ],
+ )
+ else:
+ attr = Policy(
+ enabled=module.params["enabled"],
+ rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ )
+ ],
+ )
+ else:
+ attr = Policy(enabled=module.params["enabled"])
+ res = blade.post_policies(names=[module.params["name"]], policy=attr)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create snapshot policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_policy(module, blade):
+ """Create snapshot policy"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params["at"] and module.params["every"]:
+ if not module.params["every"] % 86400 == 0:
+ module.fail_json(
+ msg="At time can only be set if every value is a multiple of 86400"
+ )
+ if not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in pytz.all_timezones_set:
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(
+ module.params["timezone"]
+ )
+ )
+ if not module.params["keep_for"]:
+ module.params["keep_for"] = 0
+ if not module.params["every"]:
+ module.params["every"] = 0
+ if module.params["keep_for"] < module.params["every"]:
+ module.fail_json(
+ msg="Retention period cannot be less than snapshot interval."
+ )
+ if module.params["at"] and not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in set(pytz.all_timezones_set):
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(
+ module.params["timezone"]
+ )
+ )
+
+ if module.params["keep_for"]:
+ if not 300 <= module.params["keep_for"] <= 34560000:
+ module.fail_json(
+ msg="keep_for parameter is out of range (300 to 34560000)"
+ )
+ if not 300 <= module.params["every"] <= 34560000:
+ module.fail_json(
+ msg="every parameter is out of range (300 to 34560000)"
+ )
+ if module.params["at"]:
+ attr = Policy(
+ enabled=module.params["enabled"],
+ rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ at=_convert_to_millisecs(module.params["at"]),
+ time_zone=module.params["timezone"],
+ )
+ ],
+ )
+ else:
+ attr = Policy(
+ enabled=module.params["enabled"],
+ rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ )
+ ],
+ )
+ else:
+ attr = Policy(enabled=module.params["enabled"])
+ blade.policies.create_policies(names=[module.params["name"]], policy=attr)
+ except Exception:
+ module.fail_json(
+ msg="Failed to create policy {0}.".format(module.params["name"])
+ )
+ if module.params["filesystem"]:
+ try:
+ blade.file_systems.list_file_systems(names=module.params["filesystem"])
+ blade.policies.create_policy_filesystems(
+ policy_names=[module.params["name"]],
+ member_names=module.params["filesystem"],
+ )
+ except Exception:
+ blade.policies.delete_policies(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to connect filesystems to policy {0}, "
+ "or one of {1} doesn't exist.".format(
+ module.params["name"], module.params["filesystem"]
+ )
+ )
+ if module.params["replica_link"]:
+ for link in module.params["replica_link"]:
+ remote_array = (
+ blade.file_system_replica_links.list_file_system_replica_links(
+ local_file_system_names=[link]
+ )
+ )
+ try:
+ blade.policies.create_policy_file_system_replica_links(
+ policy_names=[module.params["name"]],
+ member_names=[link],
+ remote_names=[remote_array.items[0].remote.name],
+ )
+ except Exception:
+ blade.policies.delete_policies(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to connect filesystem replicsa link {0} to policy {1}. "
+ "Replica Link {0} does not exist.".format(
+ link, module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_snap_policy(module, blade):
+ """Update REST 2 snapshot policy
+
+ Add new rules to the policy using this function.
+ Should it be necessary to modify an existing rule, note the following:
+
+ Due to the 'best fit' nature of Purity we only add new rules in this function.
+ If you are trying to update an existing rule, this should be done by deleting
+ the current rule and then adding the new rule.
+
+ Purity may recover some snapshots as long as the add happens before the eradication delay
+ (typically 24h) causes the snapshots to be eradicated.
+ """
+
+ changed = False
+ if (
+ module.params["keep_for"]
+ and not module.params["every"]
+ or module.params["every"]
+ and not module.params["keep_for"]
+ ):
+ module.fail_json(msg="`keep_for` and `every` are required.")
+ if module.params["timezone"] and not module.params["at"]:
+ module.fail_json(msg="`timezone` requires `at` to be provided.")
+ if module.params["at"] and not module.params["every"]:
+ module.fail_json(msg="`at` requires `every` to be provided.")
+ current_rules = list(blade.get_policies(names=[module.params["name"]]).items)[
+ 0
+ ].rules
+ create_new = True
+ for rule in range(0, len(current_rules)):
+ current_rule = {
+ "at": current_rules[rule].at,
+ "every": current_rules[rule].every,
+ "keep_for": current_rules[rule].keep_for,
+ "time_zone": current_rules[rule].time_zone,
+ }
+ if not module.params["at"]:
+ new_at = current_rules[rule].at
+ else:
+ new_at = _convert_to_millisecs(module.params["at"])
+ if module.params["keep_for"]:
+ new_keep_for = module.params["keep_for"]
+ else:
+ new_keep_for = int(current_rules[rule].keep_for / 1000)
+ if module.params["every"]:
+ new_every = module.params["every"]
+ else:
+ new_every = int(current_rules[rule].every / 1000)
+ if not module.params["timezone"]:
+ new_tz = current_rules[rule].time_zone
+ else:
+ new_tz = module.params["timezone"]
+ new_rule = {
+ "at": new_at,
+ "every": new_every * 1000,
+ "keep_for": new_keep_for * 1000,
+ "time_zone": new_tz,
+ }
+ if current_rule == new_rule:
+ create_new = False
+
+ if create_new:
+ changed = True
+ if not module.check_mode:
+ if module.params["at"] and module.params["every"]:
+ if not module.params["every"] % 86400 == 0:
+ module.fail_json(
+ msg="At time can only be set if every value is a multiple of 86400"
+ )
+ if not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in pytz.all_timezones_set:
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(
+ module.params["timezone"]
+ )
+ )
+ if not module.params["keep_for"]:
+ module.params["keep_for"] = 0
+ if not module.params["every"]:
+ module.params["every"] = 0
+ if module.params["keep_for"] < module.params["every"]:
+ module.fail_json(
+ msg="Retention period cannot be less than snapshot interval."
+ )
+ if module.params["at"] and not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in set(pytz.all_timezones_set):
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(
+ module.params["timezone"]
+ )
+ )
+
+ if module.params["keep_for"]:
+ if not 300 <= module.params["keep_for"] <= 34560000:
+ module.fail_json(
+ msg="keep_for parameter is out of range (300 to 34560000)"
+ )
+ if not 300 <= module.params["every"] <= 34560000:
+ module.fail_json(
+ msg="every parameter is out of range (300 to 34560000)"
+ )
+ if module.params["at"]:
+ attr = PolicyPatch(
+ enabled=module.params["enabled"],
+ add_rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ at=_convert_to_millisecs(module.params["at"]),
+ time_zone=module.params["timezone"],
+ )
+ ],
+ )
+ else:
+ attr = PolicyPatch(
+ enabled=module.params["enabled"],
+ add_rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ )
+ ],
+ )
+ else:
+ attr = PolicyPatch(enabled=module.params["enabled"])
+ res = blade.patch_policies(
+ names=[module.params["name"]],
+ policy=attr,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update snapshot policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_policy(module, blade, policy):
+ """Update snapshot policy"""
+ changed = False
+ if not policy.rules:
+ current_policy = {
+ "time_zone": None,
+ "every": 0,
+ "keep_for": 0,
+ "at": 0,
+ "enabled": policy.enabled,
+ }
+ else:
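+ # The API reports every/keep_for in milliseconds; convert to seconds so the
+ # values can be compared against the module parameters.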
+ if policy.rules[0].keep_for != 0:
+ policy.rules[0].keep_for = int(policy.rules[0].keep_for / 1000)
+ if policy.rules[0].every != 0:
+ policy.rules[0].every = int(policy.rules[0].every / 1000)
+
+ current_policy = {
+ "time_zone": policy.rules[0].time_zone,
+ "every": policy.rules[0].every,
+ "keep_for": policy.rules[0].keep_for,
+ "at": policy.rules[0].at,
+ "enabled": policy.enabled,
+ }
+ if not module.params["every"]:
+ every = 0
+ else:
+ every = module.params["every"]
+ if not module.params["keep_for"]:
+ keep_for = 0
+ else:
+ keep_for = module.params["keep_for"]
+ if module.params["at"]:
+ at_time = _convert_to_millisecs(module.params["at"])
+ else:
+ at_time = None
+ if not module.params["timezone"]:
+ timezone = _get_local_tz(module)
+ else:
+ timezone = module.params["timezone"]
+ if at_time:
+ new_policy = {
+ "time_zone": timezone,
+ "every": every,
+ "keep_for": keep_for,
+ "at": at_time,
+ "enabled": module.params["enabled"],
+ }
+ else:
+ new_policy = {
+ "time_zone": None,
+ "every": every,
+ "keep_for": keep_for,
+ "at": None,
+ "enabled": module.params["enabled"],
+ }
+ if (
+ new_policy["time_zone"]
+ and new_policy["time_zone"] not in pytz.all_timezones_set
+ ):
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(module.params["timezone"])
+ )
+
+ if current_policy != new_policy:
+ if not module.params["at"]:
+ module.params["at"] = current_policy["at"]
+ if not module.params["keep_for"]:
+ module.params["keep_for"] = current_policy["keep_for"]
+ if not module.params["every"]:
+ module.params["every"] = current_policy["every"]
+ if module.params["at"] and module.params["every"]:
+ if not module.params["every"] % 86400 == 0:
+ module.fail_json(
+ msg="At time can only be set if every value is a multiple of 86400"
+ )
+ if module.params["keep_for"] < module.params["every"]:
+ module.fail_json(
+ msg="Retention period cannot be less than snapshot interval."
+ )
+ if module.params["at"] and not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in set(pytz.all_timezones_set):
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(module.params["timezone"])
+ )
+
+ changed = True
+ if not module.check_mode:
+ try:
+ attr = PolicyPatch()
+ attr.enabled = module.params["enabled"]
+ if at_time:
+ attr.add_rules = [
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ at=at_time,
+ time_zone=timezone,
+ )
+ ]
+ else:
+ attr.add_rules = [
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ )
+ ]
+ attr.remove_rules = [
+ PolicyRule(
+ keep_for=current_policy["keep_for"] * 1000,
+ every=current_policy["every"] * 1000,
+ at=current_policy["at"],
+ time_zone=current_policy["time_zone"],
+ )
+ ]
+ blade.policies.update_policies(
+ names=[module.params["name"]], policy_patch=attr
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update policy {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
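The update path above replaces a rule rather than editing it in place: the desired rule goes into add_rules and the existing one into remove_rules of a single PolicyPatch. A condensed sketch of that pattern, assuming the purity_fb PolicyPatch and PolicyRule classes imported by this module and a logged-in blade client; names and values are illustrative.

attr = PolicyPatch()
attr.enabled = True
attr.add_rules = [PolicyRule(keep_for=86400000, every=3600000)]      # new rule, in ms
attr.remove_rules = [PolicyRule(keep_for=43200000, every=1800000)]   # rule being replaced
blade.policies.update_policies(names=["daily"], policy_patch=attr)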
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type="str", default="present", choices=["absent", "present", "copy"]
+ ),
+ policy_type=dict(
+ type="str", default="snapshot", choices=["snapshot", "access", "nfs"]
+ ),
+ enabled=dict(type="bool", default=True),
+ timezone=dict(type="str"),
+ name=dict(type="str"),
+ at=dict(type="str"),
+ every=dict(type="int"),
+ keep_for=dict(type="int"),
+ filesystem=dict(type="list", elements="str"),
+ replica_link=dict(type="list", elements="str"),
+ account=dict(type="str"),
+ target=dict(type="str"),
+ target_rule=dict(type="str"),
+ rename=dict(type="str"),
+ rule=dict(type="str"),
+ user=dict(type="str"),
+ effect=dict(type="str", default="allow", choices=["allow"]),
+ actions=dict(
+ type="list",
+ elements="str",
+ choices=[
+ "s3:*",
+ "s3:AbortMultipartUpload",
+ "s3:CreateBucket",
+ "s3:DeleteBucket",
+ "s3:DeleteObject",
+ "s3:DeleteObjectVersion",
+ "s3:ExtendSafemodeRetentionPeriod",
+ "s3:GetBucketAcl",
+ "s3:GetBucketLocation",
+ "s3:GetBucketVersioning",
+ "s3:GetLifecycleConfiguration",
+ "s3:GetObject",
+ "s3:GetObjectAcl",
+ "s3:GetObjectVersion",
+ "s3:ListAllMyBuckets",
+ "s3:ListBucket",
+ "s3:ListBucketMultipartUploads",
+ "s3:ListBucketVersions",
+ "s3:ListMultipartUploadParts",
+ "s3:PutBucketVersioning",
+ "s3:PutLifecycleConfiguration",
+ "s3:PutObject",
+ ],
+ ),
+ object_resources=dict(type="list", elements="str"),
+ source_ips=dict(type="list", elements="str"),
+ s3_prefixes=dict(type="list", elements="str"),
+ s3_delimiters=dict(type="list", elements="str"),
+ ignore_enforcement=dict(type="bool", default=True),
+ force_delete=dict(type="bool", default=False),
+ access=dict(
+ type="str",
+ choices=["root-squash", "all-squash", "no-squash"],
+ default="root-squash",
+ ),
+ anonuid=dict(type="str"),
+ anongid=dict(type="str"),
+ atime=dict(type="bool", default=True),
+ client=dict(type="str", default="*"),
+ fileid_32bit=dict(type="bool", default=False),
+ permission=dict(type="str", choices=["rw", "ro"], default="ro"),
+ secure=dict(type="bool", default=False),
+ destroy_snapshots=dict(type="bool", default=False),
+ security=dict(
+ type="list",
+ elements="str",
+ choices=["sys", "krb5", "krb5i", "krb5p"],
+ default=["sys"],
+ ),
+ before_rule=dict(type="int"),
+ )
+ )
+
+ required_together = [["keep_for", "every"]]
+ required_if = [
+ ["policy_type", "access", ["account", "name"]],
+ ["policy_type", "nfs", ["name"]],
+ ]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURITYFB:
+ module.fail_json(msg="purity-fb sdk is required for this module")
+ if not HAS_PYTZ:
+ module.fail_json(msg="pytz is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+ if module.params["policy_type"] == "access":
+ if ACCESS_POLICY_API_VERSION not in versions:
+ module.fail_json(
+ msg=(
+ "Minimum FlashBlade REST version required: {0}".format(
+ ACCESS_POLICY_API_VERSION
+ )
+ )
+ )
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ blade = get_system(module)
+ try:
+ policy = list(
+ blade.get_object_store_access_policies(
+ names=[module.params["account"] + "/" + module.params["name"]]
+ ).items
+ )[0]
+ except AttributeError:
+ policy = None
+ if module.params["user"]:
+ member_name = module.params["account"] + "/" + module.params["user"]
+            res = blade.get_object_store_users(filter="name='" + member_name + "'")
+ if res.status_code != 200:
+ module.fail_json(
+ msg="User {0} does not exist in account {1}".format(
+ module.params["user"], module.params["account"]
+ )
+ )
+ if policy and state == "present":
+ update_os_policy(module, blade)
+ elif state == "present" and not policy:
+ create_os_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_os_policy(module, blade)
+ elif state == "copy" and module.params["target"] and module.params["rule"]:
+ if "/" not in module.params["target"]:
+ module.fail_json(
+ msg='Incorrect format for target policy. Must be "<account>/<name>"'
+ )
+ if (
+ blade.get_object_store_access_policies(
+ names=[module.params["target"]]
+ ).status_code
+ != 200
+ ):
+ module.fail_json(
+ msg="Target policy {0} does not exist".format(
+ module.params["target"]
+ )
+ )
+ copy_os_policy_rule(module, blade)
+ elif module.params["policy_type"] == "nfs":
+ if NFS_POLICY_API_VERSION not in versions:
+ module.fail_json(
+ msg=(
+ "Minimum FlashBlade REST version required: {0}".format(
+ NFS_POLICY_API_VERSION
+ )
+ )
+ )
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ blade = get_system(module)
+ try:
+ policy = list(
+ blade.get_nfs_export_policies(names=[module.params["name"]]).items
+ )[0]
+ except AttributeError:
+ policy = None
+ if module.params["rename"]:
+ try:
+ new_policy = list(
+ blade.get_nfs_export_policies(names=[module.params["rename"]]).items
+ )[0]
+ except AttributeError:
+ new_policy = None
+ if policy and state == "present" and not module.params["rename"]:
+ if module.params["before_rule"]:
+ res = blade.get_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ names=[
+ module.params["name"] + "." + str(module.params["before_rule"])
+ ],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Rule index {0} does not exist.".format(
+ module.params["before_rule"]
+ )
+ )
+ update_nfs_policy(module, blade)
+ elif (
+ state == "present" and module.params["rename"] and policy and not new_policy
+ ):
+ rename_nfs_policy(module, blade)
+ elif state == "present" and not policy and not module.params["rename"]:
+ create_nfs_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_nfs_policy(module, blade)
+ elif SNAPSHOT_POLICY_API_VERSION in versions:
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ blade = get_system(module)
+ try:
+ policy = list(blade.get_policies(names=[module.params["name"]]).items)[0]
+ except AttributeError:
+ policy = None
+ if not policy and state == "present":
+ create_snap_policy(module, blade)
+ elif policy and state == "present":
+ update_snap_policy(module, blade)
+ elif policy and state == "absent":
+ delete_snap_policy(module, blade)
+ else:
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+ try:
+ policy = blade.policies.list_policies(names=[module.params["name"]]).items[
+ 0
+ ]
+ except Exception:
+ policy = None
+
+ if policy and state == "present":
+ update_policy(module, blade, policy)
+ elif state == "present" and not policy:
+ create_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_policy(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py
new file mode 100644
index 000000000..ed9e39ac8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_proxy
+version_added: '1.0.0'
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+short_description: Configure FlashBlade phonehome HTTPS proxy settings
+description:
+- Set or erase configuration for the phonehome proxy settings.
+options:
+ state:
+ description:
+ - Set or delete proxy configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ host:
+ description:
+ - The proxy host name.
+ type: str
+ port:
+ description:
+ - The proxy TCP/IP port number.
+ type: int
+ secure:
+ description:
+ - Use http or https as the proxy protocol.
+ - True uses https, false uses http.
+ default: true
+ type: bool
+ version_added: '1.11.0'
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing proxy settings
+ purestorage.flashblade.purefb_proxy:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set proxy settings
+ purestorage.flashblade.purefb_proxy:
+ host: purestorage.com
+ port: 8080
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Support
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def delete_proxy(module, blade):
+ """Delete proxy settings"""
+ changed = False
+ current_proxy = blade.support.list_support().items[0].proxy
+ if current_proxy != "":
+ changed = True
+ if not module.check_mode:
+ try:
+ proxy_settings = Support(proxy="")
+ blade.support.update_support(support=proxy_settings)
+ except Exception:
+                module.fail_json(msg="Delete proxy settings failed")
+ module.exit_json(changed=changed)
+
+
+def create_proxy(module, blade):
+ """Set proxy settings"""
+ changed = False
+ current_proxy = blade.support.list_support().items[0].proxy
+ if module.params["secure"]:
+ protocol = "https://"
+ else:
+ protocol = "http://"
+ if current_proxy is not None:
+ changed = True
+ if not module.check_mode:
+ new_proxy = (
+ protocol + module.params["host"] + ":" + str(module.params["port"])
+ )
+ if new_proxy != current_proxy:
+ try:
+ proxy_settings = Support(proxy=new_proxy)
+ blade.support.update_support(support=proxy_settings)
+ except Exception:
+ module.fail_json(msg="Set phone home proxy failed.")
+
+ module.exit_json(changed=changed)
+
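For reference, the proxy value written back to the array is just the scheme, host, and port concatenated; a small illustrative sketch (host and port are placeholders):

secure = True
protocol = "https://" if secure else "http://"
new_proxy = protocol + "proxy.example.com" + ":" + str(8080)
# new_proxy == "https://proxy.example.com:8080", passed as Support(proxy=new_proxy)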
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ secure=dict(type="bool", default=True),
+ host=dict(type="str"),
+ port=dict(type="int"),
+ )
+ )
+
+ required_together = [["host", "port"]]
+
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+
+ if state == "absent":
+ delete_proxy(module, blade)
+ elif state == "present":
+ create_proxy(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py
new file mode 100644
index 000000000..c84ba1e41
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_ra
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashBlade Remote Assist
+description:
+- Enable or disable Remote Assist for a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of remote assist
+    - When set to I(present) the RA port can be exposed using the
+ I(debug) module.
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Enable Remote Assist port
+ purestorage.flashblade.purefb_ra:
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Disable Remote Assist port
+ purestorage.flashblade.purefb_ra:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Support
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
+def enable_ra(module, blade):
+ """Enable Remote Assist"""
+ changed = True
+ if not module.check_mode:
+ ra_settings = Support(remote_assist_active=True)
+ try:
+ blade.support.update_support(support=ra_settings)
+ except Exception:
+ module.fail_json(msg="Enabling Remote Assist failed")
+ module.exit_json(changed=changed)
+
+
+def disable_ra(module, blade):
+ """Disable Remote Assist"""
+ changed = True
+ if not module.check_mode:
+ ra_settings = Support(remote_assist_active=False)
+ try:
+ blade.support.update_support(support=ra_settings)
+ except Exception:
+ module.fail_json(msg="Disabling Remote Assist failed")
+ module.exit_json(changed=changed)
+
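Outside Ansible, the same toggle can be performed with the purity_fb SDK directly; a rough sketch, with the management address and API token as placeholders:

from purity_fb import PurityFb, Support

fb = PurityFb("10.10.10.2")   # placeholder management address
fb.disable_verify_ssl()       # only needed for self-signed certificates
fb.login("T-xxxx")            # placeholder API token
fb.support.update_support(support=Support(remote_assist_active=True))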
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
+ if (
+ module.params["state"] == "present"
+ and not blade.support.list_support().items[0].remote_assist_active
+ ):
+ enable_ra(module, blade)
+ elif (
+ module.params["state"] == "absent"
+ and blade.support.list_support().items[0].remote_assist_active
+ ):
+ disable_ra(module, blade)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py
new file mode 100644
index 000000000..53c985d35
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_remote_cred
+version_added: '1.0.0'
+short_description: Create, modify and delete FlashBlade object store remote credentials
+description:
+- Create, modify and delete object store remote credentials
+- You must have a correctly configured remote array or target
+- This module is B(not) idempotent when updating existing remote credentials
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of remote credential
+ default: present
+ choices: [ absent, present ]
+ type: str
+ name:
+ description:
+ - The name of the credential
+ required: true
+ type: str
+ access_key:
+ description:
+ - Access Key ID of the S3 target
+ type: str
+ secret:
+ description:
+ - Secret Access Key for the S3 or Azure target
+ type: str
+ target:
+ description:
+ - Define whether to initialize the S3 bucket
+    - Name of the connected array or offload target that the credentials apply to
+ type: str
+
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create remote credential
+ purestorage.flashblade.purefb_remote_cred:
+ name: cred1
+ access_key: "3794fb12c6204e19195f"
+ secret: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+ target: target1
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete remote credential
+ purestorage.flashblade.purefb_remote_cred:
+ name: cred1
+ target: target1
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import ObjectStoreRemoteCredentials
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def get_connected(module, blade):
+ """Return connected device or None"""
+ connected_blades = blade.array_connections.list_array_connections()
+ for target in range(0, len(connected_blades.items)):
+ if (
+ connected_blades.items[target].remote.name == module.params["target"]
+ or connected_blades.items[target].management_address
+ == module.params["target"]
+ ) and connected_blades.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_blades.items[target].remote.name
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].name == module.params[
+ "target"
+ ] and connected_targets.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_targets.items[target].name
+ return None
+
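Remote credentials are always addressed by a fully qualified name of the form <target>/<name>, which is why every call in this module builds that string first. A short illustrative sketch, assuming the logged-in purity_fb client returned by get_blade():

remote_cred = "target1" + "/" + "cred1"   # "target1/cred1", placeholder names
res = blade.object_store_remote_credentials.list_object_store_remote_credentials(
    names=[remote_cred]
)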
+
+def get_remote_cred(module, blade):
+ """Return Remote Credential or None"""
+ try:
+ res = (
+ blade.object_store_remote_credentials.list_object_store_remote_credentials(
+ names=[module.params["target"] + "/" + module.params["name"]]
+ )
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_credential(module, blade):
+ """Create remote credential"""
+ changed = True
+ if not module.check_mode:
+ remote_cred = module.params["target"] + "/" + module.params["name"]
+ remote_credentials = ObjectStoreRemoteCredentials(
+ access_key_id=module.params["access_key"],
+ secret_access_key=module.params["secret"],
+ )
+ try:
+ blade.object_store_remote_credentials.create_object_store_remote_credentials(
+ names=[remote_cred], remote_credentials=remote_credentials
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create remote credential {0}".format(remote_cred)
+ )
+ module.exit_json(changed=changed)
+
+
+def update_credential(module, blade):
+ """Update remote credential"""
+ changed = True
+ if not module.check_mode:
+ remote_cred = module.params["target"] + "/" + module.params["name"]
+ new_attr = ObjectStoreRemoteCredentials(
+ access_key_id=module.params["access_key"],
+ secret_access_key=module.params["secret"],
+ )
+ try:
+ blade.object_store_remote_credentials.update_object_store_remote_credentials(
+ names=[remote_cred], remote_credentials=new_attr
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update remote credential {0}".format(remote_cred)
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_credential(module, blade):
+ """Delete remote credential"""
+ changed = True
+ if not module.check_mode:
+ remote_cred = module.params["target"] + "/" + module.params["name"]
+ try:
+ blade.object_store_remote_credentials.delete_object_store_remote_credentials(
+ names=[remote_cred]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete remote credential {0}.".format(remote_cred)
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ name=dict(type="str", required=True),
+ access_key=dict(type="str", no_log=False),
+ secret=dict(type="str", no_log=True),
+ target=dict(type="str", required=True),
+ )
+ )
+
+ required_if = [["state", "present", ["access_key", "secret"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ target = get_connected(module, blade)
+
+ if not target:
+ module.fail_json(
+ msg="Selected target {0} is not connected.".format(module.params["target"])
+ )
+
+ remote_cred = get_remote_cred(module, blade)
+
+ if module.params["state"] == "present" and not remote_cred:
+ create_credential(module, blade)
+ elif module.params["state"] == "present":
+ update_credential(module, blade)
+ elif module.params["state"] == "absent" and remote_cred:
+ delete_credential(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
new file mode 100644
index 000000000..034731994
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
@@ -0,0 +1,314 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_s3acc
+version_added: '1.0.0'
+short_description: Create or delete FlashBlade Object Store accounts
+description:
+- Create or delete object store accounts on a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete object store account
+ default: present
+ choices: [ absent, present ]
+ type: str
+ name:
+ description:
+ - The name of object store account
+ type: str
+ required: true
+ quota:
+ description:
+ - The effective quota limit to be applied against the size of the account in bytes.
+ - If set to '' (empty string), the account is unlimited in size.
+ version_added: 1.11.0
+ type: str
+ hard_limit:
+ description:
+ - If set to true, the account size, as defined by I(quota_limit), is used as a hard limit quota.
+ - If set to false, a hard limit quota will not be applied to the account, but soft quota alerts
+ will still be sent if the account has a value set for I(quota_limit).
+ version_added: 1.11.0
+ type: bool
+ default: false
+ default_quota:
+ description:
+ - The value of this field will be used to configure the I(quota_limit) field of newly created buckets
+ associated with this object store account, if the bucket creation does not specify its own value.
+ - If set to '' (empty string), the bucket default is unlimited in size.
+ version_added: 1.11.0
+ type: str
+ default_hard_limit:
+ description:
+ - The value of this field will be used to configure the I(hard_limit) field of newly created buckets
+ associated with this object store account, if the bucket creation does not specify its own value.
+ version_added: 1.11.0
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create object store account foo (with no quotas)
+ purestorage.flashblade.purefb_s3acc:
+ name: foo
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create object store account foo (with quotas)
+ purestorage.flashblade.purefb_s3acc:
+ name: foo
+ quota: 20480000
+ hard_limit: true
+ default_quota: 1024000
+ default_hard_limit: false
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete object store account foo
+ purestorage.flashblade.purefb_s3acc:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flashblade import ObjectStoreAccountPatch, BucketDefaults
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.3"
+QUOTA_API_VERSION = "2.1"
+
+
+def get_s3acc(module, blade):
+ """Return Object Store Account or None"""
+ s3acc = None
+ accts = blade.object_store_accounts.list_object_store_accounts()
+ for acct in range(0, len(accts.items)):
+ if accts.items[acct].name == module.params["name"]:
+ s3acc = accts.items[acct]
+ return s3acc
+
+
+def update_s3acc(module):
+ """Update Object Store Account"""
+ changed = False
+ blade = get_system(module)
+ acc_settings = list(
+ blade.get_object_store_accounts(names=[module.params["name"]]).items
+ )[0]
+ current_account = {
+ "hard_limit": acc_settings.hard_limit_enabled,
+ "default_hard_limit": acc_settings.bucket_defaults.hard_limit_enabled,
+ "quota": str(acc_settings.quota_limit),
+ "default_quota": str(acc_settings.bucket_defaults.quota_limit),
+ }
+ if current_account["quota"] == "None":
+ current_account["quota"] = ""
+ if current_account["default_quota"] == "None":
+ current_account["default_quota"] = ""
+ if module.params["quota"] is None:
+ module.params["quota"] = current_account["quota"]
+ if module.params["default_quota"] is None:
+ module.params["default_quota"] = current_account["default_quota"]
+ new_account = {
+ "hard_limit": module.params["hard_limit"],
+ "default_hard_limit": module.params["default_hard_limit"],
+ "quota": module.params["quota"],
+ "default_quota": module.params["default_quota"],
+ }
+ if new_account != current_account:
+ changed = True
+ if not module.check_mode:
+ osa = ObjectStoreAccountPatch(
+ hard_limit_enabled=new_account["hard_limit"],
+ quota_limit=new_account["quota"],
+ bucket_defaults=BucketDefaults(
+ hard_limit_enabled=new_account["default_hard_limit"],
+ quota_limit=new_account["default_quota"],
+ ),
+ )
+ res = blade.patch_object_store_accounts(
+ object_store_account=osa, names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update account {0}. "
+ "Error: {1}".format(module.params["name"], res.errors[0].message)
+ )
+ module.exit_json(changed=changed)
+
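The dictionary comparison above only works because both sides are normalised the same way: the API returns quota limits as integers (or None), while the module parameters are strings, so everything is coerced to a string and None becomes the empty string meaning unlimited. A small illustrative sketch:

api_quota = 20480000                        # value returned by the API (int or None)
current = "" if api_quota is None else str(api_quota)
desired = "20480000"                        # module parameter: a string of bytes
needs_update = desired != current           # False here, so no PATCH is issued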
+
+def create_s3acc(module, blade):
+ """Create Object Store Account"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.object_store_accounts.create_object_store_accounts(
+ names=[module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Account {0}: Creation failed".format(
+ module.params["name"]
+ )
+ )
+ if module.params["quota"] or module.params["default_quota"]:
+ blade2 = get_system(module)
+ if module.params["quota"] and not module.params["default_quota"]:
+ osa = ObjectStoreAccountPatch(
+ hard_limit_enabled=module.params["hard_limit"],
+ quota_limit=module.params["quota"],
+ )
+            elif not module.params["quota"] and module.params["default_quota"]:
+ osa = ObjectStoreAccountPatch(
+ bucket_defaults=BucketDefaults(
+ hard_limit_enabled=module.params["default_hard_limit"],
+ quota_limit=module.params["default_quota"],
+ )
+ )
+ else:
+ osa = ObjectStoreAccountPatch(
+ hard_limit_enabled=module.params["hard_limit"],
+ quota_limit=module.params["quota"],
+ bucket_defaults=BucketDefaults(
+ hard_limit_enabled=module.params["default_hard_limit"],
+ quota_limit=module.params["default_quota"],
+ ),
+ )
+ res = blade2.patch_object_store_accounts(
+ object_store_account=osa, names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ blade.object_store_accounts.delete_object_store_accounts(
+ names=[module.params["name"]]
+ )
+ module.fail_json(
+ msg="Failed to set quotas correctly for account {0}. "
+ "Error: {1}".format(module.params["name"], res.errors[0].message)
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_s3acc(module, blade):
+ """Delete Object Store Account"""
+ changed = True
+ if not module.check_mode:
+ count = len(
+ blade.object_store_users.list_object_store_users(
+ filter="name='" + module.params["name"] + "/*'"
+ ).items
+ )
+ if count != 0:
+            module.fail_json(
+                msg="Remove all Users from Object Store Account {0} "
+                "before deletion".format(module.params["name"])
+            )
+ else:
+ try:
+ blade.object_store_accounts.delete_object_store_accounts(
+ names=[module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Account {0}: Deletion failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
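The guard above relies on the object store user list filter syntax, where <account>/* matches every user in the account. Roughly, assuming the purity_fb client used by this module:

account = "foo"                              # illustrative account name
filt = "name='" + account + "/*'"            # "name='foo/*'"
users = blade.object_store_users.list_object_store_users(filter=filt)
if len(users.items) != 0:
    print("account still has users; remove them before deleting the account")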
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True, type="str"),
+ hard_limit=dict(type="bool", default=False),
+ default_hard_limit=dict(type="bool", default=False),
+ quota=dict(type="str"),
+ default_quota=dict(type="str"),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ if module.params["quota"] or module.params["default_quota"]:
+ if not HAS_PURESTORAGE:
+            module.fail_json(msg="py-pure-client sdk is required to set quotas")
+ if QUOTA_API_VERSION not in versions:
+ module.fail_json(
+ msg="Quotas require minimum FlashBlade REST version: {0}".format(
+ QUOTA_API_VERSION
+ )
+ )
+
+ upper = False
+ for element in module.params["name"]:
+ if element.isupper():
+ upper = True
+ break
+ if upper:
+ module.warn("Changing account name to lowercase...")
+ module.params["name"] = module.params["name"].lower()
+
+ s3acc = get_s3acc(module, blade)
+
+ if state == "absent" and s3acc:
+ delete_s3acc(module, blade)
+ elif state == "present" and s3acc:
+ update_s3acc(module)
+ elif not s3acc and state == "present":
+ create_s3acc(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
new file mode 100644
index 000000000..55bc05c3f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
@@ -0,0 +1,436 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_s3user
+version_added: '1.0.0'
+short_description: Create or delete FlashBlade Object Store account users
+description:
+- Create or delete object store account users on a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete object store account user
+ - Remove a specified access key for a user
+ default: present
+ choices: [ absent, present, remove_key ]
+ type: str
+ name:
+ description:
+ - The name of object store user
+ type: str
+ required: true
+ account:
+ description:
+ - The name of object store account associated with user
+ type: str
+ required: true
+ access_key:
+ description:
+ - Create secret access key.
+ - Key can be exposed using the I(debug) module
+ - If enabled this will override I(imported_key)
+ type: bool
+ default: false
+ remove_key:
+ description:
+ - Access key to be removed from user
+ type: str
+ version_added: "1.5.0"
+ imported_key:
+ description:
+ - Access key of imported credentials
+ type: str
+ version_added: "1.4.0"
+ imported_secret:
+ description:
+ - Access key secret for access key to import
+ type: str
+ version_added: "1.4.0"
+ policy:
+ description:
+ - User Access Policies to be assigned to user on creation
+ - To amend policies use the I(purestorage.flashblade.purefb_userpolicy) module
+ - If not specified, I(pure\:policy/full-access) will be added
+ type: list
+ elements: str
+ version_added: "1.6.0"
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create object store user (with access ID and key) foo in account bar
+ purestorage.flashblade.purefb_s3user:
+ name: foo
+ account: bar
+ access_key: true
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: result
+
+- debug:
+ msg: "S3 User: {{ result['s3user_info'] }}"
+
+- name: Create object store user (with access ID and key) foo in account bar with access policy (Purity 3.2 and higher)
+ purestorage.flashblade.purefb_s3user:
+ name: foo
+ account: bar
+ access_key: true
+ policy:
+ - pure:policy/safemode-configure
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create object store user foo using imported key/secret in account bar
+ purestorage.flashblade.purefb_s3user:
+ name: foo
+ account: bar
+ imported_key: "PSABSSZRHPMEDKHMAAJPJBONPJGGDDAOFABDGLBJLHO"
+ imported_secret: "BAG61F63105e0d3669/e066+5C5DFBE2c127d395LBGG"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete object store user foo in account bar
+ purestorage.flashblade.purefb_s3user:
+ name: foo
+ account: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import ObjectStoreAccessKey, ObjectStoreAccessKeyPost
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.3"
+IMPORT_KEY_API_VERSION = "1.10"
+POLICY_API_VERSION = "2.0"
+
+
+def get_s3acc(module, blade):
+ """Return Object Store Account or None"""
+ s3acc = None
+ accts = blade.object_store_accounts.list_object_store_accounts()
+ for acct in range(0, len(accts.items)):
+ if accts.items[acct].name == module.params["account"]:
+ s3acc = accts.items[acct]
+ return s3acc
+
+
+def get_s3user(module, blade):
+    """Return Object Store User or None"""
+ full_user = module.params["account"] + "/" + module.params["name"]
+ s3user = None
+ s3users = blade.object_store_users.list_object_store_users()
+ for user in range(0, len(s3users.items)):
+ if s3users.items[user].name == full_user:
+ s3user = s3users.items[user]
+ return s3user
+
+
+def update_s3user(module, blade):
+ """Update Object Store User"""
+ changed = False
+ exists = False
+ s3user_facts = {}
+ user = module.params["account"] + "/" + module.params["name"]
+ if module.params["access_key"] or module.params["imported_key"]:
+ key_count = 0
+ keys = blade.object_store_access_keys.list_object_store_access_keys()
+ for key in range(0, len(keys.items)):
+ if module.params["imported_key"]:
+ versions = blade.api_version.list_versions().versions
+ if IMPORT_KEY_API_VERSION in versions:
+ if keys.items[key].name == module.params["imported_key"]:
+ module.warn("Imported key provided already belongs to a user")
+ exists = True
+ if keys.items[key].user.name == user:
+ key_count += 1
+ if not exists:
+ if key_count < 2:
+ changed = True
+ if not module.check_mode:
+ try:
+ if (
+ module.params["access_key"]
+ and module.params["imported_key"]
+ ):
+ module.warn("'access_key: true' overrides imported keys")
+ if module.params["access_key"]:
+ result = blade.object_store_access_keys.create_object_store_access_keys(
+ object_store_access_key=ObjectStoreAccessKey(
+ user={"name": user}
+ )
+ )
+ s3user_facts["fb_s3user"] = {
+ "user": user,
+ "access_key": result.items[0].secret_access_key,
+ "access_id": result.items[0].name,
+ }
+ else:
+ if IMPORT_KEY_API_VERSION in versions:
+ blade.object_store_access_keys.create_object_store_access_keys(
+ names=[module.params["imported_key"]],
+ object_store_access_key=ObjectStoreAccessKeyPost(
+ user={"name": user},
+ secret_access_key=module.params[
+ "imported_secret"
+ ],
+ ),
+ )
+ except Exception:
+ if module.params["imported_key"]:
+ module.fail_json(
+ msg="Object Store User {0}: Access Key import failed".format(
+ user
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Object Store User {0}: Access Key creation failed".format(
+ user
+ )
+ )
+ else:
+ module.warn(
+ "Object Store User {0}: Maximum Access Key count reached".format(
+ user
+ )
+ )
+ module.exit_json(changed=changed, s3user_info=s3user_facts)
+
+
+def create_s3user(module, blade):
+    """Create Object Store User"""
+ s3user_facts = {}
+ changed = True
+ if not module.check_mode:
+ user = module.params["account"] + "/" + module.params["name"]
+ blade.object_store_users.create_object_store_users(names=[user])
+ if module.params["access_key"] and module.params["imported_key"]:
+ module.warn("'access_key: true' overrides imported keys")
+ if module.params["access_key"]:
+ try:
+ result = blade.object_store_access_keys.create_object_store_access_keys(
+ object_store_access_key=ObjectStoreAccessKey(user={"name": user})
+ )
+ s3user_facts["fb_s3user"] = {
+ "user": user,
+ "access_key": result.items[0].secret_access_key,
+ "access_id": result.items[0].name,
+ }
+ except Exception:
+ delete_s3user(module, blade, True)
+ module.fail_json(
+ msg="Object Store User {0}: Creation failed".format(user)
+ )
+ else:
+ if module.params["imported_key"]:
+ versions = blade.api_version.list_versions().versions
+ if IMPORT_KEY_API_VERSION in versions:
+ try:
+ blade.object_store_access_keys.create_object_store_access_keys(
+ names=[module.params["imported_key"]],
+ object_store_access_key=ObjectStoreAccessKeyPost(
+ user={"name": user},
+ secret_access_key=module.params["imported_secret"],
+ ),
+ )
+ except Exception:
+ delete_s3user(module, blade)
+ module.fail_json(
+ msg="Object Store User {0}: Creation failed with imported access key".format(
+ user
+ )
+ )
+ if module.params["policy"]:
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if POLICY_API_VERSION in api_version:
+ policy_list = module.params["policy"]
+ for policy in range(0, len(policy_list)):
+ if (
+ blade.get_object_store_access_policies(
+ names=[policy_list[policy]]
+ ).status_code
+ != 200
+ ):
+ module.warn(
+ "Policy {0} is not valid. Ignoring...".format(
+ policy_list[policy]
+ )
+ )
+ policy_list.remove(policy_list[policy])
+ username = module.params["account"] + "/" + module.params["name"]
+ for policy in range(0, len(policy_list)):
+ if not (
+ blade.get_object_store_users_object_store_access_policies(
+ member_names=[username], policy_names=[policy_list[policy]]
+ ).items
+ ):
+ res = (
+ blade.post_object_store_access_policies_object_store_users(
+ member_names=[username],
+ policy_names=[policy_list[policy]],
+ )
+ )
+ if res.status_code != 200:
+ module.warn(
+ "Failed to add policy {0} to account user {1}. Skipping...".format(
+ policy_list[policy], username
+ )
+ )
+ if "pure:policy/full-access" not in policy_list:
+ # User Create adds the pure:policy/full-access policy by default
+ # If we are specifying a list then remove this default value
+ blade.delete_object_store_access_policies_object_store_users(
+ member_names=[username],
+ policy_names=["pure:policy/full-access"],
+ )
+ else:
+ module.warn(
+ "FlashBlade REST version not supported for user access policies. Skipping..."
+ )
+ module.exit_json(changed=changed, s3user_info=s3user_facts)
+
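At the SDK level, the access key workflow the module wraps is a single call that returns both the key ID and the secret, and the secret is only available in that response. A rough sketch with placeholder connection details:

from purity_fb import PurityFb, ObjectStoreAccessKey

fb = PurityFb("10.10.10.2")             # placeholder management address
fb.login("T-xxxx")                      # placeholder API token
res = fb.object_store_access_keys.create_object_store_access_keys(
    object_store_access_key=ObjectStoreAccessKey(user={"name": "bar/foo"})
)
print(res.items[0].name)                # access key ID
print(res.items[0].secret_access_key)   # secret, shown only at creation time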
+
+def remove_key(module, blade):
+ """Remove Access Key from User"""
+ changed = False
+ if not module.check_mode:
+ try:
+ keys = blade.object_store_access_keys.list_object_store_access_keys()
+ for key in range(0, len(keys.items)):
+ if keys.items[key].name == module.params["remove_key"]:
+ blade.object_store_access_keys.delete_object_store_access_keys(
+ names=[module.params["remove_key"]]
+ )
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to correctly read or delete access keys")
+ module.exit_json(changed=changed)
+
+
+def delete_s3user(module, blade, internal=False):
+    """Delete Object Store User"""
+ changed = True
+ if not module.check_mode:
+ user = module.params["account"] + "/" + module.params["name"]
+ try:
+ blade.object_store_users.delete_object_store_users(names=[user])
+ except Exception:
+ module.fail_json(
+                msg="Object Store User {0}: Deletion failed".format(
+ module.params["name"]
+ )
+ )
+ if internal:
+ return
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True, type="str"),
+ account=dict(required=True, type="str"),
+            access_key=dict(default=False, type="bool"),
+ imported_key=dict(type="str", no_log=False),
+ remove_key=dict(type="str", no_log=False),
+ imported_secret=dict(type="str", no_log=True),
+ policy=dict(type="list", elements="str"),
+ state=dict(default="present", choices=["present", "absent", "remove_key"]),
+ )
+ )
+
+ required_together = [["imported_key", "imported_secret"]]
+ required_if = [["state", "remove_key", ["remove_key"]]]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+ upper = False
+ for element in module.params["account"]:
+ if element.isupper():
+ upper = True
+ break
+ if upper:
+ module.warn("Changing account name to lowercase...")
+ module.params["account"] = module.params["account"].lower()
+
+ s3acc = get_s3acc(module, blade)
+ if not s3acc:
+ module.fail_json(
+ msg="Object Store Account {0} does not exist".format(
+ module.params["account"]
+ )
+ )
+
+ s3user = get_s3user(module, blade)
+
+ if state == "absent" and s3user:
+ delete_s3user(module, blade)
+ elif state == "present" and s3user:
+ update_s3user(module, blade)
+ elif not s3user and state == "present":
+ create_s3user(module, blade)
+ elif state == "remove_key" and s3user:
+ remove_key(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py
new file mode 100644
index 000000000..379443669
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_smtp
+version_added: '1.0.0'
+short_description: Configure SMTP for Pure Storage FlashBlade
+description:
+- Configure SMTP for a Pure Storage FlashBlade.
+- A relay host is optional, but a sender domain must always be configured.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ host:
+ description:
+ - Relay server name
+ type: str
+ domain:
+ description:
+ - Domain name for alert messages
+ required: true
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Configure SMTP settings
+ purestorage.flashblade.purefb_smtp:
+ host: hostname
+ domain: xyz.com
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Smtp
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
+def set_smtp(module, blade):
+ """Configure SMTP settings"""
+ changed = False
+ current_smtp = blade.smtp.list_smtp().items[0]
+ if module.params["host"] and module.params["host"] != current_smtp.relay_host:
+ smtp_settings = Smtp(relay_host=module.params["host"])
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.smtp.update_smtp(smtp_settings=smtp_settings)
+ except Exception:
+ module.fail_json(msg="Configuring SMTP relay host failed")
+ elif current_smtp.relay_host and not module.params["host"]:
+ smtp_settings = Smtp(relay_host="")
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.smtp.update_smtp(smtp_settings=smtp_settings)
+ except Exception:
+ module.fail_json(msg="Configuring SMTP relay host failed")
+ if module.params["domain"] != current_smtp.sender_domain:
+ smtp_settings = Smtp(sender_domain=module.params["domain"])
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.smtp.update_smtp(smtp_settings=smtp_settings)
+ except Exception:
+ module.fail_json(msg="Configuring SMTP sender domain failed")
+ module.exit_json(changed=changed)
+
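Note that the relay host is cleared by writing an empty string rather than deleting anything; at the SDK level the three branches above all reduce to calls of this shape (connection details are placeholders):

from purity_fb import PurityFb, Smtp

fb = PurityFb("10.10.10.2")                                       # placeholder address
fb.login("T-xxxx")                                                # placeholder API token
fb.smtp.update_smtp(smtp_settings=Smtp(relay_host=""))            # clear the relay host
fb.smtp.update_smtp(smtp_settings=Smtp(sender_domain="xyz.com"))  # set the sender domain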
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ host=dict(type="str"),
+ domain=dict(type="str", required=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
+ set_smtp(module, blade)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py
new file mode 100644
index 000000000..5df0455f8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py
@@ -0,0 +1,379 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_snap
+version_added: '1.0.0'
+short_description: Manage filesystem snapshots on Pure Storage FlashBlades
+description:
+- Create or delete volumes and filesystem snapshots on Pure Storage FlashBlades.
+- Restoring a filesystem from a snapshot is only supported using
+ the latest snapshot.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the source filesystem.
+ required: true
+ type: str
+ suffix:
+ description:
+ - Suffix of snapshot name.
+ type: str
+ state:
+ description:
+ - Define whether the filesystem snapshot should exist or not.
+ choices: [ absent, present, restore ]
+ default: present
+ type: str
+ targets:
+ description:
+ - Name of target to replicate snapshot to.
+ - This is only applicable when I(now) is B(true)
+ type: list
+ elements: str
+ version_added: "1.7.0"
+ now:
+ description:
+    - Whether to initiate a snapshot replication immediately
+ type: bool
+ default: false
+ version_added: "1.7.0"
+ eradicate:
+ description:
+ - Define whether to eradicate the snapshot on delete or leave in trash.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create snapshot foo.ansible
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ suffix: ansible
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create immediate snapshot foo.ansible to connected FB bar
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ suffix: ansible
+ now: true
+ targets:
+ - bar
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete snapshot named foo.snap
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ suffix: snap
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Recover deleted snapshot foo.ansible
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ suffix: ansible
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Restore filesystem foo (uses latest snapshot)
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: restore
+
+- name: Eradicate snapshot named foo.snap
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ suffix: snap
+ eradicate: true
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+from datetime import datetime
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import FileSystemSnapshot, SnapshotSuffix, FileSystem, Reference
+except ImportError:
+ HAS_PURITY_FB = False
+
+SNAP_NOW_API = "1.10"
+
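When suffix is omitted, main() below generates one from the current UTC time; a sketch of the resulting format:

from datetime import datetime

# Epoch seconds with the decimal point stripped, e.g. "snap-1709123456123456"
suffix = "snap-" + str(
    (datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds()
)
suffix = suffix.replace(".", "")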
+
+def get_fs(module, blade):
+ """Return Filesystem or None"""
+ filesystem = []
+ filesystem.append(module.params["name"])
+ try:
+ res = blade.file_systems.list_file_systems(names=filesystem)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_latest_fssnapshot(module, blade):
+    """Get the name of the latest snapshot or None"""
+ try:
+ filt = "source='" + module.params["name"] + "'"
+ all_snaps = blade.file_system_snapshots.list_file_system_snapshots(filter=filt)
+ if not all_snaps.items[0].destroyed:
+ return all_snaps.items[0].name
+ else:
+ module.fail_json(
+ msg="Latest snapshot {0} is destroyed."
+ " Eradicate or recover this first.".format(all_snaps.items[0].name)
+ )
+ except Exception:
+ return None
+
+
+def get_fssnapshot(module, blade):
+ """Return Snapshot or None"""
+ try:
+ filt = (
+ "source='"
+ + module.params["name"]
+ + "' and suffix='"
+ + module.params["suffix"]
+ + "'"
+ )
+ res = blade.file_system_snapshots.list_file_system_snapshots(filter=filt)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_snapshot(module, blade):
+ """Create Snapshot"""
+ changed = False
+ source = []
+ source.append(module.params["name"])
+ try:
+ if module.params["now"]:
+            blade_exists = []
+            connected_blades = blade.array_connections.list_array_connections()
+            for target in range(0, len(module.params["targets"])):
+                blade_exists.append(False)
+                for remote in range(0, len(connected_blades.items)):
+                    if (
+                        module.params["targets"][target]
+                        == connected_blades.items[remote].name
+                        and connected_blades.items[remote].status == "connected"
+                    ):
+                        blade_exists[target] = True
+            if not all(blade_exists):
+                module.fail_json(
+                    msg="Not all selected targets are correctly connected blades"
+                )
+ changed = True
+ if not module.check_mode:
+ blade.file_system_snapshots.create_file_system_snapshots(
+ sources=source,
+ send=True,
+ targets=module.params["targets"],
+ suffix=SnapshotSuffix(module.params["suffix"]),
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ blade.file_system_snapshots.create_file_system_snapshots(
+ sources=source, suffix=SnapshotSuffix(module.params["suffix"])
+ )
+ except Exception:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def restore_snapshot(module, blade):
+ """Restore a filesystem back from the latest snapshot"""
+ changed = True
+ snapname = get_latest_fssnapshot(module, blade)
+ if snapname is not None:
+ if not module.check_mode:
+ fs_attr = FileSystem(
+ name=module.params["name"], source=Reference(name=snapname)
+ )
+ try:
+ blade.file_systems.create_file_systems(
+ overwrite=True,
+ discard_non_snapshotted_data=True,
+ file_system=fs_attr,
+ )
+ except Exception:
+ changed = False
+ else:
+ module.fail_json(
+ msg="Filesystem {0} has no snapshots to restore from.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def recover_snapshot(module, blade):
+ """Recover deleted Snapshot"""
+ changed = True
+ if not module.check_mode:
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ new_attr = FileSystemSnapshot(destroyed=False)
+ try:
+ blade.file_system_snapshots.update_file_system_snapshots(
+ name=snapname, attributes=new_attr
+ )
+ except Exception:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def update_snapshot(module, blade):
+ """Update Snapshot"""
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def delete_snapshot(module, blade):
+    """Delete Snapshot"""
+    changed = True
+    if not module.check_mode:
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ new_attr = FileSystemSnapshot(destroyed=True)
+ try:
+ blade.file_system_snapshots.update_file_system_snapshots(
+ name=snapname, attributes=new_attr
+ )
+ changed = True
+ if module.params["eradicate"]:
+ try:
+ blade.file_system_snapshots.delete_file_system_snapshots(
+ name=snapname
+ )
+ changed = True
+ except Exception:
+ changed = False
+ except Exception:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def eradicate_snapshot(module, blade):
+    """Eradicate Snapshot"""
+    changed = True
+    if not module.check_mode:
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ try:
+ blade.file_system_snapshots.delete_file_system_snapshots(name=snapname)
+ changed = True
+ except Exception:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ suffix=dict(type="str"),
+ now=dict(type="bool", default=False),
+ targets=dict(type="list", elements="str"),
+            eradicate=dict(default=False, type="bool"),
+ state=dict(default="present", choices=["present", "absent", "restore"]),
+ )
+ )
+
+ required_if = [["now", True, ["targets"]]]
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ if module.params["suffix"] is None:
+ suffix = "snap-" + str(
+ (datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds()
+ )
+ module.params["suffix"] = suffix.replace(".", "")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if SNAP_NOW_API not in versions and module.params["now"]:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version for immeadiate remote snapshots: {0}".format(
+ SNAP_NOW_API
+ )
+ )
+ filesystem = get_fs(module, blade)
+ snap = get_fssnapshot(module, blade)
+
+ if state == "present" and filesystem and not filesystem.destroyed and not snap:
+ create_snapshot(module, blade)
+ elif (
+ state == "present"
+ and filesystem
+ and not filesystem.destroyed
+ and snap
+ and not snap.destroyed
+ ):
+ update_snapshot(module, blade)
+ elif (
+ state == "present"
+ and filesystem
+ and not filesystem.destroyed
+ and snap
+ and snap.destroyed
+ ):
+ recover_snapshot(module, blade)
+ elif state == "present" and filesystem and filesystem.destroyed:
+ update_snapshot(module, blade)
+ elif state == "present" and not filesystem:
+ update_snapshot(module, blade)
+ elif state == "restore" and filesystem:
+ restore_snapshot(module, blade)
+ elif state == "absent" and snap and not snap.destroyed:
+ delete_snapshot(module, blade)
+ elif state == "absent" and snap and snap.destroyed:
+ eradicate_snapshot(module, blade)
+ elif state == "absent" and not snap:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
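When no suffix is supplied, the module above derives one from the epoch seconds with the decimal point stripped. A minimal standalone sketch of that derivation (the helper name is illustrative, not part of the module):

    import datetime

    def default_snapshot_suffix():
        # Seconds since the Unix epoch, e.g. 1700000000.123456
        seconds = (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()
        # Drop the decimal point so the suffix is a single token, e.g. "snap-1700000000123456"
        return ("snap-" + str(seconds)).replace(".", "")

    print(default_snapshot_suffix())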
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py
new file mode 100644
index 000000000..8db09f7b6
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_snmp_agent
+version_added: '1.0.0'
+short_description: Configure the FlashBlade SNMP Agent
+description:
+- Configure the management SNMP Agent on a Pure Storage FlashBlade.
+- This module is not idempotent and will always modify the
+ existing management SNMP agent due to hidden parameters that cannot
+ be compared to the play parameters.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ auth_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase of 8 - 32 characters.
+ auth_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Hash algorithm to use
+ choices: [ MD5, SHA ]
+ community:
+ type: str
+ description:
+ - SNMP v2c only. Manager community ID. Between 1 and 32 characters long.
+ user:
+ type: str
+ description:
+ - SNMP v3 only. User ID recognized by the specified SNMP agent.
+ Must be between 1 and 32 characters.
+ version:
+ type: str
+ description:
+ - Version of SNMP protocol to use for the agent.
+ choices: [ v2c, v3 ]
+ privacy_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase to encrypt SNMP messages.
+ Must be between 8 and 63 non-space ASCII characters.
+ privacy_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Encryption protocol to use
+ choices: [ AES, DES ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Update v2c SNMP agent
+ purestorage.flashblade.purefb_snmp_agent:
+ community: public
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Update v3 SNMP agent
+ purestorage.flashblade.purefb_snmp_agent:
+ version: v3
+ auth_protocol: MD5
+ auth_passphrase: password
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import SnmpAgent, SnmpV2c, SnmpV3
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def update_agent(module, blade):
+ """Update SNMP Agent"""
+ changed = False
+ try:
+ agent = blade.snmp_agents.list_snmp_agents()
+ except Exception:
+ module.fail_json(msg="Failed to get configuration for SNMP agent.")
+ current_attr = {
+ "community": agent.items[0].v2c.community,
+ "version": agent.items[0].version,
+ "auth_passphrase": agent.items[0].v3.auth_passphrase,
+ "auth_protocol": agent.items[0].v3.auth_protocol,
+ "privacy_passphrase": agent.items[0].v3.privacy_passphrase,
+ "privacy_protocol": agent.items[0].v3.privacy_protocol,
+ "user": agent.items[0].v3.user,
+ }
+ new_attr = {
+ "community": module.params["community"],
+ "version": module.params["version"],
+ "auth_passphrase": module.params["auth_passphrase"],
+ "auth_protocol": module.params["auth_protocol"],
+ "privacy_passphrase": module.params["privacy_passphrase"],
+ "privacy_protocol": module.params["privacy_protocol"],
+ "user": module.params["user"],
+ }
+ if current_attr != new_attr:
+ changed = True
+ if not module.check_mode:
+ if new_attr["version"] == "v2c":
+ updated_v2c_attrs = SnmpV2c(community=new_attr["community"])
+ updated_v2c_agent = SnmpAgent(version="v2c", v2c=updated_v2c_attrs)
+ try:
+ blade.snmp_agents.update_snmp_agents(snmp_agent=updated_v2c_agent)
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to update v2c SNMP agent.")
+ else:
+ updated_v3_attrs = SnmpV3(
+ auth_protocol=new_attr["auth_protocol"],
+ auth_passphrase=new_attr["auth_passphrase"],
+ privacy_protocol=new_attr["privacy_protocol"],
+ privacy_passphrase=new_attr["privacy_passphrase"],
+ user=new_attr["user"],
+ )
+ updated_v3_agent = SnmpAgent(version="v3", v3=updated_v3_attrs)
+ try:
+ blade.snmp_agents.update_snmp_agents(snmp_agent=updated_v3_agent)
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to update v3 SNMP agent.")
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ user=dict(type="str"),
+ auth_passphrase=dict(type="str", no_log=True),
+ auth_protocol=dict(type="str", choices=["MD5", "SHA"]),
+ privacy_passphrase=dict(type="str", no_log=True),
+ privacy_protocol=dict(type="str", choices=["AES", "DES"]),
+ version=dict(type="str", choices=["v2c", "v3"]),
+ community=dict(type="str"),
+ )
+ )
+
+ required_together = [
+ ["auth_passphrase", "auth_protocol"],
+ ["privacy_passphrase", "privacy_protocol"],
+ ]
+ required_if = [["version", "v2c", ["community"]], ["version", "v3", ["user"]]]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
+ if module.params["version"] == "v3":
+ if module.params["auth_passphrase"] and (
+ 8 > len(module.params["auth_passphrase"]) > 32
+ ):
+ module.fail_json(msg="auth_password must be between 8 and 32 characters")
+ if (
+ module.params["privacy_passphrase"]
+ and 8 > len(module.params["privacy_passphrase"]) > 63
+ ):
+ module.fail_json(msg="privacy_password must be between 8 and 63 characters")
+
+ update_agent(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
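The update path above flattens the current and requested agent settings into two dicts and only issues an update when they differ; because the array never returns the SNMPv3 passphrases, those keys stay empty and the comparison never settles, which is why the module is documented as non-idempotent. A minimal sketch of the comparison pattern, with illustrative values in place of the SDK objects:

    current_attr = {
        "community": "public",
        "version": "v2c",
        "auth_passphrase": None,   # never reported back by the array
        "auth_protocol": None,
        "privacy_passphrase": None,
        "privacy_protocol": None,
        "user": None,
    }
    # Requested settings from the play, here changing only the community string
    new_attr = dict(current_attr, community="private")

    changed = current_attr != new_attr
    print(changed)  # True -> an update call would be issued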
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py
new file mode 100644
index 000000000..66c2417dd
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_snmp_mgr
+version_added: '1.0.0'
+short_description: Configure FlashBlade SNMP Managers
+description:
+- Manage SNMP managers on a Pure Storage FlashBlade.
+- This module is not idempotent and will always modify an
+ existing SNMP manager due to hidden parameters that cannot
+ be compared to the play parameters.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of SNMP Manager
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete SNMP manager
+ type: str
+ default: present
+ choices: [ absent, present ]
+ auth_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase of 8 - 32 characters.
+ auth_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Hash algorithm to use
+ choices: [ MD5, SHA ]
+ community:
+ type: str
+ description:
+ - SNMP v2c only. Manager community ID. Between 1 and 32 characters long.
+ host:
+ type: str
+ description:
+ - IPv4 or IPv6 address or FQDN to send trap messages to.
+ user:
+ type: str
+ description:
+ - SNMP v3 only. User ID recognized by the specified SNMP manager.
+ Must be between 1 and 32 characters.
+ version:
+ type: str
+ description:
+ - Version of SNMP protocol to use for the manager.
+ choices: [ v2c, v3 ]
+ notification:
+ type: str
+ description:
+ - Action to perform on event.
+ default: trap
+ choices: [ inform, trap ]
+ privacy_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase to encrypt SNMP messages.
+ Must be between 8 and 63 non-space ASCII characters.
+ privacy_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Encryption protocol to use
+ choices: [ AES, DES ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing SNMP manager
+ purestorage.flashblade.purefb_snmp_mgr:
+ name: manager1
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create v2c SNMP manager
+ purestorage.flashblade.purefb_snmp_mgr:
+ name: manager1
+ community: public
+ host: 10.21.22.23
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create v3 SNMP manager
+ purestorage.flashblade.purefb_snmp_mgr:
+ name: manager2
+ version: v3
+ auth_protocol: MD5
+ auth_passphrase: password
+ host: 10.21.22.23
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Update existing SNMP manager
+ purestorage.flashblade.purefb_snmp_mgr:
+ name: manager1
+ community: private
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import SnmpManager, SnmpV2c, SnmpV3
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def update_manager(module, blade):
+ """Update SNMP Manager"""
+ changed = False
+ try:
+ mgr = blade.snmp_managers.list_snmp_managers(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to get configuration for SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ current_attr = {
+ "community": mgr.items[0].v2c.community,
+ "notification": mgr.items[0].notification,
+ "host": mgr.items[0].host,
+ "version": mgr.items[0].version,
+ "auth_passphrase": mgr.items[0].v3.auth_passphrase,
+ "auth_protocol": mgr.items[0].v3.auth_protocol,
+ "privacy_passphrase": mgr.items[0].v3.privacy_passphrase,
+ "privacy_protocol": mgr.items[0].v3.privacy_protocol,
+ "user": mgr.items[0].v3.user,
+ }
+ new_attr = {
+ "community": module.params["community"],
+ "notification": module.params["notification"],
+ "host": module.params["host"],
+ "version": module.params["version"],
+ "auth_passphrase": module.params["auth_passphrase"],
+ "auth_protocol": module.params["auth_protocol"],
+ "privacy_passphrase": module.params["privacy_passphrase"],
+ "privacy_protocol": module.params["privacy_protocol"],
+ "user": module.params["user"],
+ }
+ if current_attr != new_attr:
+ changed = True
+ if not module.check_mode:
+ if new_attr["version"] == "v2c":
+ updated_v2c_attrs = SnmpV2c(community=new_attr["community"])
+ updated_v2c_manager = SnmpManager(
+ host=new_attr["host"],
+ notification=new_attr["notification"],
+ version="v2c",
+ v2c=updated_v2c_attrs,
+ )
+ try:
+ blade.snmp_managers.update_snmp_managers(
+ names=[module.params["name"]], snmp_manager=updated_v2c_manager
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update v2c SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ updated_v3_attrs = SnmpV3(
+ auth_protocol=new_attr["auth_protocol"],
+ auth_passphrase=new_attr["auth_passphrase"],
+ privacy_protocol=new_attr["privacy_protocol"],
+ privacy_passphrase=new_attr["privacy_passphrase"],
+ user=new_attr["user"],
+ )
+ updated_v3_manager = SnmpManager(
+ host=new_attr["host"],
+ notification=new_attr["notification"],
+ version="v3",
+ v3=updated_v3_attrs,
+ )
+ try:
+ blade.snmp_managers.update_snmp_managers(
+ names=[module.params["name"]], snmp_manager=updated_v3_manager
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update v3 SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def delete_manager(module, blade):
+ """Delete SNMP Manager"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.snmp_managers.delete_snmp_managers(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Delete SNMP manager {0} failed".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_manager(module, blade):
+ """Create SNMP Manager"""
+ changed = True
+ if not module.check_mode:
+ if not module.params["version"]:
+ module.fail_json(msg="SNMP version required to create a new manager")
+ if module.params["version"] == "v2c":
+ v2_attrs = SnmpV2c(community=module.params["community"])
+ new_v2_manager = SnmpManager(
+ host=module.params["host"],
+ notification=module.params["notification"],
+ version="v2c",
+ v2c=v2_attrs,
+ )
+ try:
+ blade.snmp_managers.create_snmp_managers(
+ names=[module.params["name"]], snmp_manager=new_v2_manager
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create v2c SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ v3_attrs = SnmpV3(
+ auth_protocol=module.params["auth_protocol"],
+ auth_passphrase=module.params["auth_passphrase"],
+ privacy_protocol=module.params["privacy_protocol"],
+ privacy_passphrase=module.params["privacy_passphrase"],
+ user=module.params["user"],
+ )
+ new_v3_manager = SnmpManager(
+ host=module.params["host"],
+ notification=module.params["notification"],
+ version="v3",
+ v3=v3_attrs,
+ )
+ try:
+ blade.snmp_managers.create_snmp_managers(
+ names=[module.params["name"]], snmp_manager=new_v3_manager
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create v3 SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ host=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ user=dict(type="str"),
+ notification=dict(type="str", choices=["inform", "trap"], default="trap"),
+ auth_passphrase=dict(type="str", no_log=True),
+ auth_protocol=dict(type="str", choices=["MD5", "SHA"]),
+ privacy_passphrase=dict(type="str", no_log=True),
+ privacy_protocol=dict(type="str", choices=["AES", "DES"]),
+ version=dict(type="str", choices=["v2c", "v3"]),
+ community=dict(type="str"),
+ )
+ )
+
+ required_together = [
+ ["auth_passphrase", "auth_protocol"],
+ ["privacy_passphrase", "privacy_protocol"],
+ ]
+ required_if = [
+ ["version", "v2c", ["community", "host"]],
+ ["version", "v3", ["host", "user"]],
+ ]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
+ mgr_configured = False
+ mgrs = blade.snmp_managers.list_snmp_managers()
+ for mgr in range(0, len(mgrs.items)):
+ if mgrs.items[mgr].name == module.params["name"]:
+ mgr_configured = True
+ break
+ if module.params["version"] == "v3":
+ if module.params["auth_passphrase"] and (
+ 8 > len(module.params["auth_passphrase"]) > 32
+ ):
+ module.fail_json(msg="auth_password must be between 8 and 32 characters")
+ if (
+ module.params["privacy_passphrase"]
+ and 8 > len(module.params["privacy_passphrase"]) > 63
+ ):
+ module.fail_json(msg="privacy_password must be between 8 and 63 characters")
+ if state == "absent" and mgr_configured:
+ delete_manager(module, blade)
+ elif mgr_configured and state == "present":
+ update_manager(module, blade)
+ elif not mgr_configured and state == "present":
+ create_manager(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
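Both SNMP modules bound the v3 passphrases to 8-32 characters (auth) and 8-63 characters (privacy) before calling the API. A minimal sketch of that range check as a standalone helper (the function name is illustrative):

    def validate_passphrases(auth_passphrase=None, privacy_passphrase=None):
        # auth passphrase: 8-32 characters; privacy passphrase: 8-63 characters
        if auth_passphrase is not None and not 8 <= len(auth_passphrase) <= 32:
            raise ValueError("auth_passphrase must be between 8 and 32 characters")
        if privacy_passphrase is not None and not 8 <= len(privacy_passphrase) <= 63:
            raise ValueError("privacy_passphrase must be between 8 and 63 characters")

    validate_passphrases(auth_passphrase="longenoughsecret")   # passes silently
    # validate_passphrases(auth_passphrase="short")            # would raise ValueError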
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py
new file mode 100644
index 000000000..7e3a35484
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_subnet
+version_added: "1.0.0"
+short_description: Manage network subnets in a Pure Storage FlashBlade
+description:
+ - This module manages network subnets on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Subnet Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Create, delete or modify a subnet.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ lag:
+ description:
+ - Name of the Link Aggregation Group to use for the subnet.
+ default: uplink
+ type: str
+ version_added: "1.7.0"
+ gateway:
+ description:
+ - IPv4 or IPv6 address of subnet gateway.
+ required: false
+ type: str
+ mtu:
+ description:
+ - MTU size of the subnet. Range is 1280 to 9216.
+ required: false
+ default: 1500
+ type: int
+ prefix:
+ description:
+ - IPv4 or IPv6 address associated with the subnet.
+ - Supply the prefix length (CIDR) as well as the IP address.
+ - Required for subnet creation.
+ required: false
+ type: str
+ vlan:
+ description:
+ - VLAN ID of the subnet.
+ required: false
+ default: 0
+ type: int
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new network subnet named foo
+ purestorage.flashblade.purefb_subnet:
+ name: foo
+ prefix: "10.21.200.3/24"
+ gateway: 10.21.200.1
+ mtu: 9000
+ vlan: 2200
+ lag: bar
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Change configuration of existing subnet foo
+ purestorage.flashblade.purefb_subnet:
+ name: foo
+ state: present
+ prefix: "10.21.100.3/24"
+ gateway: 10.21.100.1
+ mtu: 1500
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete network subnet named foo
+ purestorage.flashblade.purefb_subnet:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Subnet, Reference
+except ImportError:
+ HAS_PURITY_FB = False
+
+try:
+ import netaddr
+
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MINIMUM_API_VERSION = "1.3"
+
+
+def get_subnet(module, blade):
+ """Return Subnet or None"""
+ subnet = []
+ subnet.append(module.params["name"])
+ try:
+ res = blade.subnets.list_subnets(names=subnet)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_subnet(module, blade):
+ """Create Subnet"""
+ changed = True
+ if not module.params["prefix"]:
+ module.fail_json(msg="prefix is required for subnet creation")
+ if not module.check_mode:
+ subnet = []
+ subnet.append(module.params["name"])
+ try:
+ if module.params["gateway"]:
+ blade.subnets.create_subnets(
+ names=subnet,
+ subnet=Subnet(
+ prefix=module.params["prefix"],
+ vlan=module.params["vlan"],
+ mtu=module.params["mtu"],
+ gateway=module.params["gateway"],
+ link_aggregation_group=Reference(name=module.params["lag"]),
+ ),
+ )
+ else:
+ blade.subnets.create_subnets(
+ names=subnet,
+ subnet=Subnet(
+ prefix=module.params["prefix"],
+ vlan=module.params["vlan"],
+ mtu=module.params["mtu"],
+ link_aggregation_group=Reference(name=module.params["lag"]),
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create subnet {0}. Confirm supplied parameters".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def modify_subnet(module, blade):
+ """Modify Subnet settings"""
+ changed = False
+ subnet = get_subnet(module, blade)
+ subnet_new = []
+ subnet_new.append(module.params["name"])
+ if module.params["prefix"]:
+ if module.params["prefix"] != subnet.prefix:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.subnets.update_subnets(
+ names=subnet_new, subnet=Subnet(prefix=module.params["prefix"])
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change subnet {0} prefix to {1}".format(
+ module.params["name"], module.params["prefix"]
+ )
+ )
+ if module.params["vlan"]:
+ if module.params["vlan"] != subnet.vlan:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.subnets.update_subnets(
+ names=subnet_new, subnet=Subnet(vlan=module.params["vlan"])
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change subnet {0} VLAN to {1}".format(
+ module.params["name"], module.params["vlan"]
+ )
+ )
+ if module.params["gateway"]:
+ if module.params["gateway"] != subnet.gateway:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.subnets.update_subnets(
+ names=subnet_new,
+ subnet=Subnet(gateway=module.params["gateway"]),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change subnet {0} gateway to {1}".format(
+ module.params["name"], module.params["gateway"]
+ )
+ )
+ if module.params["mtu"]:
+ if module.params["mtu"] != subnet.mtu:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.subnets.update_subnets(
+ names=subnet_new, subnet=Subnet(mtu=module.params["mtu"])
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to change subnet {0} MTU to {1}".format(
+ module.params["name"], module.params["mtu"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_subnet(module, blade):
+ """Delete Subnet"""
+ changed = True
+ if not module.check_mode:
+ subnet = []
+ subnet.append(module.params["name"])
+ try:
+ blade.subnets.delete_subnets(names=subnet)
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete subnet {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=["present", "absent"]),
+ gateway=dict(),
+ lag=dict(type="str", default="uplink"),
+ mtu=dict(type="int", default=1500),
+ prefix=dict(),
+ vlan=dict(type="int", default=0),
+ )
+ )
+
+ required_if = [["state", "present", ["prefix"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ if not HAS_NETADDR:
+ module.fail_json(msg="netaddr module is required")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MINIMUM_API_VERSION not in api_version:
+ module.fail_json(msg="Upgrade Purity//FB to enable this module")
+ subnet = get_subnet(module, blade)
+ try:
+ blade.link_aggregation_groups.list_link_aggregation_groups(
+ names=[module.params["lag"]]
+ )
+ except Exception:
+ module.fail_json(msg="LAG {0} does not exist.".format(module.params["lag"]))
+
+ if state == "present":
+ if not (1280 <= module.params["mtu"] <= 9216):
+ module.fail_json(
+ msg="MTU {0} is out of range (1280 to 9216)".format(
+ module.params["mtu"]
+ )
+ )
+ if not (0 <= module.params["vlan"] <= 4094):
+ module.fail_json(
+ msg="VLAN ID {0} is out of range (0 to 4094)".format(
+ module.params["vlan"]
+ )
+ )
+ if module.params["gateway"]:
+ if netaddr.IPAddress(module.params["gateway"]) not in netaddr.IPNetwork(
+ module.params["prefix"]
+ ):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ subnets = blade.subnets.list_subnets()
+ nrange = netaddr.IPSet([module.params["prefix"]])
+ for sub in range(0, len(subnets.items)):
+ if (
+ subnets.items[sub].vlan == module.params["vlan"]
+ and subnets.items[sub].name != module.params["name"]
+ ):
+ module.fail_json(
+ msg="VLAN ID {0} is already in use.".format(module.params["vlan"])
+ )
+ if (
+ nrange & netaddr.IPSet([subnets.items[sub].prefix])
+ and subnets.items[sub].name != module.params["name"]
+ ):
+ module.fail_json(msg="Prefix CIDR overlaps with existing subnet.")
+
+ if state == "present" and not subnet:
+ create_subnet(module, blade)
+ elif state == "present" and subnet:
+ modify_subnet(module, blade)
+ elif state == "absent" and subnet:
+ delete_subnet(module, blade)
+ elif state == "absent" and not subnet:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
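The subnet validation above leans on netaddr for two checks: the gateway must sit inside the supplied prefix, and the new prefix must not overlap any existing subnet. A minimal standalone sketch of both checks, assuming netaddr is installed and using illustrative addresses:

    import netaddr

    prefix = "10.21.200.3/24"
    gateway = "10.21.200.1"
    existing_prefixes = ["10.21.100.0/24", "192.168.1.0/24"]

    # The gateway must belong to the network described by the prefix
    if netaddr.IPAddress(gateway) not in netaddr.IPNetwork(prefix):
        raise ValueError("Gateway and subnet are not compatible.")

    # The new prefix must not intersect any already-configured subnet
    new_range = netaddr.IPSet([prefix])
    for existing in existing_prefixes:
        if new_range & netaddr.IPSet([existing]):
            raise ValueError("Prefix CIDR overlaps with existing subnet " + existing)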
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py
new file mode 100644
index 000000000..2a7406418
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_syslog
+version_added: '1.4.0'
+short_description: Configure Pure Storage FlashBlade syslog settings
+description:
+- Configure the syslog servers for Pure Storage FlashBlades.
+- Add or delete an individual syslog server from the existing
+ list of servers.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Unique identifier for the syslog server address
+ type: str
+ required: true
+ state:
+ description:
+ - Create or delete syslog servers configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ protocol:
+ description:
+ - Protocol which server uses
+ type: str
+ choices: [ tcp, tls, udp ]
+ port:
+ description:
+ - Port at which the server is listening. If no port is specified
+ the system will use 514
+ type: str
+ address:
+ description:
+ - Syslog server address.
+ This field supports IPv4 or FQDN.
+ An invalid IP address will cause the module to fail.
+ No validation is performed for FQDNs.
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing syslog server entries
+ purestorage.flashblade.purefb_syslog:
+ name: syslog1
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Set array syslog servers
+ purestorage.flashblade.purefb_syslog:
+ state: present
+ name: syslog1
+ address: syslog1.com
+ protocol: udp
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import SyslogServerPostOrPatch
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.10"
+
+
+def delete_syslog(module, blade):
+ """Delete Syslog Server"""
+ changed = False
+ try:
+ server = blade.syslog.list_syslog_servers(names=[module.params["name"]])
+ except Exception:
+ server = None
+
+ if server:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.syslog.delete_syslog_servers(names=[module.params["name"]])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to remove syslog server: {0}".format(
+ module.params["name"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def add_syslog(module, blade):
+ """Add Syslog Server"""
+ changed = False
+ noport_address = module.params["protocol"] + "://" + module.params["address"]
+
+ if module.params["port"]:
+ full_address = noport_address + ":" + module.params["port"]
+ else:
+ full_address = noport_address
+
+ address_list = blade.syslog.list_syslog_servers()
+ if len(address_list.items) == 3:
+ module.fail_json(msg="Maximum number of syslog servers (3) already configured.")
+ exists = False
+
+ if address_list:
+ for address in range(0, len(address_list.items)):
+ if address_list.items[address].name == module.params["name"]:
+ exists = True
+ break
+ if not exists:
+ changed = True
+ if not module.check_mode:
+ try:
+ attr = SyslogServerPostOrPatch(uri=full_address)
+ blade.syslog.create_syslog_servers(
+ syslog=attr, names=[module.params["name"]]
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to add syslog server {0} - {1}".format(
+ module.params["name"], full_address
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(type="str"),
+ protocol=dict(type="str", choices=["tcp", "tls", "udp"]),
+ port=dict(type="str"),
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ required_if = [["state", "present", ["address", "protocol"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if module.params["state"] == "absent":
+ delete_syslog(module, blade)
+ else:
+ add_syslog(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
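Each syslog entry above is stored as a single URI assembled from protocol, address and an optional port (the array assumes 514 when the port is omitted). A minimal sketch of that assembly (the helper name and values are illustrative):

    def build_syslog_uri(protocol, address, port=None):
        # e.g. "udp://syslog1.com" or "tcp://10.0.0.5:1514"
        uri = protocol + "://" + address
        if port:
            uri = uri + ":" + str(port)
        return uri

    print(build_syslog_uri("udp", "syslog1.com"))        # udp://syslog1.com
    print(build_syslog_uri("tcp", "10.0.0.5", "1514"))   # tcp://10.0.0.5:1514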
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py
new file mode 100644
index 000000000..ab37bfda3
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_target
+version_added: '1.0.0'
+short_description: Manage remote S3-capable targets for a FlashBlade
+description:
+- Manage remote S3-capable targets for a FlashBlade system
+- Use this for non-FlashBlade targets.
+- Use I(purestorage.flashblade.purefb_connect) for FlashBlade targets.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete remote target
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - Name of S3-capable target (IP or FQDN)
+ type: str
+ required: true
+ address:
+ description:
+ - Address of S3-capable target (IP or FQDN)
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create a connection to remote S3-capable target
+ purestorage.flashblade.purefb_target:
+ name: target_1
+ address: 10.10.10.20
+ fb_url: 10.10.10.2
+ api_token: T-89faa581-c668-483d-b77d-23c5d88ba35c
+- name: Delete connection to remote S3-capable system
+ purestorage.flashblade.purefb_target:
+ state: absent
+ name: target_1
+ fb_url: 10.10.10.2
+ api_token: T-89faa581-c668-483d-b77d-23c5d88ba35c
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import TargetPost, Target
+except ImportError:
+ HAS_PURITYFB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MINIMUM_API_VERSION = "1.9"
+
+
+def _check_replication_configured(module, blade):
+ interfaces = blade.network_interfaces.list_network_interfaces()
+ repl_ok = False
+ for link in range(0, len(interfaces.items)):
+ if "replication" in interfaces.items[link].services:
+ repl_ok = True
+ if not repl_ok:
+ module.fail_json(
+ msg="Replication network interface required to configure a target"
+ )
+
+
+def _check_connected(module, blade):
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].name == module.params["name"]:
+ return connected_targets.items[target]
+ return None
+
+
+def break_connection(module, blade):
+ """Break connection to remote target"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.targets.delete_targets(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to disconnect target {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_connection(module, blade):
+ """Create connection to remote target"""
+ changed = True
+ if not module.check_mode:
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].address == module.params["address"]:
+ module.fail_json(
+ msg="Target already exists with same connection address"
+ )
+ try:
+ target = TargetPost(address=module.params["address"])
+ blade.targets.create_targets(names=[module.params["name"]], target=target)
+ except Exception:
+ module.fail_json(
+ msg="Failed to connect to remote target {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_connection(module, blade, connection):
+ """Update target connection address"""
+ changed = False
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if (
+ connected_targets.items[target].address == module.params["address"]
+ and connected_targets.items[target].name != module.params["name"]
+ ):
+ module.fail_json(msg="Target already exists with same connection address")
+ if module.params["address"] != connection.address:
+ changed = True
+ if not module.check_mode:
+ new_address = Target(
+ name=module.params["name"], address=module.params["address"]
+ )
+ try:
+ blade.targets.update_targets(
+ names=[connection.name], target=new_address
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change address for target {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str", required=True),
+ address=dict(type="str"),
+ )
+ )
+
+ required_if = [["state", "present", ["address"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITYFB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ _check_replication_configured(module, blade)
+ target = _check_connected(module, blade)
+ if state == "present" and not target:
+ create_connection(module, blade)
+ elif state == "present" and target:
+ update_connection(module, blade, target)
+ elif state == "absent" and target:
+ break_connection(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
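The module chooses between create and update by scanning the currently connected targets for a matching name, as _check_connected does above. A minimal sketch of that lookup over plain dicts (the data is illustrative; the real code walks the SDK's .items list):

    def find_target(targets, name):
        # Return the first target whose name matches, or None if not yet connected
        for target in targets:
            if target["name"] == name:
                return target
        return None

    targets = [{"name": "target_1", "address": "10.10.10.20"}]
    print(find_target(targets, "target_1"))  # existing entry -> update path
    print(find_target(targets, "target_2"))  # None -> create path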
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py
new file mode 100644
index 000000000..21e83c002
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_timeout
+version_added: '1.6.0'
+short_description: Configure Pure Storage FlashBlade GUI idle timeout
+description:
+- Configure GUI idle timeout for Pure Storage FlashBlade.
+- This does not affect existing GUI sessions.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set or disable the GUI idle timeout
+ default: present
+ type: str
+ choices: [ present, absent ]
+ timeout:
+ description:
+ - Minutes for idle timeout.
+ type: int
+ default: 30
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set GUI idle timeout to 25 minutes
+ purestorage.flashblade.purefb_timeout:
+ timeout: 25
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Disable idle timeout
+ purestorage.flashblade.purefb_timeout:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def set_timeout(module, blade):
+ """Set GUI idle timeout"""
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_arrays(
+ flashblade.Array(idle_timeout=module.params["timeout"] * 60000)
+ )
+ if res.status_code != 200:
+ module.fail_json(msg="Failed to set GUI idle timeout")
+
+ module.exit_json(changed=changed)
+
+
+def disable_timeout(module, blade):
+ """Disable idle timeout"""
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_arrays(flashblade.Array(idle_timeout=0))
+ if res.status_code != 200:
+ module.fail_json(msg="Failed to disable GUI idle timeout")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ timeout=dict(type="int", default=30),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ state = module.params["state"]
+ if module.params["timeout"] != 0 and not 5 <= module.params["timeout"] <= 180:
+ module.fail_json(msg="Timeout value must be between 5 and 180 minutes")
+ blade = get_system(module)
+ current_timeout = list(blade.get_arrays().items)[0].idle_timeout // 60000
+ if state == "present" and current_timeout != module.params["timeout"]:
+ set_timeout(module, blade)
+ elif state == "absent" and current_timeout != 0:
+ disable_timeout(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
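The REST API expresses the GUI idle timeout in milliseconds while the module works in minutes, so values are multiplied by 60000 when set and, for the comparison above to work as intended, divided by 60000 when read back. A minimal sketch of the conversion and the 5-180 minute bound (0 meaning disabled); the helper names are illustrative:

    MS_PER_MINUTE = 60000

    def minutes_to_ms(minutes):
        return minutes * MS_PER_MINUTE

    def ms_to_minutes(milliseconds):
        return milliseconds // MS_PER_MINUTE

    def validate_timeout(minutes):
        # 0 disables the timeout; otherwise it must lie between 5 and 180 minutes
        if minutes != 0 and not 5 <= minutes <= 180:
            raise ValueError("Timeout value must be between 5 and 180 minutes")

    validate_timeout(30)
    print(minutes_to_ms(30))       # 1800000
    print(ms_to_minutes(1800000))  # 30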
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_tz.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_tz.py
new file mode 100644
index 000000000..9f6acc5d4
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_tz.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_tz
+version_added: '1.10.0'
+short_description: Configure Pure Storage FlashBlade timezone
+description:
+- Configure the timezone for a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ timezone:
+ description:
+ - If not provided, the module will attempt to get the current local timezone from the server
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set FlashBlade Timezone to Americas/Los_Angeles
+ purestorage.flashblade.purefb_tz:
+ timezone: "America/Los_Angeles"
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PYTZ = True
+try:
+ import pytz
+except ImportError:
+ HAS_PYTZ = False
+
+import os
+import re
+import platform
+
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def _findstr(text, match):
+ for line in text.splitlines():
+ if match in line:
+ found = line
+ return found
+
+
+def _get_local_tz(module, timezone="UTC"):
+ """
+ We will attempt to get the local timezone of the server running the module and use that.
+ If we can't get the timezone then we will set the default to be UTC
+
+ Linux has been tested and other operating systems should be OK.
+ Failures cause assumption of UTC
+
+ Windows is not supported and will assume UTC
+ """
+ if platform.system() == "Linux":
+ timedatectl = get_bin_path("timedatectl")
+ if timedatectl is not None:
+ rcode, stdout, stderr = module.run_command(timedatectl)
+ if rcode == 0 and stdout:
+ line = _findstr(stdout, "Time zone")
+ full_tz = line.split(":", 1)[1].rstrip()
+ timezone = full_tz.split()[0]
+ return timezone
+ else:
+ module.warn("Incorrect timedatectl output. Timezone will be set to UTC")
+ else:
+ if os.path.exists("/etc/timezone"):
+ timezone = get_file_content("/etc/timezone")
+ else:
+ module.warn("Could not find /etc/timezone. Assuming UTC")
+
+ elif platform.system() == "SunOS":
+ if os.path.exists("/etc/default/init"):
+ for line in get_file_content("/etc/default/init", "").splitlines():
+ if line.startswith("TZ="):
+ timezone = line.split("=", 1)[1]
+ return timezone
+ else:
+ module.warn("Could not find /etc/default/init. Assuming UTC")
+
+ elif re.match("^Darwin", platform.platform()):
+ systemsetup = get_bin_path("systemsetup")
+ if systemsetup is not None:
+ rcode, stdout, stderr = module.run_command([systemsetup, "-gettimezone"])
+ if rcode == 0 and stdout:
+ timezone = stdout.split(":", 1)[1].lstrip()
+ else:
+ module.warn("Could not run systemsetup. Assuming UTC")
+ else:
+ module.warn("Could not find systemsetup. Assuming UTC")
+
+ elif re.match("^(Free|Net|Open)BSD", platform.platform()):
+ if os.path.exists("/etc/timezone"):
+ timezone = get_file_content("/etc/timezone")
+ else:
+ module.warn("Could not find /etc/timezone. Assuming UTC")
+
+ elif platform.system() == "AIX":
+ aix_oslevel = int(platform.version() + platform.release())
+ if aix_oslevel >= 61:
+ if os.path.exists("/etc/environment"):
+ for line in get_file_content("/etc/environment", "").splitlines():
+ if line.startswith("TZ="):
+ timezone = line.split("=", 1)[1]
+ return timezone
+ else:
+ module.warn("Could not find /etc/environment. Assuming UTC")
+ else:
+ module.warn(
+ "Cannot determine timezone when AIX os level < 61. Assuming UTC"
+ )
+
+ else:
+ module.warn("Could not find /etc/timezone. Assuming UTC")
+
+ return timezone
+
+
+def set_timezone(module, blade):
+ """Set timezone"""
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_arrays(flashblade.Array(time_zone=module.params["timezone"]))
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to timezone. Error: {0}".format(res.errors[0].message)
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ timezone=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ if not HAS_PYTZ:
+ module.fail_json(msg="pytz is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ if not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in pytz.all_timezones_set:
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(module.params["timezone"])
+ )
+
+ blade = get_system(module)
+ current_tz = list(blade.get_arrays().items)[0].time_zone
+ if current_tz != module.params["timezone"]:
+ set_timezone(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
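On Linux the local-timezone fallback above shells out to timedatectl and keeps only the zone name from its "Time zone" line. A minimal sketch of that parsing step against a captured output string (the sample output is illustrative):

    SAMPLE = (
        "               Local time: Tue 2024-01-02 10:00:00 PST\n"
        "           Universal time: Tue 2024-01-02 18:00:00 UTC\n"
        "                Time zone: America/Los_Angeles (PST, -0800)\n"
    )

    def parse_timezone(stdout, default="UTC"):
        # Find the "Time zone" line and keep only the zone name itself
        for line in stdout.splitlines():
            if "Time zone" in line:
                full_tz = line.split(":", 1)[1].rstrip()
                return full_tz.split()[0]
        return default

    print(parse_timezone(SAMPLE))  # America/Los_Angeles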
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py
new file mode 100644
index 000000000..ffa34be8e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_user
+version_added: '1.0.0'
+short_description: Modify FlashBlade user accounts
+description:
+- Modify user on a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the user account
+ type: str
+ password:
+ description:
+ - Password for the local user.
+ - Only applies to the local user 'pureuser'
+ type: str
+ old_password:
+ description:
+ - If changing an existing password, you must provide the old password for security
+ - Only applies to the local user 'pureuser'
+ type: str
+ public_key:
+ description:
+ - The API client's PEM formatted (Base64 encoded) RSA public key.
+ - Include the I(-----BEGIN PUBLIC KEY-----) and I(-----END PUBLIC KEY-----) lines
+ type: str
+ version_added: "1.8.0"
+ clear_lock:
+ description:
+ - Clear user lockout flag
+ type: bool
+ default: false
+ version_added: "1.8.0"
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Change password for local user (NOT IDEMPOTENT)
+ purestorage.flashblade.purefb_user:
+ name: pureuser
+ password: anewpassword
+ old_password: apassword
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Set public key for user
+ purestorage.flashblade.purefb_user:
+ name: fred
+ public_key: "{{lookup('file', 'public_pem_file') }}"
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Clear user lockout
+ purestorage.flashblade.purefb_user:
+ name: fred
+ clear_lock: true
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Admin
+except ImportError:
+ HAS_PURITY_FB = False
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flashblade import AdminPatch
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "1.3"
+MIN_KEY_API_VERSION = "2.1"
+MIN_LOCK_API_VERSION = "2.3"
+
+
+def update_user(module, blade):
+ """Create or Update Local User Account"""
+ changed = False
+ if module.params["password"] and module.params["name"].lower() == "pureuser":
+ if module.params["password"] != module.params["old_password"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ new_admin = Admin()
+ new_admin.password = module.params["password"]
+ new_admin.old_password = module.params["old_password"]
+ blade.admins.update_admins(names=["pureuser"], admin=new_admin)
+ except Exception:
+ module.fail_json(
+ msg="Local User {0}: Password reset failed. "
+ "Check passwords. One of these is incorrect.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Local User Account {0}: Password change failed - "
+ "Old and new passwords are the same".format(module.params["name"])
+ )
+ if module.params["password"] and module.params["name"].lower() != "pureuser":
+ module.fail_json(msg="Changing password for remote accounts is not supported.")
+ api_version = blade.api_version.list_versions().versions
+ if MIN_KEY_API_VERSION in api_version:
+ bladev2 = get_system(module)
+ try:
+ user_data = list(bladev2.get_admins(names=[module.params["name"]]).items)[0]
+ except AttributeError:
+ module.fail_json(
+ msg="User {0} does not currently exist in the FlashBlade. "
+ "Please login to this user before attempting to modify it.".format(
+ module.params["name"]
+ )
+ )
+ current_key = user_data.public_key
+ if module.params["public_key"] and current_key != module.params["public_key"]:
+ changed = True
+ if not module.check_mode:
+ my_admin = AdminPatch(public_key=module.params["public_key"])
+ res = bladev2.patch_admins(
+ names=[module.params["name"]], admin=my_admin
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change public_key for {0}.".format(
+ module.params["name"]
+ )
+ )
+ if MIN_LOCK_API_VERSION in api_version:
+ if user_data.locked and module.params["clear_lock"]:
+ changed = True
+ if not module.check_mode:
+ my_admin = AdminPatch(locked=False)
+ res = bladev2.patch_admins(
+ names=[module.params["name"]], admin=my_admin
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to unlock user {0}.".format(
+ module.params["name"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str"),
+ public_key=dict(type="str", no_log=True),
+ password=dict(type="str", no_log=True),
+ old_password=dict(type="str", no_log=True),
+ clear_lock=dict(type="bool", default=False),
+ )
+ )
+
+ required_together = [["password", "old_password"]]
+ module = AnsibleModule(
+ argument_spec, supports_check_mode=True, required_together=required_together
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+ if not HAS_PURESTORAGE and module.params["public_key"]:
+ module.fail_json(msg="py-pure-client sdk is required for to set public keys")
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ update_user(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
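Password changes above are only honoured for the local pureuser account and are rejected when the new password matches the old one. A minimal sketch of that guard (the function name is illustrative):

    def password_change_allowed(name, password, old_password):
        # Only the local 'pureuser' account supports password changes here
        if name.lower() != "pureuser":
            raise ValueError("Changing password for remote accounts is not supported.")
        # A no-op change is rejected rather than silently accepted
        if password == old_password:
            raise ValueError("Old and new passwords are the same")
        return True

    print(password_change_allowed("pureuser", "anewpassword", "apassword"))  # True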
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py
new file mode 100644
index 000000000..6e7dbe49d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_userpolicy
+version_added: '1.6.0'
+short_description: Manage FlashBlade Object Store User Access Policies
+description:
+- Add or Remove FlashBlade Object Store Access Policies for Account User
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the Object Store User
+ - The user to have the policy request applied to
+ type: str
+ account:
+ description:
+ - Name of the Object Store Account associated with the user
+ type: str
+ state:
+ description:
+ - Define whether the Access Policy should be added or deleted
+ - Option to list all available policies
+ default: present
+ choices: [ absent, present, show ]
+ type: str
+ policy:
+ description:
+ - Policies to be added to or deleted from the Object Store User
+ - Only valid policies can be used
+ - Use I(state=show) to list the available policies
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: List existing user access policies for a specific user
+ purestorage.flashblade.purefb_userpolicy:
+ state: show
+ account: foo
+ name: bar
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+ register: policy_list
+
+- name: List all available user access policies
+ purestorage.flashblade.purefb_userpolicy:
+ state: show
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+ register: policy_list
+
+- name: Add user access policies to account user foo/bar
+ purestorage.flashblade.purefb_userpolicy:
+ name: bar
+ account: foo
+ policy:
+ - pure:policy/bucket-create
+ - pure:policy/bucket-delete
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Delete user access policies to account user foo/bar
+ purestorage.flashblade.purefb_userpolicy:
+ name: bar
+ account: foo
+ policy:
+ - pure:policy/bucket-create
+ - pure:policy/bucket-delete
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+policy_list:
+ description:
+ - Returns the list of access policies for a user
+ - If no user is specified, returns all available access policies
+ returned: always
+ type: list
+ elements: str
+ sample: ['pure:policy/object-list', 'pure:policy/bucket-list', 'pure:policy/object-read', 'pure:policy/bucket-delete', 'pure:policy/full-access']
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
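+# Helper intended to confirm that a named access policy exists on the array
+# before it is attached to or detached from a user.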
+def _check_valid_policy(blade, policy):
+ try:
+ return bool(blade.get_object_store_access_policies(names=[policy]))
+ except AttributeError:
+ return False
+
+
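+# add_policy first validates every requested policy name, so one invalid name
+# fails the task before any change is made, then attaches only the policies
+# the account user does not already hold.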
+def add_policy(module, blade):
+ """Add a single policy or a list of policies to an account user"""
+ changed = False
+ user_policy_list = []
+ policy_list = module.params["policy"]
+ for policy in range(0, len(policy_list)):
+ if not _check_valid_policy(blade, policy_list[policy]):
+ module.fail_json(msg="Policy {0} is not valid.".format(policy_list[policy]))
+ username = module.params["account"] + "/" + module.params["name"]
+ for policy in range(0, len(policy_list)):
+ if not (
+ blade.get_object_store_users_object_store_access_policies(
+ member_names=[username], policy_names=[policy_list[policy]]
+ ).items
+ ):
+ if not module.check_mode:
+ changed = True
+ res = blade.post_object_store_access_policies_object_store_users(
+ member_names=[username], policy_names=[policy_list[policy]]
+ )
+ user_policies = list(
+ blade.get_object_store_access_policies_object_store_users(
+ member_names=[username]
+ ).items
+ )
+ for user_policy in range(0, len(user_policies)):
+ user_policy_list.append(user_policies[user_policy].policy.name)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add policy {0} to account user {1}. Error: {2}".format(
+ policy_list[policy], username, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed, policy_list=user_policy_list)
+
+
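+# remove_policy mirrors add_policy: validate the requested policy names, then
+# detach only those policies currently assigned to the account user.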
+def remove_policy(module, blade):
+ """Remove a single policy or a list of policies from an account user"""
+ changed = False
+ user_policy_list = []
+ policy_list = module.params["policy"]
+ for policy in range(0, len(policy_list)):
+ if not _check_valid_policy(blade, policy_list[policy]):
+ module.fail_json(msg="Policy {0} is not valid.".format(policy_list[policy]))
+ username = module.params["account"] + "/" + module.params["name"]
+ for policy in range(0, len(policy_list)):
+ if (
+ blade.get_object_store_users_object_store_access_policies(
+ member_names=[username], policy_names=[policy_list[policy]]
+ ).total_item_count
+ == 1
+ ):
+ if not module.check_mode:
+ changed = True
+ res = blade.delete_object_store_access_policies_object_store_users(
+ member_names=[username], policy_names=[policy_list[policy]]
+ )
+ user_policies = list(
+ blade.get_object_store_access_policies_object_store_users(
+ member_names=[username]
+ ).items
+ )
+ for user_policy in range(0, len(user_policies)):
+ user_policy_list.append(user_policies[user_policy].policy.name)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to remove policy {0} from account user {1}. Error: {2}".format(
+ policy_list[policy], username, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed, policy_list=user_policy_list)
+
+
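+# list_policy backs state=show: with both account and name it returns the
+# policies attached to that user, otherwise every access policy on the array.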
+def list_policy(module, blade):
+ """List Object Store User Access Policies"""
+ changed = True
+ policy_list = []
+ if not module.check_mode:
+ if module.params["account"] and module.params["name"]:
+ username = module.params["account"] + "/" + module.params["name"]
+ user_policies = list(
+ blade.get_object_store_access_policies_object_store_users(
+ member_names=[username]
+ ).items
+ )
+ for user_policy in range(0, len(user_policies)):
+ policy_list.append(user_policies[user_policy].policy.name)
+ else:
+ policies = blade.get_object_store_access_policies()
+ if policies.status_code != 200:
+ module.fail_json(msg="Failed to get Object Store User Access Policies")
+ p_list = list(policies.items)
+ for policy in range(0, len(p_list)):
+ policy_list.append(p_list[policy].name)
+ module.exit_json(changed=changed, policy_list=policy_list)
+
+
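+# main verifies the account user (account/name) exists when both are supplied,
+# then dispatches to list_policy, add_policy or remove_policy based on state.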
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type="str", default="present", choices=["absent", "present", "show"]
+ ),
+ name=dict(type="str"),
+ account=dict(type="str"),
+ policy=dict(type="list", elements="str"),
+ )
+ )
+ required_if = [
+ ["state", "present", ["name", "account", "policy"]],
+ ["state", "absent", ["name", "account", "policy"]],
+ ]
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ state = module.params["state"]
+ if (
+ module.params["account"]
+ and module.params["name"]
+ and blade.get_object_store_users(
+ names=[module.params["account"] + "/" + module.params["name"]]
+ ).status_code
+ != 200
+ ):
+ module.fail_json(
+ msg="Account User {0}/{1} does not exist".format(
+ module.params["account"], module.params["name"]
+ )
+ )
+ if state == "show":
+ list_policy(module, blade)
+ elif state == "present":
+ add_policy(module, blade)
+ elif state == "absent":
+ remove_policy(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userquota.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userquota.py
new file mode 100644
index 000000000..74361f2ea
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userquota.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_userquota
+version_added: "1.7.0"
+short_description: Manage filesystem user quotas
+description:
+ - This module manages user quotas for filesystems on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Filesystem Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Create, delete or modify a quota.
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ quota:
+ description:
+ - User quota in M, G, T or P units. This cannot be 0.
+ - This value will override the file system's default user quota.
+ type: str
+ uid:
+ description:
+ - The user id on which the quota is enforced.
+ - Cannot be combined with I(uname)
+ type: int
+ uname:
+ description:
+ - The user name on which the quota is enforced.
+ - Cannot be combined with I(uid)
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new user (using UID) quota for filesystem named foo
+ purestorage.flashblade.purefb_userquota:
+ name: foo
+ quota: 1T
+ uid: 1234
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Create new user (using username) quota for filesystem named foo
+ purestorage.flashblade.purefb_userquota:
+ name: foo
+ quota: 1T
+ uname: bar
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete user quota on filesystem foo for user by UID
+ purestorage.flashblade.purefb_userquota:
+ name: foo
+ uid: 1234
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete user quota on filesystem foo for user by username
+ purestorage.flashblade.purefb_userquota:
+ name: foo
+ uname: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Update user quota on filesystem foo for user by username
+ purestorage.flashblade.purefb_userquota:
+ name: foo
+ quota: 20G
+ uname: bar
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Update user quota on filesystem foo for user by UID
+ purestorage.flashblade.purefb_userquota:
+ name: foo
+ quota: 20G
+ uid: 1234
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import QuotasUser
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
+def get_fs(module, blade):
+ """Return Filesystem or None"""
+ fsys = []
+ fsys.append(module.params["name"])
+ try:
+ res = blade.file_systems.list_file_systems(names=fsys)
+ return res.items[0]
+ except Exception:
+ return None
+
+
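+# get_quota looks up an existing user quota on the filesystem, filtered by
+# uid or uname, and returns None when no matching quota is found.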
+def get_quota(module, blade):
+ """Return Filesystem User Quota or None"""
+ fsys = []
+ fsys.append(module.params["name"])
+ try:
+ if module.params["uid"]:
+ res = blade.quotas_users.list_user_quotas(
+ file_system_names=fsys, filter="user.id=" + str(module.params["uid"])
+ )
+ else:
+ res = blade.quotas_users.list_user_quotas(
+ file_system_names=fsys,
+ filter="user.name='" + module.params["uname"] + "'",
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
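+# create_quota converts the human-readable quota string (for example 1T or
+# 20G) to bytes with human_to_bytes before calling the purity_fb API.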
+def create_quota(module, blade):
+ """Create Filesystem User Quota"""
+ changed = True
+ quota = int(human_to_bytes(module.params["quota"]))
+ if not module.check_mode:
+ try:
+ if module.params["uid"]:
+ blade.quotas_users.create_user_quotas(
+ file_system_names=[module.params["name"]],
+ uids=[module.params["uid"]],
+ quota=QuotasUser(quota=quota),
+ )
+ else:
+ blade.quotas_users.create_user_quotas(
+ file_system_names=[module.params["name"]],
+ user_names=[module.params["uname"]],
+ quota=QuotasUser(quota=quota),
+ )
+ except Exception:
+ if module.params["uid"]:
+ module.fail_json(
+ msg="Failed to create quota for UID {0} on filesystem {1}.".format(
+ module.params["uid"], module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to create quota for username {0} on filesystem {1}.".format(
+ module.params["uname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
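+# update_quota compares the requested size (converted to bytes) against the
+# current quota and only issues an update when the values differ.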
+def update_quota(module, blade):
+ """Update Filesystem User Quota"""
+ changed = False
+ current_quota = get_quota(module, blade)
+ quota = int(human_to_bytes(module.params["quota"]))
+ if current_quota.quota != quota:
+ changed = True
+ if not module.check_mode:
+ if module.params["uid"]:
+ try:
+ blade.quotas_users.update_user_quotas(
+ file_system_names=[module.params["name"]],
+ uids=[module.params["uid"]],
+ quota=QuotasUser(quota=quota),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update quota for UID {0} on filesystem {1}.".format(
+ module.params["uid"], module.params["name"]
+ )
+ )
+ else:
+ try:
+ blade.quotas_users.update_user_quotas(
+ file_system_names=[module.params["name"]],
+ user_names=[module.params["uname"]],
+ quota=QuotasUser(quota=quota),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update quota for username {0} on filesystem {1}.".format(
+ module.params["uname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
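+# delete_quota removes the user quota for the given uid or uname from the
+# filesystem.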
+def delete_quota(module, blade):
+ """Delete Filesystem User Quota"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params["uid"]:
+ blade.quotas_users.delete_user_quotas(
+ file_system_names=[module.params["name"]],
+ uids=[module.params["uid"]],
+ )
+ else:
+ blade.quotas_users.delete_user_quotas(
+ file_system_names=[module.params["name"]],
+ user_names=[module.params["uname"]],
+ )
+ except Exception:
+ if module.params["uid"]:
+ module.fail_json(
+ msg="Failed to delete quota for UID {0} on filesystem {1}.".format(
+ module.params["uid"], module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to delete quota for username {0} on filesystem {1}.".format(
+ module.params["uname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ uid=dict(type="int"),
+ uname=dict(type="str"),
+ state=dict(default="present", choices=["present", "absent"]),
+ quota=dict(type="str"),
+ )
+ )
+
+ mutually_exclusive = [["uid", "uname"]]
+ required_if = [["state", "present", ["quota"]]]
+ module = AnsibleModule(
+ argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+ fsys = get_fs(module, blade)
+ if not fsys:
+ module.fail_json(
+ msg="Filesystem {0} does not exist.".format(module.params["name"])
+ )
+ quota = get_quota(module, blade)
+
+ if state == "present" and not quota:
+ create_quota(module, blade)
+ elif state == "present" and quota:
+ update_quota(module, blade)
+ elif state == "absent" and quota:
+ delete_quota(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_virtualhost.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_virtualhost.py
new file mode 100644
index 000000000..7f4a2310e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_virtualhost.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_virtualhost
+version_added: '1.6.0'
+short_description: Manage FlashBlade Object Store Virtual Hosts
+description:
+- Add or delete FlashBlade Object Store Virtual Hosts
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the Object Store Virtual Host
+ - A hostname or domain by which the array can be addressed for virtual
+ hosted-style S3 requests.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the Object Store Virtual Host should be added or deleted
+ default: present
+ choices: [ absent, present ]
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Add Object Store Virtual Host
+ purestorage.flashblade.purefb_virtualhost:
+ name: "s3.acme.com"
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Delete Object Store Virtual Host
+ purestorage.flashblade.purefb_virtualhost:
+ name: "nohost.acme.com"
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+MAX_HOST_COUNT = 10
+
+
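+# s3.amazonaws.com is a built-in virtual host that cannot be deleted, and the
+# array supports at most MAX_HOST_COUNT virtual hosts in total.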
+def delete_host(module, blade):
+ """Delete Object Store Virtual Host"""
+ changed = False
+ if module.params["name"] == "s3.amazonaws.com":
+ module.warn("s3.amazonaws.com is a reserved name and cannot be deleted")
+ else:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_object_store_virtual_hosts(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete Object Store Virtual Host {0}".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def add_host(module, blade):
+ """Add Object Store Virtual Host"""
+ changed = True
+ if not module.check_mode:
+ res = blade.post_object_store_virtual_hosts(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add Object Store Virtual Host {0}".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str", required=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ state = module.params["state"]
+
+ exists = bool(
+ blade.get_object_store_virtual_hosts(names=[module.params["name"]]).status_code
+ == 200
+ )
+
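+ # Only add or delete when the current host count is below the limit;
+ # otherwise warn and make no change.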
+ if len(list(blade.get_object_store_virtual_hosts().items)) < MAX_HOST_COUNT:
+ if not exists and state == "present":
+ add_host(module, blade)
+ elif exists and state == "absent":
+ delete_host(module, blade)
+ else:
+ module.warn("Maximum number of Object Store Virtual Hosts reached.")
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/requirements.txt b/ansible_collections/purestorage/flashblade/requirements.txt
new file mode 100644
index 000000000..9742ecbfa
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/requirements.txt
@@ -0,0 +1,5 @@
+netaddr
+datetime
+pytz
+purity-fb
+py-pure-client
diff --git a/ansible_collections/purestorage/flashblade/roles/.keep b/ansible_collections/purestorage/flashblade/roles/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/roles/.keep
diff --git a/ansible_collections/purestorage/flashblade/settings.json b/ansible_collections/purestorage/flashblade/settings.json
new file mode 100644
index 000000000..a6fe89373
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/settings.json
@@ -0,0 +1,8 @@
+{
+ "id": "FlashBlade-Collection",
+ "name": "Ansible Collection for FlashBlade",
+ "filter": "devops",
+ "image": "http://code.purestorage.com/images/32_fb_collection.png",
+ "featured": 1,
+ "priority": 3
+}
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error