summaryrefslogtreecommitdiffstats
path: root/collections-debian-merged/ansible_collections/purestorage
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
commita453ac31f3428614cceb99027f8efbdb9258a40b (patch)
treef61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/purestorage
parentInitial commit. (diff)
downloadansible-upstream.tar.xz
ansible-upstream.zip
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.upstream/2.10.7+merged+base+2.10.8+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/purestorage')
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/CHANGELOG.rst139
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/FILES.json726
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/MANIFEST.json36
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/README.md87
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml231
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml152
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/config.yaml31
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/107_host_case_clarity.yaml7
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/108_fix_eradicate_idempotency.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/109_fa_files_support_purefa_info.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/110_add_apiclient_support.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/111_add_filesystem_support.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/112_add_directory_support.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/113_add_exports_support.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/115_add_gcp_offload.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/116_add_policies.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/118_rename_host.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/121_add_multi_volume_creation.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/122_add_multi_host_creation.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/123_add_multi_vgroup_creation.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/124_sdk_handshake.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/125_dns_idempotency.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/126_fix_volume_move.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/130_info_ds_update.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/131_add_v6_ds_update.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/132_fc_replication.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/133_purefa_info_v6_replication.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/134_ac_pg_support.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/135_no_cbs_ntp.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/136_add_vol_get_send_info.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/137_pgsnap_regex.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/139_pgsnap_ac_support.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/140_pod_case.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/141_add_remote_snapshot.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/145_fix_missing_move_variable.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/149_volumes_demoted_pods_fix.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/v1.4.0_summary.yaml37
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/files/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/roles/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/tasks/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/templates/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/vars/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py43
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py126
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py173
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py215
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py91
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py116
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py197
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py98
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py189
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py137
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py519
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py168
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py298
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py106
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py195
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py207
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py306
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py787
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py1445
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py222
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py232
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py139
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py348
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py563
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py448
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py378
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py99
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py468
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py237
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py579
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py123
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py113
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py122
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py149
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py475
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py328
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py277
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py160
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py110
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py222
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py463
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py238
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py144
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py913
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py261
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/roles/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flasharray/tests/sanity/ignore-2.10.txt2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/CHANGELOG.rst90
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/FILES.json586
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/MANIFEST.json37
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/README.md77
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml180
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml101
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/config.yaml31
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml33
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/files/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/roles/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/tasks/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/templates/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/vars/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py42
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py94
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py201
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py133
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py102
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py290
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py257
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py193
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py128
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py195
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py167
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py390
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py177
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py645
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py250
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py899
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py149
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py233
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py204
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py149
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py111
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py441
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py135
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py113
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py202
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py140
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py274
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py114
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py274
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py191
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py302
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py263
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py180
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py174
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py113
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/roles/.keep0
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt2
-rw-r--r--collections-debian-merged/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt2
159 files changed, 23832 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/CHANGELOG.rst b/collections-debian-merged/ansible_collections/purestorage/flasharray/CHANGELOG.rst
new file mode 100644
index 00000000..aa14c36d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/CHANGELOG.rst
@@ -0,0 +1,139 @@
+====================================
+Purestorage.Flasharray Release Notes
+====================================
+
+.. contents:: Topics
+
+
+v1.6.2
+======
+
+Bugfixes
+--------
+
+- purefa_volume - Fix issues with moving volumes into demoted or linked pods
+
+v1.6.0
+======
+
+Minor Changes
+-------------
+
+- purefa_connect - Add support for FC-based array replication
+- purefa_ds - Add Purity v6 support for Directory Services, including Data DS and updating services
+- purefa_info - Add support for FC Replication
+- purefa_info - Add support for Remote Volume Snapshots
+- purefa_info - Update directory_services dictionary to cater for FA-Files data DS. Change DS dict forward. Add deprecation warning.
+- purefa_ntp - Ignore NTP configuration for CBS-based arrays
+- purefa_pg - Add support for Protection Groups in AC pods
+- purefa_snap - Add support for remote snapshot of individual volumes to offload targets
+
+Bugfixes
+--------
+
+- purefa_hg - Ensure all hostname checks are lowercase for consistency
+- purefa_pgsnap - Add check to ensure suffix name meets naming conventions
+- purefa_pgsnap - Ensure pgsnap restores work for AC PGs
+- purefa_pod - Ensure all pod names are lowercase for consistency
+- purefa_snap - Update suffix regex pattern
+- purefa_volume - Add missing variable initialization
+
+v1.5.1
+======
+
+Minor Changes
+-------------
+
+- purefa_host - Add host rename function
+- purefa_host - Add support for multi-host creation
+- purefa_vg - Add support for multiple vgroup creation
+- purefa_volume - Add support for multi-volume creation
+
+Bugfixes
+--------
+
+- purefa.py - Resolve issue when pypureclient doesn't handshake array correctly
+- purefa_dns - Fix idempotency
+- purefa_volume - Alert when volume selected for move does not exist
+
+v1.5.0
+======
+
+Minor Changes
+-------------
+
+- purefa_apiclient - New module to support API Client management
+- purefa_directory - Add support for managed directories
+- purefa_export - Add support for filesystem exports
+- purefa_fs - Add filesystem management support
+- purefa_hg - Enforce case-sensitivity rules for hostgroup objects
+- purefa_host - Enforce hostname case-sensitivity rules
+- purefa_info - Add support for FA Files features
+- purefa_offload - Add support for Google Cloud offload target
+- purefa_pg - Enforce case-sensitivity rules for protection group objects
+- purefa_policy - Add support for NFS, SMB and Snapshot policy management
+
+Bugfixes
+--------
+
+- purefa_host - Correctly remove host that is in a hostgroup
+- purefa_volume - Fix failing idempotency on eradicate volume
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_apiclient - Manage FlashArray API Clients
+- purestorage.flasharray.purefa_directory - Manage FlashArray File System Directories
+- purestorage.flasharray.purefa_export - Manage FlashArray File System Exports
+- purestorage.flasharray.purefa_fs - Manage FlashArray File Systems
+- purestorage.flasharray.purefa_policy - Manage FlashArray File System Policies
+
+v1.4.0
+======
+
+Release Summary
+---------------
+
+| Release Date: 2020-08-08
+| This changelog describes all changes made to the modules and plugins included in this collection since Ansible 2.9.0
+
+
+Major Changes
+-------------
+
+- purefa_console - manage Console Lock setting for the FlashArray
+- purefa_endpoint - manage VMware protocol-endpoints on the FlashArray
+- purefa_eula - sign, or resign, FlashArray EULA
+- purefa_inventory - get hardware inventory information from a FlashArray
+- purefa_network - manage the physical and virtual network settings on the FlashArray
+- purefa_pgsched - manage protection group snapshot and replication schedules on the FlashArray
+- purefa_pod - manage ActiveCluster pods in FlashArrays
+- purefa_pod_replica - manage ActiveDR pod replica links in FlashArrays
+- purefa_proxy - manage the phonehome HTTPS proxy setting for the FlashArray
+- purefa_smis - manage SMI-S settings on the FlashArray
+- purefa_subnet - manage network subnets on the FlashArray
+- purefa_timeout - manage the GUI idle timeout on the FlashArray
+- purefa_vlan - manage VLAN interfaces on the FlashArray
+- purefa_vnc - manage VNC for installed applications on the FlashArray
+- purefa_volume_tags - manage volume tags on the FlashArray
+
+Minor Changes
+-------------
+
+- purefa_hg - Allow LUN ID to be set for single volume
+- purefa_host - Add CHAP support
+- purefa_host - Add support for Cloud Block Store
+- purefa_host - Add volume disconnection support
+- purefa_info - Certificate times changed to human readable rather than time since epoch
+- purefa_info - new options added for information collection
+- purefa_info - return dict names changed from ``ansible_facts`` to ``ra_info`` and ``user_info`` in appropriate sections
+- purefa_offload - Add support for Azure
+- purefa_pgsnap - Add offload support
+- purefa_snap - Allow recovery of deleted snapshot
+- purefa_vg - Add QoS support
+
+Bugfixes
+--------
+
+- purefa_host - resolve hostname case inconsistencies
+- purefa_host - resolve issue found when using in Pure Storage Test Drive
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/FILES.json b/collections-debian-merged/ansible_collections/purestorage/flasharray/FILES.json
new file mode 100644
index 00000000..55e89f83
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/FILES.json
@@ -0,0 +1,726 @@
+{
+ "files": [
+ {
+ "format": 1,
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": ".",
+ "chksum_type": null
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "roles/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/sanity",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "933e8897349d63183f71b360678bd33dc2fa33a59acef0167e9801cdc4a8abdc",
+ "name": "tests/sanity/ignore-2.10.txt",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "dd2a684f8e048078a46171bc3c6191e6efac218ca9e22769af4d5830ee4aa459",
+ "name": "README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/roles",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "playbooks/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/templates",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "playbooks/templates/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/files",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "playbooks/files/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/vars",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "playbooks/vars/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/tasks",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "playbooks/tasks/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "165adbcb4d6430e6146dbc22221b34fac8b582fcaadf237dcece03e216ab7ca0",
+ "name": "CHANGELOG.rst",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "changelogs",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e4fa4db86bba076cae885866948dc4510bd9edda628c3ad274d3eecf0cafd0b0",
+ "name": "changelogs/config.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d2680a880a810db235f4f79f6abbce54fc13d7daade86dd26c6e117264cda50d",
+ "name": "changelogs/.plugin-cache.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "25330dd7c9ad361791e6a0a1b9ec991c8be1fd42cbde0e827ff0ef44bb6603ed",
+ "name": "changelogs/changelog.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "changelogs/fragments",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a212d6e202a5231066c55f1ef24f962bd544d31220b75c6820d58616b3ba3a20",
+ "name": "changelogs/fragments/145_fix_missing_move_variable.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "308c90a9b130b29db36fe30970cc4e83a86f72d909d979c251cdfa9ea37cc17d",
+ "name": "changelogs/fragments/116_add_policies.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8314f833498e81e803152d5a6b8fa4992b690a8954a7b75e4f78f55f3e6281f1",
+ "name": "changelogs/fragments/140_pod_case.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5966655368d1bd6f13a19deb5ff00c2884e3015eea1fe054393e47c0a367343b",
+ "name": "changelogs/fragments/113_add_exports_support.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "eb72b8d852fda09db8bfcd0742081fecbabd6d68c97b0311a29a26765ed67307",
+ "name": "changelogs/fragments/134_ac_pg_support.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1ce568cbe1255ecfcdb3e6e66146dcf52e8bcc5cfcc600b856958785b4c8a820",
+ "name": "changelogs/fragments/135_no_cbs_ntp.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "658ef54ac8bea8cb8a3737ace02ac93e301ac1195044ba8a474eab5ed9f68fe4",
+ "name": "changelogs/fragments/141_add_remote_snapshot.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "09dd30cee672d1bfcf0852933b8db73124d3465fe15be02d4b77cfe93df58c51",
+ "name": "changelogs/fragments/124_sdk_handshake.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "31119ca85b86a8e72c64dd998590a231499f135595052c25a73df1f5b9a1965e",
+ "name": "changelogs/fragments/132_fc_replication.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f330217bef8d7c34061545e4071b372538afb841b2b013e6934e6f872af35a58",
+ "name": "changelogs/fragments/122_add_multi_host_creation.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8cf22b39b47e9256017c2a10f64e327db8e87d9f2aa4c75c081e3709fce3330e",
+ "name": "changelogs/fragments/149_volumes_demoted_pods_fix.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "876598a7a2135b855db8a38e69902920412966606847157fd97d8bb49fc479d4",
+ "name": "changelogs/fragments/v1.4.0_summary.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3523ae2ba986b989876bab0ff2182888a48c112cab18373b70e17528c330c3c5",
+ "name": "changelogs/fragments/109_fa_files_support_purefa_info.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "cbb12d92a0c8c763b7e9057fe0d7e8bef92b644c016d6da016c7bda7494b6d53",
+ "name": "changelogs/fragments/133_purefa_info_v6_replication.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d0b47177157c3ab8b7f3676d9bf5f5d6dd6fecb2d7850b612067bc0dbaf457fe",
+ "name": "changelogs/fragments/126_fix_volume_move.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "901f750d427f4fdd282bdaac8ea955e84980af5ab61cb3537842552142fa7831",
+ "name": "changelogs/fragments/136_add_vol_get_send_info.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "27bba2cd35c66b07bbd99031704cce7e3234305d883af0e9841cb35dbefb14f0",
+ "name": "changelogs/fragments/125_dns_idempotency.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "14cdfe46c920bce4daf2066105d43bd974d27b7398f0c6021a4c7409c53ecbe9",
+ "name": "changelogs/fragments/107_host_case_clarity.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e99a34a5a71e458de587f9741aadfb712c00f98ac28795c23929d97c32468550",
+ "name": "changelogs/fragments/111_add_filesystem_support.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "56a1a21cbd2337826c0b74c2e0b8fdc7488726ee48fa1039e9621bc8035ae01b",
+ "name": "changelogs/fragments/108_fix_eradicate_idempotency.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "688cedbf2d82b19b6d9e7ef39a9ef53e45c1371460f1b9d90dde1533275e6b23",
+ "name": "changelogs/fragments/131_add_v6_ds_update.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e44ab022a764253dabc565481afd94e7a5a2cb0e37a638bfe76a8d0e59139bdf",
+ "name": "changelogs/fragments/139_pgsnap_ac_support.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5fad30e620947f3c5878545aa55f223370e4e769ec28002f662efdf9ffd1358a",
+ "name": "changelogs/fragments/118_rename_host.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6ea1e0a2ce1457141a4ce999d0538dce6943fd07f9d203dc46728fdd17121c77",
+ "name": "changelogs/fragments/112_add_directory_support.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "48e8092e90544ebbc947d35dd0a49b09c2b73a4547705e9a1db4ffc0745550cd",
+ "name": "changelogs/fragments/137_pgsnap_regex.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7d63a0ff3a88738493bf7f0afee3600f0b236b28f592694bd686d38a94bdd7d7",
+ "name": "changelogs/fragments/121_add_multi_volume_creation.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "947189a4487b75926ef5cd535900916d0d2107159243e41b3677dc13adcbbc84",
+ "name": "changelogs/fragments/130_info_ds_update.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "500050720f1fb56e313fcd73dc7a98b06abce63cdca08b37cf11a1f8d7d01a49",
+ "name": "changelogs/fragments/115_add_gcp_offload.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a91901335be037584e59a521f7201e27431567170240b027ef9bb9f7220bf3d0",
+ "name": "changelogs/fragments/110_add_apiclient_support.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1dee7ff90d9aa085f7c775640e522f54a17229548c57e965c8e8229f16a51ab5",
+ "name": "changelogs/fragments/123_add_multi_vgroup_creation.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/modules",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a09c8466f92c4da603ab6db5a49bd48fd583e20d12d80dd3314159c6c0850a05",
+ "name": "plugins/modules/purefa_fs.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "2552b1f50c6da6df75e20e283cf94ab4a1767c87588caeba383ea979b5c4ffd8",
+ "name": "plugins/modules/purefa_user.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "600b79b4dffc7810ef16b8d86b7b38dea56cfaf66c9be29260e45275d7ae2c2d",
+ "name": "plugins/modules/purefa_alert.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c36203256fe870df20685b5369c90533e03ede78912220ba772f04ae54ab87e3",
+ "name": "plugins/modules/purefa_host.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f4eea25ca151803abbec8c9cb645d5c49ae11f2c45a51bcc5b79e7ec2efe5ec5",
+ "name": "plugins/modules/purefa_vg.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "15537db7e14caaf72744ea201d0aa0e6556513fc3015466d87e07da5a6100e6b",
+ "name": "plugins/modules/purefa_ntp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c71a312c98f60bebf8dc7697f53547b3976afa769d06c7538e1080f0138e74a2",
+ "name": "plugins/modules/purefa_offload.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4e0e6601e1ad8041c0c697446d0e23f1b6c70258d2353e3ccfbfefc18fcbabde",
+ "name": "plugins/modules/purefa_pg.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7e76e09642f89d0551a16ace60af65a3a1e714b48768935fe16e4a6cba6d33b2",
+ "name": "plugins/modules/purefa_directory.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "337c903cae73a561ec8647b22494335eeb2c3741f2ff2af2b7f8c50a6e9e5aa8",
+ "name": "plugins/modules/purefa_vnc.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3268cc8be4e170e8facc74d48c74395b5c19e12e79d5125305d267243257b83b",
+ "name": "plugins/modules/purefa_pgsched.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9eacf92c3de97e158f8aa509ce637e6ff8354b7703ecc596f28218d37163fd15",
+ "name": "plugins/modules/purefa_phonehome.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b60190add201e9b217128007cc6d8e46869ed77ac4884118f8e82d017594dc68",
+ "name": "plugins/modules/purefa_subnet.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "862d565169026e38d013986c7434455d92e05613de6cdb5316735fa39ba0a615",
+ "name": "plugins/modules/purefa_export.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c1352d18515d1d23ee2847fdcb72797059c430c16b80585769ad681bb4d7063e",
+ "name": "plugins/modules/purefa_timeout.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "dbe90e8ef0883924b46d5b9ac037de3830d79511c52faab021e17ffc01d2fc89",
+ "name": "plugins/modules/purefa_dns.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1fa81466c3831712dd0394830ba55443d1d0264d059f5e0cd9a7b14ba007c080",
+ "name": "plugins/modules/purefa_endpoint.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "40177d210b70cb78130e0f8fb75440afc0aa36461f80573d5040bcf02d75104d",
+ "name": "plugins/modules/purefa_console.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a549a39ab3e886070646ed0566d8563be751fca2837600c0828e226be7d548c7",
+ "name": "plugins/modules/purefa_pod.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5e038db0d0d14381ff732f3145c24e8d0758a9dae9a9a0d813df60d8b8f23354",
+ "name": "plugins/modules/purefa_pgsnap.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "c3176b5bb62cd2987cdff440df641a7b72b1d0a996433a65891ecff9a0b9915b",
+ "name": "plugins/modules/purefa_snmp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "711b61ec2c8b4ef3e0b02caf3348a3e97ce82e80eadade8678b9ce605571f623",
+ "name": "plugins/modules/purefa_volume.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "614b81d58c3e082ff64e407fb6671b3b7aedcc8acaf31acf6515c1f8d23c3c56",
+ "name": "plugins/modules/purefa_smtp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "35c7a35c3676435497c9f6245502b0d2dfd100fdebbc0746d48e2bddf0ee98aa",
+ "name": "plugins/modules/purefa_apiclient.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "4f36919070172c09fb832f77b0679930e14540b141079cb0e82e22c3b7da46d7",
+ "name": "plugins/modules/purefa_connect.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "75163f3dd5762860c242382f16ce0b7f99c687f3b4148efc8b1307a555c8e9f4",
+ "name": "plugins/modules/purefa_ds.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "24ef4f3208748b1e8ff3a795a56ca0f904526512fff4e47ab660b5d10b6eed54",
+ "name": "plugins/modules/purefa_dsrole.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d288a9c8f2046f46d83a3370e2129560891db631f2bdd75579f5b0cad364beaa",
+ "name": "plugins/modules/purefa_inventory.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d8352c16fb90fd28357c4a4ff6d97192db2d8870a8dc753506c2b85b66eb3746",
+ "name": "plugins/modules/purefa_hg.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1ec941a241731547d22f346131139c441bab8173fe2a0817e84527e2378aa8fe",
+ "name": "plugins/modules/purefa_vlan.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "64a2e7b73a5a9b2a6e73ccb2bb08900e54f7c216c0314f5d5acb876ada01ae86",
+ "name": "plugins/modules/purefa_volume_tags.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "800f2c8ef92674e7847a686f5dd8595aa4bdeb2f739942b18be10a12d73cabba",
+ "name": "plugins/modules/purefa_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "662cb243557691cc0c16ab0bce34b14300b281f58dc35f22b5edf504a21dc7b0",
+ "name": "plugins/modules/purefa_proxy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5796ef6ecf45223931f3c6a0a48e87de6ca38a70a1a588b050e3c8cb97897308",
+ "name": "plugins/modules/purefa_network.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "83eeeb97707235535838cef51e0a6622d34bbd4f15bf2c07f9500ce71c1b71f0",
+ "name": "plugins/modules/purefa_info.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d6e13f1758cd737f6e55cc6eb608cb063ae30219f8b34058f6d8820e3417e223",
+ "name": "plugins/modules/purefa_smis.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e59363b84f5ee23e883ce572cd9762a5ee6162fcad934c3a809f0b4968830d3f",
+ "name": "plugins/modules/purefa_syslog.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f24d5e2b142b6f79fcbe18de7b53d68f6d149e2c4509ef01122244eaef886119",
+ "name": "plugins/modules/purefa_ra.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "7aa1d902882a65b553bf61bf08d1d022f20bc907cefe749fb9514bd9d48c5aa6",
+ "name": "plugins/modules/purefa_snap.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3f2af48f4792c45943ceada8fef04a9c75bc60d29d0f4f7e236f0d78a2c5d9c3",
+ "name": "plugins/modules/purefa_banner.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "021a8853f2c8d8480ced740597eb744660da5d7c3f6cb7001cfaf51e0f915fe8",
+ "name": "plugins/modules/purefa_eula.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "83dd24e4510a503a885c25c6e87b8cbc5a23b3e3c9976de19c6dc9e04bc1e8b3",
+ "name": "plugins/modules/purefa_arrayname.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "fc82ad2895d42e202ab4b8ebab729ca9400fd658ea3a14a240dc08d385cb0a1d",
+ "name": "plugins/modules/purefa_pod_replica.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/doc_fragments",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9dd6837a2193c2e3bf2a9dbbdbe31ef9dbda4b16c2eb440a7b950f5971fa8fcd",
+ "name": "plugins/doc_fragments/purestorage.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/module_utils",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b78ba6bb7c4b866578a9043389457ffe1953ce97d756bf307637528565604542",
+ "name": "plugins/module_utils/purefa.py",
+ "chksum_type": "sha256",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/MANIFEST.json b/collections-debian-merged/ansible_collections/purestorage/flasharray/MANIFEST.json
new file mode 100644
index 00000000..51200fe7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/MANIFEST.json
@@ -0,0 +1,36 @@
+{
+ "collection_info": {
+ "description": "Collection of modules to manage Pure Storage FlashArrays (including Cloud Block Store)",
+ "repository": "https://github.com/Pure-Storage-Ansible/FlashArray-Collection",
+ "tags": [
+ "purestorage",
+ "flasharray",
+ "cloudblockstore",
+ "storage"
+ ],
+ "dependencies": {},
+ "authors": [
+ "Pure Storage Ansible Team <pure-ansible-team@purestorage.com>"
+ ],
+ "issues": "https://github.com/Pure-Storage-Ansible/FlashArray-Collection/issues",
+ "name": "flasharray",
+ "license": [
+ "GPL-3.0-or-later",
+ "BSD-2-Clause"
+ ],
+ "documentation": "https://github.com/Pure-Storage-Ansible/FlashArray-Collection",
+ "namespace": "purestorage",
+ "version": "1.6.2",
+ "readme": "README.md",
+ "license_file": null,
+ "homepage": null
+ },
+ "file_manifest_file": {
+ "format": 1,
+ "ftype": "file",
+ "chksum_sha256": "003ce9f12d1ca28d394c4dc5c0ac6e7be46b5d6180e37ea2fdb4e4e40bad4812",
+ "name": "FILES.json",
+ "chksum_type": "sha256"
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/README.md b/collections-debian-merged/ansible_collections/purestorage/flasharray/README.md
new file mode 100644
index 00000000..3f10205a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/README.md
@@ -0,0 +1,87 @@
+# Pure Storage FlashArray Collection
+
+The Pure Storage FlashArray collection consists of the latest versions of the FlashArray modules and also includes support for Cloud Block Store
+
+## Supported Platforms
+
+- Pure Storage FlashArray with Purity 4.6 or later
+- Certain modules and functionality require higher versions of Purity. Modules will inform you if your Purity version is not high enough to use a module.
+
+## Prerequisites
+
+- Ansible 2.9 or later
+- Pure Storage FlashArray system running Purity 4.6 or later
+ - some modules require higher versions of Purity
+- Some modules require specific Purity versions
+- purestorage >=v1.19
+- py-pure-client >=v1.8
+- Python >=v2.7
+- netaddr
+- requests
+
+## Idempotency
+
+All modules are idempotent with the exception of modules that change or set passwords. Due to security requirements existing passwords cannot be validated against and therefore will always be modified, even if there is no change.
+
+## Available Modules
+
+- purefa_alert - manage email alert settings on the FlashArray
+- purefa_arrayname - manage the name of the FlashArray
+- purefa_banner - manage the CLI and GUI login banner of the FlashArray
+- purefa_connect - manage FlashArrays connecting for replication purposes
+- purefa_console - manage Console Lock setting for the FlashArray
+- purefa_directory - manage FlashArray managed file system directories
+- purefa_dns - manage the DNS settings of the FlashArray
+- purefa_ds - manage the Directory Services of the FlashArray
+- purefa_dsrole - manage the Directory Service Roles of the FlashArray
+- purefa_endpoint - manage VMware protocol-endpoints on the FlashArray
+- purefa_export - manage FlashArray managed file system exports
+- purefa_eula - sign, or resign, FlashArray EULA
+- purefa_fs - manage FlashArray managed file systems
+- purefa_hg - manage hostgroups on the FlashArray
+- purefa_host - manage hosts on the FlashArray
+- purefa_info - get information regarding the configuration of the FlashArray
+- purefa_inventory - get hardware inventory information from a FlashArray
+- purefa_network - manage the physical and virtual network settings on the FlashArray
+- purefa_ntp - manage the NTP settings on the FlashArray
+- purefa_offload - manage the offload targets for a FlashArray
+- purefa_pg - manage protection groups on the FlashArray
+- purefa_pgsched - manage protection group snapshot and replication schedules on the FlashArray
+- purefa_pgsnap - manage protection group snapshots (local and remote) on the FlashArray
+- purefa_phonehome - manage the phonehome setting for the FlashArray
+- purefa_pod - manage ActiveCluster pods in FlashArrays
+- purefa_pod_replica - manage ActiveDR pod replica links in FlashArrays
+- purefa_policy - manage FlashArray NFS, SMB and snapshot policies
+- purefa_proxy - manage the phonehome HTTPS proxy setting for the FlashArray
+- purefa_ra - manage the Remote Assist setting for the FlashArray
+- purefa_smis - manage SMI-S settings on the FlashArray
+- purefa_smtp - manage SMTP settings on the FlashArray
+- purefa_snap - manage local snapshots on the FlashArray
+- purefa_snmp - manage SNMP settings on the FlashArray
+- purefa_subnet - manage network subnets on the FlashArray
+- purefa_syslog - manage the Syslog settings on the FlashArray
+- purefa_timeout - manage the GUI idle timeout on the FlashArray
+- purefa_user - manage local user accounts on the FlashArray
+- purefa_vg - manage volume groups on the FlashArray
+- purefa_vlan - manage VLAN interfaces on the FlashArray
+- purefa_vnc - manage VNC for installed applications on the FlashArray
+- purefa_volume - manage volumes on the FlashArray
+- purefa_volume_tags - manage volume tags on the FlashArray
+
+## Instructions
+
+Install the Pure Storage FlashArray collection on your Ansible management host.
+
+- Using ansible-galaxy (Ansible 2.9 or later):
+```
+ansible-galaxy collection install purestorage.flasharray -p ~/.ansible/collections
+```
+
+## License
+
+[BSD-2-Clause](https://directory.fsf.org/wiki?title=License:FreeBSD)
+[GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0.en.html)
+
+## Author
+
+This collection was created in 2019 by [Simon Dodsley](@sdodsley) for, and on behalf of, the [Pure Storage Ansible Team](mailto:pure-ansible-team@purestorage.com)
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml
new file mode 100644
index 00000000..b11565ce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml
@@ -0,0 +1,231 @@
+plugins:
+ become: {}
+ cache: {}
+ callback: {}
+ cliconf: {}
+ connection: {}
+ httpapi: {}
+ inventory: {}
+ lookup: {}
+ module:
+ purefa_alert:
+ description: Configure Pure Storage FlashArray alert email settings
+ name: purefa_alert
+ namespace: ''
+ version_added: 1.0.0
+ purefa_apiclient:
+ description: Manage FlashArray API Clients
+ name: purefa_apiclient
+ namespace: ''
+ version_added: 1.5.0
+ purefa_arrayname:
+ description: Configure Pure Storage FlashArray array name
+ name: purefa_arrayname
+ namespace: ''
+ version_added: 1.0.0
+ purefa_banner:
+ description: Configure Pure Storage FlashArray GUI and SSH MOTD message
+ name: purefa_banner
+ namespace: ''
+ version_added: 1.0.0
+ purefa_connect:
+ description: Manage replication connections between two FlashArrays
+ name: purefa_connect
+ namespace: ''
+ version_added: 1.0.0
+ purefa_console:
+ description: Enable or Disable Pure Storage FlashArray Console Lock
+ name: purefa_console
+ namespace: ''
+ version_added: 1.0.0
+ purefa_directory:
+ description: Manage FlashArray File System Directories
+ name: purefa_directory
+ namespace: ''
+ version_added: 1.5.0
+ purefa_dns:
+ description: Configure FlashArray DNS settings
+ name: purefa_dns
+ namespace: ''
+ version_added: 1.0.0
+ purefa_ds:
+ description: Configure FlashArray Directory Service
+ name: purefa_ds
+ namespace: ''
+ version_added: 1.0.0
+ purefa_dsrole:
+ description: Configure FlashArray Directory Service Roles
+ name: purefa_dsrole
+ namespace: ''
+ version_added: 1.0.0
+ purefa_endpoint:
+ description: Manage VMware protocol-endpoints on Pure Storage FlashArrays
+ name: purefa_endpoint
+ namespace: ''
+ version_added: 1.0.0
+ purefa_eula:
+ description: Sign Pure Storage FlashArray EULA
+ name: purefa_eula
+ namespace: ''
+ version_added: 1.0.0
+ purefa_export:
+ description: Manage FlashArray File System Exports
+ name: purefa_export
+ namespace: ''
+ version_added: 1.5.0
+ purefa_fs:
+ description: Manage FlashArray File Systems
+ name: purefa_fs
+ namespace: ''
+ version_added: 1.5.0
+ purefa_hg:
+ description: Manage hostgroups on Pure Storage FlashArrays
+ name: purefa_hg
+ namespace: ''
+ version_added: 1.0.0
+ purefa_host:
+ description: Manage hosts on Pure Storage FlashArrays
+ name: purefa_host
+ namespace: ''
+ version_added: 1.0.0
+ purefa_info:
+ description: Collect information from Pure Storage FlashArray
+ name: purefa_info
+ namespace: ''
+ version_added: 1.0.0
+ purefa_inventory:
+ description: Collect information from Pure Storage FlashArray
+ name: purefa_inventory
+ namespace: ''
+ version_added: 1.0.0
+ purefa_network:
+ description: Manage network interfaces in a Pure Storage FlashArray
+ name: purefa_network
+ namespace: ''
+ version_added: 1.0.0
+ purefa_ntp:
+ description: Configure Pure Storage FlashArray NTP settings
+ name: purefa_ntp
+ namespace: ''
+ version_added: 1.0.0
+ purefa_offload:
+ description: Create, modify and delete NFS, S3 or Azure offload targets
+ name: purefa_offload
+ namespace: ''
+ version_added: 1.0.0
+ purefa_pg:
+ description: Manage protection groups on Pure Storage FlashArrays
+ name: purefa_pg
+ namespace: ''
+ version_added: 1.0.0
+ purefa_pgsched:
+ description: Manage protection groups replication schedules on Pure Storage
+ FlashArrays
+ name: purefa_pgsched
+ namespace: ''
+ version_added: 1.0.0
+ purefa_pgsnap:
+ description: Manage protection group snapshots on Pure Storage FlashArrays
+ name: purefa_pgsnap
+ namespace: ''
+ version_added: 1.0.0
+ purefa_phonehome:
+ description: Enable or Disable Pure Storage FlashArray Phonehome
+ name: purefa_phonehome
+ namespace: ''
+ version_added: 1.0.0
+ purefa_pod:
+ description: Manage AC pods in Pure Storage FlashArrays
+ name: purefa_pod
+ namespace: ''
+ version_added: 1.0.0
+ purefa_pod_replica:
+ description: Manage ActiveDR pod replica links between Pure Storage FlashArrays
+ name: purefa_pod_replica
+ namespace: ''
+ version_added: 1.0.0
+ purefa_policy:
+ description: Manage FlashArray File System Policies
+ name: purefa_policy
+ namespace: ''
+ version_added: 1.5.0
+ purefa_proxy:
+ description: Configure FlashArray phonehome HTTPs proxy settings
+ name: purefa_proxy
+ namespace: ''
+ version_added: 1.0.0
+ purefa_ra:
+ description: Enable or Disable Pure Storage FlashArray Remote Assist
+ name: purefa_ra
+ namespace: ''
+ version_added: 1.0.0
+ purefa_smis:
+ description: Enable or disable FlashArray SMI-S features
+ name: purefa_smis
+ namespace: ''
+ version_added: 1.0.0
+ purefa_smtp:
+ description: Configure FlashArray SMTP settings
+ name: purefa_smtp
+ namespace: ''
+ version_added: 1.0.0
+ purefa_snap:
+ description: Manage volume snapshots on Pure Storage FlashArrays
+ name: purefa_snap
+ namespace: ''
+ version_added: 1.0.0
+ purefa_snmp:
+ description: Configure FlashArray SNMP Managers
+ name: purefa_snmp
+ namespace: ''
+ version_added: 1.0.0
+ purefa_subnet:
+ description: Manage network subnets in a Pure Storage FlashArray
+ name: purefa_subnet
+ namespace: ''
+ version_added: 1.0.0
+ purefa_syslog:
+ description: Configure Pure Storage FlashArray syslog settings
+ name: purefa_syslog
+ namespace: ''
+ version_added: 1.0.0
+ purefa_timeout:
+ description: Configure Pure Storage FlashArray GUI idle timeout
+ name: purefa_timeout
+ namespace: ''
+ version_added: 1.0.0
+ purefa_user:
+ description: Create, modify or delete FlashArray local user account
+ name: purefa_user
+ namespace: ''
+ version_added: 1.0.0
+ purefa_vg:
+ description: Manage volume groups on Pure Storage FlashArrays
+ name: purefa_vg
+ namespace: ''
+ version_added: 1.0.0
+ purefa_vlan:
+ description: Manage network VLAN interfaces in a Pure Storage FlashArray
+ name: purefa_vlan
+ namespace: ''
+ version_added: 1.0.0
+ purefa_vnc:
+ description: Enable or Disable VNC port for installed apps
+ name: purefa_vnc
+ namespace: ''
+ version_added: 1.0.0
+ purefa_volume:
+ description: Manage volumes on Pure Storage FlashArrays
+ name: purefa_volume
+ namespace: ''
+ version_added: 1.0.0
+ purefa_volume_tags:
+ description: Manage volume tags on Pure Storage FlashArrays
+ name: purefa_volume_tags
+ namespace: ''
+ version_added: 1.0.0
+ netconf: {}
+ shell: {}
+ strategy: {}
+ vars: {}
+version: 1.6.2
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml
new file mode 100644
index 00000000..d107872d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml
@@ -0,0 +1,152 @@
+ancestor: null
+releases:
+ 1.4.0:
+ changes:
+ bugfixes:
+ - purefa_host - resolve hostname case inconsistencies
+ - purefa_host - resolve issue found when using in Pure Storage Test Drive
+ major_changes:
+ - purefa_console - manage Console Lock setting for the FlashArray
+ - purefa_endpoint - manage VMware protocol-endpoints on the FlashArray
+ - purefa_eula - sign, or resign, FlashArray EULA
+ - purefa_inventory - get hardware inventory information from a FlashArray
+ - purefa_network - manage the physical and virtual network settings on the FlashArray
+ - purefa_pgsched - manage protection group snapshot and replication schedules
+ on the FlashArray
+ - purefa_pod - manage ActiveCluster pods in FlashArrays
+ - purefa_pod_replica - manage ActiveDR pod replica links in FlashArrays
+ - purefa_proxy - manage the phonehome HTTPS proxy setting for the FlashArray
+ - purefa_smis - manage SMI-S settings on the FlashArray
+ - purefa_subnet - manage network subnets on the FlashArray
+ - purefa_timeout - manage the GUI idle timeout on the FlashArray
+ - purefa_vlan - manage VLAN interfaces on the FlashArray
+ - purefa_vnc - manage VNC for installed applications on the FlashArray
+ - purefa_volume_tags - manage volume tags on the FlashArray
+ minor_changes:
+    - purefa_hg - Allow LUN ID to be set for single volume
+ - purefa_host - Add CHAP support
+ - purefa_host - Add support for Cloud Block Store
+ - purefa_host - Add volume disconnection support
+ - purefa_info - Certificate times changed to human readable rather than time
+ since epoch
+ - purefa_info - new options added for information collection
+ - purefa_info - return dict names changed from ``ansible_facts`` to ``ra_info``
+      and ``user_info`` in appropriate sections
+ - purefa_offload - Add support for Azure
+ - purefa_pgsnap - Add offload support
+ - purefa_snap - Allow recovery of deleted snapshot
+ - purefa_vg - Add QoS support
+ release_summary: '| Release Date: 2020-08-08
+
+      | This changelog describes all changes made to the modules and plugins included
+ in this collection since Ansible 2.9.0
+
+'
+ fragments:
+ - v1.4.0_summary.yaml
+ release_date: '2020-08-06'
+ 1.5.0:
+ changes:
+ bugfixes:
+ - purefa_host - Correctly remove host that is in a hostgroup
+ - purefa_volume - Fix failing idempotency on eradicate volume
+ minor_changes:
+ - purefa_apiclient - New module to support API Client management
+ - purefa_directory - Add support for managed directories
+ - purefa_export - Add support for filesystem exports
+ - purefa_fs - Add filesystem management support
+ - purefa_hg - Enforce case-sensitivity rules for hostgroup objects
+ - purefa_host - Enforce hostname case-sensitivity rules
+ - purefa_info - Add support for FA Files features
+ - purefa_offload - Add support for Google Cloud offload target
+ - purefa_pg - Enforce case-sensitivity rules for protection group objects
+ - purefa_policy - Add support for NFS, SMB and Snapshot policy management
+ fragments:
+ - 107_host_case_clarity.yaml
+ - 108_fix_eradicate_idempotency.yaml
+ - 109_fa_files_support_purefa_info.yaml
+ - 110_add_apiclient_support.yaml
+ - 111_add_filesystem_support.yaml
+ - 112_add_directory_support.yaml
+ - 113_add_exports_support.yaml
+ - 115_add_gcp_offload.yaml
+ - 116_add_policies.yaml
+ modules:
+ - description: Manage FlashArray API Clients
+ name: purefa_apiclient
+ namespace: ''
+ - description: Manage FlashArray File System Directories
+ name: purefa_directory
+ namespace: ''
+ - description: Manage FlashArray File System Exports
+ name: purefa_export
+ namespace: ''
+ - description: Manage FlashArray File Systems
+ name: purefa_fs
+ namespace: ''
+ - description: Manage FlashArray File System Policies
+ name: purefa_policy
+ namespace: ''
+ release_date: '2020-10-14'
+ 1.5.1:
+ changes:
+ bugfixes:
+ - purefa.py - Resolve issue when pypureclient doesn't handshake array correctly
+ - purefa_dns - Fix idempotency
+ - purefa_volume - Alert when volume selected for move does not exist
+ minor_changes:
+ - purefa_host - Add host rename function
+ - purefa_host - Add support for multi-host creation
+ - purefa_vg - Add support for multiple vgroup creation
+ - purefa_volume - Add support for multi-volume creation
+ fragments:
+ - 118_rename_host.yaml
+ - 121_add_multi_volume_creation.yaml
+ - 122_add_multi_host_creation.yaml
+ - 123_add_multi_vgroup_creation.yaml
+ - 124_sdk_handshake.yaml
+ - 125_dns_idempotency.yaml
+ - 126_fix_volume_move.yaml
+ release_date: '2020-12-11'
+ 1.6.0:
+ changes:
+ bugfixes:
+    - purefa_hg - Ensure all hostname checks are lowercase for consistency
+ - purefa_pgsnap - Add check to ensure suffix name meets naming conventions
+ - purefa_pgsnap - Ensure pgsnap restores work for AC PGs
+ - purefa_pod - Ensure all pod names are lowercase for consistency
+ - purefa_snap - Update suffix regex pattern
+ - purefa_volume - Add missing variable initialization
+ minor_changes:
+ - purefa_connect - Add support for FC-based array replication
+ - purefa_ds - Add Purity v6 support for Directory Services, including Data DS
+ and updating services
+ - purefa_info - Add support for FC Replication
+ - purefa_info - Add support for Remote Volume Snapshots
+ - purefa_info - Update directory_services dictionary to cater for FA-Files data
+ DS. Change DS dict forward. Add deprecation warning.
+ - purefa_ntp - Ignore NTP configuration for CBS-based arrays
+ - purefa_pg - Add support for Protection Groups in AC pods
+ - purefa_snap - Add support for remote snapshot of individual volumes to offload
+ targets
+ fragments:
+ - 130_info_ds_update.yaml
+ - 131_add_v6_ds_update.yaml
+ - 132_fc_replication.yaml
+ - 133_purefa_info_v6_replication.yaml
+ - 134_ac_pg_support.yaml
+ - 135_no_cbs_ntp.yaml
+ - 136_add_vol_get_send_info.yaml
+ - 137_pgsnap_regex.yaml
+ - 139_pgsnap_ac_support.yaml
+ - 140_pod_case.yaml
+ - 141_add_remote_snapshot.yaml
+ - 145_fix_missing_move_variable.yaml
+ release_date: '2021-02-02'
+ 1.6.2:
+ changes:
+ bugfixes:
+ - purefa_volume - Fix issues with moving volumes into demoted or linked pods
+ fragments:
+ - 149_volumes_demoted_pods_fix.yaml
+ release_date: '2021-02-04'
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/config.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/config.yaml
new file mode 100644
index 00000000..3e5f04f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/config.yaml
@@ -0,0 +1,31 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: true
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: Purestorage.Flasharray
+trivial_section_name: trivial
+use_fqcn: true
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/107_host_case_clarity.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/107_host_case_clarity.yaml
new file mode 100644
index 00000000..418af8e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/107_host_case_clarity.yaml
@@ -0,0 +1,7 @@
+minor_changes:
+ - purefa_host - Enforce hostname case-sensitivity rules
+ - purefa_hg - Enforce case-sensitivity rules for hostgroup objects
+ - purefa_pg - Enforce case-sensitivity rules for protection group objects
+
+bugfixes:
+ - purefa_host - Correctly remove host that is in a hostgroup
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/108_fix_eradicate_idempotency.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/108_fix_eradicate_idempotency.yaml
new file mode 100644
index 00000000..79a8c0c3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/108_fix_eradicate_idempotency.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Fix failing idempotency on eradicate volume
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/109_fa_files_support_purefa_info.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/109_fa_files_support_purefa_info.yaml
new file mode 100644
index 00000000..2342709d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/109_fa_files_support_purefa_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add support for FA Files features
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/110_add_apiclient_support.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/110_add_apiclient_support.yaml
new file mode 100644
index 00000000..25496854
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/110_add_apiclient_support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_apiclient - New module to support API Client management
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/111_add_filesystem_support.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/111_add_filesystem_support.yaml
new file mode 100644
index 00000000..ed9e9f17
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/111_add_filesystem_support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_fs - Add filesystem management support
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/112_add_directory_support.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/112_add_directory_support.yaml
new file mode 100644
index 00000000..22806892
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/112_add_directory_support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_directory - Add support for managed directories
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/113_add_exports_support.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/113_add_exports_support.yaml
new file mode 100644
index 00000000..595a5866
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/113_add_exports_support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_export - Add support for filesystem exports
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/115_add_gcp_offload.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/115_add_gcp_offload.yaml
new file mode 100644
index 00000000..a36255f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/115_add_gcp_offload.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_offload - Add support for Google Cloud offload target
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/116_add_policies.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/116_add_policies.yaml
new file mode 100644
index 00000000..5159a8b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/116_add_policies.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_policy - Add support for NFS, SMB and Snapshot policy management
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/118_rename_host.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/118_rename_host.yaml
new file mode 100644
index 00000000..b34f8a82
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/118_rename_host.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_host - Add host rename function
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/121_add_multi_volume_creation.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/121_add_multi_volume_creation.yaml
new file mode 100644
index 00000000..d4833633
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/121_add_multi_volume_creation.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_volume - Add support for multi-volume creation
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/122_add_multi_host_creation.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/122_add_multi_host_creation.yaml
new file mode 100644
index 00000000..1ad523ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/122_add_multi_host_creation.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_host - Add support for multi-host creation
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/123_add_multi_vgroup_creation.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/123_add_multi_vgroup_creation.yaml
new file mode 100644
index 00000000..207cd97b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/123_add_multi_vgroup_creation.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_vg - Add support for multiple vgroup creation
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/124_sdk_handshake.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/124_sdk_handshake.yaml
new file mode 100644
index 00000000..244a77c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/124_sdk_handshake.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa.py - Resolve issue when pypureclient doesn't handshake array correctly
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/125_dns_idempotency.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/125_dns_idempotency.yaml
new file mode 100644
index 00000000..cf195b0d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/125_dns_idempotency.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_dns - Fix idempotency
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/126_fix_volume_move.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/126_fix_volume_move.yaml
new file mode 100644
index 00000000..64d22578
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/126_fix_volume_move.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Alert when volume selected for move does not exist
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/130_info_ds_update.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/130_info_ds_update.yaml
new file mode 100644
index 00000000..336c43a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/130_info_ds_update.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Update directory_services dictionary to cater for FA-Files data DS. Change DS dict forward. Add deprecation warning.
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/131_add_v6_ds_update.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/131_add_v6_ds_update.yaml
new file mode 100644
index 00000000..7fac4905
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/131_add_v6_ds_update.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_ds - Add Purity v6 support for Directory Services, including Data DS and updating services
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/132_fc_replication.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/132_fc_replication.yaml
new file mode 100644
index 00000000..b033ed5c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/132_fc_replication.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_connect - Add support for FC-based array replication
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/133_purefa_info_v6_replication.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/133_purefa_info_v6_replication.yaml
new file mode 100644
index 00000000..bbed50da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/133_purefa_info_v6_replication.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add support for FC Replication
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/134_ac_pg_support.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/134_ac_pg_support.yaml
new file mode 100644
index 00000000..b1ccd2b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/134_ac_pg_support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_pg - Add support for Protection Groups in AC pods
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/135_no_cbs_ntp.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/135_no_cbs_ntp.yaml
new file mode 100644
index 00000000..6a0644d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/135_no_cbs_ntp.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_ntp - Ignore NTP configuration for CBS-based arrays
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/136_add_vol_get_send_info.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/136_add_vol_get_send_info.yaml
new file mode 100644
index 00000000..fa3fcc8e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/136_add_vol_get_send_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add support for Remote Volume Snapshots
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/137_pgsnap_regex.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/137_pgsnap_regex.yaml
new file mode 100644
index 00000000..1560343c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/137_pgsnap_regex.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_pgsnap - Add check to ensure suffix name meets naming conventions
+ - purefa_snap - Update suffix regex pattern
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/139_pgsnap_ac_support.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/139_pgsnap_ac_support.yaml
new file mode 100644
index 00000000..6004d8fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/139_pgsnap_ac_support.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsnap - Ensure pgsnap restores work for AC PGs
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/140_pod_case.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/140_pod_case.yaml
new file mode 100644
index 00000000..1896bd6f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/140_pod_case.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_pod - Ensure all pod names are lowercase for consistency
+  - purefa_hg - Ensure all hostname checks are lowercase for consistency
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/141_add_remote_snapshot.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/141_add_remote_snapshot.yaml
new file mode 100644
index 00000000..9af6598a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/141_add_remote_snapshot.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_snap - Add support for remote snapshot of individual volumes to offload targets
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/145_fix_missing_move_variable.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/145_fix_missing_move_variable.yaml
new file mode 100644
index 00000000..a5189a0d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/145_fix_missing_move_variable.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Add missing variable initialization
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/149_volumes_demoted_pods_fix.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/149_volumes_demoted_pods_fix.yaml
new file mode 100644
index 00000000..812d0f3c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/149_volumes_demoted_pods_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Fix issues with moving volumes into demoted or linked pods
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/v1.4.0_summary.yaml b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/v1.4.0_summary.yaml
new file mode 100644
index 00000000..3a7dc7e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/changelogs/fragments/v1.4.0_summary.yaml
@@ -0,0 +1,37 @@
+release_summary: |
+ | Release Date: 2020-08-08
+  | This changelog describes all changes made to the modules and plugins included in this collection since Ansible 2.9.0
+
+major_changes:
+ - purefa_console - manage Console Lock setting for the FlashArray
+ - purefa_endpoint - manage VMware protocol-endpoints on the FlashArray
+ - purefa_eula - sign, or resign, FlashArray EULA
+ - purefa_inventory - get hardware inventory information from a FlashArray
+ - purefa_network - manage the physical and virtual network settings on the FlashArray
+ - purefa_pgsched - manage protection group snapshot and replication schedules on the FlashArray
+ - purefa_pod - manage ActiveCluster pods in FlashArrays
+ - purefa_pod_replica - manage ActiveDR pod replica links in FlashArrays
+ - purefa_proxy - manage the phonehome HTTPS proxy setting for the FlashArray
+ - purefa_smis - manage SMI-S settings on the FlashArray
+ - purefa_subnet - manage network subnets on the FlashArray
+ - purefa_timeout - manage the GUI idle timeout on the FlashArray
+ - purefa_vlan - manage VLAN interfaces on the FlashArray
+ - purefa_vnc - manage VNC for installed applications on the FlashArray
+ - purefa_volume_tags - manage volume tags on the FlashArray
+
+minor_changes:
+  - purefa_info - return dict names changed from ``ansible_facts`` to ``ra_info`` and ``user_info`` in appropriate sections
+ - purefa_info - new options added for information collection
+ - purefa_info - Certificate times changed to human readable rather than time since epoch
+ - purefa_host - Add support for Cloud Block Store
+ - purefa_host - Add volume disconnection support
+ - purefa_host - Add CHAP support
+  - purefa_hg - Allow LUN ID to be set for single volume
+ - purefa_offload - Add support for Azure
+ - purefa_pgsnap - Add offload support
+ - purefa_snap - Allow recovery of deleted snapshot
+ - purefa_vg - Add QoS support
+
+bugfixes:
+ - purefa_host - resolve issue found when using in Pure Storage Test Drive
+ - purefa_host - resolve hostname case inconsistencies
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/.keep b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/files/.keep b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/files/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/files/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/roles/.keep b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/roles/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/roles/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/tasks/.keep b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/tasks/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/tasks/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/templates/.keep b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/templates/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/templates/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/vars/.keep b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/vars/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/playbooks/vars/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py
new file mode 100644
index 00000000..679be3ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley <simon@purestorage.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
class ModuleDocFragment(object):
    """Shared documentation fragments merged into Pure Storage module docs."""

    # Standard Pure Storage documentation fragment.
    # NOTE: these fragments are parsed as YAML by Ansible's doc tooling.
    # The "products:" note must be quoted — an unquoted ": " inside a YAML
    # plain scalar turns the note into a nested mapping instead of a string.
    DOCUMENTATION = r'''
options:
  - See separate platform section for more details
requirements:
  - See separate platform section for more details
notes:
  - "Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade"
'''

    # Documentation fragment for FlashArray
    FA = r'''
options:
  fa_url:
    description:
      - FlashArray management IPv4 address or Hostname.
    type: str
  api_token:
    description:
      - FlashArray API token for admin privileged user.
    type: str
notes:
  - This module requires the C(purestorage) Python library
  - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables
    if I(fa_url) and I(api_token) arguments are not passed to the module directly
requirements:
  - python >= 2.7
  - purestorage >= 1.19
  - py-pure-client >= 1.6.0
  - netaddr
  - requests
'''
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py
new file mode 100644
index 00000000..777a49f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+HAS_PURESTORAGE = True
+try:
+ from purestorage import purestorage
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+HAS_REQUESTS = True
+try:
+ import requests
+except ImportError:
+ HAS_REQUESTS = False
+
+from os import environ
+import platform
+
+VERSION = 1.4
+USER_AGENT_BASE = 'Ansible'
+
+
def get_system(module):
    """Return an authenticated purestorage.FlashArray session or fail the module.

    Credentials come from the ``fa_url``/``api_token`` module arguments,
    falling back to the PUREFA_URL/PUREFA_API environment variables.
    """
    if not HAS_PURESTORAGE:
        module.fail_json(msg="purestorage SDK is not installed.")
    user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
        'base': USER_AGENT_BASE,
        'class': __name__,
        'version': VERSION,
        'platform': platform.platform()
    }
    target = module.params['fa_url']
    token = module.params['api_token']
    if not (target and token):
        # Fall back to the environment; both variables must be present.
        target = environ.get('PUREFA_URL')
        token = environ.get('PUREFA_API')
    if not (target and token):
        module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables "
                             "or the fa_url and api_token module arguments")
    system = purestorage.FlashArray(target, api_token=token, user_agent=user_agent)
    try:
        # Probe the array once to validate the credentials before returning.
        system.get()
    except Exception:
        module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials")
    return system
+
+
def get_array(module):
    """Return an authenticated py-pure-client flasharray.Client or fail the module.

    Credentials come from the ``fa_url``/``api_token`` module arguments,
    falling back to the PUREFA_URL/PUREFA_API environment variables.
    """
    user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
        'base': USER_AGENT_BASE,
        'class': __name__,
        'version': VERSION,
        'platform': platform.platform()
    }
    if not (HAS_PYPURECLIENT and HAS_REQUESTS):
        module.fail_json(msg="py-pure-client and/or requests are not installed.")
    # Resolve the target BEFORE the REST version probe below. Previously the
    # probe used module.params['fa_url'] unconditionally, so running with only
    # the PUREFA_URL/PUREFA_API environment variables set raised a TypeError
    # ("https://" + None) instead of connecting.
    array_name = module.params['fa_url'] or environ.get('PUREFA_URL')
    api = module.params['api_token'] or environ.get('PUREFA_API')
    if not (array_name and api):
        module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables "
                             "or the fa_url and api_token module arguments")
    # NOTE(review): verify=False disables TLS certificate checking for the
    # version probe (arrays commonly use self-signed certificates) — confirm
    # this is acceptable before use on untrusted networks.
    versions = requests.get("https://" + array_name + "/api/api_version", verify=False)
    api_version = versions.json()['version'][-1]
    # Pin the negotiated REST API version in all cases; the env-var branch
    # previously omitted version=, inconsistently with the argument branch.
    system = flasharray.Client(target=array_name, api_token=api,
                               user_agent=user_agent, version=api_version)
    try:
        # Probe the array once to validate the credentials before returning.
        system.get_hardware()
    except Exception:
        module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials")
    return system
+
+
def purefa_argument_spec():
    """Return the common argument_spec entries shared by every purefa module."""
    return {
        'fa_url': dict(),
        'api_token': dict(no_log=True),  # never log API credentials
    }
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py
new file mode 100644
index 00000000..334d94f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
# Legacy metadata block consumed by older Ansible doc tooling; marks this as a
# community-supported module in "preview" status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# Module documentation rendered by ansible-doc (YAML inside a raw string).
DOCUMENTATION = r'''
---
module: purefa_alert
version_added: '1.0.0'
short_description: Configure Pure Storage FlashArray alert email settings
description:
- Configure alert email configuration for Pure Storage FlashArrays.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
  state:
    type: str
    description:
    - Create or delete alert email
    default: present
    choices: [ absent, present ]
  address:
    type: str
    description:
    - Email address (valid format required)
    required: true
  enabled:
    type: bool
    default: true
    description:
    - Set specified email address to be enabled or disabled
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
'''

# Usage examples shown by ansible-doc.
EXAMPLES = r'''
- name: Add new email recipient and enable, or enable existing email
  purefa_alert:
    address: "user@domain.com"
    enabled: true
    state: present
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Delete existing email recipient
  purefa_alert:
    state: absent
    address: "user@domain.com"
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''

# This module returns no facts beyond the standard 'changed' flag.
RETURN = r'''
'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
def create_alert(module, array):
    """Create an alert email recipient, optionally disabling it immediately.

    In check mode nothing is sent to the array but ``changed=True`` is still
    reported, matching the behaviour of the other state handlers.
    Exits the module via exit_json/fail_json; does not return.
    """
    changed = True
    if not module.check_mode:
        changed = False
        try:
            array.create_alert_recipient(module.params['address'])
            changed = True
        except Exception:
            module.fail_json(msg='Failed to create alert email: {0}'.format(module.params['address']))

        if not module.params['enabled']:
            try:
                array.disable_alert_recipient(module.params['address'])
                changed = True
            except Exception:
                # Fixed copy/paste defect: this failure comes from disabling
                # the recipient, not from creating it.
                module.fail_json(msg='Failed to disable alert email: {0}'.format(module.params['address']))

    module.exit_json(changed=changed)
+
+
def enable_alert(module, array):
    """Enable an existing alert email recipient.

    In check mode the array is not touched and ``changed=True`` is reported.
    Exits the module via exit_json/fail_json; does not return.
    """
    changed = True
    if not module.check_mode:
        changed = False
        address = module.params['address']
        try:
            array.enable_alert_recipient(address)
        except Exception:
            module.fail_json(msg='Failed to enable alert email: {0}'.format(address))
        else:
            changed = True
    module.exit_json(changed=changed)
+
+
def disable_alert(module, array):
    """Disable an existing alert email recipient.

    In check mode the array is not touched and ``changed=True`` is reported.
    Exits the module via exit_json/fail_json; does not return.
    """
    changed = True
    if not module.check_mode:
        changed = False
        address = module.params['address']
        try:
            array.disable_alert_recipient(address)
        except Exception:
            module.fail_json(msg='Failed to disable alert email: {0}'.format(address))
        else:
            changed = True
    module.exit_json(changed=changed)
+
+
def delete_alert(module, array):
    """Delete an alert email recipient, refusing to remove the built-in one.

    In check mode the array is not touched and ``changed=True`` is reported.
    Exits the module via exit_json/fail_json; does not return.
    """
    address = module.params['address']
    # The factory-installed Pure Storage address must never be removed.
    if address == "flasharray-alerts@purestorage.com":
        module.fail_json(msg='Built-in address {0} cannot be deleted.'.format(address))
    changed = True
    if not module.check_mode:
        changed = False
        try:
            array.delete_alert_recipient(address)
        except Exception:
            module.fail_json(msg='Failed to delete alert email: {0}'.format(address))
        else:
            changed = True
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: validate input, then reconcile alert email state.

    Dispatches to create/enable/disable/delete based on the requested state
    and what already exists on the array; falls through to changed=False when
    the array is already in the desired state.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        address=dict(type='str', required=True),
        enabled=dict(type='bool', default=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    ))

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    # Reject obviously malformed addresses before contacting the array.
    pattern = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$")
    if not pattern.match(module.params['address']):
        module.fail_json(msg='Valid email address not provided.')

    array = get_system(module)

    exists = False
    enabled = False  # defensive init; only meaningful once exists is True
    try:
        emails = array.list_alert_recipients()
    except Exception:
        # Fixed message typo ('exisitng' -> 'existing').
        module.fail_json(msg='Failed to get existing email list')
    # Iterate the recipient dicts directly instead of indexing by range().
    for email in emails:
        if email['name'] == module.params['address']:
            exists = True
            enabled = email['enabled']
            break
    if module.params['state'] == 'present' and not exists:
        create_alert(module, array)
    elif module.params['state'] == 'present' and exists and not enabled and module.params['enabled']:
        enable_alert(module, array)
    elif module.params['state'] == 'present' and exists and enabled and not module.params['enabled']:
        disable_alert(module, array)
    elif module.params['state'] == 'absent' and exists:
        delete_alert(module, array)

    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py
new file mode 100644
index 00000000..e3d1c0ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_apiclient
+version_added: '1.5.0'
+short_description: Manage FlashArray API Clients
+description:
+- Enable or disable FlashArray API Clients
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the API Client
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the API client should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ role:
+ description:
+ - The maximum role allowed for ID Tokens issued by this API client
+ type: str
+ choices: [readonly, ops_admin, storage_admin, array_admin]
+ issuer:
+ description:
+ - The name of the identity provider that will be issuing ID Tokens for this API client
+ - If not specified, defaults to the API client name, I(name).
+ type: str
+ public_key:
+ description:
+ - The API clients PEM formatted (Base64 encoded) RSA public key.
+ - Include the I(—–BEGIN PUBLIC KEY—–) and I(—–END PUBLIC KEY—–) lines
+ type: str
+ token_ttl:
+ description:
+ - Time To Live length in seconds for the exchanged access token
+ - Range is 1 second to 1 day (86400 seconds)
+ type: int
+ default: 86400
+ enabled:
+ description:
+ - State of the API Client Key
+ type: bool
+ default: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create API token ansible-token
+ purefa_apiclient:
+ name: ansible-token
+ issuer: "Pure Storage"
+    token_ttl: 3000
+ role: array_admin
+ public_key: "{{lookup('file', 'public_pem_file') }}"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable API Client
+ purefa_apiclient:
+ name: ansible-token
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Enable API Client
+ purefa_apiclient:
+ name: ansible-token
+ enabled: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete API Client
+ purefa_apiclient:
+ state: absent
+ name: ansible-token
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, get_array, purefa_argument_spec
+
+MIN_REQUIRED_API_VERSION = '2.1'
+
+
+def delete_client(module, array):
+    """Delete an API client and exit with changed status.
+
+    In check mode the deletion is skipped but changed=True is still
+    reported, since the caller established the client exists.
+    """
+    changed = True
+    if not module.check_mode:
+        try:
+            array.delete_api_clients(names=[module.params['name']])
+        except Exception:
+            module.fail_json(msg="Failed to delete API Client {0}".format(module.params['name']))
+    module.exit_json(changed=changed)
+
+
+def update_client(module, array, client):
+    """Update API Client (currently only the enabled flag)."""
+    # NOTE(review): in check mode this always reports changed=True, even
+    # when the enabled flag already matches; the real run only reports a
+    # change when a PATCH is actually issued.  Verify intended behaviour.
+    changed = True
+    if not module.check_mode:
+        changed = False
+        if client.enabled != module.params['enabled']:
+            try:
+                array.patch_api_clients(names=[module.params['name']],
+                                        api_clients=flasharray.ApiClientPatch(enabled=module.params['enabled']))
+                changed = True
+            except Exception:
+                module.fail_json(msg='Failed to update API Client {0}'.format(module.params['name']))
+    module.exit_json(changed=changed)
+
+
+def create_client(module, array):
+    """Create API Client.
+
+    Validates token_ttl, posts the client definition, and optionally
+    enables it.  If the post-create enable PATCH fails, the freshly
+    created client is rolled back (deleted) before failing.
+    """
+    changed = True
+    if not module.check_mode:
+        changed = False
+        if not 1 <= module.params['token_ttl'] <= 86400:
+            module.fail_json(msg="token_ttl parameter is out of range (1 to 86400)")
+        else:
+            # API expects the TTL in milliseconds; module takes seconds.
+            token_ttl = module.params['token_ttl'] * 1000
+            # Issuer defaults to the client name when not supplied.
+            if not module.params['issuer']:
+                module.params['issuer'] = module.params['name']
+            try:
+                client = flasharray.ApiClientPost(max_role=module.params['role'],
+                                                  issuer=module.params['issuer'],
+                                                  access_token_ttl_in_ms=token_ttl,
+                                                  public_key=module.params['public_key'])
+                res = array.post_api_clients(names=[module.params['name']], api_clients=client)
+                if res.status_code != 200:
+                    module.fail_json(msg="Failed to create API CLient {0}. Error message: {1}".format(module.params['name'],
+                                                                                                      res.errors[0].message))
+                # Clients are created disabled; enable on request.
+                if module.params['enabled']:
+                    try:
+                        array.patch_api_clients(names=[module.params['name']],
+                                                api_clients=flasharray.ApiClientPatch(enabled=module.params['enabled']))
+                    except Exception:
+                        # Roll back the half-created client.
+                        array.delete_api_clients(names=[module.params['name']])
+                        module.fail_json(msg="Failed to create API Client {0}".format(module.params['name']))
+                changed = True
+            except Exception:
+                module.fail_json(msg="Failed to create API Client {0}".format(module.params['name']))
+    module.exit_json(changed=changed)
+
+
+def main():
+    """Ansible entry point: manage FlashArray API clients (REST 2.1+).
+
+    Requires the py-pure-client SDK; probes for the client by name and
+    dispatches to create/update/delete, which each exit the module.
+    """
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        enabled=dict(type='bool', default=True),
+        name=dict(type='str', required=True),
+        role=dict(type='str', choices=['readonly', 'ops_admin', 'storage_admin', 'array_admin']),
+        public_key=dict(type='str', no_log=True),
+        token_ttl=dict(type='int', default=86400),
+        issuer=dict(type='str')
+    ))
+
+    module = AnsibleModule(argument_spec,
+                           supports_check_mode=True)
+
+    if not HAS_PURESTORAGE:
+        module.fail_json(msg='py-pure-client sdk is required for this module')
+
+    # REST 1.x client used only to discover supported API versions.
+    array = get_system(module)
+    api_version = array._list_available_rest_versions()
+
+    if MIN_REQUIRED_API_VERSION not in api_version:
+        module.fail_json(msg='FlashArray REST version not supported. '
+                             'Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+    # Switch to the REST 2.x client for all API-client operations.
+    array = get_array(module)
+    state = module.params['state']
+
+    # EAFP probe: a missing client raises, which we treat as "not exists".
+    try:
+        client = list(array.get_api_clients(names=[module.params['name']]).items)[0]
+        exists = True
+    except Exception:
+        exists = False
+
+    if not exists and state == 'present':
+        create_client(module, array)
+    elif exists and state == 'present':
+        update_client(module, array, client)
+    elif exists and state == 'absent':
+        delete_client(module, array)
+
+    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py
new file mode 100644
index 00000000..b2c90cf3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_arrayname
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray array name
+description:
+- Configure name of array for Pure Storage FlashArrays.
+- Ideal for Day 0 initial configuration.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set the array name
+ type: str
+ default: present
+ choices: [ present ]
+ name:
+ description:
+ - Name of the array. Must conform to correct naming schema.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Set new array name
+ purefa_arrayname:
+ name: new-array-name
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def update_name(module, array):
+    """Change the array name and exit with changed status."""
+    changed = True
+    if not module.check_mode:
+
+        try:
+            array.set(name=module.params['name'])
+        except Exception:
+            module.fail_json(msg='Failed to change array name to {0}'.format(module.params['name']))
+
+    module.exit_json(changed=changed)
+
+
+def main():
+    """Ansible entry point: set the FlashArray array name.
+
+    Only renames when the requested name differs from the current one,
+    keeping the module idempotent.
+    """
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        name=dict(type='str', required=True),
+        state=dict(type='str', default='present', choices=['present']),
+    ))
+
+    module = AnsibleModule(argument_spec,
+                           supports_check_mode=True)
+
+    array = get_system(module)
+    # Valid names: alphanumeric plus internal hyphens, up to 56 chars,
+    # starting and ending with an alphanumeric character.
+    pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,54}[a-zA-Z0-9])?$")
+    if not pattern.match(module.params['name']):
+        module.fail_json(msg='Array name {0} does not conform to array name rules'.format(module.params['name']))
+    if module.params['name'] != array.get()['array_name']:
+        update_name(module, array)
+
+    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py
new file mode 100644
index 00000000..a35d5128
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_banner
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray GUI and SSH MOTD message
+description:
+- Configure MOTD for Pure Storage FlashArrays.
+- This will be shown during an SSH or GUI login to the array.
+- Multiple line messages can be achieved using \\n.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+    - Set or delete the MOTD
+ default: present
+ type: str
+ choices: [ present, absent ]
+ banner:
+ description:
+ - Banner text, or MOTD, to use
+ type: str
+ default: "Welcome to the machine..."
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Set new banner text
+ purefa_banner:
+ banner: "Banner over\ntwo lines"
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete banner text
+ purefa_banner:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def set_banner(module, array):
+    """Set MOTD banner text"""
+    changed = True
+    if not module.check_mode:
+        try:
+            # required_if guarantees banner is supplied, but an empty
+            # string would silently clear the MOTD, so reject it here.
+            if not module.params['banner']:
+                module.fail_json(msg='Invalid MOTD banner given')
+
+            array.set(banner=module.params['banner'])
+        except Exception:
+            module.fail_json(msg='Failed to set MOTD banner text')
+
+    module.exit_json(changed=changed)
+
+
+def delete_banner(module, array):
+    """Delete MOTD banner text by setting it to the empty string."""
+    changed = True
+    if not module.check_mode:
+        try:
+            array.set(banner="")
+        except Exception:
+            module.fail_json(msg='Failed to delete current MOTD banner text')
+    module.exit_json(changed=changed)
+
+
+def main():
+    """Ansible entry point: set or clear the array MOTD banner.
+
+    Compares the requested banner against the current one so that no
+    change is reported when the banner already matches.
+    """
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        banner=dict(type='str', default="Welcome to the machine..."),
+        state=dict(type='str', default='present', choices=['present', 'absent']),
+    ))
+
+    required_if = [('state', 'present', ['banner'])]
+
+    module = AnsibleModule(argument_spec,
+                           required_if=required_if,
+                           supports_check_mode=True)
+
+    state = module.params['state']
+    array = get_system(module)
+    current_banner = array.get(banner=True)['banner']
+    # set banner if empty value or value differs
+    if state == 'present' and (not current_banner or current_banner != module.params['banner']):
+        set_banner(module, array)
+    # clear banner if it has a value
+    elif state == 'absent' and current_banner:
+        delete_banner(module, array)
+
+    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py
new file mode 100644
index 00000000..7b9690d3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_connect
+version_added: '1.0.0'
+short_description: Manage replication connections between two FlashArrays
+description:
+- Manage array connections to specified target array
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete array connection
+ default: present
+ type: str
+ choices: [ absent, present ]
+ target_url:
+ description:
+ - Management IP address of remote array.
+ type: str
+ required: true
+ target_api:
+ description:
+ - API token for target array
+ type: str
+ connection:
+ description:
+ - Type of connection between arrays.
+ type: str
+ choices: [ sync, async ]
+ default: async
+ transport:
+ description:
+ - Type of transport protocol to use for replication
+ type: str
+ choices: [ ip, fc ]
+ default: ip
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create an async connection to remote array
+ purefa_connect:
+ target_url: 10.10.10.20
+ target_api: 9c0b56bc-f941-f7a6-9f85-dcc3e9a8f7d6
+ connection: async
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Delete connection to remote array
+ purefa_connect:
+ state: absent
+ target_url: 10.10.10.20
+ target_api: 9c0b56bc-f941-f7a6-9f85-dcc3e9a8f7d6
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from purestorage import FlashArray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+import platform
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_array, get_system, purefa_argument_spec
+
+
+P53_API_VERSION = '1.17'
+FC_REPL_VERSION = '2.4'
+
+
+def _check_connected(module, array):
+    """Return the existing connection record for target_url, or None.
+
+    REST >= 1.17 (P53_API_VERSION) reports a 'status' string, while older
+    versions expose a boolean 'connected' field, hence the
+    version-dependent branch.
+    """
+    connected_arrays = array.list_array_connections()
+    api_version = array._list_available_rest_versions()
+    for target in range(0, len(connected_arrays)):
+        if P53_API_VERSION in api_version:
+            if connected_arrays[target]['management_address'] == module.params['target_url'] and \
+                    "connected" in connected_arrays[target]['status']:
+                return connected_arrays[target]
+        else:
+            if connected_arrays[target]['management_address'] == module.params['target_url'] and \
+                    connected_arrays[target]['connected']:
+                return connected_arrays[target]
+    return None
+
+
+def break_connection(module, array, target_array):
+    """Break connection between arrays"""
+    changed = True
+    if not module.check_mode:
+        source_array = array.get()['array_name']
+        try:
+            # A None management_address means this side did not form the
+            # connection and therefore cannot tear it down.
+            if target_array['management_address'] is None:
+                module.fail_json(msg="disconnect can only happen from the array that formed the connection")
+            array.disconnect_array(target_array['array_name'])
+        except Exception:
+            module.fail_json(msg="Failed to disconnect {0} from {1}.".format(target_array['array_name'], source_array))
+    module.exit_json(changed=changed)
+
+
+def create_connection(module, array):
+    """Create connection between arrays.
+
+    Fetches a connection key from the remote array (REST 1.x client),
+    then either posts an FC sync connection via the REST 2.x client or
+    falls back to the classic connect_array call for IP transport.
+    """
+    changed = True
+    if not module.check_mode:
+        remote_array = module.params['target_url']
+        user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
+            'base': 'Ansible',
+            'class': __name__,
+            'version': 1.2,
+            'platform': platform.platform()
+        }
+        try:
+            remote_system = FlashArray(module.params['target_url'],
+                                       api_token=module.params['target_api'],
+                                       user_agent=user_agent)
+            connection_key = remote_system.get(connection_key=True)['connection_key']
+            remote_array = remote_system.get()['array_name']
+            api_version = array._list_available_rest_versions()
+            # TODO: Refactor when FC async is supported
+            if FC_REPL_VERSION in api_version and module.params['transport'].lower() == 'fc':
+                # FC transport currently supports sync replication only.
+                if module.params['connection'].lower() == "async":
+                    module.fail_json(msg='Asynchronous replication not supported using FC transport')
+                array_connection = flasharray.ArrayConnectionPost(type='sync-replication',
+                                                                  management_address=module.params['target_url'],
+                                                                  replication_transport='fc',
+                                                                  connection_key=connection_key)
+                # Rebind to the REST 2.x client for the FC connection post.
+                array = get_array(module)
+                res = array.post_array_connections(array_connection=array_connection)
+                if res.status_code != 200:
+                    module.fail_json(msg='Array Connection failed. Error: {0}'.format(res.errors[0].message))
+            else:
+                array.connect_array(module.params['target_url'], connection_key, [module.params['connection']])
+        except Exception:
+            module.fail_json(msg="Failed to connect to remote array {0}.".format(remote_array))
+    module.exit_json(changed=changed)
+
+
+def main():
+    """Ansible entry point: create or break a replication connection."""
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        connection=dict(type='str', default='async', choices=['async', 'sync']),
+        transport=dict(type='str', default='ip', choices=['ip', 'fc']),
+        target_url=dict(type='str', required=True),
+        target_api=dict(type='str'),
+    ))
+
+    required_if = [('state', 'present', ['target_api'])]
+
+    module = AnsibleModule(argument_spec,
+                           required_if=required_if,
+                           supports_check_mode=True)
+
+    if not HAS_PURESTORAGE:
+        module.fail_json(msg='purestorage sdk is required for this module')
+
+    # pypureclient is only mandatory for the FC transport, which uses the
+    # REST 2.x client.
+    if module.params['transport'] == 'fc' and not HAS_PYPURECLIENT:
+        module.fail_json(msg='pypureclient sdk is required for this module')
+
+    state = module.params['state']
+    array = get_system(module)
+    target_array = _check_connected(module, array)
+
+    if state == 'present' and target_array is None:
+        create_connection(module, array)
+    elif state == 'absent' and target_array is not None:
+        break_connection(module, array, target_array)
+
+    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py
new file mode 100644
index 00000000..5252ca28
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_console
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashArray Console Lock
+description:
+- Enable or Disable root lockout from the array at the physical console for a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of console lockout
+ - When set to I(enable) the console port is locked from root login.
+ type: str
+ default: disable
+ choices: [ enable, disable ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Enable Console Lockout
+ purefa_console:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable Console Lockout
+ purefa_console:
+ state: disable
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def enable_console(module, array):
+    """Enable Console Lockout"""
+    # NOTE(review): changed=True is reported even when the lock is already
+    # enabled (the state check only gates the API call, not the report),
+    # and check mode always reports changed=True.  Verify intended.
+    changed = True
+    if not module.check_mode:
+        if array.get_console_lock_status()['console_lock'] != 'enabled':
+            try:
+                array.enable_console_lock()
+            except Exception:
+                module.fail_json(msg='Enabling Console Lock failed')
+    module.exit_json(changed=changed)
+
+
+def disable_console(module, array):
+    """Disable Console Lock"""
+    # NOTE(review): same over-reporting as enable_console — changed=True
+    # even when the lock is already disabled.  Verify intended.
+    changed = True
+    if not module.check_mode:
+        if array.get_console_lock_status()['console_lock'] == 'enabled':
+            try:
+                array.disable_console_lock()
+            except Exception:
+                module.fail_json(msg='Disabling Console Lock failed')
+    module.exit_json(changed=changed)
+
+
+def main():
+    """Ansible entry point: enable or disable console lockout."""
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', default='disable', choices=['enable', 'disable']),
+    ))
+
+    module = AnsibleModule(argument_spec,
+                           supports_check_mode=True)
+
+    array = get_system(module)
+
+    if module.params['state'] == 'enable':
+        enable_console(module, array)
+    else:
+        disable_console(module, array)
+    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py
new file mode 100644
index 00000000..1637c845
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_directory
+version_added: '1.5.0'
+short_description: Manage FlashArray File System Directories
+description:
+- Create/Delete FlashArray File System Directories
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the directory
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the directory should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ filesystem:
+ description:
+ - Name of the filesystem the directory links to.
+ type: str
+ required: true
+ path:
+ description:
+ - Path of the managed directory in the file system
+ - If not provided will default to I(name)
+ type: str
+ rename:
+ description:
+ - Value to rename the specified directory to
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create directory foo in filesystem bar with path zeta
+ purefa_directory:
+ name: foo
+ filesystem: bar
+ path: zeta
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Rename directory foo to fin in filesystem bar
+ purefa_directory:
+ name: foo
+ rename: fin
+ filesystem: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete directory foo in filesystem bar
+ purefa_directory:
+ name: foo
+ filesystem: bar
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, get_array, purefa_argument_spec
+
+MIN_REQUIRED_API_VERSION = '2.2'
+
+
+def delete_dir(module, array):
+    """Delete a managed directory, addressed as "<filesystem>:<name>"."""
+    changed = True
+    if not module.check_mode:
+        res = array.delete_directories(names=[module.params['filesystem'] + ":" + module.params['name']])
+        if res.status_code != 200:
+            module.fail_json(msg="Failed to delete file system {0}. {1}".format(module.params['name'],
+                                                                                res.errors[0].message))
+    module.exit_json(changed=changed)
+
+
+def rename_dir(module, array):
+    """Rename a managed directory within its file system."""
+    changed = True
+    if not module.check_mode:
+        changed = False
+        # Only rename when the target name is not already taken.
+        target = array.get_directories(names=[module.params['filesystem'] + ":" + module.params['rename']])
+        if target.status_code != 200:
+            directory = flasharray.DirectoryPatch(name=module.params['filesystem'] + ":" + module.params['rename'])
+            res = array.patch_directories(names=[module.params['filesystem'] + ":" + module.params['name']],
+                                          directory=directory)
+            if res.status_code != 200:
+                # NOTE(review): error text says "delete" but this is a
+                # rename failure — message looks copy-pasted.
+                module.fail_json(msg="Failed to delete file system {0}".format(module.params['name']))
+            else:
+                changed = True
+        else:
+            module.fail_json(msg="Target file system {0} already exists".format(module.params['rename']))
+    module.exit_json(changed=changed)
+
+
+def create_dir(module, array):
+    """Create a file system directory.
+
+    The managed path defaults to the directory name and must not collide
+    with any existing directory path in the same file system.
+    """
+    changed = True
+    if not module.check_mode:
+        changed = False
+        if not module.params['path']:
+            module.params['path'] = module.params['name']
+        all_fs = list(array.get_directories(file_system_names=[module.params['filesystem']]).items)
+        # Paths are reported with a leading '/', hence the [1:] slice.
+        for check in range(0, len(all_fs)):
+            if module.params['path'] == all_fs[check].path[1:]:
+                module.fail_json(msg="Path {0} already existis in file system {1}".format(module.params['path'],
+                                                                                          module.params['filesystem']))
+        directory = flasharray.DirectoryPost(directory_name=module.params['name'],
+                                             path=module.params['path'])
+        res = array.post_directories(file_system_names=[module.params['filesystem']], directory=directory)
+        if res.status_code != 200:
+            module.fail_json(msg="Failed to create file system {0}. {1}".format(module.params['name'],
+                                                                                res.errors[0].message))
+        else:
+            changed = True
+    module.exit_json(changed=changed)
+
+
+def main():
+    """Ansible entry point: create, rename or delete a managed directory.
+
+    Requires REST 2.2+ and the py-pure-client SDK; the containing file
+    system must already exist.
+    """
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        filesystem=dict(type='str', required=True),
+        name=dict(type='str', required=True),
+        rename=dict(type='str'),
+        path=dict(type='str'),
+    ))
+
+    module = AnsibleModule(argument_spec,
+                           supports_check_mode=True)
+
+    if not HAS_PURESTORAGE:
+        module.fail_json(msg='py-pure-client sdk is required for this module')
+
+    # REST 1.x client only used for the API-version probe.
+    array = get_system(module)
+    api_version = array._list_available_rest_versions()
+    if MIN_REQUIRED_API_VERSION not in api_version:
+        module.fail_json(msg='FlashArray REST version not supported. '
+                             'Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+    # Directory operations need the REST 2.x client.
+    array = get_array(module)
+    state = module.params['state']
+
+    try:
+        filesystem = list(array.get_file_systems(names=[module.params['filesystem']]).items)[0]
+    except Exception:
+        module.fail_json(msg="Selected file system {0} does not exist".format(module.params['filesystem']))
+    # Directories are addressed as "<filesystem>:<name>".
+    res = array.get_directories(names=[module.params['filesystem'] + ":" + module.params['name']])
+    exists = bool(res.status_code == 200)
+
+    if state == 'present' and not exists:
+        create_dir(module, array)
+    elif state == "present" and exists and module.params['rename'] and not filesystem.destroyed:
+        rename_dir(module, array)
+    elif state == 'absent' and exists:
+        delete_dir(module, array)
+
+    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py
new file mode 100644
index 00000000..fa4b5f56
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_dns
+version_added: '1.0.0'
+short_description: Configure FlashArray DNS settings
+description:
+- Set or erase configuration for the DNS settings.
+- Nameservers provided will overwrite any existing nameservers.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set or delete directory service configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ domain:
+ description:
+    - Domain suffix to be appended when performing DNS lookups.
+ type: str
+ nameservers:
+ description:
+ - List of up to 3 unique DNS server IP addresses. These can be
+ IPv4 or IPv6 - No validation is done of the addresses is performed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Delete existing DNS settings
+ purefa_dns:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set DNS settings
+ purefa_dns:
+ domain: purestorage.com
+ nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def remove(duplicate):
+    """Return *duplicate* with repeated entries removed, order preserved."""
+    final_list = []
+    for num in duplicate:
+        if num not in final_list:
+            final_list.append(num)
+    return final_list
+
+
+def delete_dns(module, array):
+    """Delete DNS settings"""
+    changed = True
+    if not module.check_mode:
+        current_dns = array.get_dns()
+        # NOTE(review): when DNS is already cleared this exits early but
+        # still reports changed=True, over-reporting a change.  Verify
+        # intended behaviour.
+        if current_dns['domain'] == '' and current_dns['nameservers'] == ['']:
+            module.exit_json(changed=changed)
+        else:
+            try:
+                array.set_dns(domain='', nameservers=[])
+            except Exception:
+                module.fail_json(msg='Delete DNS settigs failed')
+    module.exit_json(changed=changed)
+
+
+def create_dns(module, array):
+    """Set DNS settings"""
+    changed = True
+    if not module.check_mode:
+        changed = False
+        current_dns = array.get_dns()
+        # Only push an update when the domain or the (order-insensitive)
+        # nameserver list differs from the current configuration.
+        if current_dns['domain'] != module.params['domain'] or \
+                sorted(module.params['nameservers']) != sorted(current_dns['nameservers']):
+            try:
+                # At most 3 nameservers are configured; extras are dropped.
+                array.set_dns(domain=module.params['domain'],
+                              nameservers=module.params['nameservers'][0:3])
+                changed = True
+            except Exception:
+                module.fail_json(msg='Set DNS settings failed: Check configuration')
+    module.exit_json(changed=changed)
+
+
+def main():
+    """Ansible entry point: set or erase the array DNS configuration."""
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        domain=dict(type='str'),
+        nameservers=dict(type='list', elements='str'),
+    ))
+
+    required_if = [('state', 'present', ['domain', 'nameservers'])]
+
+    module = AnsibleModule(argument_spec,
+                           required_if=required_if,
+                           supports_check_mode=True)
+
+    state = module.params['state']
+    array = get_system(module)
+
+    if state == 'absent':
+        delete_dns(module, array)
+    elif state == 'present':
+        # De-duplicate while preserving the caller's ordering.
+        module.params['nameservers'] = remove(module.params['nameservers'])
+        create_dns(module, array)
+    else:
+        # Unreachable: 'state' choices are exactly absent/present.
+        module.exit_json(changed=False)
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py
new file mode 100644
index 00000000..b968fa80
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py
@@ -0,0 +1,519 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_ds
+version_added: '1.0.0'
+short_description: Configure FlashArray Directory Service
+description:
+- Set or erase configuration for the directory service. There is no facility
+  to manage SSL certificates at this time. Use the FlashArray GUI for this
+  additional configuration work.
+- To modify an existing directory service configuration you must first delete
+  an existing configuration and then recreate with new settings.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ type: str
+ description:
+ - Create or delete directory service configuration
+ default: present
+ choices: [ absent, present ]
+ enable:
+ description:
+ - Whether to enable or disable directory service support.
+ default: false
+ type: bool
+ dstype:
+ description:
+ - The type of directory service to work on
+ choices: [ management, data ]
+ type: str
+ default: management
+ uri:
+ type: list
+ elements: str
+ description:
+ - A list of up to 30 URIs of the directory servers. Each URI must include
+ the scheme ldap:// or ldaps:// (for LDAP over SSL), a hostname, and a
+ domain name or IP address. For example, ldap://ad.company.com configures
+ the directory service with the hostname "ad" in the domain "company.com"
+ while specifying the unencrypted LDAP protocol.
+ base_dn:
+ type: str
+ description:
+ - Sets the base of the Distinguished Name (DN) of the directory service
+ groups. The base should consist of only Domain Components (DCs). The
+ base_dn will populate with a default value when a URI is entered by
+ parsing domain components from the URI. The base DN should specify DC=
+ for each domain component and multiple DCs should be separated by commas.
+ bind_password:
+ type: str
+ description:
+ - Sets the password of the bind_user user name account.
+ bind_user:
+ type: str
+ description:
+ - Sets the user name that can be used to bind to and query the directory.
+ - For Active Directory, enter the username - often referred to as
+ sAMAccountName or User Logon Name - of the account that is used to
+ perform directory lookups.
+ - For OpenLDAP, enter the full DN of the user.
+ group_base:
+ type: str
+ description:
+ - Specifies where the configured groups are located in the directory
+ tree. This field consists of Organizational Units (OUs) that combine
+ with the base DN attribute and the configured group CNs to complete
+ the full Distinguished Name of the groups. The group base should
+ specify OU= for each OU and multiple OUs should be separated by commas.
+ The order of OUs is important and should get larger in scope from left
+ to right. Each OU should not exceed 64 characters in length.
+ - Not Supported from Purity 5.2.0 or higher. Use I(purefa_dsrole) module.
+ ro_group:
+ type: str
+ description:
+ - Sets the common Name (CN) of the configured directory service group
+ containing users with read-only privileges on the FlashArray. This
+ name should be just the Common Name of the group without the CN=
+ specifier. Common Names should not exceed 64 characters in length.
+ - Not Supported from Purity 5.2.0 or higher. Use I(purefa_dsrole) module.
+ sa_group:
+ type: str
+ description:
+ - Sets the common Name (CN) of the configured directory service group
+ containing administrators with storage-related privileges on the
+ FlashArray. This name should be just the Common Name of the group
+ without the CN= specifier. Common Names should not exceed 64
+ characters in length.
+ - Not Supported from Purity 5.2.0 or higher. Use I(purefa_dsrole) module.
+ aa_group:
+ type: str
+ description:
+ - Sets the common Name (CN) of the directory service group containing
+ administrators with full privileges when managing the FlashArray.
+ The name should be just the Common Name of the group without the
+ CN= specifier. Common Names should not exceed 64 characters in length.
+ - Not Supported from Purity 5.2.0 or higher. Use I(purefa_dsrole) module.
+ user_login:
+ type: str
+ description:
+ - User login attribute in the structure of the configured LDAP servers.
+ Typically the attribute field that holds the users unique login name.
+ Default value is I(sAMAccountName) for Active Directory or I(uid)
+ for all other directory services
+ - Supported from Purity 6.0 or higher.
+ user_object:
+ type: str
+ description:
+ - Value of the object class for a management LDAP user.
+ Defaults to I(User) for Active Directory servers, I(posixAccount) or
+ I(shadowAccount) for OpenLDAP servers dependent on the group type
+ of the server, or person for all other directory servers.
+ - Supported from Purity 6.0 or higher.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Delete existing directory service
+ purefa_ds:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (disabled) - Pre-5.2.0
+ purefa_ds:
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ group_base: "OU=Pure-Admin"
+ ro_group: PureReadOnly
+ sa_group: PureStorage
+ aa_group: PureAdmin
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (disabled) - 5.2.0 or higher
+ purefa_ds:
+ dstype: management
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Enable existing directory service
+ purefa_ds:
+ enable: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable existing directory service
+ purefa_ds:
+ enable: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (enabled) - Pre-5.2.0
+ purefa_ds:
+ enable: true
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ group_base: "OU=Pure-Admin"
+ ro_group: PureReadOnly
+ sa_group: PureStorage
+ aa_group: PureAdmin
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (enabled) - 5.2.0 or higher
+ purefa_ds:
+ enable: true
+ dstype: management
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_array, get_system, purefa_argument_spec
+
+
+DS_ROLE_REQUIRED_API_VERSION = '1.16'
+FAFILES_API_VERSION = '2.2'
+
+
def disable_ds(module, array):
    """Turn off directory service support on the array."""
    if module.check_mode:
        # Check mode: report the change without touching the array.
        module.exit_json(changed=True)
    try:
        array.disable_directory_service()
    except Exception:
        module.fail_json(msg='Disable Directory Service failed')
    module.exit_json(changed=True)
+
+
def enable_ds(module, array):
    """Enable Directory Service.

    On Purity releases that support directory service roles (REST API
    1.16+), enabling is only permitted once at least one role has a
    group_base configured; otherwise the module fails with guidance.
    Older releases simply enable the service directly.
    """
    changed = True
    if not module.check_mode:
        api_version = array._list_available_rest_versions()
        if DS_ROLE_REQUIRED_API_VERSION in api_version:
            try:
                roles = array.list_directory_service_roles()
                # A single role with a non-empty group_base is sufficient.
                enough_roles = False
                for role in range(0, len(roles)):
                    if roles[role]["group_base"]:
                        enough_roles = True
                if enough_roles:
                    array.enable_directory_service()
                else:
                    # NOTE(review): fail_json here sits inside the try block --
                    # presumably it exits rather than raising Exception, so the
                    # broad except below does not mask this message; confirm.
                    module.fail_json(msg='Cannot enable directory service - please create a directory service role')
            except Exception:
                module.fail_json(msg='Enable Directory Service failed: Check Configuration')
        else:
            # Pre-role-support arrays: no prerequisite check is possible.
            try:
                array.enable_directory_service()
            except Exception:
                module.fail_json(msg='Enable Directory Service failed: Check Configuration')
    module.exit_json(changed=changed)
+
+
def delete_ds(module, array):
    """Delete Directory Service.

    Disables the service first and then blanks every configuration
    field. Arrays that support directory service roles (REST API
    1.16+) no longer accept the group/role fields on this call, so a
    reduced field set is cleared there.
    """
    changed = True
    if not module.check_mode:
        try:
            api_version = array._list_available_rest_versions()
            array.set_directory_service(enabled=False)
            if DS_ROLE_REQUIRED_API_VERSION in api_version:
                # Role-capable arrays: group settings live in purefa_dsrole.
                array.set_directory_service(uri=[''],
                                            base_dn="",
                                            bind_user="",
                                            bind_password="",
                                            certificate="")
            else:
                # Legacy arrays: groups are part of the service config itself.
                array.set_directory_service(uri=[''],
                                            base_dn="",
                                            group_base="",
                                            bind_user="",
                                            bind_password="",
                                            readonly_group="",
                                            storage_admin_group="",
                                            array_admin_group="",
                                            certificate="")
        except Exception:
            module.fail_json(msg='Delete Directory Service failed')
    module.exit_json(changed=changed)
+
+
def delete_ds_v6(module, array):
    """Delete Directory Service (Purity//FA 6.x / REST 2.x arrays).

    Clears all configuration for the selected management or data
    directory service by PATCHing it with empty values and disabling it.
    Exits the module with the resulting changed status.
    """
    changed = False
    # BUG FIX: module.params is a dict -- the original code invoked it like a
    # function (module.params('dstype')), which raises TypeError at runtime.
    if module.params['dstype'] == 'management':
        # The management service additionally carries user-mapping attributes
        # that must be blanked alongside the connection settings.
        management = flasharray.DirectoryServiceManagement(user_login_attribute='',
                                                           user_object_class='')
        directory_service = flasharray.DirectoryService(uris=[''],
                                                        base_dn='',
                                                        bind_user='',
                                                        bind_password='',
                                                        enabled=False,
                                                        services=module.params['dstype'],
                                                        management=management)
    else:
        directory_service = flasharray.DirectoryService(uris=[''],
                                                        base_dn='',
                                                        bind_user='',
                                                        bind_password='',
                                                        enabled=False,
                                                        services=module.params['dstype'])
    if not module.check_mode:
        res = array.patch_directory_services(names=[module.params['dstype']],
                                             directory_service=directory_service)
        changed = True
        if res.status_code != 200:
            module.fail_json(msg='Delete {0} Directory Service failed. Error message: {1}'.format(module.params['dstype'],
                                                                                                  res.errors[0].message))
    module.exit_json(changed=changed)
+
+
def create_ds(module, array):
    """Create Directory Service.

    Configures a new directory service from scratch. Requires
    bind_password, bind_user, base_dn and uri. On role-capable arrays
    (REST API 1.16+) at least one directory service role must already
    exist before the service can be enabled; on older arrays at least
    one of ro_group/sa_group/aa_group must be supplied instead.
    """
    changed = True
    if not module.check_mode:
        if None in (module.params['bind_password'], module.params['bind_user'], module.params['base_dn'], module.params['uri']):
            module.fail_json(msg="Parameters \'bind_password\', \'bind_user\', \'base_dn\' and \'uri\' are all required")
        api_version = array._list_available_rest_versions()
        if DS_ROLE_REQUIRED_API_VERSION in api_version:
            try:
                array.set_directory_service(uri=module.params['uri'],
                                            base_dn=module.params['base_dn'],
                                            bind_user=module.params['bind_user'],
                                            bind_password=module.params['bind_password'])
                # Enabling requires at least one role with a group_base set.
                roles = array.list_directory_service_roles()
                enough_roles = False
                for role in range(0, len(roles)):
                    if roles[role]["group_base"]:
                        enough_roles = True
                if enough_roles:
                    array.set_directory_service(enabled=module.params['enable'])
                else:
                    module.fail_json(msg='Cannot enable directory service - please create a directory service role')
            except Exception:
                module.fail_json(msg='Create Directory Service failed: Check configuration')
        else:
            # Legacy arrays carry the group-to-permission mapping directly on
            # the directory service; at least one group must be provided.
            groups_rule = [not module.params['ro_group'],
                           not module.params['sa_group'],
                           not module.params['aa_group']]

            if all(groups_rule):
                module.fail_json(msg='At least one group must be configured')
            try:
                array.set_directory_service(uri=module.params['uri'],
                                            base_dn=module.params['base_dn'],
                                            group_base=module.params['group_base'],
                                            bind_user=module.params['bind_user'],
                                            bind_password=module.params['bind_password'],
                                            readonly_group=module.params['ro_group'],
                                            storage_admin_group=module.params['sa_group'],
                                            array_admin_group=module.params['aa_group'])
                array.set_directory_service(enabled=module.params['enable'])
            except Exception:
                module.fail_json(msg='Create Directory Service failed: Check configuration')
    module.exit_json(changed=changed)
+
+
def update_ds_v6(module, array):
    """Update Directory Service (Purity//FA 6.x / REST 2.x arrays).

    Diffs the requested settings against the current configuration of
    the selected management/data directory service and PATCHes only
    when something actually differs. A new or changed bind_user (or an
    unconfigured service) requires bind_password to be supplied.
    """
    changed = False
    ds_change = False
    password_required = False
    # Fetch the single directory service entry matching the requested type.
    dirserv = list(array.get_directory_services(filter='name=\'' + module.params['dstype'] + '\'').items)[0]
    current_ds = dirserv
    # An unconfigured service (no URIs) or an explicitly supplied password
    # forces the password to be sent with the PATCH.
    if current_ds.uris is None or module.params['bind_password'] is not None:
        password_required = True
    if current_ds.uris != module.params['uri']:
        uris = module.params['uri']
        ds_change = True
    else:
        uris = current_ds.uris
    # Attributes may be absent on an unconfigured service object.
    try:
        base_dn = current_ds.base_dn
    except AttributeError:
        base_dn = ''
    try:
        bind_user = current_ds.bind_user
    except AttributeError:
        bind_user = ''
    if module.params['base_dn'] != '' and module.params['base_dn'] != base_dn:
        base_dn = module.params['base_dn']
        ds_change = True
    if module.params['bind_user'] != '' and module.params['bind_user'] != bind_user:
        bind_user = module.params['bind_user']
        # A changed bind user invalidates the stored password.
        password_required = True
        ds_change = True
    if module.params['bind_password'] is not None:
        bind_password = module.params['bind_password']
        ds_change = True
    if module.params['enable'] != current_ds.enabled:
        ds_change = True
    if password_required and not module.params['bind_password']:
        module.fail_json(msg='\'bind_password\' must be provided with new/changed \'bind_user\'')
    if module.params['dstype'] == 'management':
        # Management services carry extra user-mapping attributes.
        try:
            user_login = current_ds.management.user_login_attribute
        except AttributeError:
            user_login = ''
        try:
            user_object = current_ds.management.user_object_class
        except AttributeError:
            user_object = ''
        if module.params['user_object'] is not None and user_object != module.params['user_object']:
            user_object = module.params['user_object']
            ds_change = True
        if module.params['user_login'] is not None and user_login != module.params['user_login']:
            user_login = module.params['user_login']
            ds_change = True
        management = flasharray.DirectoryServiceManagement(user_login_attribute=user_login,
                                                           user_object_class=user_object)
        # Only include bind_password in the payload when it must change;
        # sending it unnecessarily would overwrite the stored secret.
        if password_required:
            directory_service = flasharray.DirectoryService(uris=uris,
                                                            base_dn=base_dn,
                                                            bind_user=bind_user,
                                                            bind_password=bind_password,
                                                            enabled=module.params['enable'],
                                                            services=module.params['dstype'],
                                                            management=management)
        else:
            directory_service = flasharray.DirectoryService(uris=uris,
                                                            base_dn=base_dn,
                                                            bind_user=bind_user,
                                                            enabled=module.params['enable'],
                                                            services=module.params['dstype'],
                                                            management=management)
    else:
        if password_required:
            directory_service = flasharray.DirectoryService(uris=uris,
                                                            base_dn=base_dn,
                                                            bind_user=bind_user,
                                                            bind_password=bind_password,
                                                            enabled=module.params['enable'],
                                                            services=module.params['dstype'])
        else:
            directory_service = flasharray.DirectoryService(uris=uris,
                                                            base_dn=base_dn,
                                                            bind_user=bind_user,
                                                            enabled=module.params['enable'],
                                                            services=module.params['dstype'])
    if ds_change:
        changed = True
        if not module.check_mode:
            res = array.patch_directory_services(names=[module.params['dstype']],
                                                 directory_service=directory_service)
            if res.status_code != 200:
                module.fail_json(msg='{0} Directory Service failed. Error message: {1}'.format(module.params['dstype'].capitalize(),
                                                                                              res.errors[0].message))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: configure, update or remove the FlashArray directory service.

    Routes to the REST 2.x (Purity//FA 6.x) code path when available,
    otherwise falls back to the legacy 1.x API handlers.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        uri=dict(type='list', elements='str'),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        enable=dict(type='bool', default=False),
        bind_password=dict(type='str', no_log=True),
        bind_user=dict(type='str'),
        base_dn=dict(type='str'),
        group_base=dict(type='str'),
        user_login=dict(type='str'),
        user_object=dict(type='str'),
        ro_group=dict(type='str'),
        sa_group=dict(type='str'),
        aa_group=dict(type='str'),
        dstype=dict(type='str', default='management', choices=['management', 'data']),
    ))

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    array = get_system(module)
    api_version = array._list_available_rest_versions()

    if not HAS_PURESTORAGE:
        # BUG FIX: corrected the garbled error text ("required to for this module").
        module.fail_json(msg='py-pure-client sdk is required for this module')

    if FAFILES_API_VERSION in api_version:
        arrayv6 = get_array(module)

    if module.params['dstype'] == 'data':
        if FAFILES_API_VERSION in api_version:
            # A single directory service entry means FA-Files is not enabled,
            # so there is no 'data' directory service to manage.
            if len(list(arrayv6.get_directory_services().items)) == 1:
                module.warn('FA-Files is not enabled - ignoring')
                module.exit_json(changed=False)
        else:
            module.fail_json(msg='\'data\' directory service requires Purity//FA 6.0.0 or higher')

    state = module.params['state']
    ds_exists = False
    if FAFILES_API_VERSION in api_version:
        # REST 2.x path: PATCH-based delete/update of the named service.
        dirserv = list(arrayv6.get_directory_services(filter='name=\'' + module.params['dstype'] + '\'').items)[0]
        if state == 'absent' and dirserv.uris != []:
            delete_ds_v6(module, arrayv6)
        else:
            update_ds_v6(module, arrayv6)
    else:
        # Legacy 1.x path: only whole-service create/delete/enable/disable.
        dirserv = array.get_directory_service()
        ds_enabled = dirserv['enabled']
        if dirserv['base_dn']:
            ds_exists = True

        if state == 'absent' and ds_exists:
            delete_ds(module, array)
        elif ds_exists and module.params['enable'] and ds_enabled:
            # BUG FIX: product name spelling corrected ("Purity//Fa" -> "Purity//FA").
            module.warn('To update an existing directory service configuration in Purity//FA 5.x, please delete and recreate')
            module.exit_json(changed=False)
        elif ds_exists and not module.params['enable'] and ds_enabled:
            disable_ds(module, array)
        elif ds_exists and module.params['enable'] and not ds_enabled:
            enable_ds(module, array)
        elif not ds_exists and state == 'present':
            create_ds(module, array)
        else:
            module.exit_json(changed=False)

    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py
new file mode 100644
index 00000000..35cf4ef5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_dsrole
+version_added: '1.0.0'
+short_description: Configure FlashArray Directory Service Roles
+description:
+- Set or erase directory services role configurations.
+- Only available for FlashArray running Purity 5.2.0 or higher
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete directory service role
+ type: str
+ default: present
+ choices: [ absent, present ]
+ role:
+ description:
+ - The directory service role to work on
+ type: str
+ required: true
+ choices: [ array_admin, ops_admin, readonly, storage_admin ]
+ group_base:
+ type: str
+ description:
+ - Specifies where the configured group is located in the directory
+ tree. This field consists of Organizational Units (OUs) that combine
+ with the base DN attribute and the configured group CNs to complete
+ the full Distinguished Name of the groups. The group base should
+ specify OU= for each OU and multiple OUs should be separated by commas.
+ The order of OUs is important and should get larger in scope from left
+ to right.
+ - Each OU should not exceed 64 characters in length.
+ group:
+ type: str
+ description:
+ - Sets the common Name (CN) of the configured directory service group
+ containing users for the FlashBlade. This name should be just the
+ Common Name of the group without the CN= specifier.
+ - Common Names should not exceed 64 characters in length.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Delete existing array_admin directory service role
+ purefa_dsrole:
+ role: array_admin
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create array_admin directory service role
+ purefa_dsrole:
+ role: array_admin
+ group_base: "OU=PureGroups,OU=SANManagers"
+ group: pureadmins
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update ops_admin directory service role
+ purefa_dsrole:
+ role: ops_admin
+ group_base: "OU=PureGroups"
+ group: opsgroup
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
def update_role(module, array):
    """Update Directory Service Role.

    Only issues the API call when the role's current group or
    group_base differs from the requested values; otherwise reports
    changed=False. In check mode the change is always reported.
    """
    changed = True
    if not module.check_mode:
        changed = False
        role = array.list_directory_service_roles(names=[module.params['role']])
        if role[0]['group_base'] != module.params['group_base'] or role[0]['group'] != module.params['group']:
            try:
                array.set_directory_service_roles(names=[module.params['role']],
                                                  group_base=module.params['group_base'],
                                                  group=module.params['group'])
                changed = True
            except Exception:
                module.fail_json(msg='Update Directory Service Role {0} failed'.format(module.params['role']))
    module.exit_json(changed=changed)
+
+
def delete_role(module, array):
    """Remove a directory service role by blanking its group settings."""
    role_name = module.params['role']
    if not module.check_mode:
        try:
            array.set_directory_service_roles(names=[role_name],
                                              group_base='',
                                              group='')
        except Exception:
            module.fail_json(msg='Delete Directory Service Role {0} failed'.format(role_name))
    module.exit_json(changed=True)
+
+
def create_role(module, array):
    """Configure a new directory service role group mapping."""
    role_name = module.params['role']
    if not module.check_mode:
        try:
            array.set_directory_service_roles(names=[role_name],
                                              group_base=module.params['group_base'],
                                              group=module.params['group'])
        except Exception:
            module.fail_json(msg='Create Directory Service Role {0} failed'.format(role_name))
    module.exit_json(changed=True)
+
+
def main():
    """Entry point: create, update or delete a directory service role."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        role=dict(required=True, type='str', choices=['array_admin', 'ops_admin', 'readonly', 'storage_admin']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        group_base=dict(type='str'),
        group=dict(type='str'),
    ))

    module = AnsibleModule(argument_spec,
                           required_together=[['group', 'group_base']],
                           supports_check_mode=True)

    array = get_system(module)
    # A role counts as configured once it has a group assigned.
    role = array.list_directory_service_roles(names=[module.params['role']])
    role_configured = role[0]['group'] is not None

    if module.params['state'] == 'absent':
        if role_configured:
            delete_role(module, array)
        else:
            module.exit_json(changed=False)
    elif role_configured:
        update_role(module, array)
    else:
        create_role(module, array)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py
new file mode 100644
index 00000000..209efa5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_endpoint
+short_description: Manage VMware protocol-endpoints on Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+- Create, delete or eradicate an endpoint on a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the endpoint.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the endpoint should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the endpoint on delete or leave in trash.
+ type: bool
+ default: 'no'
+ rename:
+ description:
+ - Value to rename the specified endpoint to.
+ - Rename only applies to the container the current endpoint is in.
+ type: str
+ host:
+ description:
+ - name of host to attach endpoint to
+ type: str
+ hgroup:
+ description:
+ - name of hostgroup to attach endpoint to
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create new endpoint named foo
+ purefa_endpoint:
+ name: test-endpoint
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate endpoint named foo
+ purefa_endpoint:
+ name: foo
+ eradicate: yes
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Rename endpoint foo to bar
+ purefa_endpoint:
+ name: foo
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+volume:
+ description: A dictionary describing the changed volume. Only some
+ attributes below will be returned with various actions.
+ type: dict
+ returned: success
+ contains:
+ source:
+ description: Volume name of source volume used for volume copy
+ type: str
+ serial:
+ description: Volume serial number
+ type: str
+ sample: '361019ECACE43D83000120A4'
+ created:
+ description: Volume creation time
+ type: str
+ sample: '2019-03-13T22:49:24Z'
+ name:
+ description: Volume name
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+VGROUPS_API_VERSION = "1.13"
+
+
def get_volume(volume, array):
    """Look up *volume* on the array, including pending (destroyed) volumes.

    Returns the volume record, or None when the lookup fails.
    """
    try:
        found = array.get_volume(volume, pending=True)
    except Exception:
        found = None
    return found
+
+
def get_target(volume, array):
    """Return the rename-target volume record (pending included), or None when absent."""
    result = None
    try:
        result = array.get_volume(volume, pending=True)
    except Exception:
        pass
    return result
+
+
def get_endpoint(vol, array):
    """Return the protocol endpoint named *vol*, or None if the lookup fails."""
    try:
        endpoint = array.get_volume(vol, protocol_endpoint=True)
    except Exception:
        return None
    return endpoint
+
+
def get_destroyed_endpoint(vol, array):
    """Return True when *vol* is a destroyed (eradication-pending) endpoint.

    A non-empty 'time_remaining' marks a destroyed-but-recoverable
    endpoint. Returns None when the lookup itself fails.
    """
    try:
        pending = array.get_volume(vol, protocol_endpoint=True, pending=True)
        return pending['time_remaining'] != ''
    except Exception:
        return None
+
+
def check_vgroup(module, array):
    """Return True when the volume group named in the endpoint path exists on the array."""
    # The vgroup is the path component before the first '/'.
    vg_name = module.params["name"].split("/")[0]
    try:
        vgs = array.list_vgroups()
    except Exception:
        module.fail_json(msg="Failed to get volume groups list. Check array.")
    return any(vgroup['name'] == vg_name for vgroup in vgs)
+
+
def create_endpoint(module, array):
    """Create Endpoint.

    Creates a conglomerate (protocol-endpoint) volume and optionally
    connects it to a host or host group. A name containing '/' places
    the endpoint in a volume group, which must already exist.
    """
    changed = True
    volfact = []
    if not module.check_mode:
        if "/" in module.params['name'] and not check_vgroup(module, array):
            module.fail_json(msg="Failed to create endpoint {0}. Volume Group does not exist.".format(module.params["name"]))
        try:
            # Protocol endpoints are created as conglomerate volumes.
            volfact = array.create_conglomerate_volume(module.params['name'])
        except Exception:
            module.fail_json(msg='Endpoint {0} creation failed.'.format(module.params['name']))
        # 'host' and 'hgroup' are mutually exclusive (enforced in main()).
        if module.params['host']:
            try:
                array.connect_host(module.params['host'], module.params['name'])
            except Exception:
                module.fail_json(msg='Failed to attach endpoint {0} to host {1}.'.format(module.params['name'], module.params['host']))
        if module.params['hgroup']:
            try:
                array.connect_hgroup(module.params['hgroup'], module.params['name'])
            except Exception:
                module.fail_json(msg='Failed to attach endpoint {0} to hostgroup {1}.'.format(module.params['name'], module.params['hgroup']))

    module.exit_json(changed=changed, volume=volfact)
+
+
def rename_endpoint(module, array):
    """Rename endpoint within a container, ie vgroup or local array.

    The target name must be bare (no vgroup or pod qualifier); when the
    source endpoint lives in a vgroup the target stays in that vgroup.
    Fails if a live or destroyed volume already holds the target name.
    """
    changed = True
    volfact = []
    if not module.check_mode:
        changed = False
        target_name = module.params['rename']
        # '::' would indicate a pod-qualified name, '/' a vgroup-qualified one.
        if "/" in module.params['rename'] or "::" in module.params['rename']:
            module.fail_json(msg="Target endpoint cannot include a container name")
        if "/" in module.params['name']:
            # Re-qualify the bare target with the source's vgroup.
            vgroup_name = module.params["name"].split("/")[0]
            target_name = vgroup_name + "/" + module.params['rename']
        if get_target(target_name, array) or get_destroyed_endpoint(target_name, array):
            module.fail_json(msg="Target endpoint {0} already exists.".format(target_name))
        else:
            try:
                volfact = array.rename_volume(module.params["name"], target_name)
                changed = True
            except Exception:
                module.fail_json(msg='Rename endpoint {0} to {1} failed.'.format(module.params["name"], module.params['rename']))

    module.exit_json(changed=changed, volume=volfact)
+
+
def delete_endpoint(module, array):
    """ Delete Endpoint.

    Destroys the endpoint (moving it to the eradication-pending state)
    and, when 'eradicate' is set, immediately eradicates it as well.
    """
    changed = True
    volfact = []
    if not module.check_mode:
        try:

            array.destroy_volume(module.params['name'])
            if module.params['eradicate']:
                try:
                    volfact = array.eradicate_volume(module.params['name'])
                except Exception:
                    module.fail_json(msg='Eradicate endpoint {0} failed.'.format(module.params['name']))
        except Exception:
            module.fail_json(msg='Delete endpoint {0} failed.'.format(module.params['name']))
    module.exit_json(changed=changed, volume=volfact)
+
+
def recover_endpoint(module, array):
    """Recover a destroyed endpoint from the eradication-pending state."""
    endpoint_name = module.params['name']
    volfact = []
    if not module.check_mode:
        try:
            array.recover_volume(endpoint_name)
        except Exception:
            module.fail_json(msg='Recovery of endpoint {0} failed'.format(endpoint_name))
    module.exit_json(changed=True, volume=volfact)
+
+
def eradicate_endpoint(module, array):
    """Permanently eradicate a destroyed endpoint when 'eradicate' was requested."""
    endpoint_name = module.params['name']
    volfact = []
    if not module.check_mode and module.params['eradicate']:
        try:
            array.eradicate_volume(endpoint_name, protocol_endpoint=True)
        except Exception:
            module.fail_json(msg='Eradication of endpoint {0} failed'.format(endpoint_name))
    module.exit_json(changed=True, volume=volfact)
+
+
def main():
    """Entry point: create, rename, recover, delete or eradicate a protocol endpoint."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        rename=dict(type='str'),
        host=dict(type='str'),
        hgroup=dict(type='str'),
        eradicate=dict(type='bool', default=False),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    ))

    mutually_exclusive = [['rename', 'eradicate'],
                          ['host', 'hgroup']]

    module = AnsibleModule(argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    state = module.params['state']
    destroyed = False
    array = get_system(module)
    api_version = array._list_available_rest_versions()
    if VGROUPS_API_VERSION not in api_version:
        module.fail_json(msg='Purity version does not support endpoints. Please contact support')
    # A name that resolves as an ordinary volume must not be managed here.
    volume = get_volume(module.params['name'], array)
    if volume:
        # BUG FIX: corrected grammar of the user-facing error ("is an true volume").
        module.fail_json(msg='Volume {0} is a true volume. Please use the purefa_volume module'.format(module.params['name']))
    endpoint = get_endpoint(module.params['name'], array)
    if not endpoint:
        destroyed = get_destroyed_endpoint(module.params['name'], array)

    if state == 'present' and not endpoint and not destroyed:
        create_endpoint(module, array)
    elif state == 'present' and endpoint and module.params['rename']:
        rename_endpoint(module, array)
    elif state == 'present' and destroyed:
        recover_endpoint(module, array)
    elif state == 'absent' and endpoint:
        delete_endpoint(module, array)
    elif state == 'absent' and destroyed:
        eradicate_endpoint(module, array)
    elif state == 'absent' and not endpoint and not volume:
        module.exit_json(changed=False)

    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py
new file mode 100644
index 00000000..64b74e21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_eula
+version_added: '1.0.0'
+short_description: Sign Pure Storage FlashArray EULA
+description:
+- Sign the FlashArray EULA for Day 0 config, or change signatory.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ company:
+ description:
+ - Full legal name of the entity.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+ name:
+ description:
+ - Full legal name of the individual at the company who has the authority to accept the terms of the agreement.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+ title:
+ description:
+ - Individual's job title at the company.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Sign EULA for FlashArray
+ purefa_eula:
+ company: "ACME Storage, Inc."
+ name: "Fred Bloggs"
+ title: "Storage Manager"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+EULA_API_VERSION = '1.17'
+
+
+def set_eula(module, array):
+ """Sign EULA"""
+ changed = True
+ if not module.check_mode:
+ try:
+ current_eula = array.get_eula()
+ except Exception:
+ module.fail_json(msg='Failed to get current EULA')
+ if current_eula['company'] != module.params['company'] or \
+ current_eula['title'] != module.params['title'] or \
+ current_eula['name'] != module.params['name']:
+ try:
+ array.set_eula(company=module.params['company'],
+ title=module.params['title'],
+ name=module.params['name']
+ )
+ except Exception:
+ module.fail_json(msg='Signing EULA failed')
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ company=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ title=dict(type='str', required=True),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if EULA_API_VERSION in api_version:
+ set_eula(module, array)
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py
new file mode 100644
index 00000000..233ec548
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_export
+version_added: '1.5.0'
+short_description: Manage FlashArray File System Exports
+description:
+- Create/Delete FlashArray File Systems Exports
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the export
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the export should exist or not.
+ - You must specify an NFS or SMB policy, or both on creation and deletion.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ filesystem:
+ description:
+ - Name of the filesystem the export applies to
+ type: str
+ required: true
+ directory:
+ description:
+ - Name of the managed directory in the file system the export applies to
+ type: str
+ required: true
+ nfs_policy:
+ description:
+ - Name of NFS Policy to apply to the export
+ type: str
+ smb_policy:
+ description:
+ - Name of SMB Policy to apply to the export
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create NFS and SMB exports for directory foo in filesystem bar
+ purefa_export:
+ name: export1
+ filesystem: bar
+ directory: foo
+ nfs_policy: nfs-example
+ smb_policy: smb-example
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete NFS export for directory foo in filesystem bar
+ purefa_export:
+ name: export1
+ filesystem: bar
+ directory: foo
+ nfs_policy: nfs-example
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, get_array, purefa_argument_spec
+
+MIN_REQUIRED_API_VERSION = '2.3'
+
+
+def delete_export(module, array):
+ """Delete a file system export"""
+ changed = True
+ if not module.check_mode:
+ all_policies = []
+ directory = module.params['filesystem'] + ":" + module.params['directory']
+ if not module.params['nfs_policy'] and not module.params['smb_policy']:
+ module.fail_json(msg="At least one policy must be provided")
+ if module.params['nfs_policy']:
+ policy_exists = bool(array.get_directory_exports(export_names=[module.params['name']],
+ policy_names=[module.params['nfs_policy']],
+ directory_names=[directory]).status_code == 200)
+ if policy_exists:
+ all_policies.append(module.params['nfs_policy'])
+ if module.params['smb_policy']:
+ policy_exists = bool(array.get_directory_exports(export_names=[module.params['name']],
+ policy_names=[module.params['smb_policy']],
+ directory_names=[directory]).status_code == 200)
+ if policy_exists:
+ all_policies.append(module.params['smb_policy'])
+ if all_policies:
+ res = array.delete_directory_exports(export_names=[module.params['name']], policy_names=all_policies)
+ if res.status_code != 200:
+ module.fail_json(msg="Failed to delete file system export {0}. {1}".format(module.params['name'],
+ res.errors[0].message))
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def create_export(module, array):
+ """Create a file system export"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ if not module.params['nfs_policy'] and not module.params['smb_policy']:
+ module.fail_json(msg="At least one policy must be provided")
+ all_policies = []
+ if module.params['nfs_policy']:
+ if bool(array.get_policies_nfs(names=[module.params['nfs_policy']]).status_code != 200):
+ module.fail_json(msg="NFS Policy {0} does not exist.".format(module.params['nfs_policy']))
+ if bool(array.get_directory_exports(export_names=[module.params['name']], policy_names=[module.params['nfs_policy']]).status_code != 200):
+ all_policies.append(module.params['nfs_policy'])
+ if module.params['smb_policy']:
+ if bool(array.get_policies_smb(names=[module.params['smb_policy']]).status_code != 200):
+ module.fail_json(msg="SMB Policy {0} does not exist.".format(module.params['smb_policy']))
+ if bool(array.get_directory_exports(export_names=[module.params['name']], policy_names=[module.params['smb_policy']]).status_code != 200):
+ all_policies.append(module.params['smb_policy'])
+ if all_policies:
+ export = flasharray.DirectoryExportPost(export_name=module.params['name'])
+ res = array.post_directory_exports(directory_names=[module.params['filesystem'] + ":" + module.params['directory']],
+ exports=export,
+ policy_names=all_policies)
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(msg="Failed to create file system exports for {0}:{1}. Error: {2}".format(module.params['filesystem'],
+ module.params['directory'],
+ res.errors[0].message))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ filesystem=dict(type='str', required=True),
+ directory=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ nfs_policy=dict(type='str'),
+ smb_policy=dict(type='str'),
+ ))
+
+ required_if = [['state', 'present', ['filesystem', 'directory']]]
+ module = AnsibleModule(argument_spec,
+ required_if=required_if,
+ supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg='py-pure-client sdk is required for this module')
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg='FlashArray REST version not supported. '
+ 'Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+ array = get_array(module)
+ state = module.params['state']
+
+ exists = bool(array.get_directory_exports(export_names=[module.params['name']]).status_code == 200)
+
+ if state == 'present':
+ create_export(module, array)
+ elif state == 'absent' and exists:
+ delete_export(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py
new file mode 100644
index 00000000..dcb12fbe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_fs
+version_added: '1.5.0'
+short_description: Manage FlashArray File Systems
+description:
+- Create/Delete FlashArray File Systems
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the file system
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the file system should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the file system on delete or leave in trash.
+ type: bool
+ default: false
+ rename:
+ description:
+ - Value to rename the specified file system to
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create file system foo
+ purefa_fs:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete file system foo
+ purefa_fs:
+ name: foo
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Recover deleted file system foo
+ purefa_fs:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Eradicate file system foo
+ purefa_fs:
+ name: foo
+ state: absent
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, get_array, purefa_argument_spec
+
+MIN_REQUIRED_API_VERSION = '2.2'
+
+
+def delete_fs(module, array):
+ """Delete a file system"""
+ changed = True
+ if not module.check_mode:
+ try:
+ file_system = flasharray.FileSystemPatch(destroyed=True)
+ array.patch_file_systems(names=[module.params['name']], file_system=file_system)
+ except Exception:
+ module.fail_json(msg="Failed to delete file system {0}".format(module.params['name']))
+ if module.params['eradicate']:
+ try:
+ array.delete_file_systems(names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg="Eradication of file system {0} failed".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def recover_fs(module, array):
+ """Recover a file system"""
+ changed = True
+ if not module.check_mode:
+ try:
+ file_system = flasharray.FileSystemPatch(destroyed=False)
+ array.patch_file_systems(names=[module.params['name']], file_system=file_system)
+ except Exception:
+ module.fail_json(msg="Failed to recover file system {0}".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def eradicate_fs(module, array):
+ """Eradicate a file system"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.delete_file_systems(names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg="Failed to eradicate file system {0}".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def rename_fs(module, array):
+ """Rename a file system"""
+ changed = True
+ if not module.check_mode:
+ try:
+ target = list(array.get_file_systems(names=[module.params['rename']]).items)[0]
+ except Exception:
+ target = None
+ if not target:
+ try:
+ file_system = flasharray.FileSystemPatch(name=module.params['rename'])
+ array.patch_file_systems(names=[module.params['name']], file_system=file_system)
+ except Exception:
+ module.fail_json(msg="Failed to rename file system {0}".format(module.params['name']))
+ else:
+ module.fail_json(msg="Target file system {0} already exists".format(module.params['rename']))
+ module.exit_json(changed=changed)
+
+
+def create_fs(module, array):
+ """Create a file system"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.post_file_systems(names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg="Failed to create file system {0}".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ eradicate=dict(type='bool', default=False),
+ name=dict(type='str', required=True),
+ rename=dict(type='str'),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg='py-pure-client sdk is required for this module')
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg='FlashArray REST version not supported. '
+ 'Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+ array = get_array(module)
+ state = module.params['state']
+
+ try:
+ filesystem = list(array.get_file_systems(names=[module.params['name']]).items)[0]
+ exists = True
+ except Exception:
+ exists = False
+
+ if state == 'present' and not exists:
+ create_fs(module, array)
+ elif state == "present" and exists and module.params['rename'] and not filesystem.destroyed:
+ rename_fs(module, array)
+ elif state == 'present' and exists and filesystem.destroyed and not module.params['rename']:
+ recover_fs(module, array)
+ elif state == 'absent' and exists and not filesystem.destroyed:
+ delete_fs(module, array)
+ elif state == 'absent' and exists and module.params['eradicate'] and filesystem.destroyed:
+ eradicate_fs(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py
new file mode 100644
index 00000000..5be2cdb6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_hg
+version_added: '1.0.0'
+short_description: Manage hostgroups on Pure Storage FlashArrays
+description:
+- Create, delete or modify hostgroups on Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ hostgroup:
+ description:
+ - The name of the hostgroup.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the hostgroup should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ host:
+ type: list
+ elements: str
+ description:
+ - List of existing hosts to add to hostgroup.
+ - Note that hostnames are case-sensitive however FlashArray hostnames are unique
+ and ignore case - you cannot have I(hosta) and I(hostA)
+ volume:
+ type: list
+ elements: str
+ description:
+ - List of existing volumes to add to hostgroup.
+ - Note that volumes are case-sensitive however FlashArray volume names are unique
+ and ignore case - you cannot have I(volumea) and I(volumeA)
+ lun:
+ description:
+ - LUN ID to assign to volume for hostgroup. Must be unique.
+ - Only applicable when only one volume is specified for connection.
+ - If not provided the ID will be automatically assigned.
+ - Range for LUN ID is 1 to 4095.
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create empty hostgroup
+ purefa_hg:
+ hostgroup: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add hosts and volumes to existing or new hostgroup
+ purefa_hg:
+ hostgroup: foo
+ host:
+ - host1
+ - host2
+ volume:
+ - vol1
+ - vol2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete hosts and volumes from hostgroup
+ purefa_hg:
+ hostgroup: foo
+ host:
+ - host1
+ - host2
+ volume:
+ - vol1
+ - vol2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+# This will disconnect all hosts and volumes in the hostgroup
+- name: Delete hostgroup
+ purefa_hg:
+ hostgroup: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Create host group with hosts and volumes
+ purefa_hg:
+ hostgroup: bar
+ host:
+ - host1
+ - host2
+ volume:
+ - vol1
+ - vol2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def get_hostgroup(module, array):
+
+ hostgroup = None
+
+ for host in array.list_hgroups():
+ if host["name"] == module.params['hostgroup']:
+ hostgroup = host
+ break
+
+ return hostgroup
+
+
+def make_hostgroup(module, array):
+
+ changed = True
+ if not module.check_mode:
+ try:
+ array.create_hgroup(module.params['hostgroup'])
+ except Exception:
+ module.fail_json(msg='Failed to create hostgroup {0}'.format(module.params['hostgroup']))
+ if module.params['host']:
+ array.set_hgroup(module.params['hostgroup'], hostlist=module.params['host'])
+ if module.params['volume']:
+ if len(module.params['volume']) == 1 and module.params['lun']:
+ try:
+ array.connect_hgroup(module.params['hostgroup'], module.params['volume'][0], lun=module.params['lun'])
+ except Exception:
+ module.fail_json(msg="Failed to add volume {0} with LUN ID {1}".format(module.params['volume'][0], module.params['lun']))
+ else:
+ for vol in module.params['volume']:
+ try:
+ array.connect_hgroup(module.params['hostgroup'], vol)
+ except Exception:
+ module.fail_json(msg='Failed to add volume to hostgroup')
+ module.exit_json(changed=changed)
+
+
+def update_hostgroup(module, array):
+ changed = True
+ if not module.check_mode:
+ changed = False
+ hgroup = get_hostgroup(module, array)
+ volumes = array.list_hgroup_connections(module.params['hostgroup'])
+ if module.params['state'] == "present":
+ if module.params['host']:
+ cased_hosts = [host.lower() for host in module.params['host']]
+ cased_hghosts = [host.lower() for host in hgroup['hosts']]
+ new_hosts = list(set(cased_hosts).difference(cased_hghosts))
+ if new_hosts:
+ try:
+ array.set_hgroup(module.params['hostgroup'], addhostlist=new_hosts)
+ changed = True
+ except Exception:
+ module.fail_json(msg='Failed to add host(s) to hostgroup')
+ if module.params['volume']:
+ if volumes:
+ current_vols = [vol['vol'].lower() for vol in volumes]
+ cased_vols = [vol.lower() for vol in module.params['volume']]
+ new_volumes = list(set(cased_vols).difference(set(current_vols)))
+ if len(new_volumes) == 1 and module.params['lun']:
+ try:
+ array.connect_hgroup(module.params['hostgroup'], new_volumes[0], lun=module.params['lun'])
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to add volume {0} with LUN ID {1}".format(new_volumes[0], module.params['lun']))
+ else:
+ for cvol in new_volumes:
+ try:
+ array.connect_hgroup(module.params['hostgroup'], cvol)
+ changed = True
+ except Exception:
+ module.fail_json(msg='Failed to connect volume {0} to hostgroup {1}.'.format(cvol, module.params['hostgroup']))
+ else:
+ if len(module.params['volume']) == 1 and module.params['lun']:
+ try:
+ array.connect_hgroup(module.params['hostgroup'], module.params['volume'][0], lun=module.params['lun'])
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to add volume {0} with LUN ID {1}".format(module.params['volume'], module.params['lun']))
+ else:
+ for cvol in module.params['volume']:
+ try:
+ array.connect_hgroup(module.params['hostgroup'], cvol)
+ changed = True
+ except Exception:
+ module.fail_json(msg='Failed to connect volume {0} to hostgroup {1}.'.format(cvol, module.params['hostgroup']))
+ else:
+ if module.params['host']:
+ cased_old_hosts = [host.lower() for host in module.params['host']]
+ cased_hosts = [host.lower() for host in hgroup['hosts']]
+ old_hosts = list(set(cased_old_hosts).intersection(cased_hosts))
+ if old_hosts:
+ try:
+ array.set_hgroup(module.params['hostgroup'], remhostlist=old_hosts)
+ changed = True
+ except Exception:
+ module.fail_json(msg='Failed to remove hosts {0} from hostgroup {1}'.format(old_hosts, module.params['hostgroup']))
+ if module.params['volume']:
+ cased_old_vols = [vol.lower() for vol in module.params['volume']]
+ old_volumes = list(set(cased_old_vols).intersection(set([vol['vol'].lower() for vol in volumes])))
+ for cvol in old_volumes:
+ try:
+ array.disconnect_hgroup(module.params['hostgroup'], cvol)
+ changed = True
+ except Exception:
+ module.fail_json(msg='Failed to disconnect volume {0} from hostgroup {1}'.format(cvol, module.params['hostgroup']))
+
+ module.exit_json(changed=changed)
+
+
+def delete_hostgroup(module, array):
+ changed = True
+ if not module.check_mode:
+ try:
+ vols = array.list_hgroup_connections(module.params['hostgroup'])
+ for vol in vols:
+ try:
+ array.disconnect_hgroup(module.params['hostgroup'], vol["vol"])
+ except Exception:
+ module.fail_json(msg='Failed to disconnect volume {0} from hostgroup {1}'.format(vol["vol"], module.params['hostgroup']))
+ host = array.get_hgroup(module.params['hostgroup'])
+ try:
+ array.set_hgroup(module.params['hostgroup'], remhostlist=host['hosts'])
+ try:
+ array.delete_hgroup(module.params['hostgroup'])
+ except Exception:
+ module.fail_json(msg='Failed to delete hostgroup {0}'.format(module.params['hostgroup']))
+ except Exception:
+ module.fail_json(msg='Failed to remove hosts {0} from hostgroup {1}'.format(host['hosts'], module.params['hostgroup']))
+ except Exception:
+ module.fail_json(msg='Failed to get volume connection for hostgroup {0}'.format(module.params['hostgroup']))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ hostgroup=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ host=dict(type='list', elements='str'),
+ lun=dict(type='int'),
+ volume=dict(type='list', elements='str'),
+ ))
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params['state']
+ array = get_system(module)
+ hostgroup = get_hostgroup(module, array)
+
+ if module.params['host']:
+ try:
+ for hst in module.params['host']:
+ array.get_host(hst)
+ except Exception:
+ module.fail_json(msg='Host {0} not found'.format(hst))
+ if module.params['lun'] and module.params['volume'] and len(module.params['volume']) > 1:
+ module.fail_json(msg='LUN ID cannot be specified with multiple volumes.')
+
+ if module.params['lun'] and not 1 <= module.params['lun'] <= 4095:
+ module.fail_json(msg='LUN ID of {0} is out of range (1 to 4095)'.format(module.params['lun']))
+
+ if module.params['volume']:
+ try:
+ for vol in module.params['volume']:
+ array.get_volume(vol)
+ except Exception:
+ module.fail_json(msg='Volume {0} not found'.format(vol))
+
+ if hostgroup and state == 'present':
+ update_hostgroup(module, array)
+ elif hostgroup and module.params['volume'] and state == 'absent':
+ update_hostgroup(module, array)
+ elif hostgroup and module.params['host'] and state == 'absent':
+ update_hostgroup(module, array)
+ elif hostgroup and state == 'absent':
+ delete_hostgroup(module, array)
+ elif hostgroup is None and state == 'absent':
+ module.exit_json(changed=False)
+ else:
+ make_hostgroup(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
new file mode 100644
index 00000000..e9b5e74b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
@@ -0,0 +1,787 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_host
+version_added: '1.0.0'
+short_description: Manage hosts on Pure Storage FlashArrays
+description:
+- Create, delete or modify hosts on Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- If specifying the C(lun) option ensure the host supports the requested value
+options:
+ name:
+ description:
+ - The name of the host.
+ - Note that hostnames are case-sensitive however FlashArray hostnames are unique
+ and ignore case - you cannot have I(hosta) and I(hostA)
+ - Multi-host support available from Purity//FA 6.0.0
+ B(***NOTE***) Manual deletion of individual hosts created
+ using multi-host will cause idempotency to fail
+ - Multi-host support only exists for host creation
+ type: str
+ required: true
+ aliases: [ host ]
+ rename:
+ description:
+ - The name to rename to.
+ - Note that hostnames are case-sensitive however FlashArray hostnames are unique
+ and ignore case - you cannot have I(hosta) and I(hostA)
+ type: str
+ state:
+ description:
+ - Define whether the host should exist or not.
+ - When removing host all connected volumes will be disconnected.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ protocol:
+ description:
+ - Defines the host connection protocol for volumes.
+ type: str
+ default: iscsi
+ choices: [ fc, iscsi, nvme, mixed ]
+ wwns:
+ type: list
+ elements: str
+ description:
+ - List of wwns of the host if protocol is fc or mixed.
+ iqn:
+ type: list
+ elements: str
+ description:
+ - List of IQNs of the host if protocol is iscsi or mixed.
+ nqn:
+ type: list
+ elements: str
+ description:
+ - List of NQNs of the host if protocol is nvme or mixed.
+ volume:
+ type: str
+ description:
+ - Volume name to map to the host.
+ lun:
+ description:
+ - LUN ID to assign to volume for host. Must be unique.
+ - If not provided the ID will be automatically assigned.
+ - Range for LUN ID is 1 to 4095.
+ type: int
+ count:
+ description:
+ - Number of hosts to be created in a multiple host creation
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ start:
+ description:
+ - Number at which to start the multiple host creation index
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ default: 0
+ digits:
+ description:
+ - Number of digits to use for multiple host count. This
+ will pad the index number with zeros where necessary
+ - Only supported from Purity//FA v6.0.0 and higher
+ - Range is between 1 and 10
+ type: int
+ default: 1
+ suffix:
+ description:
+ - Suffix string, if required, for multiple host create
+ - Host names will be formed as I(<name>#I<suffix>), where
+ I(#) is a placeholder for the host index
+ See associated descriptions
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: str
+ personality:
+ type: str
+ description:
+ - Define which operating system the host is. Recommended for
+ ActiveCluster integration.
+ default: ''
+ choices: ['hpux', 'vms', 'aix', 'esxi', 'solaris', 'hitachi-vsp', 'oracle-vm-server', 'delete', '']
+ preferred_array:
+ type: list
+ elements: str
+ description:
+ - List of preferred arrays in an ActiveCluster environment.
+ - To remove existing preferred arrays from the host, specify I(delete).
+ target_user:
+ type: str
+ description:
+ - Sets the target user name for CHAP authentication
+ - Required with I(target_password)
+ - To clear the username/password pair use I(clear) as the password
+ target_password:
+ type: str
+ description:
+ - Sets the target password for CHAP authentication
+ - Password length between 12 and 255 characters
+ - To clear the username/password pair use I(clear) as the password
+ - SETTING A PASSWORD IS NON-IDEMPOTENT
+ host_user:
+ type: str
+ description:
+ - Sets the host user name for CHAP authentication
+ - Required with I(host_password)
+ - To clear the username/password pair use I(clear) as the password
+ host_password:
+ type: str
+ description:
+ - Sets the host password for CHAP authentication
+ - Password length between 12 and 255 characters
+ - To clear the username/password pair use I(clear) as the password
+ - SETTING A PASSWORD IS NON-IDEMPOTENT
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create new AIX host
+ purefa_host:
+ name: foo
+ personality: aix
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create 10 hosts with index starting at 10 but padded with 3 digits
+ purefa_host:
+ name: foo
+ personality: vms
+ suffix: bar
+ count: 10
+ start: 10
+ digits: 3
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Rename host foo to bar
+ purefa_host:
+ name: foo
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete host
+ purefa_host:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Make host bar with wwn ports
+ purefa_host:
+ name: bar
+ protocol: fc
+ wwns:
+ - 00:00:00:00:00:00:00
+ - 11:11:11:11:11:11:11
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Make host bar with iSCSI ports
+ purefa_host:
+ name: bar
+ protocol: iscsi
+ iqn:
+ - iqn.1994-05.com.redhat:7d366003913
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Make host bar with NVMe ports
+ purefa_host:
+ name: bar
+ protocol: nvme
+ nqn:
+ - nqn.2014-08.com.vendor:nvme:nvm-subsystem-sn-d78432
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Make mixed protocol host
+ purefa_host:
+ name: bar
+ protocol: mixed
+ nqn:
+ - nqn.2014-08.com.vendor:nvme:nvm-subsystem-sn-d78432
+ iqn:
+ - iqn.1994-05.com.redhat:7d366003914
+ wwns:
+ - 00:00:00:00:00:00:01
+ - 11:11:11:11:11:11:12
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Map host foo to volume bar as LUN ID 12
+ purefa_host:
+ name: foo
+ volume: bar
+ lun: 12
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disconnect volume bar from host foo
+ purefa_host:
+ name: foo
+ volume: bar
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add preferred arrays to host foo
+ purefa_host:
+ name: foo
+ preferred_array:
+ - array1
+ - array2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete preferred arrays from host foo
+ purefa_host:
+ name: foo
+    preferred_array: delete
+    fa_url: 10.10.10.2
+    api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set CHAP target and host username/password pairs
+ purefa_host:
+ name: foo
+ target_user: user1
+ target_password: passwrodpassword
+ host_user: user2
+ host_password: passwrodpassword
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete CHAP target and host username/password pairs
+ purefa_host:
+ name: foo
+ target_user: user
+ target_password: clear
+ host_user: user
+ host_password: clear
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_array, get_system, purefa_argument_spec
+
+
+AC_REQUIRED_API_VERSION = '1.14'
+PREFERRED_ARRAY_API_VERSION = '1.15'
+NVME_API_VERSION = '1.16'
+MULTI_HOST_VERSION = '2.2'
+
+
+def _is_cbs(array, is_cbs=False):
+ """Is the selected array a Cloud Block Store"""
+ model = array.get(controllers=True)[0]['model']
+ is_cbs = bool('CBS' in model)
+ return is_cbs
+
+
+def _set_host_initiators(module, array):
+ """Set host initiators."""
+ if module.params['protocol'] in ['nvme', 'mixed']:
+ if module.params['nqn']:
+ try:
+ array.set_host(module.params['name'],
+ nqnlist=module.params['nqn'])
+ except Exception:
+ module.fail_json(msg='Setting of NVMe NQN failed.')
+ if module.params['protocol'] in ['iscsi', 'mixed']:
+ if module.params['iqn']:
+ try:
+ array.set_host(module.params['name'],
+ iqnlist=module.params['iqn'])
+ except Exception:
+ module.fail_json(msg='Setting of iSCSI IQN failed.')
+ if module.params['protocol'] in ['fc', 'mixed']:
+ if module.params['wwns']:
+ try:
+ array.set_host(module.params['name'],
+ wwnlist=module.params['wwns'])
+ except Exception:
+ module.fail_json(msg='Setting of FC WWNs failed.')
+
+
+def _update_host_initiators(module, array, answer=False):
+    """Change host initiator if iscsi or nvme or add new FC WWNs.
+
+    Returns True when any of the NQN/IQN/WWN lists was changed on the array.
+    NOTE(review): the NQN/IQN comparisons are plain list comparisons and are
+    therefore order-sensitive -- confirm the array reports them in the same
+    order they were requested.
+    """
+    if module.params['protocol'] in ['nvme', 'mixed']:
+        if module.params['nqn']:
+            current_nqn = array.get_host(module.params['name'])['nqn']
+            if current_nqn != module.params['nqn']:
+                try:
+                    array.set_host(module.params['name'],
+                                   nqnlist=module.params['nqn'])
+                    answer = True
+                except Exception:
+                    module.fail_json(msg='Change of NVMe NQN failed.')
+    if module.params['protocol'] in ['iscsi', 'mixed']:
+        if module.params['iqn']:
+            current_iqn = array.get_host(module.params['name'])['iqn']
+            if current_iqn != module.params['iqn']:
+                try:
+                    array.set_host(module.params['name'],
+                                   iqnlist=module.params['iqn'])
+                    answer = True
+                except Exception:
+                    module.fail_json(msg='Change of iSCSI IQN failed.')
+    if module.params['protocol'] in ['fc', 'mixed']:
+        if module.params['wwns']:
+            # Normalise requested WWNs (strip colons, upper-case) to match the
+            # format the array reports before comparing
+            module.params['wwns'] = [wwn.replace(':', '') for wwn in module.params['wwns']]
+            module.params['wwns'] = [wwn.upper() for wwn in module.params['wwns']]
+            current_wwn = array.get_host(module.params['name'])['wwn']
+            if current_wwn != module.params['wwns']:
+                try:
+                    array.set_host(module.params['name'],
+                                   wwnlist=module.params['wwns'])
+                    answer = True
+                except Exception:
+                    module.fail_json(msg='FC WWN change failed.')
+    return answer
+
+
+def _connect_new_volume(module, array, answer=False):
+ """Connect volume to host"""
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version and module.params['lun']:
+ try:
+ array.connect_host(module.params['name'],
+ module.params['volume'],
+ lun=module.params['lun'])
+ answer = True
+ except Exception:
+ module.fail_json(msg='LUN ID {0} invalid. Check for duplicate LUN IDs.'.format(module.params['lun']))
+ else:
+ array.connect_host(module.params['name'], module.params['volume'])
+ answer = True
+ return answer
+
+
+def _disconnect_volume(module, array, answer=False):
+ """Disconnect volume from host"""
+ try:
+ array.disconnect_host(module.params['name'], module.params['volume'])
+ answer = True
+ except Exception:
+ module.fail_json(msg="Failed to disconnect volume {0}".format(module.params['volume']))
+ return answer
+
+
+def _set_host_personality(module, array):
+ """Set host personality. Only called when supported"""
+ if module.params['personality'] != 'delete':
+ array.set_host(module.params['name'],
+ personality=module.params['personality'])
+ else:
+ array.set_host(module.params['name'], personality='')
+
+
+def _set_preferred_array(module, array):
+ """Set preferred array list. Only called when supported"""
+ if module.params['preferred_array'] != ['delete']:
+ array.set_host(module.params['name'],
+ preferred_array=module.params['preferred_array'])
+ else:
+ array.set_host(module.params['name'], personality='')
+
+
+def _set_chap_security(module, array):
+ """Set CHAP usernames and passwords"""
+ pattern = re.compile("[^ ]{12,255}")
+ if module.params['host_user']:
+ if not pattern.match(module.params['host_password']):
+ module.fail_json(msg='host_password must contain a minimum of 12 and a maximum of 255 characters')
+ try:
+ array.set_host(module.params['name'], host_user=module.params['host_user'],
+ host_password=module.params['host_password'])
+ except Exception:
+ module.params(msg='Failed to set CHAP host username and password')
+ if module.params['target_user']:
+ if not pattern.match(module.params['target_password']):
+ module.fail_json(msg='target_password must contain a minimum of 12 and a maximum of 255 characters')
+ try:
+ array.set_host(module.params['name'], target_user=module.params['target_user'],
+ target_password=module.params['target_password'])
+ except Exception:
+ module.params(msg='Failed to set CHAP target username and password')
+
+
+def _update_chap_security(module, array, answer=False):
+ """Change CHAP usernames and passwords"""
+ pattern = re.compile("[^ ]{12,255}")
+ chap = array.get_host(module.params['name'], chap=True)
+ if module.params['host_user']:
+ if module.params['host_password'] == 'clear':
+ if chap['host_user']:
+ try:
+ array.set_host(module.params['name'], host_user="")
+ answer = True
+ except Exception:
+ module.params(msg='Failed to clear CHAP host username and password')
+ else:
+ if not pattern.match(module.params['host_password']):
+ module.fail_json(msg='host_password must contain a minimum of 12 and a maximum of 255 characters')
+ try:
+ array.set_host(module.params['name'], host_user=module.params['host_user'],
+ host_password=module.params['host_password'])
+ answer = True
+ except Exception:
+ module.params(msg='Failed to set CHAP host username and password')
+ if module.params['target_user']:
+ if module.params['target_password'] == 'clear':
+ if chap['target_user']:
+ try:
+ array.set_host(module.params['name'], target_user="")
+ answer = True
+ except Exception:
+ module.params(msg='Failed to clear CHAP target username and password')
+ else:
+ if not pattern.match(module.params['target_password']):
+ module.fail_json(msg='target_password must contain a minimum of 12 and a maximum of 255 characters')
+ try:
+ array.set_host(module.params['name'], target_user=module.params['target_user'],
+ target_password=module.params['target_password'])
+ answer = True
+ except Exception:
+ module.params(msg='Failed to set CHAP target username and password')
+ return answer
+
+
+def _update_host_personality(module, array, answer=False):
+ """Change host personality. Only called when supported"""
+ personality = array.get_host(module.params['name'], personality=True)['personality']
+ if personality is None and module.params['personality'] != 'delete':
+ try:
+ array.set_host(module.params['name'],
+ personality=module.params['personality'])
+ answer = True
+ except Exception:
+ module.fail_json(msg='Personality setting failed.')
+ if personality is not None:
+ if module.params['personality'] == 'delete':
+ try:
+ array.set_host(module.params['name'], personality='')
+ answer = True
+ except Exception:
+ module.fail_json(msg='Personality deletion failed.')
+ elif personality != module.params['personality']:
+ try:
+ array.set_host(module.params['name'],
+ personality=module.params['personality'])
+ answer = True
+ except Exception:
+ module.fail_json(msg='Personality change failed.')
+ return answer
+
+
+def _update_preferred_array(module, array, answer=False):
+ """Update existing preferred array list. Only called when supported"""
+ preferred_array = array.get_host(module.params['name'], preferred_array=True)['preferred_array']
+ if preferred_array == [] and module.params['preferred_array'] != ['delete']:
+ try:
+ array.set_host(module.params['name'],
+ preferred_array=module.params['preferred_array'])
+ answer = True
+ except Exception:
+ module.fail_json(msg='Preferred array list creation failed for {0}.'.format(module.params['name']))
+ elif preferred_array != []:
+ if module.params['preferred_array'] == ['delete']:
+ try:
+ array.set_host(module.params['name'], preferred_array=[])
+ answer = True
+ except Exception:
+ module.fail_json(msg='Preferred array list deletion failed for {0}.'.format(module.params['name']))
+ elif preferred_array != module.params['preferred_array']:
+ try:
+ array.set_host(module.params['name'],
+ preferred_array=module.params['preferred_array'])
+ answer = True
+ except Exception:
+ module.fail_json(msg='Preferred array list change failed for {0}.'.format(module.params['name']))
+ return answer
+
+
+def get_multi_hosts(module):
+ """Return True is all hosts exist"""
+ hosts = []
+ array = get_array(module)
+ for host_num in range(module.params['start'], module.params['count'] + module.params['start']):
+ hosts.append(module.params['name'] + str(host_num).zfill(module.params['digits']) + module.params['suffix'])
+ return bool(array.get_hosts(names=hosts).status_code == 200)
+
+
+def get_host(module, array):
+ """Return host or None"""
+ host = None
+ for hst in array.list_hosts():
+ if hst["name"].lower() == module.params['name'].lower():
+ module.params['name'] = hst["name"]
+ host = hst
+ break
+ return host
+
+
+def rename_exists(module, array):
+ """Determine if rename target already exists"""
+ exists = False
+ for hst in array.list_hosts():
+ if hst["name"].lower() == module.params['rename'].lower():
+ exists = True
+ break
+ return exists
+
+
+def make_multi_hosts(module):
+    """Create multiple hosts in one request via the REST 2.x client.
+
+    Host names are built as <name><zero-padded index><suffix> for every
+    index in [start, start + count). Exits the module on completion; in
+    check mode no API call is made but changed=True is still reported.
+    """
+    changed = True
+    if not module.check_mode:
+        hosts = []
+        array = get_array(module)
+        for host_num in range(module.params['start'], module.params['count'] + module.params['start']):
+            hosts.append(module.params['name'] + str(host_num).zfill(module.params['digits']) + module.params['suffix'])
+        if module.params['personality']:
+            host = flasharray.HostPost(personality=module.params['personality'])
+        else:
+            host = flasharray.HostPost()
+        # One bulk POST creates every host; any non-200 status is a failure
+        res = array.post_hosts(names=hosts, host=host)
+        if res.status_code != 200:
+            module.fail_json(msg='Multi-Host {0}#{1} creation failed: {2}'.format(module.params['name'],
+                                                                                  module.params['suffix'],
+                                                                                  res.errors[0].message))
+    module.exit_json(changed=changed)
+
+
+def make_host(module, array):
+    """Create a new host and apply any requested initial configuration.
+
+    Configures initiators, personality, preferred arrays, CHAP credentials
+    and an optional volume connection in that order, then exits the module.
+    """
+    changed = True
+    if not module.check_mode:
+        try:
+            array.create_host(module.params['name'])
+        except Exception:
+            module.fail_json(msg='Host {0} creation failed.'.format(module.params['name']))
+        try:
+            _set_host_initiators(module, array)
+            api_version = array._list_available_rest_versions()
+            # Personality and preferred arrays require newer Purity APIs
+            if AC_REQUIRED_API_VERSION in api_version and module.params['personality']:
+                _set_host_personality(module, array)
+            if PREFERRED_ARRAY_API_VERSION in api_version and module.params['preferred_array']:
+                _set_preferred_array(module, array)
+            if module.params['host_user'] or module.params['target_user']:
+                _set_chap_security(module, array)
+            if module.params['volume']:
+                if module.params['lun']:
+                    array.connect_host(module.params['name'],
+                                       module.params['volume'],
+                                       lun=module.params['lun'])
+                else:
+                    array.connect_host(module.params['name'], module.params['volume'])
+        # NOTE(review): this broad except hides which configuration step
+        # failed -- consider surfacing the underlying error
+        except Exception:
+            module.fail_json(msg='Host {0} configuration failed.'.format(module.params['name']))
+    module.exit_json(changed=changed)
+
+
+def update_host(module, array):
+ """Modify a host"""
+ changed = True
+ renamed = False
+ if not module.check_mode:
+ if module.params['state'] == 'present':
+ if module.params['rename']:
+ if not rename_exists(module, array):
+ try:
+ array.rename_host(module.params['name'], module.params['rename'])
+ module.params['name'] = module.params['rename']
+ renamed = True
+ except Exception:
+ module.fail_json(msg='Rename to {0} failed.'.format(module.params['rename']))
+ else:
+ module.warn('Rename failed. Target hostname {0} already exists. '
+ 'Continuing with any other changes...'.format(module.params['rename']))
+ init_changed = vol_changed = pers_changed = pref_changed = chap_changed = False
+ volumes = array.list_host_connections(module.params['name'])
+ if module.params['iqn'] or module.params['wwns'] or module.params['nqn']:
+ init_changed = _update_host_initiators(module, array)
+ if module.params['volume']:
+ current_vols = [vol['vol'] for vol in volumes]
+ if not module.params['volume'] in current_vols:
+ vol_changed = _connect_new_volume(module, array)
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ if module.params['personality']:
+ pers_changed = _update_host_personality(module, array)
+ if PREFERRED_ARRAY_API_VERSION in api_version:
+ if module.params['preferred_array']:
+ pref_changed = _update_preferred_array(module, array)
+ if module.params['target_user'] or module.params['host_user']:
+ chap_changed = _update_chap_security(module, array)
+ changed = init_changed or vol_changed or pers_changed or pref_changed or chap_changed or renamed
+ else:
+ if module.params['volume']:
+ volumes = array.list_host_connections(module.params['name'])
+ current_vols = [vol['vol'] for vol in volumes]
+ if module.params['volume'] in current_vols:
+ vol_changed = _disconnect_volume(module, array)
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def delete_host(module, array):
+ """Delete a host"""
+ changed = True
+ if not module.check_mode:
+ try:
+ hgroup = array.get_host(module.params['name'])['hgroup']
+ if hgroup is not None:
+ array.set_hgroup(hgroup, remhostlist=[module.params['name']])
+ for vol in array.list_host_connections(module.params['name']):
+ array.disconnect_host(module.params['name'], vol["vol"])
+ array.delete_host(module.params['name'])
+ except Exception:
+ module.fail_json(msg='Host {0} deletion failed'.format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str', required=True, aliases=['host']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ protocol=dict(type='str', default='iscsi', choices=['fc', 'iscsi', 'nvme', 'mixed']),
+ nqn=dict(type='list', elements='str'),
+ iqn=dict(type='list', elements='str'),
+ wwns=dict(type='list', elements='str'),
+ host_password=dict(type='str', no_log=True),
+ host_user=dict(type='str'),
+ target_password=dict(type='str', no_log=True),
+ target_user=dict(type='str'),
+ volume=dict(type='str'),
+ rename=dict(type='str'),
+ lun=dict(type='int'),
+ count=dict(type='int'),
+ start=dict(type='int', default=0),
+ digits=dict(type='int', default=1),
+ suffix=dict(type='str'),
+ personality=dict(type='str', default='',
+ choices=['hpux', 'vms', 'aix', 'esxi', 'solaris',
+ 'hitachi-vsp', 'oracle-vm-server', 'delete', '']),
+ preferred_array=dict(type='list', elements='str'),
+ ))
+
+ required_together = [['host_password', 'host_user'],
+ ['target_password', 'target_user']]
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True, required_together=required_together)
+
+ array = get_system(module)
+ module.params['name'] = module.params['name'].lower()
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$")
+ if module.params['rename']:
+ module.params['rename'] = module.params['rename'].lower()
+ if not pattern.match(module.params['rename']):
+ module.fail_json(msg='Rename value {0} does not conform to naming convention'.format(module.params['rename']))
+ if not pattern.match(module.params['name']):
+ module.fail_json(msg='Host name {0} does not conform to naming convention'.format(module.params['name']))
+ if _is_cbs(array):
+ if module.params['wwns'] or module.params['nqn']:
+ module.fail_json(msg='Cloud Block Store only supports iSCSI as a protocol')
+ api_version = array._list_available_rest_versions()
+ if module.params['nqn'] is not None and NVME_API_VERSION not in api_version:
+ module.fail_json(msg='NVMe protocol not supported. Please upgrade your array.')
+ state = module.params['state']
+ if module.params['count']:
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg='py-pure-client sdk is required to support \'count\' parameter')
+ if MULTI_HOST_VERSION not in api_version:
+ module.fail_json(msg='\'count\' parameter is not supported until Purity//FA 6.0.0 or higher')
+ if module.params['digits'] and module.params['digits'] not in range(1, 10):
+ module.fail_json(msg='\'digits\' must be in the range of 1 to 10')
+ if module.params['start'] < 0:
+ module.fail_json(msg='\'start\' must be a positive number')
+ if not pattern.match(module.params['name'] + module.params['suffix']):
+ module.fail_json(msg='Host name pattern {0} does not conform to naming convention'.format(module.params['name'] +
+ '#' +
+ module.params['suffix']))
+ elif len(module.params['name']) + \
+ max(len(str(module.params['count'] + module.params['start'])), module.params['digits']) + \
+ len(module.params['suffix']) > 63:
+ module.fail_json(msg='Host name length exceeds maximum allowed')
+ host = get_multi_hosts(module)
+ if not host and state == 'present':
+ make_multi_hosts(module)
+ else:
+ host = get_host(module, array)
+ if module.params['lun'] and not 1 <= module.params['lun'] <= 4095:
+ module.fail_json(msg='LUN ID of {0} is out of range (1 to 4095)'.format(module.params['lun']))
+ if module.params['volume']:
+ try:
+ array.get_volume(module.params['volume'])
+ except Exception:
+ module.fail_json(msg='Volume {0} not found'.format(module.params['volume']))
+ if module.params['preferred_array']:
+ try:
+ if module.params['preferred_array'] != ['delete']:
+ all_connected_arrays = array.list_array_connections()
+ if not all_connected_arrays:
+ module.fail_json(msg='No target arrays connected to source array - preferred arrays not possible.')
+ else:
+ current_arrays = [array.get()['array_name']]
+ api_version = array._list_available_rest_versions()
+ if NVME_API_VERSION in api_version:
+ for current_array in range(0, len(all_connected_arrays)):
+ if all_connected_arrays[current_array]['type'] == "sync-replication":
+ current_arrays.append(all_connected_arrays[current_array]['array_name'])
+ else:
+ for current_array in range(0, len(all_connected_arrays)):
+ if all_connected_arrays[current_array]['type'] == ["sync-replication"]:
+ current_arrays.append(all_connected_arrays[current_array]['array_name'])
+ for array_to_connect in range(0, len(module.params['preferred_array'])):
+ if module.params['preferred_array'][array_to_connect] not in current_arrays:
+ module.fail_json(msg='Array {0} is not a synchronously connected array.'.format(module.params['preferred_array'][array_to_connect]))
+ except Exception:
+ module.fail_json(msg='Failed to get existing array connections.')
+
+ if host is None and state == 'present' and not module.params['rename']:
+ make_host(module, array)
+ elif host is None and state == 'present' and module.params['rename']:
+ module.exit_json(changed=False)
+ elif host and state == 'present':
+ update_host(module, array)
+ elif host and state == 'absent' and module.params['volume']:
+ update_host(module, array)
+ elif host and state == 'absent':
+ delete_host(module, array)
+ elif host is None and state == 'absent':
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
new file mode 100644
index 00000000..e8f19643
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
@@ -0,0 +1,1445 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_info
+version_added: '1.0.0'
+short_description: Collect information from Pure Storage FlashArray
+description:
+  - Collect information from a Pure Storage FlashArray running the
+ Purity//FA operating system. By default, the module will collect basic
+ information including hosts, host groups, protection
+ groups and volume counts. Additional information can be collected
+    based on the configured set of arguments.
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the information to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
+ admins, volumes, snapshots, pods, replication, vgroups, offload, apps,
+ arrays, certs, kmip, clients, policies, dir_snaps and filesystems.
+ type: list
+ elements: str
+ required: false
+ default: minimum
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: collect default set of information
+ purefa_info:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: array_info
+- name: show default information
+ debug:
+ msg: "{{ array_info['purefa_info']['default'] }}"
+
+- name: collect configuration and capacity information
+ purefa_info:
+ gather_subset:
+ - config
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: array_info
+- name: show configuration information
+ debug:
+ msg: "{{ array_info['purefa_info']['config'] }}"
+
+- name: collect all information
+ purefa_info:
+ gather_subset:
+ - all
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: show all information
+ debug:
+ msg: "{{ array_info['purefa_info'] }}"
+'''
+
+RETURN = r'''
+purefa_info:
+ description: Returns the information collected from the FlashArray
+ returned: always
+ type: complex
+ sample: {
+ "admins": {
+ "pureuser": {
+ "role": "array_admin",
+ "type": "local"
+ }
+ },
+ "apps": {
+ "offload": {
+ "description": "Snapshot offload to NFS or Amazon S3",
+ "status": "healthy",
+ "version": "5.2.1"
+ }
+ },
+ "arrays": {},
+ "capacity": {
+ "data_reduction": 11.664774599686346,
+ "free_space": 6995782867042,
+ "provisioned_space": 442391871488,
+ "shared_space": 3070918120,
+ "snapshot_space": 284597118,
+ "system_space": 0,
+ "thin_provisioning": 0.8201773449669771,
+ "total_capacity": 7002920315199,
+ "total_reduction": 64.86821472825108,
+ "volume_space": 3781932919
+ },
+ "config": {
+ "directory_service": {
+ "data": {
+ "base_dn": "dc=example,dc=lab",
+ "bind_user": "CN=user,OU=Users,OU=Example Lab,DC=example,DC=lab",
+ "enabled": true,
+ "services": [
+ "data"
+ ],
+ "uris": [
+ "ldap://1.2.3.11"
+ ]
+ },
+ "management": {
+ "base_dn": "DC=example,DC=lab",
+ "bind_user": "svc.ldap",
+ "enabled": true,
+ "services": [
+ "management"
+ ],
+ "uris": [
+ "ldap://1.2.3.10",
+ "ldap://1.2.3.11"
+ ]
+ }
+ },
+ "directory_service_roles": {
+ "array_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "ops_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "readonly": {
+ "group": null,
+ "group_base": null
+ },
+ "storage_admin": {
+ "group": null,
+ "group_base": null
+ }
+ },
+ "dns": {
+ "domain": "acme.com",
+ "nameservers": [
+ "8.8.4.4"
+ ]
+ },
+ "global_admin": {
+ "lockout_duration": null,
+ "max_login_attempts": null,
+ "min_password_length": 1,
+ "single_sign_on_enabled": false
+ },
+ "idle_timeout": 0,
+ "ntp": [
+ "prod-ntp1.puretec.purestorage.com"
+ ],
+ "phonehome": "enabled",
+ "proxy": "",
+ "relayhost": "smtp.puretec.purestorage.com",
+ "scsi_timeout": 60,
+ "senderdomain": "purestorage.com",
+ "smtp": [
+ {
+ "enabled": true,
+ "name": "flasharray-alerts@purestorage.com"
+ }
+ ],
+ "snmp": [
+ {
+ "auth_passphrase": null,
+ "auth_protocol": null,
+ "community": "",
+ "host": "10.21.23.34",
+ "name": "manager1",
+ "notification": "trap",
+ "privacy_passphrase": null,
+ "privacy_protocol": null,
+ "user": null,
+ "version": "v2c"
+ }
+ ],
+ "syslog": [
+ "udp://prod-ntp2.puretec.purestorage.com:333"
+ ]
+ },
+ "default": {
+ "admins": 1,
+ "array_model": "FA-405",
+ "array_name": "array",
+ "connected_arrays": 0,
+ "connection_key": "c6033033-fe69-2515-a9e8-966bb7fe4b40",
+ "hostgroups": 0,
+ "hosts": 15,
+ "pods": 1,
+ "protection_groups": 1,
+ "purity_version": "5.2.1",
+ "snapshots": 2,
+ "volume_groups": 1
+ },
+ "hgroups": {},
+ "hosts": {
+ "@offload": {
+ "hgroup": null,
+ "iqn": [],
+ "nqn": [],
+ "personality": null,
+ "preferred_array": [],
+ "target_port": [],
+ "wwn": []
+ },
+ "docker-host": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:d97adf78472"
+ ],
+ "nqn": [],
+ "personality": null,
+ "preferred_array": [],
+ "target_port": [
+ "CT0.ETH4",
+ "CT1.ETH4"
+ ],
+ "wwn": [],
+ }
+ },
+ "interfaces": {
+ "CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ },
+ "network": {
+ "@offload.data0": {
+ "address": "10.21.200.222",
+ "gateway": "10.21.200.1",
+ "hwaddr": "52:54:30:02:b9:4e",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "app"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth0": {
+ "address": "10.21.200.211",
+ "gateway": "10.21.200.1",
+ "hwaddr": "ec:f4:bb:c8:8a:04",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "speed": 1000000000
+ },
+ "ct0.eth2": {
+ "address": "10.21.200.218",
+ "gateway": null,
+ "hwaddr": "ec:f4:bb:c8:8a:00",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "replication"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth4": {
+ "address": "10.21.200.214",
+ "gateway": null,
+ "hwaddr": "90:e2:ba:83:79:0c",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "iscsi"
+ ],
+ "speed": 10000000000
+ },
+ "ct1.eth0": {
+ "address": "10.21.200.212",
+ "gateway": "10.21.200.1",
+ "hwaddr": "ec:f4:bb:e4:c6:3c",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "speed": 1000000000
+ },
+ "ct1.eth2": {
+ "address": "10.21.200.220",
+ "gateway": null,
+ "hwaddr": "ec:f4:bb:e4:c6:38",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "replication"
+ ],
+ "speed": 10000000000
+ },
+ "ct1.eth4": {
+ "address": "10.21.200.216",
+ "gateway": null,
+ "hwaddr": "90:e2:ba:8b:b1:8c",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "iscsi"
+ ],
+ "speed": 10000000000
+ },
+ "vir0": {
+ "address": "10.21.200.210",
+ "gateway": "10.21.200.1",
+ "hwaddr": "fe:ba:e9:e7:6b:0f",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "speed": 1000000000
+ }
+ },
+ "nfs_offload": {},
+ "performance": {
+ "input_per_sec": 0,
+ "local_queue_usec_per_op": 0,
+ "output_per_sec": 0,
+ "qos_rate_limit_usec_per_read_op": 0,
+ "qos_rate_limit_usec_per_write_op": 0,
+ "queue_depth": 0,
+ "queue_usec_per_read_op": 0,
+ "queue_usec_per_write_op": 0,
+ "reads_per_sec": 0,
+ "san_usec_per_read_op": 0,
+ "san_usec_per_write_op": 0,
+ "time": "2019-08-14T21:33:51Z",
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "writes_per_sec": 0
+ },
+ "pgroups": {
+ "test_pg": {
+ "hgroups": null,
+ "hosts": null,
+ "source": "docker-host",
+ "targets": null,
+ "volumes": null
+ }
+ },
+ "pods": {
+ "test": {
+ "arrays": [
+ {
+ "array_id": "043be47c-1233-4399-b9d6-8fe38727dd9d",
+ "mediator_status": "online",
+ "name": "array2",
+ "status": "online"
+ }
+ ],
+ "source": null
+ }
+ },
+ "s3_offload": {
+ "s3-offload": {
+ "access_key_id": "AKIAILNVEPWZTV4FGWZQ",
+ "bucket": "offload-bucket",
+ "protocol": "s3",
+ "status": "connected"
+ }
+ },
+ "snapshots": {
+ "@offload_boot.1": {
+ "created": "2019-03-14T15:29:20Z",
+ "size": 68719476736,
+ "source": "@offload_boot"
+ }
+ },
+ "subnet": {},
+ "vgroups": {
+ "test": {
+ "volumes": [
+ "test/test",
+ "test/test1"
+ ]
+ }
+ },
+ "volumes": {
+ "@offload_boot": {
+ "bandwidth": null,
+ "hosts": [
+ [
+ "@offload",
+ 1
+ ]
+ ],
+ "serial": "43BE47C12334399B00013959",
+ "size": 68719476736,
+ "source": null
+ },
+ "docker-store": {
+ "bandwidth": null,
+ "hosts": [
+ [
+ "docker-host",
+ 1
+ ]
+ ],
+ "serial": "43BE47C12334399B00011418",
+ "size": 21474836480,
+ "source": null
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_array, get_system, purefa_argument_spec
+import time
+
+ADMIN_API_VERSION = '1.14'
+S3_REQUIRED_API_VERSION = '1.16'
+LATENCY_REQUIRED_API_VERSION = '1.16'
+AC_REQUIRED_API_VERSION = '1.14'
+CAP_REQUIRED_API_VERSION = '1.6'
+SAN_REQUIRED_API_VERSION = '1.10'
+NVME_API_VERSION = '1.16'
+PREFERRED_API_VERSION = '1.15'
+P53_API_VERSION = '1.17'
+ACTIVE_DR_API = '1.19'
+V6_MINIMUM_API_VERSION = '2.2'
+FILES_API_VERSION = '2.3'
+FC_REPL_API_VERSION = '2.4'
+
+
def generate_default_dict(module, array):
    """Collect the default (minimum) information set for the array.

    Returns object counts (hosts, volumes, snapshots, protection groups,
    etc.) plus basic identity facts such as array name, model and the
    running Purity version.
    """
    info = {}
    base_attrs = array.get()
    api_version = array._list_available_rest_versions()
    if FILES_API_VERSION in api_version:
        # File-services objects are only exposed through the v6 REST client.
        v6 = get_array(module)
        info['snapshot_policies'] = len(v6.get_policies_snapshot().items)
        info['nfs_policies'] = len(v6.get_policies_nfs().items)
        info['smb_policies'] = len(v6.get_policies_smb().items)
        info['filesystems'] = len(v6.get_file_systems().items)
        info['directories'] = len(v6.get_directories().items)
        info['exports'] = len(v6.get_directory_exports().items)
        info['directory_snapshots'] = len(v6.get_directory_snapshots().items)
    if AC_REQUIRED_API_VERSION in api_version:
        info['volume_groups'] = len(array.list_vgroups())
        info['connected_arrays'] = len(array.list_array_connections())
        info['pods'] = len(array.list_pods())
        info['connection_key'] = array.get(connection_key=True)['connection_key']
    info['array_model'] = array.get(controllers=True)[0]['model']
    info['array_name'] = base_attrs['array_name']
    info['purity_version'] = base_attrs['version']
    # Snapshot/volume/pgroup counts include objects pending eradication.
    info['hosts'] = len(array.list_hosts())
    info['snapshots'] = len(array.list_volumes(snap=True, pending=True))
    info['volumes'] = len(array.list_volumes(pending=True))
    info['protection_groups'] = len(array.list_pgroups(pending=True))
    info['hostgroups'] = len(array.list_hgroups())
    info['admins'] = len(array.list_admins())
    info['remote_assist'] = array.get_remote_assist_status()['status']
    if P53_API_VERSION in api_version:
        info['maintenance_window'] = array.list_maintenance_windows()
    return info
+
+
def generate_perf_dict(array):
    """Collect the most recent array-level performance sample.

    Returns the monitor sample (IOPS, bandwidth, usec-per-op, queue depth)
    merged with the per-op SAN/queue/QoS latency breakdown. On Purity
    versions without the extended latency REST API an empty dict is
    returned; previously this path raised KeyError because the monitor
    sample was never fetched yet its keys were read unconditionally.
    """
    perf_info = {}
    api_version = array._list_available_rest_versions()
    if LATENCY_REQUIRED_API_VERSION in api_version:
        latency_info = array.get(action='monitor', latency=True)[0]
        # The monitor sample already carries writes/reads/input/output per
        # second, usec-per-op figures and queue depth, so the former
        # self-assignments of those keys were redundant no-ops.
        perf_info = array.get(action='monitor')[0]
        perf_info['san_usec_per_read_op'] = latency_info['san_usec_per_read_op']
        perf_info['san_usec_per_write_op'] = latency_info['san_usec_per_write_op']
        perf_info['queue_usec_per_read_op'] = latency_info['queue_usec_per_read_op']
        perf_info['queue_usec_per_write_op'] = latency_info['queue_usec_per_write_op']
        perf_info['qos_rate_limit_usec_per_read_op'] = latency_info['qos_rate_limit_usec_per_read_op']
        perf_info['qos_rate_limit_usec_per_write_op'] = latency_info['qos_rate_limit_usec_per_write_op']
    return perf_info
+
+
def generate_config_dict(module, array):
    """Collect array configuration.

    Covers DNS, SMTP, SNMP, directory services and their roles, NTP,
    syslog, phonehome, proxy, timeouts, SMI-S and (on FA-Files capable
    arrays) Active Directory accounts.
    """
    config_info = {}
    api_version = array._list_available_rest_versions()
    config_info['console_lock'] = array.get_console_lock_status()['console_lock']
    config_info['dns'] = array.get_dns()
    config_info['smtp'] = array.list_alert_recipients()
    config_info['snmp'] = array.list_snmp_managers()
    config_info['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id']
    if V6_MINIMUM_API_VERSION in api_version:
        # v6 REST exposes multiple directory services, keyed by service name.
        config_info['directory_service'] = {}
        arrayv6 = get_array(module)
        services = list(arrayv6.get_directory_services().items)
        for service in range(0, len(services)):
            service_type = services[service].name
            config_info['directory_service'][service_type] = {
                'base_dn': services[service].base_dn,
                'bind_user': services[service].bind_user,
                'enabled': services[service].enabled,
                'services': services[service].services,
                'uris': services[service].uris,
            }
        config_info['directory_service_roles'] = {}
        roles = list(arrayv6.get_directory_services_roles().items)
        for role in range(0, len(roles)):
            role_name = roles[role].role.name
            # Roles without group configuration may lack these attributes.
            try:
                config_info['directory_service_roles'][role_name] = {
                    'group': roles[role].group,
                    'group_base': roles[role].group_base,
                }
            except Exception:
                pass
    else:
        # TODO: Remove for 1.7.0 release from here...
        config_info['directory_service'] = array.get_directory_service()
        module.warn("Deprecation Notice: The 'directory_services' dictionary moves previous entries")
        module.warn("down into a 'management' sub-dictionary. Please amend your playbooks as necessary")
        module.warn('The old dictionary entries will be removed in Collections 1.7.0')
        # to here.
        config_info['directory_service']['management'] = array.get_directory_service()
        if S3_REQUIRED_API_VERSION in api_version:
            config_info['directory_service_roles'] = {}
            roles = array.list_directory_service_roles()
            for role in range(0, len(roles)):
                role_name = roles[role]['name']
                config_info['directory_service_roles'][role_name] = {
                    'group': roles[role]['group'],
                    'group_base': roles[role]['group_base'],
                }
        else:
            config_info['directory_service'].update(array.get_directory_service(groups=True))
    config_info['ntp'] = array.get(ntpserver=True)['ntpserver']
    config_info['syslog'] = array.get(syslogserver=True)['syslogserver']
    config_info['phonehome'] = array.get(phonehome=True)['phonehome']
    config_info['proxy'] = array.get(proxy=True)['proxy']
    config_info['relayhost'] = array.get(relayhost=True)['relayhost']
    config_info['senderdomain'] = array.get(senderdomain=True)['senderdomain']
    # NOTE: a second, identical 'syslog' fetch that used to sit here was
    # removed - it merely repeated the REST call made above.
    config_info['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout']
    config_info['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout']
    if S3_REQUIRED_API_VERSION in api_version:
        config_info['global_admin'] = array.get_global_admin_attributes()
    if V6_MINIMUM_API_VERSION in api_version:
        # Re-bind 'array' to the v6 client for SMI-S (and FA-Files below).
        array = get_array(module)
        smi_s = list(array.get_smi_s().items)[0]
        config_info['smi-s'] = {
            'slp_enabled': smi_s.slp_enabled,
            'wbem_https_enabled': smi_s.wbem_https_enabled
        }
    if FILES_API_VERSION in api_version:
        # BUGFIX: the sub-dictionary must exist before the loop below.
        # Previously the first assignment raised KeyError, which the broad
        # except swallowed and misreported as FA-Files being disabled.
        config_info['active_directory'] = {}
        try:
            ad_accounts = list(array.get_active_directory().items)
            for ad_account in range(0, len(ad_accounts)):
                ad_name = ad_accounts[ad_account].name
                config_info['active_directory'][ad_name] = ad_accounts[ad_account]
        except Exception:
            module.warn("FA-Files is not enabled on this array")
    return config_info
+
+
def generate_filesystems_dict(array):
    """Build a nested dict describing every file system, its managed
    directories and their exports (requires the v6 REST client).
    """
    files_info = {}
    for filesystem in list(array.get_file_systems().items):
        fs_name = filesystem.name
        fs_entry = {
            'destroyed': filesystem.destroyed,
            'directories': {},
        }
        files_info[fs_name] = fs_entry
        for directory in list(array.get_directories(file_system_names=[fs_name]).items):
            dir_entry = {
                'path': directory.path,
                'data_reduction': directory.space.data_reduction,
                'snapshots_space': directory.space.snapshots,
                'total_physical_space': directory.space.total_physical,
                'unique_space': directory.space.unique,
                'virtual_space': directory.space.virtual,
                'destroyed': directory.destroyed,
                'full_name': directory.name,
                'exports': {},
            }
            fs_entry['directories'][directory.directory_name] = dir_entry
            # Exports are looked up by the directory's fully qualified name.
            for export in list(array.get_directory_exports(directory_names=[dir_entry['full_name']]).items):
                dir_entry['exports'][export.export_name] = {
                    'enabled': export.enabled,
                    'policy': {
                        'name': export.policy.name,
                        'type': export.policy.resource_type,
                    },
                }
    return files_info
+
+
def generate_dir_snaps_dict(array):
    """Build a dict of all managed-directory snapshots keyed by name."""
    dir_snaps_info = {}
    for snapshot in list(array.get_directory_snapshots().items):
        entry = {
            'destroyed': snapshot.destroyed,
            'source': snapshot.source.name,
            'suffix': snapshot.suffix,
            'client_name': snapshot.client_name,
            'snapshot_space': snapshot.space.snapshots,
            'total_physical_space': snapshot.space.total_physical,
            'unique_space': snapshot.space.unique,
        }
        # The policy attribute may be absent; fall back to an empty string.
        try:
            entry['policy'] = snapshot.policy.name
        except Exception:
            entry['policy'] = ''
        # Eradication countdown only exists for destroyed snapshots.
        if entry['destroyed']:
            entry['time_remaining'] = snapshot.time_remaining
        dir_snaps_info[snapshot.name] = entry
    return dir_snaps_info
+
+
def generate_policies_dict(array):
    """Build a dict of policies (snapshot/NFS/SMB) and their member names."""
    policy_info = {}
    for policy in list(array.get_policies().items):
        p_name = policy.name
        member_names = [
            member.member.name
            for member in list(array.get_directories_policies(policy_names=[p_name]).items)
        ]
        policy_info[p_name] = {
            'type': policy.policy_type,
            'enabled': policy.enabled,
            'members': member_names,
        }
    return policy_info
+
+
def generate_clients_dict(array):
    """Build a dict of configured API clients keyed by client name."""
    clients_info = {}
    for client in list(array.get_api_clients().items):
        clients_info[client.name] = {
            'enabled': client.enabled,
            # The REST API reports the token TTL in milliseconds.
            'TTL(seconds)': client.access_token_ttl_in_ms / 1000,
            'key_id': client.key_id,
            'client_id': client.id,
            'max_role': client.max_role,
            'public_key': client.public_key,
        }
    return clients_info
+
+
def generate_admin_dict(array):
    """Build a dict of admin accounts keyed by user name.

    Returns an empty dict on Purity versions that predate the admin API.
    """
    admin_info = {}
    api_version = array._list_available_rest_versions()
    if ADMIN_API_VERSION in api_version:
        for admin in array.list_admins():
            admin_info[admin['name']] = {
                'type': admin['type'],
                'role': admin['role'],
            }
    return admin_info
+
+
def generate_subnet_dict(array):
    """Build a dict of enabled subnets keyed by subnet name."""
    sub_info = {}
    for subnet in array.list_subnets():
        # Disabled subnets are deliberately omitted from the result.
        if not subnet['enabled']:
            continue
        sub_info[subnet['name']] = {
            'gateway': subnet['gateway'],
            'mtu': subnet['mtu'],
            'vlan': subnet['vlan'],
            'prefix': subnet['prefix'],
            'interfaces': subnet['interfaces'],
            'services': subnet['services'],
        }
    return sub_info
+
+
def generate_network_dict(array):
    """Build a dict of network interfaces keyed by interface name.

    Interfaces attached to an enabled subnet also carry a 'subnet' sub-dict.
    """
    net_info = {}
    for port in array.list_network_interfaces():
        entry = {
            'hwaddr': port['hwaddr'],
            'mtu': port['mtu'],
            'enabled': port['enabled'],
            'speed': port['speed'],
            'address': port['address'],
            'slaves': port['slaves'],
            'services': port['services'],
            'gateway': port['gateway'],
            'netmask': port['netmask'],
        }
        if port['subnet']:
            subnet = array.get_subnet(port['subnet'])
            if subnet['enabled']:
                entry['subnet'] = {
                    'name': subnet['name'],
                    'prefix': subnet['prefix'],
                    'vlan': subnet['vlan'],
                }
        net_info[port['name']] = entry
    return net_info
+
+
def generate_capacity_dict(array):
    """Build a dict of array capacity and space-efficiency figures.

    Returns an empty dict on Purity versions without the space API.
    """
    capacity_info = {}
    api_version = array._list_available_rest_versions()
    if CAP_REQUIRED_API_VERSION in api_version:
        # Provisioned space is the sum of all volume sizes, including
        # volumes pending eradication.
        volumes = array.list_volumes(pending=True)
        capacity_info['provisioned_space'] = sum(vol['size'] for vol in volumes)
        space = array.get(space=True)[0]
        total_capacity = space['capacity']
        capacity_info['free_space'] = total_capacity - space['total']
        capacity_info['total_capacity'] = total_capacity
        capacity_info['data_reduction'] = space['data_reduction']
        capacity_info['system_space'] = space['system']
        capacity_info['volume_space'] = space['volumes']
        capacity_info['shared_space'] = space['shared_space']
        capacity_info['snapshot_space'] = space['snapshots']
        capacity_info['thin_provisioning'] = space['thin_provisioning']
        capacity_info['total_reduction'] = space['total_reduction']
    return capacity_info
+
+
def generate_snap_dict(module, array):
    """Collect information on all live (non-destroyed) volume snapshots.

    Result is keyed by snapshot name.  On arrays with the offload
    replication API each entry's 'remote' list holds per-offload-target
    transfer details; on ActiveDR-capable arrays the 'tags' list is
    filled from the snapshot tag namespaces.
    """
    snap_info = {}
    api_version = array._list_available_rest_versions()
    snaps = array.list_volumes(snap=True)
    for snap in range(0, len(snaps)):
        snapshot = snaps[snap]['name']
        snap_info[snapshot] = {
            'size': snaps[snap]['size'],
            'source': snaps[snap]['source'],
            'created': snaps[snap]['created'],
            'tags': [],
            'remote': [],
        }
    if FC_REPL_API_VERSION in api_version:
        # Offloaded snapshot copies are only visible through the v6 client.
        arrayv6 = get_array(module)
        offloads = list(arrayv6.get_offloads().items)
        for offload in range(0, len(offloads)):
            offload_name = offloads[offload].name
            remote_snaps = list(arrayv6.get_remote_volume_snapshots(on=offload_name, destroyed=False).items)
            for remote_snap in range(0, len(remote_snaps)):
                # Remote names are '<offload>:<snapshot>'; strip the prefix.
                remote_snap_name = remote_snaps[remote_snap].name.split(':')[1]
                remote_transfer = list(arrayv6.get_remote_volume_snapshots_transfer(on=offload_name,
                                                                                    names=[remote_snaps[remote_snap].name]).items)[0]
                # Timestamps arrive in epoch milliseconds; render as UTC text.
                remote_dict = {
                    'source': remote_snaps[remote_snap].source.name,
                    'suffix': remote_snaps[remote_snap].suffix,
                    'size': remote_snaps[remote_snap].provisioned,
                    'data_transferred': remote_transfer.data_transferred,
                    'completed': time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(remote_transfer.completed / 1000)) + " UTC",
                    'physical_bytes_written': remote_transfer.physical_bytes_written,
                    'progress': remote_transfer.progress,
                    'created': time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(remote_snaps[remote_snap].created / 1000)) + " UTC",
                }
                try:
                    snap_info[remote_snap_name]['remote'].append(remote_dict)
                except KeyError:
                    # Snapshot exists only on the offload target: create a
                    # stub entry holding just the remote copy details.
                    snap_info[remote_snap_name] = {'remote': []}
                    snap_info[remote_snap_name]['remote'].append(remote_dict)
    if ACTIVE_DR_API in api_version:
        snaptags = array.list_volumes(snap=True, tags=True, namespace="*")
        for snaptag in range(0, len(snaptags)):
            # VASA integration tags are internal and deliberately skipped.
            if snaptags[snaptag]['namespace'] != "vasa-integration.purestorage.com":
                snapname = snaptags[snaptag]['name']
                tagdict = {
                    'key': snaptags[snaptag]['key'],
                    'value': snaptags[snaptag]['value'],
                    'namespace': snaptags[snaptag]['namespace']
                }
                snap_info[snapname]['tags'].append(tagdict)
    return snap_info
+
+
def generate_del_snap_dict(module, array):
    """Collect information on volume snapshots pending eradication.

    Same shape as generate_snap_dict() but restricted to destroyed
    snapshots, with an additional 'time_remaining' eradication countdown.
    """
    snap_info = {}
    api_version = array._list_available_rest_versions()
    snaps = array.list_volumes(snap=True, pending_only=True)
    for snap in range(0, len(snaps)):
        snapshot = snaps[snap]['name']
        snap_info[snapshot] = {
            'size': snaps[snap]['size'],
            'source': snaps[snap]['source'],
            'created': snaps[snap]['created'],
            'time_remaining': snaps[snap]['time_remaining'],
            'tags': [],
            'remote': [],
        }
    if FC_REPL_API_VERSION in api_version:
        # Destroyed offloaded copies are only visible through the v6 client.
        arrayv6 = get_array(module)
        offloads = list(arrayv6.get_offloads().items)
        for offload in range(0, len(offloads)):
            offload_name = offloads[offload].name
            remote_snaps = list(arrayv6.get_remote_volume_snapshots(on=offload_name, destroyed=True).items)
            for remote_snap in range(0, len(remote_snaps)):
                # Remote names are '<offload>:<snapshot>'; strip the prefix.
                remote_snap_name = remote_snaps[remote_snap].name.split(':')[1]
                remote_transfer = list(arrayv6.get_remote_volume_snapshots_transfer(on=offload_name,
                                                                                    names=[remote_snaps[remote_snap].name]).items)[0]
                # Timestamps arrive in epoch milliseconds; render as UTC text.
                remote_dict = {
                    'source': remote_snaps[remote_snap].source.name,
                    'suffix': remote_snaps[remote_snap].suffix,
                    'size': remote_snaps[remote_snap].provisioned,
                    'data_transferred': remote_transfer.data_transferred,
                    'completed': time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(remote_transfer.completed / 1000)) + " UTC",
                    'physical_bytes_written': remote_transfer.physical_bytes_written,
                    'progress': remote_transfer.progress,
                    'created': time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(remote_snaps[remote_snap].created / 1000)) + " UTC",
                }
                try:
                    snap_info[remote_snap_name]['remote'].append(remote_dict)
                except KeyError:
                    # Snapshot exists only on the offload target: create a
                    # stub entry holding just the remote copy details.
                    snap_info[remote_snap_name] = {'remote': []}
                    snap_info[remote_snap_name]['remote'].append(remote_dict)
    if ACTIVE_DR_API in api_version:
        snaptags = array.list_volumes(snap=True, tags=True, pending_only=True, namespace="*")
        for snaptag in range(0, len(snaptags)):
            # VASA integration tags are internal and deliberately skipped.
            if snaptags[snaptag]['namespace'] != "vasa-integration.purestorage.com":
                snapname = snaptags[snaptag]['name']
                tagdict = {
                    'key': snaptags[snaptag]['key'],
                    'value': snaptags[snaptag]['value'],
                    'namespace': snaptags[snaptag]['namespace']
                }
                snap_info[snapname]['tags'].append(tagdict)
    return snap_info
+
+
def generate_del_vol_dict(array):
    """Build a dict of volumes pending eradication, keyed by volume name."""
    volume_info = {}
    api_version = array._list_available_rest_versions()
    for vol in array.list_volumes(pending_only=True):
        volume_info[vol['name']] = {
            'size': vol['size'],
            'source': vol['source'],
            'created': vol['created'],
            'serial': vol['serial'],
            'time_remaining': vol['time_remaining'],
            'tags': [],
        }
    if ACTIVE_DR_API in api_version:
        # VASA integration tags are internal and deliberately skipped.
        for voltag in array.list_volumes(tags=True, pending_only=True):
            if voltag['namespace'] != "vasa-integration.purestorage.com":
                volume_info[voltag['name']]['tags'].append(
                    {
                        'key': voltag['key'],
                        'value': voltag['value'],
                        'copyable': voltag['copyable'],
                        'namespace': voltag['namespace'],
                    }
                )
    return volume_info
+
+
def generate_vol_dict(array):
    """Build a dict of live volumes keyed by name.

    Includes QoS limits, protocol endpoints (VVols), host connections,
    host-encryption-key status and tags, gated on the REST API versions
    the array supports.
    """
    volume_info = {}
    # Single REST-version lookup; the previous code re-queried the
    # available versions three separate times in this function.
    api_version = array._list_available_rest_versions()
    vols = array.list_volumes()
    for vol in range(0, len(vols)):
        volume = vols[vol]['name']
        volume_info[volume] = {
            'protocol_endpoint': False,
            'source': vols[vol]['source'],
            'size': vols[vol]['size'],
            'serial': vols[vol]['serial'],
            'tags': [],
            'hosts': [],
            'bandwidth': ""
        }
    if AC_REQUIRED_API_VERSION in api_version:
        qvols = array.list_volumes(qos=True)
        for qvol in range(0, len(qvols)):
            volume = qvols[qvol]['name']
            volume_info[volume]['bandwidth'] = qvols[qvol]['bandwidth_limit']
            if P53_API_VERSION in api_version:
                volume_info[volume]['iops_limit'] = qvols[qvol]['iops_limit']
        # Protocol endpoints are listed separately and replace any entry
        # of the same name from the plain volume listing.
        vvols = array.list_volumes(protocol_endpoint=True)
        for vvol in range(0, len(vvols)):
            volume = vvols[vvol]['name']
            volume_info[volume] = {
                'protocol_endpoint': True,
                'source': vvols[vvol]['source'],
                'serial': vvols[vvol]['serial'],
                'tags': [],
                'hosts': [],
            }
        if P53_API_VERSION in api_version:
            pe_e2ees = array.list_volumes(protocol_endpoint=True, host_encryption_key=True)
            for pe_e2ee in range(0, len(pe_e2ees)):
                volume = pe_e2ees[pe_e2ee]['name']
                volume_info[volume]['host_encryption_key_status'] = pe_e2ees[pe_e2ee]['host_encryption_key_status']
    if P53_API_VERSION in api_version:
        e2ees = array.list_volumes(host_encryption_key=True)
        for e2ee in range(0, len(e2ees)):
            volume = e2ees[e2ee]['name']
            volume_info[volume]['host_encryption_key_status'] = e2ees[e2ee]['host_encryption_key_status']
    cvols = array.list_volumes(connect=True)
    for cvol in range(0, len(cvols)):
        volume = cvols[cvol]['name']
        volume_info[volume]['hosts'].append({'host': cvols[cvol]['host'], 'lun': cvols[cvol]['lun']})
    if ACTIVE_DR_API in api_version:
        # VASA integration tags are internal and deliberately skipped.
        voltags = array.list_volumes(tags=True)
        for voltag in range(0, len(voltags)):
            if voltags[voltag]['namespace'] != "vasa-integration.purestorage.com":
                volume = voltags[voltag]['name']
                volume_info[volume]['tags'].append({
                    'key': voltags[voltag]['key'],
                    'value': voltags[voltag]['value'],
                    'copyable': voltags[voltag]['copyable'],
                    'namespace': voltags[voltag]['namespace']
                })
    return volume_info
+
+
def generate_host_dict(array):
    """Collect per-host facts: target ports, personality, connected
    volumes, CHAP users, NQNs and preferred arrays (where the REST API
    version supports each field).
    """
    api_version = array._list_available_rest_versions()
    host_info = {}
    hosts = array.list_hosts()
    for host in range(0, len(hosts)):
        hostname = hosts[host]['name']
        tports = []
        # The 'all' view includes the target ports the host connects through.
        host_all_info = array.get_host(hostname, all=True)
        if host_all_info:
            tports = host_all_info[0]['target_port']
        host_info[hostname] = {
            'hgroup': hosts[host]['hgroup'],
            'iqn': hosts[host]['iqn'],
            'wwn': hosts[host]['wwn'],
            'personality': array.get_host(hostname,
                                          personality=True)['personality'],
            'target_port': tports,
            'volumes': []
        }
        host_connections = array.list_host_connections(hostname)
        for connection in range(0, len(host_connections)):
            connection_dict = {
                'hostgroup': host_connections[connection]['hgroup'],
                'volume': host_connections[connection]['vol'],
                'lun': host_connections[connection]['lun']
            }
            host_info[hostname]['volumes'].append(connection_dict)
        # CHAP credentials are only queried for iSCSI (IQN-bearing) hosts.
        if host_info[hostname]['iqn']:
            chap_data = array.get_host(hostname, chap=True)
            host_info[hostname]['target_user'] = chap_data['target_user']
            host_info[hostname]['host_user'] = chap_data['host_user']
        if NVME_API_VERSION in api_version:
            host_info[hostname]['nqn'] = hosts[host]['nqn']
    if PREFERRED_API_VERSION in api_version:
        # Second listing re-binds 'hosts' to include the preferred_array field.
        hosts = array.list_hosts(preferred_array=True)
        for host in range(0, len(hosts)):
            hostname = hosts[host]['name']
            host_info[hostname]['preferred_array'] = hosts[host]['preferred_array']
    return host_info
+
+
def generate_pgroups_dict(array):
    """Collect protection-group facts: members, schedule, retention and,
    for pgroup names containing ':' (received from another array),
    per-snapshot transfer statistics.
    """
    pgroups_info = {}
    pgroups = array.list_pgroups()
    for pgroup in range(0, len(pgroups)):
        protgroup = pgroups[pgroup]['name']
        pgroups_info[protgroup] = {
            'hgroups': pgroups[pgroup]['hgroups'],
            'hosts': pgroups[pgroup]['hosts'],
            'source': pgroups[pgroup]['source'],
            'targets': pgroups[pgroup]['targets'],
            'volumes': pgroups[pgroup]['volumes'],
        }
        prot_sched = array.get_pgroup(protgroup, schedule=True)
        prot_reten = array.get_pgroup(protgroup, retention=True)
        if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']:
            # BUGFIX: expose correctly spelled *_frequency keys.  The
            # historic misspelled *_freqyency keys are retained so that
            # existing playbooks keep working.
            pgroups_info[protgroup]['snap_frequency'] = prot_sched['snap_frequency']
            pgroups_info[protgroup]['replicate_frequency'] = prot_sched['replicate_frequency']
            pgroups_info[protgroup]['snap_freqyency'] = prot_sched['snap_frequency']
            pgroups_info[protgroup]['replicate_freqyency'] = prot_sched['replicate_frequency']
            pgroups_info[protgroup]['snap_enabled'] = prot_sched['snap_enabled']
            pgroups_info[protgroup]['replicate_enabled'] = prot_sched['replicate_enabled']
            pgroups_info[protgroup]['snap_at'] = prot_sched['snap_at']
            pgroups_info[protgroup]['replicate_at'] = prot_sched['replicate_at']
            pgroups_info[protgroup]['replicate_blackout'] = prot_sched['replicate_blackout']
            pgroups_info[protgroup]['per_day'] = prot_reten['per_day']
            pgroups_info[protgroup]['target_per_day'] = prot_reten['target_per_day']
            pgroups_info[protgroup]['target_days'] = prot_reten['target_days']
            pgroups_info[protgroup]['days'] = prot_reten['days']
            pgroups_info[protgroup]['all_for'] = prot_reten['all_for']
            pgroups_info[protgroup]['target_all_for'] = prot_reten['target_all_for']
        if ":" in protgroup:
            # NOTE(review): ':' in the name appears to mark a pgroup
            # replicated from a remote source; only these carry transfer
            # statistics — confirm against the REST API docs.
            snap_transfers = array.get_pgroup(protgroup, snap=True, transfer=True)
            pgroups_info[protgroup]['snaps'] = {}
            for snap_transfer in range(0, len(snap_transfers)):
                snap = snap_transfers[snap_transfer]['name']
                pgroups_info[protgroup]['snaps'][snap] = {
                    'created': snap_transfers[snap_transfer]['created'],
                    'started': snap_transfers[snap_transfer]['started'],
                    'completed': snap_transfers[snap_transfer]['completed'],
                    'physical_bytes_written': snap_transfers[snap_transfer]['physical_bytes_written'],
                    'data_transferred': snap_transfers[snap_transfer]['data_transferred'],
                    'progress': snap_transfers[snap_transfer]['progress'],
                }
    return pgroups_info
+
+
def generate_rl_dict(module, array):
    """Return a dict of pod replica links keyed by local pod name.

    Requires the ActiveDR API; on older arrays an empty dict is returned.
    'recovery_point' and 'lag' arrive as epoch/interval milliseconds and are
    rendered as local time and seconds respectively.
    """
    rl_info = {}
    api_version = array._list_available_rest_versions()
    if ACTIVE_DR_API in api_version:
        try:
            for rlink in array.list_pod_replica_links():
                link_name = rlink['local_pod_name']
                since_epoch = rlink['recovery_point'] / 1000
                recovery_datatime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(since_epoch))
                rl_info[link_name] = {
                    'status': rlink['status'],
                    'direction': rlink['direction'],
                    'lag': str(rlink['lag'] / 1000) + 's',
                    'remote_pod_name': rlink['remote_pod_name'],
                    'remote_names': rlink['remote_names'],
                    'recovery_point': recovery_datatime,
                }
        except Exception:
            # list_pod_replica_links() only exists in newer SDK releases.
            # (Fixed typo: the warning previously read "hisher".)
            module.warn('Replica Links info requires purestorage SDK 1.19 or higher')
    return rl_info
+
+
def generate_del_pods_dict(array):
    """Return a dict of destroyed (pending-eradication) pods keyed by pod name."""
    pods_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        for pod in array.list_pods(mediator=True, pending_only=True):
            name = pod['name']
            pods_info[name] = {
                'source': pod['source'],
                'arrays': pod['arrays'],
                'mediator': pod['mediator'],
                'mediator_version': pod['mediator_version'],
                'time_remaining': pod['time_remaining'],
            }
            if ACTIVE_DR_API in api_version:
                # 'frozen_at' arrives as epoch milliseconds; render as local time.
                frozen_at = pods_info[name]['arrays'][0]['frozen_at']
                if frozen_at:
                    pods_info[name]['arrays'][0]['frozen_at'] = time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(frozen_at / 1000))
                pods_info[name]['link_source_count'] = pod['link_source_count']
                pods_info[name]['link_target_count'] = pod['link_target_count']
                pods_info[name]['promotion_status'] = pod['promotion_status']
                pods_info[name]['requested_promotion_state'] = pod['requested_promotion_state']
        if PREFERRED_API_VERSION in api_version:
            for pod in array.list_pods(failover_preference=True, pending_only=True):
                pods_info[pod['name']]['failover_preference'] = pod['failover_preference']
    return pods_info
+
+
def generate_pods_dict(array):
    """Return a dict of live ActiveCluster pods keyed by pod name."""
    pods_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        for pod in array.list_pods(mediator=True):
            name = pod['name']
            pods_info[name] = {
                'source': pod['source'],
                'arrays': pod['arrays'],
                'mediator': pod['mediator'],
                'mediator_version': pod['mediator_version'],
            }
            if ACTIVE_DR_API in api_version:
                # 'frozen_at' arrives as epoch milliseconds; render as local time.
                frozen_at = pods_info[name]['arrays'][0]['frozen_at']
                if frozen_at:
                    pods_info[name]['arrays'][0]['frozen_at'] = time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(frozen_at / 1000))
                pods_info[name]['link_source_count'] = pod['link_source_count']
                pods_info[name]['link_target_count'] = pod['link_target_count']
                pods_info[name]['promotion_status'] = pod['promotion_status']
                pods_info[name]['requested_promotion_state'] = pod['requested_promotion_state']
        if PREFERRED_API_VERSION in api_version:
            for pod in array.list_pods(failover_preference=True):
                pods_info[pod['name']]['failover_preference'] = pod['failover_preference']
    return pods_info
+
+
def generate_conn_array_dict(module, array):
    """Return a dict of connected arrays keyed by remote array name.

    Pre-FC-replication API versions use the v1 REST client (``array``);
    newer versions switch to the v6 client obtained via ``get_array(module)``,
    whose results are attribute objects rather than dicts.
    """
    conn_array_info = {}
    api_version = array._list_available_rest_versions()
    if FC_REPL_API_VERSION not in api_version:
        # Legacy (v1 REST) path: list connections and merge throttle info.
        carrays = array.list_array_connections()
        for carray in range(0, len(carrays)):
            arrayname = carrays[carray]['array_name']
            conn_array_info[arrayname] = {
                'array_id': carrays[carray]['id'],
                'throttled': carrays[carray]['throttled'],
                'version': carrays[carray]['version'],
                'type': carrays[carray]['type'],
                'mgmt_ip': carrays[carray]['management_address'],
                'repl_ip': carrays[carray]['replication_address'],
            }
            # Newer API reports a 'status' string; older only a boolean 'connected'.
            if P53_API_VERSION in api_version:
                conn_array_info[arrayname]['status'] = carrays[carray]['status']
            else:
                conn_array_info[arrayname]['connected'] = carrays[carray]['connected']
        # Second listing call returns per-connection throttle settings.
        throttles = array.list_array_connections(throttle=True)
        for throttle in range(0, len(throttles)):
            arrayname = throttles[throttle]['array_name']
            if conn_array_info[arrayname]['throttled']:
                conn_array_info[arrayname]['throttling'] = {
                    'default_limit': throttles[throttle]['default_limit'],
                    'window_limit': throttles[throttle]['window_limit'],
                    'window': throttles[throttle]['window'],
                }
    else:
        # v6 REST path: responses are objects with attributes, and throttle
        # sub-attributes may be absent entirely (hence the AttributeError guards).
        arrayv6 = get_array(module)
        carrays = list(arrayv6.get_array_connections().items)
        for carray in range(0, len(carrays)):
            arrayname = carrays[carray].name
            conn_array_info[arrayname] = {
                'array_id': carrays[carray].id,
                'version': carrays[carray].version,
                'type': carrays[carray].type,
                'mgmt_ip': carrays[carray].management_address,
                'repl_ip': carrays[carray].replication_addresses,
                'transport': carrays[carray].replication_transport,
            }

            # A non-empty throttle object means throttling is configured.
            if bool(carrays[carray].throttle.to_dict()):
                conn_array_info[arrayname]['throttled'] = True
                conn_array_info[arrayname]['throttling'] = {}
                try:
                    if bool(carrays[carray].throttle.window):
                        conn_array_info[arrayname]['throttling']['window'] = carrays[carray].throttle.window.to_dict()
                except AttributeError:
                    pass
                try:
                    if bool(carrays[carray].throttle.default_limit):
                        conn_array_info[arrayname]['throttling']['default_limit'] = carrays[carray].throttle.default_limit
                except AttributeError:
                    pass
                try:
                    if bool(carrays[carray].throttle.window_limit):
                        conn_array_info[arrayname]['throttling']['window_limit'] = carrays[carray].throttle.window_limit
                except AttributeError:
                    pass
            else:
                conn_array_info[arrayname]['throttled'] = False
    return conn_array_info
+
+
def generate_apps_dict(array):
    """Return a dict of installed apps: version/status/description, plus node info when available."""
    apps_info = {}
    api_version = array._list_available_rest_versions()
    if SAN_REQUIRED_API_VERSION in api_version:
        for app in array.list_apps():
            apps_info[app['name']] = {
                'version': app['version'],
                'status': app['status'],
                'description': app['description'],
            }
        if P53_API_VERSION in api_version:
            for node in array.list_app_nodes():
                apps_info[node['name']]['index'] = node['index']
                apps_info[node['name']]['vnc'] = node['vnc']
    return apps_info
+
+
def generate_vgroups_dict(array):
    """Return a dict of volume groups (name -> member volume list)."""
    vgroups_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        vgroups_info = {vgroup['name']: {'volumes': vgroup['volumes']}
                        for vgroup in array.list_vgroups()}
    return vgroups_info
+
+
def generate_certs_dict(array):
    """Return a dict of SSL certificates keyed by certificate name."""

    def _localtime(msecs):
        # Validity timestamps arrive as epoch milliseconds; render as local time.
        return time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.localtime(msecs / 1000))

    certs_info = {}
    api_version = array._list_available_rest_versions()
    if P53_API_VERSION in api_version:
        for cert in array.list_certificates():
            certs_info[cert['name']] = {
                'status': cert['status'],
                'issued_to': cert['issued_to'],
                'valid_from': _localtime(cert['valid_from']),
                'locality': cert['locality'],
                'country': cert['country'],
                'issued_by': cert['issued_by'],
                'valid_to': _localtime(cert['valid_to']),
                'state': cert['state'],
                'key_size': cert['key_size'],
                'org_unit': cert['organizational_unit'],
                'common_name': cert['common_name'],
                'organization': cert['organization'],
                'email': cert['email'],
            }
    return certs_info
+
+
def generate_kmip_dict(array):
    """Return a dict of KMIP server configurations keyed by name."""
    kmip_info = {}
    api_version = array._list_available_rest_versions()
    if P53_API_VERSION in api_version:
        for kmip in array.list_kmip():
            kmip_info[kmip['name']] = {
                'certificate': kmip['certificate'],
                'ca_cert_configured': kmip['ca_certificate_configured'],
                'uri': kmip['uri'],
            }
    return kmip_info
+
+
def generate_nfs_offload_dict(array):
    """Return a dict of NFS offload targets keyed by target name."""
    offload_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        for target in array.list_nfs_offload():
            offload_info[target['name']] = {
                'status': target['status'],
                'mount_point': target['mount_point'],
                'protocol': target['protocol'],
                'mount_options': target['mount_options'],
                'address': target['address'],
            }
    return offload_info
+
+
def generate_s3_offload_dict(array):
    """Return a dict of S3 offload targets keyed by target name."""
    offload_info = {}
    api_version = array._list_available_rest_versions()
    if S3_REQUIRED_API_VERSION in api_version:
        for target in array.list_s3_offload():
            name = target['name']
            offload_info[name] = {
                'status': target['status'],
                'bucket': target['bucket'],
                'protocol': target['protocol'],
                'access_key_id': target['access_key_id'],
            }
            # Placement strategy is only reported by newer API versions.
            if P53_API_VERSION in api_version:
                offload_info[name]['placement_strategy'] = target['placement_strategy']
    return offload_info
+
+
def generate_azure_offload_dict(array):
    """Return a dict of Azure offload targets keyed by target name."""
    offload_info = {}
    api_version = array._list_available_rest_versions()
    if P53_API_VERSION in api_version:
        for target in array.list_azure_offload():
            offload_info[target['name']] = {
                'status': target['status'],
                'account_name': target['account_name'],
                'protocol': target['protocol'],
                'secret_access_key': target['secret_access_key'],
                'container_name': target['container_name'],
            }
    return offload_info
+
+
def generate_hgroups_dict(array):
    """Return a dict of host groups: member hosts, protection groups and connected volumes."""
    hgroups_info = {}
    for hgroup in array.list_hgroups():
        hgroups_info[hgroup['name']] = {
            'hosts': hgroup['hosts'],
            'pgs': [],
            'vols': [],
        }
    # Protection-group membership comes from a second listing call.
    for pghg in array.list_hgroups(protect=True):
        hgroups_info[pghg['name']]['pgs'].append(pghg['protection_group'])
    # Volume connections: each entry is a [volume, lun] pair.
    for conn in array.list_hgroups(connect=True):
        hgroups_info[conn['name']]['vols'].append([conn['vol'], conn['lun']])
    return hgroups_info
+
+
def generate_interfaces_dict(array):
    """Map each port name to its identifier (WWN, IQN or - on NVMe-capable arrays - NQN)."""
    api_version = array._list_available_rest_versions()
    int_info = {}
    nvme_capable = NVME_API_VERSION in api_version
    for port in array.list_ports():
        name = port['name']
        if port['wwn']:
            int_info[name] = port['wwn']
        if port['iqn']:
            int_info[name] = port['iqn']
        if nvme_capable and port['nqn']:
            int_info[name] = port['nqn']
    return int_info
+
+
def main():
    """Entry point: gather the requested info subsets and return them as `purefa_info`."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        gather_subset=dict(default='minimum', type='list', elements='str')
    ))

    module = AnsibleModule(argument_spec, supports_check_mode=False)
    array = get_system(module)
    api_version = array._list_available_rest_versions()

    # Subset names are matched case-insensitively.
    subset = [test.lower() for test in module.params['gather_subset']]
    valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
                     'network', 'subnet', 'interfaces', 'hgroups', 'pgroups',
                     'hosts', 'admins', 'volumes', 'snapshots', 'pods', 'replication',
                     'vgroups', 'offload', 'apps', 'arrays', 'certs', 'kmip',
                     'clients', 'policies', 'dir_snaps', 'filesystems')
    subset_test = (test in valid_subsets for test in subset)
    if not all(subset_test):
        # Fixed garbled message (was "value must gather_subset must be...").
        module.fail_json(msg="value of gather_subset must be one or more of: %s, got: %s"
                         % (",".join(valid_subsets), ",".join(subset)))

    info = {}

    # 'default' is needed by the 'apps' subset as well (array_model check below).
    if 'minimum' in subset or 'all' in subset or 'apps' in subset:
        info['default'] = generate_default_dict(module, array)
    if 'performance' in subset or 'all' in subset:
        info['performance'] = generate_perf_dict(array)
    if 'config' in subset or 'all' in subset:
        info['config'] = generate_config_dict(module, array)
    if 'capacity' in subset or 'all' in subset:
        info['capacity'] = generate_capacity_dict(array)
    if 'network' in subset or 'all' in subset:
        info['network'] = generate_network_dict(array)
    if 'subnet' in subset or 'all' in subset:
        info['subnet'] = generate_subnet_dict(array)
    if 'interfaces' in subset or 'all' in subset:
        info['interfaces'] = generate_interfaces_dict(array)
    if 'hosts' in subset or 'all' in subset:
        info['hosts'] = generate_host_dict(array)
    if 'volumes' in subset or 'all' in subset:
        info['volumes'] = generate_vol_dict(array)
        info['deleted_volumes'] = generate_del_vol_dict(array)
    if 'snapshots' in subset or 'all' in subset:
        info['snapshots'] = generate_snap_dict(module, array)
        info['deleted_snapshots'] = generate_del_snap_dict(module, array)
    if 'hgroups' in subset or 'all' in subset:
        info['hgroups'] = generate_hgroups_dict(array)
    if 'pgroups' in subset or 'all' in subset:
        info['pgroups'] = generate_pgroups_dict(array)
    if 'pods' in subset or 'all' in subset or 'replication' in subset:
        info['replica_links'] = generate_rl_dict(module, array)
        info['pods'] = generate_pods_dict(array)
        info['deleted_pods'] = generate_del_pods_dict(array)
    if 'admins' in subset or 'all' in subset:
        info['admins'] = generate_admin_dict(array)
    if 'vgroups' in subset or 'all' in subset:
        info['vgroups'] = generate_vgroups_dict(array)
    if 'offload' in subset or 'all' in subset:
        info['azure_offload'] = generate_azure_offload_dict(array)
        info['nfs_offload'] = generate_nfs_offload_dict(array)
        info['s3_offload'] = generate_s3_offload_dict(array)
    if 'apps' in subset or 'all' in subset:
        # Cloud Block Store has no apps support.
        if 'CBS' not in info['default']['array_model']:
            info['apps'] = generate_apps_dict(array)
        else:
            info['apps'] = {}
    if 'arrays' in subset or 'all' in subset:
        info['arrays'] = generate_conn_array_dict(module, array)
    if 'certs' in subset or 'all' in subset:
        info['certs'] = generate_certs_dict(array)
    if 'kmip' in subset or 'all' in subset:
        info['kmip'] = generate_kmip_dict(array)
    # File-services subsets need the v6 REST client.
    if FILES_API_VERSION in api_version:
        array_v6 = get_array(module)
        if 'filesystems' in subset or 'all' in subset:
            info['filesystems'] = generate_filesystems_dict(array_v6)
        if 'policies' in subset or 'all' in subset:
            info['policies'] = generate_policies_dict(array_v6)
        if 'clients' in subset or 'all' in subset:
            info['clients'] = generate_clients_dict(array_v6)
        if 'dir_snaps' in subset or 'all' in subset:
            info['dir_snaps'] = generate_dir_snaps_dict(array_v6)

    module.exit_json(changed=False, purefa_info=info)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py
new file mode 100644
index 00000000..d11a98aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_inventory
+short_description: Collect information from Pure Storage FlashArray
+version_added: '1.0.0'
+description:
+ - Collect hardware inventory information from a Pure Storage Flasharray
+author:
+ - Pure Storage ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: collect FlashArray inventory
+ purefa_inventory:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: show inventory information
+ debug:
+ msg: "{{ array_info['purefa_info'] }}"
+
+'''
+
+RETURN = r'''
+purefa_inventory:
+ description: Returns the inventory information for the FlashArray
+ returned: always
+ type: complex
+ sample: {
+ "controllers": {
+ "CT0": {
+ "model": null,
+ "serial": null,
+ "status": "ok"
+ },
+ "CT1": {
+ "model": "FA-405",
+ "serial": "FHVBT52",
+ "status": "ok"
+ }
+ },
+ "drives": {
+ "SH0.BAY0": {
+ "capacity": 2147483648,
+ "protocol": "SAS",
+ "serial": "S18NNEAFA01416",
+ "status": "healthy",
+ "type": "NVRAM"
+ },
+ "SH0.BAY1": {
+ "capacity": 511587647488,
+ "protocol": "SAS",
+ "serial": "S0WZNEACC00517",
+ "status": "healthy",
+ "type": "SSD"
+ },
+ "SH0.BAY10": {
+ "capacity": 511587647488,
+ "protocol": "SAS",
+ "serial": "S0WZNEACB00266",
+ "status": "healthy",
+ "type": "SSD"
+ }
+ },
+ "fans": {
+ "CT0.FAN0": {
+ "status": "ok"
+ },
+ "CT0.FAN1": {
+ "status": "ok"
+ },
+ "CT0.FAN10": {
+ "status": "ok"
+ }
+ },
+ "interfaces": {
+ "CT0.ETH0": {
+ "speed": 1000000000,
+ "status": "ok"
+ },
+ "CT0.ETH1": {
+ "speed": 0,
+ "status": "ok"
+ },
+ "CT0.FC0": {
+ "speed": 8000000000,
+ "status": "ok"
+ },
+ "CT1.IB1": {
+ "speed": 56000000000,
+ "status": "ok"
+ },
+ "CT1.SAS0": {
+ "speed": 24000000000,
+ "status": "ok"
+ }
+ },
+ "power": {
+ "CT0.PWR0": {
+ "model": null,
+ "serial": null,
+ "status": "ok",
+ "voltage": null
+ },
+ "CT0.PWR1": {
+ "model": null,
+ "serial": null,
+ "status": "ok",
+ "voltage": null
+ }
+ },
+ "temps": {
+ "CT0.TMP0": {
+ "status": "ok",
+ "temperature": 18
+ },
+ "CT0.TMP1": {
+ "status": "ok",
+ "temperature": 32
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
def generate_hardware_dict(array):
    """Collect hardware inventory from the array.

    Returns a dict with one sub-dict per category (fans, power, interfaces,
    temps, controllers, drives); each maps a component name to the attributes
    reported by ``array.list_hardware()`` / ``array.list_drives()``.
    """
    hw_info = {'fans': {},
               'controllers': {},
               'temps': {},
               'drives': {},
               'interfaces': {},
               'power': {},
               }
    components = array.list_hardware()
    for component in components:
        component_name = component['name']
        if 'FAN' in component_name:
            hw_info['fans'][component_name] = {'status': component['status']}
        if 'PWR' in component_name:
            hw_info['power'][component_name] = {'status': component['status'],
                                                'voltage': component['voltage'],
                                                'serial': component['serial'],
                                                'model': component['model']
                                                }
        # IB, SAS, ETH and FC ports all land in the same 'interfaces' bucket
        # with identical attributes (the original repeated this block four
        # times, reusing the 'eth_name' variable for FC ports).
        if any(port_type in component_name for port_type in ('IB', 'SAS', 'ETH', 'FC')):
            hw_info['interfaces'][component_name] = {'status': component['status'],
                                                     'speed': component['speed']
                                                     }
        if 'TMP' in component_name:
            hw_info['temps'][component_name] = {'status': component['status'],
                                                'temperature': component['temperature']
                                                }
        if component_name in ['CT0', 'CT1']:
            hw_info['controllers'][component_name] = {'status': component['status'],
                                                      'serial': component['serial'],
                                                      'model': component['model']
                                                      }

    # Build the name->serial lookup once instead of rescanning the component
    # list for every drive (was O(drives * components)).
    serials = {component['name']: component['serial']
               for component in components if 'serial' in component}
    for drive in array.list_drives():
        drive_name = drive['name']
        hw_info['drives'][drive_name] = {'capacity': drive['capacity'],
                                         'status': drive['status'],
                                         'protocol': drive['protocol'],
                                         'type': drive['type']
                                         }
        if drive_name in serials:
            hw_info['drives'][drive_name]['serial'] = serials[drive_name]

    return hw_info
+
+
def main():
    """Entry point: gather FlashArray hardware inventory and exit."""
    module = AnsibleModule(purefa_argument_spec(), supports_check_mode=True)
    module.exit_json(changed=False,
                     purefa_info=generate_hardware_dict(get_system(module)))


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py
new file mode 100644
index 00000000..5eb00029
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: purefa_network
+short_description: Manage network interfaces in a Pure Storage FlashArray
+version_added: '1.0.0'
+description:
+ - This module manages the physical and virtual network interfaces on a Pure Storage FlashArray.
+ - To manage VLAN interfaces use the I(purefa_vlan) module.
+ - To manage network subnets use the I(purefa_subnet) module.
+ - To remove an IP address from a non-management port use 0.0.0.0/0
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Interface name (physical or virtual).
+ required: true
+ type: str
+ state:
+ description:
+ - State of existing interface (on/off).
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ address:
+ description:
+ - IPv4 or IPv6 address of interface in CIDR notation.
+ - To remove an IP address from a non-management port use 0.0.0.0/0
+ required: false
+ type: str
+ gateway:
+ description:
+ - IPv4 or IPv6 address of interface gateway.
+ required: false
+ type: str
+ mtu:
+ description:
+ - MTU size of the interface. Range is 1280 to 9216.
+ required: false
+ default: 1500
+ type: int
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = '''
+- name: Configure and enable network interface ct0.eth8
+ purefa_network:
+ name: ct0.eth8
+ gateway: 10.21.200.1
+ address: "10.21.200.18/24"
+ mtu: 9000
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Disable physical interface ct1.eth2
+ purefa_network:
+ name: ct1.eth2
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Enable virtual network interface vir0
+ purefa_network:
+ name: vir0
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Remove an IP address from iSCSI interface ct0.eth4
+ purefa_network:
+ name: ct0.eth4
+ address: 0.0.0.0/0
+ gateway: 0.0.0.0
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+'''
+
+RETURN = '''
+'''
+
+try:
+ from netaddr import IPAddress, IPNetwork
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def _get_interface(module, array):
+ """Return Interface or None"""
+ interface = {}
+ if module.params['name'][0] == "v":
+ try:
+ interface = array.get_network_interface(module.params['name'])
+ except Exception:
+ return None
+ else:
+ try:
+ interfaces = array.list_network_interfaces()
+ except Exception:
+ return None
+ for ints in range(0, len(interfaces)):
+ if interfaces[ints]['name'] == module.params['name']:
+ interface = interfaces[ints]
+ break
+ return interface
+
+
def update_interface(module, array, interface):
    """Modify interface settings (address/netmask/gateway/MTU) and enable/disable state.

    ``interface`` is the interface's current attribute dict from the array.
    Always exits the module via ``module.exit_json``/``module.fail_json``.
    """
    changed = True
    if not module.check_mode:
        current_state = {'mtu': interface['mtu'],
                         'gateway': interface['gateway'],
                         'address': interface['address'],
                         'netmask': interface['netmask']}
        if not module.params['address']:
            address = interface['address']
        else:
            if module.params['gateway'] and module.params['gateway'] not in IPNetwork(module.params['address']):
                module.fail_json(msg='Gateway and subnet are not compatible.')
            # NOTE(review): this compares the existing gateway string for equality
            # against a two-element list (None or an IPNetwork object), not for
            # membership in the subnet, so it is only ever satisfied when the
            # gateway is None -- confirm the intended semantics before changing.
            elif not module.params['gateway'] and interface['gateway'] not in [None, IPNetwork(module.params['address'])]:
                module.fail_json(msg='Gateway and subnet are not compatible.')
            # Strip the CIDR suffix: the REST API takes address and netmask separately.
            address = str(module.params['address'].split("/", 1)[0])
        if not module.params['mtu']:
            mtu = interface['mtu']
        else:
            if not 1280 <= module.params['mtu'] <= 9216:
                module.fail_json(msg='MTU {0} is out of range (1280 to 9216)'.format(module.params['mtu']))
            else:
                mtu = module.params['mtu']
        if module.params['address']:
            netmask = str(IPNetwork(module.params['address']).netmask)
        else:
            netmask = interface['netmask']
        if not module.params['gateway']:
            gateway = interface['gateway']
        else:
            cidr = str(IPAddress(netmask).netmask_bits())
            full_addr = address + "/" + cidr
            if module.params['gateway'] not in IPNetwork(full_addr):
                module.fail_json(msg='Gateway and subnet are not compatible.')
            gateway = module.params['gateway']
        new_state = {'address': address,
                     'mtu': mtu,
                     'gateway': gateway,
                     'netmask': netmask}
        if new_state == current_state:
            changed = False
        else:
            # Fix: 'and' binds tighter than 'or', so the original condition
            # rejected ANY change on a management port; it also compared
            # against "0.0.0.0/0" although the CIDR suffix has already been
            # stripped from `address` above. Only an explicit address removal
            # (0.0.0.0) on a management/app port should fail.
            if address == "0.0.0.0" and ('management' in interface['services'] or 'app' in interface['services']):
                module.fail_json(msg="Removing IP address from a management or app port is not supported")
            try:
                if new_state['gateway'] is not None:
                    array.set_network_interface(interface['name'],
                                                address=new_state['address'],
                                                mtu=new_state['mtu'],
                                                netmask=new_state['netmask'],
                                                gateway=new_state['gateway'])
                else:
                    array.set_network_interface(interface['name'],
                                                address=new_state['address'],
                                                mtu=new_state['mtu'],
                                                netmask=new_state['netmask'])
            except Exception:
                module.fail_json(msg="Failed to change settings for interface {0}.".format(interface['name']))
        if not interface['enabled'] and module.params['state'] == 'present':
            try:
                array.enable_network_interface(interface['name'])
                changed = True
            except Exception:
                module.fail_json(msg="Failed to enable interface {0}.".format(interface['name']))
        if interface['enabled'] and module.params['state'] == 'absent':
            try:
                array.disable_network_interface(interface['name'])
                changed = True
            except Exception:
                module.fail_json(msg="Failed to disable interface {0}.".format(interface['name']))

    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: validate arguments, locate the interface and apply updates."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            address=dict(type='str'),
            gateway=dict(type='str'),
            mtu=dict(type='int', default=1500),
        )
    )

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    if not HAS_NETADDR:
        module.fail_json(msg='netaddr module is required')

    array = get_system(module)
    interface = _get_interface(module, array)
    if not interface:
        module.fail_json(msg="Invalid network interface specified.")

    update_interface(module, array, interface)

    # update_interface() always exits the module itself; this is only a
    # defensive fallback. (The original repeated this call twice -- the
    # duplicate dead line was removed.)
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py
new file mode 100644
index 00000000..9a9aeb5a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_ntp
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray NTP settings
+description:
+- Set or erase NTP configuration for Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete NTP servers configuration
+ type: str
+ default: present
+ choices: [ absent, present ]
+ ntp_servers:
+ type: list
+ elements: str
+ description:
+ - A list of up to 4 alternate NTP servers. These may include IPv4,
+ IPv6 or FQDNs. Invalid IP addresses will cause the module to fail.
+ No validation is performed for FQDNs.
+ - If more than 4 servers are provided, only the first 4 unique
+ nameservers will be used.
+    - If no servers are given a default of I(0.pool.ntp.org) will be used.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Delete existing NTP server entries
+ purefa_ntp:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set array NTP servers
+ purefa_ntp:
+ state: present
+ ntp_servers:
+ - "0.pool.ntp.org"
+ - "1.pool.ntp.org"
+ - "2.pool.ntp.org"
+ - "3.pool.ntp.org"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def _is_cbs(array, is_cbs=False):
+    """Return True if the target array is a Cloud Block Store instance.
+
+    The ``is_cbs`` parameter is retained for backward compatibility only;
+    its value is ignored.
+    """
+    model = array.get(controllers=True)[0]['model']
+    return 'CBS' in model
+
+
+def remove(duplicate):
+    """Return *duplicate* with duplicates dropped, preserving first-seen order."""
+    # dict keys are unique and insertion-ordered (Python 3.7+), so this
+    # is an order-preserving de-duplication.
+    return list(dict.fromkeys(duplicate))
+
+
+def delete_ntp(module, array):
+    """Delete all configured NTP servers and exit the module.
+
+    Reports changed only when there actually were servers to remove, so
+    the module is idempotent (both in normal and check mode).
+    """
+    changed = False
+    if array.get(ntpserver=True)['ntpserver'] != []:
+        changed = True
+        if not module.check_mode:
+            try:
+                array.set(ntpserver=[])
+            except Exception:
+                module.fail_json(msg='Deletion of NTP servers failed')
+    module.exit_json(changed=changed)
+
+
+def create_ntp(module, array):
+    """Set the array's NTP servers (maximum of four) and exit the module.
+
+    The default-server substitution is applied before the check-mode
+    guard so check mode reflects what a real run would configure.
+    """
+    changed = True
+    if not module.params['ntp_servers']:
+        # Fall back to a public pool server when none are supplied.
+        module.params['ntp_servers'] = ['0.pool.ntp.org']
+    if not module.check_mode:
+        try:
+            # Purity supports at most 4 NTP servers.
+            array.set(ntpserver=module.params['ntp_servers'][0:4])
+        except Exception:
+            module.fail_json(msg='Update of NTP servers failed')
+    module.exit_json(changed=changed)
+
+
+def main():
+    """Entry point for the purefa_ntp module."""
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        ntp_servers=dict(type='list', elements='str'),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+    ))
+
+    module = AnsibleModule(argument_spec,
+                           required_if=[['state', 'present', ['ntp_servers']]],
+                           supports_check_mode=True)
+
+    array = get_system(module)
+    # Cloud Block Store manages time itself - nothing to do there.
+    if _is_cbs(array):
+        module.warn('NTP settings are not necessary for a CBS array - ignoring...')
+        module.exit_json(changed=False)
+
+    if module.params['state'] == 'absent':
+        delete_ntp(module, array)
+    else:
+        module.params['ntp_servers'] = remove(module.params['ntp_servers'])
+        current = sorted(array.get(ntpserver=True)['ntpserver'])
+        desired = sorted(module.params['ntp_servers'][0:4])
+        if current != desired:
+            create_ntp(module, array)
+
+    module.exit_json(changed=False)
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py
new file mode 100644
index 00000000..c0161dbc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_offload
+version_added: '1.0.0'
+short_description: Create, modify and delete NFS, S3 or Azure offload targets
+description:
+- Create, modify and delete NFS, S3 or Azure offload targets.
+- Only supported on Purity v5.2.0 or higher.
+- You must have a correctly configured offload network for offload to work.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of offload
+ default: present
+ choices: [ absent, present ]
+ type: str
+ name:
+ description:
+ - The name of the offload target
+ required: true
+ type: str
+ protocol:
+ description:
+ - Define which protocol the offload engine uses
+ default: nfs
+ choices: [ nfs, s3, azure, gcp ]
+ type: str
+ address:
+ description:
+ - The IP or FQDN address of the NFS server
+ type: str
+ share:
+ description:
+ - NFS export on the NFS server
+ type: str
+ options:
+ description:
+    - Additional mount options for the NFS share
+ - Supported mount options include I(port), I(rsize),
+ I(wsize), I(nfsvers), and I(tcp) or I(udp)
+ required: false
+ default: ""
+ type: str
+ access_key:
+ description:
+ - Access Key ID of the offload target
+ type: str
+ container:
+ description:
+ - Name of the blob container of the Azure target
+ default: offload
+ type: str
+ bucket:
+ description:
+ - Name of the bucket for the S3 or GCP target
+ type: str
+ account:
+ description:
+ - Name of the Azure blob storage account
+ type: str
+ secret:
+ description:
+ - Secret Access Key for the offload target
+ type: str
+ initialize:
+ description:
+ - Define whether to initialize the offload bucket
+ type: bool
+ default: true
+ placement:
+ description:
+ - AWS S3 placement strategy
+ type: str
+ choices: ['retention-based', 'aws-standard-class']
+ default: retention-based
+
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create NFS offload target
+ purefa_offload:
+ name: nfs-offload
+ protocol: nfs
+ address: 10.21.200.4
+ share: "/offload_target"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create S3 offload target
+ purefa_offload:
+ name: s3-offload
+ protocol: s3
+ access_key: "3794fb12c6204e19195f"
+ bucket: offload-bucket
+ secret: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+ placement: aws-standard-class
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create Azure offload target
+ purefa_offload:
+ name: azure-offload
+ protocol: azure
+ secret: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+ container: offload-container
+ account: user1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete offload target
+ purefa_offload:
+ name: nfs-offload
+ protocol: nfs
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+'''
+
+RETURN = r'''
+'''
+
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_array, get_system, purefa_argument_spec
+
+MIN_REQUIRED_API_VERSION = '1.16'
+REGEX_TARGET_NAME = re.compile(r"^[a-zA-Z0-9\-]*$")
+P53_API_VERSION = '1.17'
+GCP_API_VERSION = '2.3'
+
+
+def get_target(module, array):
+    """Return the named offload target, or None if it does not exist."""
+    # get_offload raises when the target is absent; treat that as "not found".
+    try:
+        return array.get_offload(module.params['name'])
+    except Exception:
+        return None
+
+
+def create_offload(module, array):
+    """Create a new offload target and exit the module.
+
+    Requires the @offload.data network interface to be configured and
+    enabled on the array before any target can be connected.
+    """
+    changed = True
+    api_version = array._list_available_rest_versions()
+    if not module.check_mode:
+        # First check if the offload network interface is there and enabled
+        try:
+            if not array.get_network_interface('@offload.data')['enabled']:
+                module.fail_json(msg='Offload Network interface not enabled. Please resolve.')
+        except Exception:
+            module.fail_json(msg='Offload Network interface not correctly configured. Please resolve.')
+        if module.params['protocol'] == 'nfs':
+            try:
+                array.connect_nfs_offload(module.params['name'],
+                                          mount_point=module.params['share'],
+                                          address=module.params['address'],
+                                          mount_options=module.params['options'])
+            except Exception:
+                module.fail_json(msg='Failed to create NFS offload {0}. '
+                                     'Please perform diagnostic checks.'.format(module.params['name']))
+        if module.params['protocol'] == 's3':
+            # placement_strategy is only supported from Purity 5.3 (REST 1.17).
+            if P53_API_VERSION in api_version:
+                try:
+                    array.connect_s3_offload(module.params['name'],
+                                             access_key_id=module.params['access_key'],
+                                             secret_access_key=module.params['secret'],
+                                             bucket=module.params['bucket'],
+                                             placement_strategy=module.params['placement'],
+                                             initialize=module.params['initialize'])
+                except Exception:
+                    module.fail_json(msg='Failed to create S3 offload {0}. '
+                                         'Please perform diagnostic checks.'.format(module.params['name']))
+            else:
+                try:
+                    array.connect_s3_offload(module.params['name'],
+                                             access_key_id=module.params['access_key'],
+                                             secret_access_key=module.params['secret'],
+                                             bucket=module.params['bucket'],
+                                             initialize=module.params['initialize'])
+                except Exception:
+                    module.fail_json(msg='Failed to create S3 offload {0}. '
+                                         'Please perform diagnostic checks.'.format(module.params['name']))
+        if module.params['protocol'] == 'azure' and P53_API_VERSION in api_version:
+            try:
+                # BUGFIX: account_name previously read the non-existent
+                # '.bucket' key; the Azure storage account is 'account'.
+                array.connect_azure_offload(module.params['name'],
+                                            container_name=module.params['container'],
+                                            secret_access_key=module.params['secret'],
+                                            account_name=module.params['account'],
+                                            initialize=module.params['initialize'])
+            except Exception:
+                module.fail_json(msg='Failed to create Azure offload {0}. '
+                                     'Please perform diagnostic checks.'.format(module.params['name']))
+        if module.params['protocol'] == 'gcp' and GCP_API_VERSION in api_version:
+            # GCP targets go through the v6 (py-pure-client) REST interface.
+            arrayv6 = get_array(module)
+            bucket = flasharray.OffloadGoogleCloud(access_key_id=module.params['access_key'],
+                                                   bucket=module.params['bucket'],
+                                                   secret_access_key=module.params['secret'])
+            offload = flasharray.OffloadPost(google_cloud=bucket)
+            res = arrayv6.post_offloads(offload=offload, initialize=module.params['initialize'], names=[module.params['name']])
+            if res.status_code != 200:
+                module.fail_json(msg='Failed to create GCP offload {0}. Error: {1}'
+                                     'Please perform diagnostic checks.'.format(module.params['name'], res.errors[0].message))
+    module.exit_json(changed=changed)
+
+
+def update_offload(module, array):
+    """Update offload target"""
+    # Offload targets cannot currently be modified in place; this is a
+    # placeholder that always reports no change and exits the module.
+    changed = False
+    module.exit_json(changed=changed)
+
+
+def delete_offload(module, array):
+    """Disconnect (delete) the named offload target and exit the module."""
+    changed = True
+    api_version = array._list_available_rest_versions()
+    if not module.check_mode:
+        name = module.params['name']
+        protocol = module.params['protocol']
+        if protocol == 'nfs':
+            try:
+                array.disconnect_nfs_offload(name)
+            except Exception:
+                module.fail_json(msg='Failed to delete NFS offload {0}.'.format(name))
+        if protocol == 's3':
+            try:
+                array.disconnect_s3_offload(name)
+            except Exception:
+                module.fail_json(msg='Failed to delete S3 offload {0}.'.format(name))
+        # Azure disconnect requires Purity 5.3 (REST 1.17) or later.
+        if protocol == 'azure' and P53_API_VERSION in api_version:
+            try:
+                array.disconnect_azure_offload(name)
+            except Exception:
+                module.fail_json(msg='Failed to delete Azure offload {0}.'.format(name))
+    module.exit_json(changed=changed)
+
+
+def main():
+    """Entry point for the purefa_offload module."""
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['present', 'absent']),
+        protocol=dict(type='str', default='nfs', choices=['nfs', 's3', 'azure', 'gcp']),
+        placement=dict(type='str', default='retention-based', choices=['retention-based', 'aws-standard-class']),
+        name=dict(type='str', required=True),
+        initialize=dict(default=True, type='bool'),
+        access_key=dict(type='str'),
+        secret=dict(type='str', no_log=True),
+        bucket=dict(type='str'),
+        container=dict(type='str', default='offload'),
+        account=dict(type='str'),
+        share=dict(type='str'),
+        address=dict(type='str'),
+        options=dict(type='str', default=''),
+    ))
+
+    module = AnsibleModule(argument_spec,
+                           supports_check_mode=True)
+
+    # BUGFIX: the original code compared argument_spec['state'] (a spec
+    # dict) to "present", which is always False, so the per-protocol
+    # required parameters were never enforced.  Validate after parsing.
+    if module.params['state'] == 'present':
+        required_params = {
+            'nfs': ['address', 'share'],
+            's3': ['access_key', 'secret', 'bucket'],
+            'gcp': ['access_key', 'secret', 'bucket'],
+            'azure': ['account', 'secret'],
+        }[module.params['protocol']]
+        missing = [param for param in required_params if not module.params[param]]
+        if missing:
+            module.fail_json(msg='protocol is {0} but the following are missing: {1}'.format(
+                module.params['protocol'], ', '.join(missing)))
+
+    if not HAS_PURESTORAGE and module.params['protocol'] == 'gcp':
+        module.fail_json(msg='py-pure-client sdk is required for this module')
+
+    array = get_system(module)
+    api_version = array._list_available_rest_versions()
+
+    if MIN_REQUIRED_API_VERSION not in api_version:
+        module.fail_json(msg='FlashArray REST version not supported. '
+                             'Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+    if not re.match(r"^[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9]$", module.params['name']) or len(module.params['name']) > 56:
+        module.fail_json(msg='Target name invalid. '
+                             'Target name must be between 1 and 56 characters (alphanumeric and -) in length '
+                             'and begin and end with a letter or number. The name must include at least one letter.')
+    # Only validate the bucket name when one was supplied; on state=absent
+    # no bucket is needed and re.match(None) would raise TypeError.
+    if module.params['protocol'] in ['s3', 'gcp'] and module.params['bucket']:
+        if not re.match(r"^[a-z0-9][a-z0-9.\-]*[a-z0-9]$", module.params['bucket']) or \
+                len(module.params['bucket']) > 63:
+            module.fail_json(msg='Bucket name invalid. '
+                                 'Bucket name must be between 3 and 63 characters '
+                                 '(lowercase, alphanumeric, dash or period) in length '
+                                 'and begin and end with a letter or number.')
+
+    # The 'offload' app must be installed, healthy, at least 5.2.0 and
+    # version-matched to Purity before any offload target can be created.
+    apps = array.list_apps()
+    app_version = 0
+    all_good = False
+    for app in range(0, len(apps)):
+        if apps[app]['name'] == 'offload':
+            if (apps[app]['enabled'] and apps[app]['status'] == 'healthy' and LooseVersion(apps[app]['version']) >= LooseVersion('5.2.0')):
+                all_good = True
+                app_version = apps[app]['version']
+                break
+
+    if not all_good:
+        module.fail_json(msg='Correct Offload app not installed or incorrectly configured')
+    else:
+        if LooseVersion(array.get()['version']) != LooseVersion(app_version):
+            module.fail_json(msg='Offload app version must match Purity version. Please upgrade.')
+
+    target = get_target(module, array)
+    if module.params['state'] == 'present' and not target:
+        target_count = len(array.list_offload())
+        # Currently only 1 offload target is supported
+        # TODO: (SD) when more targets supported add in REST version check as well
+        if target_count != 0:
+            module.fail_json(msg='Currently only 1 Offload Target is supported.')
+        create_offload(module, array)
+    elif module.params['state'] == 'present' and target:
+        update_offload(module, array)
+    elif module.params['state'] == 'absent' and target:
+        delete_offload(module, array)
+
+    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
new file mode 100644
index 00000000..6efd48f9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
@@ -0,0 +1,563 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_pg
+version_added: '1.0.0'
+short_description: Manage protection groups on Pure Storage FlashArrays
+description:
+- Create, delete or modify protection groups on Pure Storage FlashArrays.
+- If a protection group exists and you try to add non-valid types, eg. a host
+ to a volume protection group the module will ignore the invalid types.
+- Protection Groups on Offload targets are supported.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ pgroup:
+ description:
+ - The name of the protection group.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the protection group should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ volume:
+ description:
+ - List of existing volumes to add to protection group.
+ - Note that volume are case-sensitive however FlashArray volume names are unique
+ and ignore case - you cannot have I(volumea) and I(volumeA)
+ type: list
+ elements: str
+ host:
+ description:
+ - List of existing hosts to add to protection group.
+ - Note that hostnames are case-sensitive however FlashArray hostnames are unique
+ and ignore case - you cannot have I(hosta) and I(hostA)
+ type: list
+ elements: str
+ hostgroup:
+ description:
+ - List of existing hostgroups to add to protection group.
+ - Note that hostgroups are case-sensitive however FlashArray hostgroup names are unique
+ and ignore case - you cannot have I(groupa) and I(groupA)
+ type: list
+ elements: str
+ eradicate:
+ description:
+ - Define whether to eradicate the protection group on delete and leave in trash.
+ type : bool
+ default: 'no'
+ enabled:
+ description:
+ - Define whether to enabled snapshots for the protection group.
+ type : bool
+ default: 'yes'
+ target:
+ description:
+ - List of remote arrays or offload target for replication protection group
+ to connect to.
+ - Note that all replicated protection groups are asynchronous.
+ - Target arrays or offload targets must already be connected to the source array.
+    - Maximum number of targets per Protection Group is 4, assuming your
+      configuration supports this.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create new local protection group
+ purefa_pg:
+ pgroup: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new protection group called bar in pod called foo
+ purefa_pg:
+ pgroup: "foo::bar"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new replicated protection group
+ purefa_pg:
+ pgroup: foo
+ target:
+ - arrayb
+ - arrayc
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new replicated protection group to offload target and remote array
+ purefa_pg:
+ pgroup: foo
+ target:
+ - offload
+ - arrayc
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new protection group with snapshots disabled
+ purefa_pg:
+ pgroup: foo
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete protection group
+ purefa_pg:
+ pgroup: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Eradicate protection group foo on offload target where source array is arrayA
+ purefa_pg:
+ pgroup: "arrayA:foo"
+ target: offload
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Create protection group for hostgroups
+ purefa_pg:
+ pgroup: bar
+ hostgroup:
+ - hg1
+ - hg2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create protection group for hosts
+ purefa_pg:
+ pgroup: bar
+ host:
+ - host1
+ - host2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create replicated protection group for volumes
+ purefa_pg:
+ pgroup: bar
+ volume:
+ - vol1
+ - vol2
+ target: arrayb
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+OFFLOAD_API_VERSION = '1.16'
+P53_API_VERSION = '1.17'
+AC_PG_API_VERSION = '1.13'
+
+
+def get_pod(module, array):
+    """Return the ActiveCluster pod named in the pgroup parameter, or None."""
+    pod_name, _, _ = module.params['pgroup'].partition("::")
+    try:
+        return array.get_pod(pod=pod_name)
+    except Exception:
+        return None
+
+
+def get_targets(array):
+    """Return the names of all connected offload targets, or None on error."""
+    try:
+        target_details = array.list_offload()
+    except Exception:
+        return None
+    return [target['name'] for target in target_details
+            if target['status'] == "connected"]
+
+
+def get_arrays(array):
+    """Return the names of all connected remote arrays."""
+    arrays = []
+    array_details = array.list_array_connections()
+    # Purity 5.3+ (REST 1.17) reports a 'status' string instead of the
+    # older boolean 'connected' field.
+    new_api = P53_API_VERSION in array._list_available_rest_versions()
+    for connection in array_details:
+        if new_api:
+            if connection['status'] == "connected":
+                arrays.append(connection['array_name'])
+        elif connection['connected']:
+            arrays.append(connection['array_name'])
+    return arrays
+
+
+def get_pending_pgroup(module, array):
+    """Return the deleted-but-not-eradicated protection group, or None.
+
+    A group still in the eradication timer has a truthy 'time_remaining'.
+    """
+    name = module.params['pgroup']
+    if ":" in name and "::" not in name:
+        # Offload-style name (array:pgroup) - search across all targets.
+        candidates = array.list_pgroups(pending=True, on="*")
+    else:
+        candidates = array.list_pgroups(pending=True)
+    for pgrp in candidates:
+        if pgrp["name"] == name and pgrp['time_remaining']:
+            return pgrp
+    return None
+
+
+def get_pgroup(module, array):
+    """Return the live protection group, or None if it does not exist."""
+    name = module.params['pgroup']
+    if ":" in name and "::" not in name:
+        # Offload-style name (array:pgroup) - search across all targets.
+        candidates = array.list_pgroups(on="*")
+    else:
+        candidates = array.list_pgroups()
+    for pgrp in candidates:
+        if pgrp["name"] == name:
+            return pgrp
+    return None
+
+
+def get_pgroup_sched(module, array):
+    """Return the protection group's schedule entry, or None if not found."""
+    for pgrp in array.list_pgroups(schedule=True):
+        if pgrp["name"] == module.params['pgroup']:
+            return pgrp
+    return None
+
+
+def check_pg_on_offload(module, array):
+    """Return the offload target already holding this PG, or None."""
+    # On a target the group appears namespaced as "<source-array>:<pgroup>".
+    remote_pg = array.get()['array_name'] + ":" + module.params['pgroup']
+    for target in get_targets(array):
+        for rpg in array.list_pgroups(pending=True, on=target):
+            if rpg['name'] == remote_pg:
+                return target
+    return None
+
+
+def make_pgroup(module, array):
+    """Create a new protection group (optionally replicated) and exit."""
+    changed = True
+    if not module.check_mode:
+        pgname = module.params['pgroup']
+        # Purity considers at most four replication targets.
+        requested_targets = module.params['target'][0:4] if module.params['target'] else []
+        if module.params['target']:
+            api_version = array._list_available_rest_versions()
+            connected_targets = []
+            connected_arrays = get_arrays(array)
+            if OFFLOAD_API_VERSION in api_version:
+                connected_targets = get_targets(array)
+                offload_name = check_pg_on_offload(module, array)
+                if offload_name and offload_name in requested_targets:
+                    module.fail_json(msg='Protection Group {0} already exists on offload target {1}.'.format(pgname, offload_name))
+
+            connected_arrays = connected_arrays + connected_targets
+            if connected_arrays == []:
+                module.fail_json(msg='No connected targets on source array.')
+            if not set(requested_targets).issubset(connected_arrays):
+                module.fail_json(msg='Check all selected targets are connected to the source array.')
+            try:
+                array.create_pgroup(pgname, targetlist=requested_targets)
+            except Exception:
+                module.fail_json(msg='Creation of replicated pgroup {0} failed. {1}'.format(pgname, requested_targets))
+        else:
+            # Local (non-replicated) protection group.
+            try:
+                array.create_pgroup(pgname)
+            except Exception:
+                module.fail_json(msg='Creation of pgroup {0} failed.'.format(pgname))
+        try:
+            # Replicated groups toggle the replication schedule; local
+            # groups toggle the snapshot schedule.
+            if module.params['target']:
+                array.set_pgroup(pgname, replicate_enabled=module.params['enabled'])
+            else:
+                array.set_pgroup(pgname, snap_enabled=module.params['enabled'])
+        except Exception:
+            module.fail_json(msg='Enabling pgroup {0} failed.'.format(pgname))
+        # Populate initial membership (volume/host/hostgroup are mutually
+        # exclusive at the module level).
+        if module.params['volume']:
+            try:
+                array.set_pgroup(pgname, vollist=module.params['volume'])
+            except Exception:
+                module.fail_json(msg='Adding volumes to pgroup {0} failed.'.format(pgname))
+        if module.params['host']:
+            try:
+                array.set_pgroup(pgname, hostlist=module.params['host'])
+            except Exception:
+                module.fail_json(msg='Adding hosts to pgroup {0} failed.'.format(pgname))
+        if module.params['hostgroup']:
+            try:
+                array.set_pgroup(pgname, hgrouplist=module.params['hostgroup'])
+            except Exception:
+                module.fail_json(msg='Adding hostgroups to pgroup {0} failed.'.format(pgname))
+    module.exit_json(changed=changed)
+
+
+def update_pgroup(module, array):
+    """Update an existing protection group to match the requested state.
+
+    Compares targets, schedule enablement and member volumes/hosts/
+    hostgroups against the module parameters and applies only the
+    required changes.  Always exits the module via exit_json.
+    (A stray debug call ``module.warn('Hello')`` was removed.)
+    """
+    changed = True
+    if not module.check_mode:
+        changed = False
+        if module.params['target']:
+            api_version = array._list_available_rest_versions()
+            connected_targets = []
+            connected_arrays = get_arrays(array)
+
+            if OFFLOAD_API_VERSION in api_version:
+                connected_targets = get_targets(array)
+                offload_name = check_pg_on_offload(module, array)
+                if offload_name and offload_name in module.params['target'][0:4]:
+                    module.fail_json(msg='Protection Group {0} already exists on offload target {1}.'.format(module.params['pgroup'], offload_name))
+
+            connected_arrays = connected_arrays + connected_targets
+            if connected_arrays == []:
+                module.fail_json(msg='No targets connected to source array.')
+            current_connects = array.get_pgroup(module.params['pgroup'])['targets']
+            current_targets = []
+
+            if current_connects:
+                for targetcnt in range(0, len(current_connects)):
+                    current_targets.append(current_connects[targetcnt]['name'])
+
+            # Only the first four requested targets are considered (Purity limit).
+            if set(module.params['target'][0:4]) != set(current_targets):
+                if not set(module.params['target'][0:4]).issubset(connected_arrays):
+                    module.fail_json(msg='Check all selected targets are connected to the source array.')
+                try:
+                    array.set_pgroup(module.params['pgroup'], targetlist=module.params['target'][0:4])
+                    changed = True
+                except Exception:
+                    module.fail_json(msg='Changing targets for pgroup {0} failed.'.format(module.params['pgroup']))
+
+        # Enable/disable the replication or snapshot schedule as appropriate.
+        if module.params['target'] and module.params['enabled'] != get_pgroup_sched(module, array)['replicate_enabled']:
+            try:
+                array.set_pgroup(module.params['pgroup'], replicate_enabled=module.params['enabled'])
+                changed = True
+            except Exception:
+                module.fail_json(msg='Changing enabled status of pgroup {0} failed.'.format(module.params['pgroup']))
+        elif not module.params['target'] and module.params['enabled'] != get_pgroup_sched(module, array)['snap_enabled']:
+            try:
+                array.set_pgroup(module.params['pgroup'], snap_enabled=module.params['enabled'])
+                changed = True
+            except Exception:
+                module.fail_json(msg='Changing enabled status of pgroup {0} failed.'.format(module.params['pgroup']))
+
+        # Volumes can only be managed when no hosts/hostgroups are members.
+        if module.params['volume']:
+            current = get_pgroup(module, array)
+            if current['hosts'] is None and current['hgroups'] is None:
+                if current['volumes'] is None:
+                    try:
+                        array.set_pgroup(module.params['pgroup'], vollist=module.params['volume'])
+                        changed = True
+                    except Exception:
+                        module.fail_json(msg='Adding volumes to pgroup {0} failed.'.format(module.params['pgroup']))
+                else:
+                    # Names are case-insensitive on the array.
+                    cased_vols = [vol.lower() for vol in module.params['volume']]
+                    cased_pgvols = [vol.lower() for vol in current['volumes']]
+                    if not all(x in cased_pgvols for x in cased_vols):
+                        try:
+                            array.set_pgroup(module.params['pgroup'], vollist=module.params['volume'])
+                            changed = True
+                        except Exception:
+                            module.fail_json(msg='Changing volumes in pgroup {0} failed.'.format(module.params['pgroup']))
+
+        # Hosts can only be managed when no volumes/hostgroups are members.
+        if module.params['host']:
+            current = get_pgroup(module, array)
+            if current['volumes'] is None and current['hgroups'] is None:
+                if current['hosts'] is None:
+                    try:
+                        array.set_pgroup(module.params['pgroup'], hostlist=module.params['host'])
+                        changed = True
+                    except Exception:
+                        module.fail_json(msg='Adding hosts to pgroup {0} failed.'.format(module.params['pgroup']))
+                else:
+                    cased_hosts = [host.lower() for host in module.params['host']]
+                    cased_pghosts = [host.lower() for host in current['hosts']]
+                    if not all(x in cased_pghosts for x in cased_hosts):
+                        try:
+                            array.set_pgroup(module.params['pgroup'], hostlist=module.params['host'])
+                            changed = True
+                        except Exception:
+                            module.fail_json(msg='Changing hosts in pgroup {0} failed.'.format(module.params['pgroup']))
+
+        # Hostgroups can only be managed when no hosts/volumes are members.
+        if module.params['hostgroup']:
+            current = get_pgroup(module, array)
+            if current['hosts'] is None and current['volumes'] is None:
+                if current['hgroups'] is None:
+                    try:
+                        array.set_pgroup(module.params['pgroup'], hgrouplist=module.params['hostgroup'])
+                        changed = True
+                    except Exception:
+                        module.fail_json(msg='Adding hostgroups to pgroup {0} failed.'.format(module.params['pgroup']))
+                else:
+                    cased_hostg = [hostg.lower() for hostg in module.params['hostgroup']]
+                    cased_pghostg = [hostg.lower() for hostg in current['hgroups']]
+                    if not all(x in cased_pghostg for x in cased_hostg):
+                        try:
+                            array.set_pgroup(module.params['pgroup'], hgrouplist=module.params['hostgroup'])
+                            changed = True
+                        except Exception:
+                            module.fail_json(msg='Changing hostgroups in pgroup {0} failed.'.format(module.params['pgroup']))
+
+    module.exit_json(changed=changed)
+
+
+def eradicate_pgroup(module, array):
+    """Permanently remove a deleted protection group and exit the module."""
+    changed = True
+    if not module.check_mode:
+        name = module.params['pgroup']
+        if ":" in name and "::" not in name:
+            # Offload-style name: eradicate on the remote target.
+            try:
+                target = ''.join(module.params['target'])
+                array.destroy_pgroup(name, on=target, eradicate=True)
+            except Exception:
+                module.fail_json(msg='Eradicating pgroup {0} failed.'.format(name))
+        else:
+            try:
+                array.destroy_pgroup(name, eradicate=True)
+            except Exception:
+                module.fail_json(msg='Eradicating pgroup {0} failed.'.format(name))
+    module.exit_json(changed=changed)
+
+
+def delete_pgroup(module, array):
+    """Destroy (soft-delete) a protection group and exit the module.
+
+    When 'eradicate' is set, hands off to eradicate_pgroup(), which
+    performs its own exit_json.
+    """
+    changed = True
+    if not module.check_mode:
+        name = module.params['pgroup']
+        if ":" in name and "::" not in name:
+            # Offload-style name: destroy on the remote target.
+            try:
+                target = ''.join(module.params['target'])
+                array.destroy_pgroup(name, on=target)
+            except Exception:
+                module.fail_json(msg='Deleting pgroup {0} failed.'.format(name))
+        else:
+            try:
+                array.destroy_pgroup(name)
+            except Exception:
+                module.fail_json(msg='Deleting pgroup {0} failed.'.format(name))
+        if module.params['eradicate']:
+            eradicate_pgroup(module, array)
+
+    module.exit_json(changed=changed)
+
+
+def main():
+    """Entry point for the purefa_pg module."""
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        pgroup=dict(type='str', required=True),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        volume=dict(type='list', elements='str'),
+        host=dict(type='list', elements='str'),
+        hostgroup=dict(type='list', elements='str'),
+        target=dict(type='list', elements='str'),
+        eradicate=dict(type='bool', default=False),
+        enabled=dict(type='bool', default=True),
+    ))
+
+    module = AnsibleModule(argument_spec,
+                           mutually_exclusive=[['volume', 'host', 'hostgroup']],
+                           supports_check_mode=True)
+
+    state = module.params['state']
+    pgname = module.params['pgroup']
+    array = get_system(module)
+    api_version = array._list_available_rest_versions()
+    if ":" in pgname and OFFLOAD_API_VERSION not in api_version:
+        module.fail_json(msg='API version does not support offload protection groups.')
+    if "::" in pgname and AC_PG_API_VERSION not in api_version:
+        module.fail_json(msg='API version does not support ActiveCluster protection groups.')
+
+    pgroup = get_pgroup(module, array)
+    xpgroup = get_pending_pgroup(module, array)
+    if "::" in pgname and not get_pod(module, array):
+        module.fail_json(msg='Pod {0} does not exist.'.format(pgname.split('::')[0]))
+
+    # Fail fast on the first unknown host/hostgroup.
+    for hst in module.params['host'] or []:
+        try:
+            array.get_host(hst)
+        except Exception:
+            module.fail_json(msg='Host {0} not found'.format(hst))
+
+    for hstg in module.params['hostgroup'] or []:
+        try:
+            array.get_hgroup(hstg)
+        except Exception:
+            module.fail_json(msg='Hostgroup {0} not found'.format(hstg))
+
+    if pgroup and state == 'present':
+        update_pgroup(module, array)
+    elif pgroup and state == 'absent':
+        delete_pgroup(module, array)
+    elif xpgroup and state == 'absent' and module.params['eradicate']:
+        eradicate_pgroup(module, array)
+    elif not pgroup and not xpgroup and state == 'present':
+        make_pgroup(module, array)
+
+    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py
new file mode 100644
index 00000000..d366739f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py
@@ -0,0 +1,448 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_pgsched
+short_description: Manage protection groups replication schedules on Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+- Modify or delete protection groups replication schedules on Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the protection group.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether to set or delete the protection group schedule.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ schedule:
+ description:
+ - Which schedule to change.
+ type: str
+ choices: ['replication', 'snapshot']
+ required: True
+ enabled:
+ description:
+ - Enable the schedule being configured.
+ type: bool
+ default: True
+ replicate_at:
+ description:
+ - Specifies the preferred time as HH:MM:SS, using 24-hour clock, at which to generate snapshots.
+ type: int
+ blackout_start:
+ description:
+ - Specifies the time at which to suspend replication.
+ - Provide a time in 12-hour AM/PM format, eg. 11AM
+ type: str
+ blackout_end:
+ description:
+ - Specifies the time at which to restart replication.
+ - Provide a time in 12-hour AM/PM format, eg. 5PM
+ type: str
+ replicate_frequency:
+ description:
+ - Specifies the replication frequency in seconds.
+ - Range 900 - 34560000 (FA-405, //M10, //X10i and Cloud Block Store).
+ - Range 300 - 34560000 (all other arrays).
+ type: int
+ snap_at:
+ description:
+ - Specifies the preferred time as HH:MM:SS, using 24-hour clock, at which to generate snapshots.
+ - Only valid if I(snap_frequency) is an exact multiple of 86400, ie 1 day.
+ type: int
+ snap_frequency:
+ description:
+ - Specifies the snapshot frequency in seconds.
+ - Range available 300 - 34560000.
+ type: int
+ days:
+ description:
+ - Specifies the number of days to keep the I(per_day) snapshots beyond the
+ I(all_for) period before they are eradicated
+ - Max retention period is 4000 days
+ type: int
+ all_for:
+ description:
+ - Specifies the length of time, in seconds, to keep the snapshots on the
+ source array before they are eradicated.
+ - Range available 1 - 34560000.
+ type: int
+ per_day:
+ description:
+ - Specifies the number of I(per_day) snapshots to keep beyond the I(all_for) period.
+ - Maximum number is 1440
+ type: int
+ target_all_for:
+ description:
+ - Specifies the length of time, in seconds, to keep the replicated snapshots on the targets.
+ - Range is 1 - 34560000 seconds.
+ type: int
+ target_per_day:
+ description:
+ - Specifies the number of I(per_day) replicated snapshots to keep beyond the I(target_all_for) period.
+ - Maximum number is 1440
+ type: int
+ target_days:
+ description:
+ - Specifies the number of days to keep the I(target_per_day) replicated snapshots
+ beyond the I(target_all_for) period before they are eradicated.
+ - Max retention period is 4000 days
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Update protection group snapshot schedule
+ purefa_pgsched:
+ name: foo
+ schedule: snapshot
+ enabled: true
+ snap_frequency: 86400
+ snap_at: 15:30:00
+ per_day: 5
+ all_for: 5
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update protection group replication schedule
+ purefa_pgsched:
+ name: foo
+ schedule: replication
+ enabled: true
+ replicate_frequency: 86400
+ replicate_at: 15:30:00
+ target_per_day: 5
+ target_all_for: 5
+ blackout_start: 2AM
+ blackout_end: 5AM
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete protection group snapshot schedule
+ purefa_pgsched:
+ name: foo
+    schedule: snapshot
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete protection group replication schedule
+ purefa_pgsched:
+ name: foo
+    schedule: replication
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def get_pending_pgroup(module, array):
+ """ Get Protection Group"""
+ pgroup = None
+ if ":" in module.params['name']:
+ for pgrp in array.list_pgroups(pending=True, on="*"):
+ if pgrp["name"] == module.params['name'] and pgrp['time_remaining']:
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups(pending=True):
+ if pgrp["name"] == module.params['name'] and pgrp['time_remaining']:
+ pgroup = pgrp
+ break
+
+ return pgroup
+
+
+def get_pgroup(module, array):
+ """ Get Protection Group"""
+ pgroup = None
+ if ":" in module.params['name']:
+ for pgrp in array.list_pgroups(on="*"):
+ if pgrp["name"] == module.params['name']:
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups():
+ if pgrp["name"] == module.params['name']:
+ pgroup = pgrp
+ break
+
+ return pgroup
+
+
+def _convert_to_minutes(hour):
+ if hour[-2:] == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:] == "AM":
+ return int(hour[:-2]) * 3600
+ elif hour[-2:] == "PM" and hour[:2] == "12":
+ return 43200
+ return (int(hour[:-2]) + 12) * 3600
+
+
+def update_schedule(module, array):
+ """ Update Protection Group Schedule"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ try:
+ schedule = array.get_pgroup(module.params['name'], schedule=True)
+ retention = array.get_pgroup(module.params['name'], retention=True)
+ if not schedule['replicate_blackout']:
+ schedule['replicate_blackout'] = [{'start': 0, 'end': 0}]
+ except Exception:
+ module.fail_json(msg="Failed to get current schedule for pgroup {0}.".format(module.params['name']))
+ current_repl = {'replicate_frequency': schedule['replicate_frequency'],
+ 'replicate_enabled': schedule['replicate_enabled'],
+ 'target_days': retention['target_days'],
+ 'replicate_at': schedule['replicate_at'],
+ 'target_per_day': retention['target_per_day'],
+ 'target_all_for': retention['target_all_for'],
+ 'blackout_start': schedule['replicate_blackout'][0]['start'],
+ 'blackout_end': schedule['replicate_blackout'][0]['end']}
+ current_snap = {'days': retention['days'],
+ 'snap_frequency': schedule['snap_frequency'],
+ 'snap_enabled': schedule['snap_enabled'],
+ 'snap_at': schedule['snap_at'],
+ 'per_day': retention['per_day'],
+ 'all_for': retention['all_for']}
+ if module.params['schedule'] == 'snapshot':
+ if not module.params['snap_frequency']:
+ snap_frequency = current_snap['snap_frequency']
+ else:
+ if not 300 <= module.params['snap_frequency'] <= 34560000:
+ module.fail_json(msg="Snap Frequency support is out of range (300 to 34560000)")
+ else:
+ snap_frequency = module.params['snap_frequency']
+
+ if not module.params['snap_at']:
+ snap_at = current_snap['snap_at']
+ else:
+ snap_at = module.params['snap_at']
+
+ if not module.params['days']:
+ days = current_snap['days']
+ else:
+ if module.params['days'] > 4000:
+ module.fail_json(msg="Maximum value for days is 4000")
+ else:
+ days = module.params['days']
+
+ if not module.params['per_day']:
+ per_day = current_snap['per_day']
+ else:
+ if module.params['per_day'] > 1440:
+ module.fail_json(msg="Maximum value for per_day is 1440")
+ else:
+ per_day = module.params['per_day']
+
+ if not module.params['all_for']:
+ all_for = current_snap['all_for']
+ else:
+ if module.params['all_for'] > 34560000:
+ module.fail_json(msg="Maximum all_for value is 34560000")
+ else:
+ all_for = module.params['all_for']
+ new_snap = {'days': days,
+ 'snap_frequency': snap_frequency,
+ 'snap_enabled': module.params['enabled'],
+ 'snap_at': snap_at,
+ 'per_day': per_day,
+ 'all_for': all_for}
+ if current_snap != new_snap:
+ try:
+ array.set_pgroup(module.params['name'],
+ snap_enabled=module.params['enabled'])
+ array.set_pgroup(module.params['name'],
+ snap_frequency=snap_frequency,
+ snap_at=snap_at)
+ array.set_pgroup(module.params['name'],
+ days=days,
+ per_day=per_day,
+ all_for=all_for)
+ changed = True
+ except Exception:
+ module.fail_json(msg='Failed to change snapshot schedule for pgroup {0}.'.format(module.params['name']))
+ else:
+ if not module.params['replicate_frequency']:
+ replicate_frequency = current_repl['replicate_frequency']
+ else:
+ model = array.get(controllers=True)[0]['model']
+ if '405' in model or '10' in model or 'CBS' in model:
+ if not 900 <= module.params['replicate_frequency'] <= 34560000:
+ module.fail_json(msg="Replication Frequency support is out of range (900 to 34560000)")
+ else:
+ replicate_frequency = module.params['replicate_frequency']
+ else:
+ if not 300 <= module.params['replicate_frequency'] <= 34560000:
+ module.fail_json(msg="Replication Frequency support is out of range (300 to 34560000)")
+ else:
+ replicate_frequency = module.params['replicate_frequency']
+
+ if not module.params['replicate_at']:
+ replicate_at = current_repl['replicate_at']
+ else:
+ replicate_at = module.params['replicate_at']
+
+ if not module.params['target_days']:
+ target_days = current_repl['target_days']
+ else:
+ if module.params['target_days'] > 4000:
+ module.fail_json(msg="Maximum value for target_days is 4000")
+ else:
+ target_days = module.params['target_days']
+
+ if not module.params['target_per_day']:
+ target_per_day = current_repl['target_per_day']
+ else:
+ if module.params['target_per_day'] > 1440:
+ module.fail_json(msg="Maximum value for target_per_day is 1440")
+ else:
+ target_per_day = module.params['target_per_day']
+
+ if not module.params['target_all_for']:
+ target_all_for = current_repl['target_all_for']
+ else:
+ if module.params['target_all_for'] > 34560000:
+ module.fail_json(msg="Maximum target_all_for value is 34560000")
+ else:
+ target_all_for = module.params['target_all_for']
+ if not module.params['blackout_end']:
+            blackout_end = current_repl['blackout_end']
+ else:
+ blackout_end = _convert_to_minutes(module.params['blackout_end'])
+ if not module.params['blackout_start']:
+ blackout_start = current_repl['blackout_start']
+ else:
+ blackout_start = _convert_to_minutes(module.params['blackout_start'])
+
+ new_repl = {'replicate_frequency': replicate_frequency,
+ 'replicate_enabled': module.params['enabled'],
+ 'target_days': target_days,
+ 'replicate_at': replicate_at,
+ 'target_per_day': target_per_day,
+ 'target_all_for': target_all_for,
+ 'blackout_start': blackout_start,
+ 'blackout_end': blackout_end}
+ if current_repl != new_repl:
+ blackout = {'start': blackout_start, 'end': blackout_end}
+ try:
+ array.set_pgroup(module.params['name'],
+ replicate_enabled=module.params['enabled'])
+ array.set_pgroup(module.params['name'],
+ replicate_frequency=replicate_frequency,
+ replicate_at=replicate_at)
+ if blackout_start == 0:
+ array.set_pgroup(module.params['name'],
+ replicate_blackout=None)
+ else:
+ array.set_pgroup(module.params['name'],
+ replicate_blackout=blackout)
+ array.set_pgroup(module.params['name'],
+ target_days=target_days,
+ target_per_day=target_per_day,
+ target_all_for=target_all_for)
+ changed = True
+ except Exception:
+ module.fail_json(msg='Failed to change replication schedule for pgroup {0}.'.format(module.params['name']))
+
+ module.exit_json(changed=changed)
+
+
+def delete_schedule(module, array):
+ """ Delete, ie. disable, Protection Group Schedules"""
+ changed = True
+ if not module.check_mode:
+ try:
+ current_state = array.get_pgroup(module.params['name'], schedule=True)
+ if module.params['schedule'] == "replication":
+ if current_state['replicate_enabled']:
+ array.set_pgroup(module.params['name'], replicate_enabled=False)
+ array.set_pgroup(module.params['name'], target_days=0, target_per_day=0,
+ target_all_for=1)
+ array.set_pgroup(module.params['name'], replicate_frequency=14400,
+ replicate_blackout=None)
+ else:
+ changed = False
+ else:
+ if current_state['snap_enabled']:
+ array.set_pgroup(module.params['name'], snap_enabled=False)
+ array.set_pgroup(module.params['name'], days=0, per_day=0, all_for=1)
+ array.set_pgroup(module.params['name'], snap_frequency=300)
+ else:
+ changed = False
+ except Exception:
+ module.fail_json(msg='Deleting pgroup {0} {1} schedule failed.'.format(module.params['name'],
+ module.params['schedule']))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ schedule=dict(type='str', required=True, choices=['replication', 'snapshot']),
+ blackout_start=dict(type='str'),
+ blackout_end=dict(type='str'),
+ snap_at=dict(type='int'),
+ replicate_at=dict(type='int'),
+ replicate_frequency=dict(type='int'),
+ snap_frequency=dict(type='int'),
+ all_for=dict(type='int'),
+ days=dict(type='int'),
+ per_day=dict(type='int'),
+ target_all_for=dict(type='int'),
+ target_per_day=dict(type='int'),
+ target_days=dict(type='int'),
+ enabled=dict(type='bool', default=True)
+ ))
+
+ required_together = [['blackout_start', 'blackout_end']]
+
+ module = AnsibleModule(argument_spec,
+ required_together=required_together,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ array = get_system(module)
+
+ pgroup = get_pgroup(module, array)
+ if module.params['snap_at'] and module.params['snap_frequency']:
+ if not module.params['snap_frequency'] % 86400 == 0:
+ module.fail_json(msg="snap_at not valid unless snapshot frequency is measured in days, ie. a multiple of 86400")
+ if pgroup and state == 'present':
+ update_schedule(module, array)
+ elif pgroup and state == 'absent':
+ delete_schedule(module, array)
+ elif pgroup is None:
+        module.fail_json(msg="Specified protection group {0} does not exist.".format(module.params['name']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py
new file mode 100644
index 00000000..d76c4b22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py
@@ -0,0 +1,378 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_pgsnap
+version_added: '1.0.0'
+short_description: Manage protection group snapshots on Pure Storage FlashArrays
+description:
+- Create or delete protection group snapshots on Pure Storage FlashArray.
+- Recovery of replicated snapshots on the replica target array is enabled.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the source protection group.
+ type: str
+ required: true
+ suffix:
+ description:
+ - Suffix of snapshot name.
+ - Special case. If I(latest) the module will select the latest snapshot created in the group
+ type: str
+ state:
+ description:
+ - Define whether the protection group snapshot should exist or not.
+ Copy (added in 2.7) will create a full read/write clone of the
+ snapshot.
+ type: str
+ choices: [ absent, present, copy ]
+ default: present
+ eradicate:
+ description:
+ - Define whether to eradicate the snapshot on delete or leave in trash.
+ type: bool
+ default: 'no'
+ restore:
+ description:
+ - Restore a specific volume from a protection group snapshot.
+ type: str
+ overwrite:
+ description:
+ - Define whether to overwrite the target volume if it already exists.
+ type: bool
+ default: 'no'
+ target:
+ description:
+ - Volume to restore a specified volume to.
+ - If not supplied this will default to the volume defined in I(restore)
+ type: str
+ offload:
+ description:
+ - Name of offload target on which the snapshot exists.
+ - This is only applicable for deletion and erasure of snapshots
+ type: str
+ now:
+ description:
+    - Whether to initiate a snapshot of the protection group immediately
+ type: bool
+ default: False
+ apply_retention:
+ description:
+ - Apply retention schedule settings to the snapshot
+ type: bool
+ default: False
+ remote:
+ description:
+    - Force immediate snapshot to remote targets
+ type: bool
+ default: False
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create protection group snapshot foo.ansible
+ purefa_pgsnap:
+ name: foo
+ suffix: ansible
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate protection group snapshot named foo.snap
+ purefa_pgsnap:
+ name: foo
+ suffix: snap
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Restore volume data from local protection group snapshot named foo.snap to volume data2
+ purefa_pgsnap:
+ name: foo
+ suffix: snap
+ restore: data
+ target: data2
+ overwrite: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Restore remote protection group snapshot arrayA:pgname.snap.data to local copy
+ purefa_pgsnap:
+ name: arrayA:pgname
+ suffix: snap
+ restore: data
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Restore AC pod protection group snapshot pod1::pgname.snap.data to pod1::data2
+ purefa_pgsnap:
+ name: pod1::pgname
+ suffix: snap
+ restore: data
+ target: pod1::data2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Create snapshot of existing pgroup foo with suffix and force immediate copy to remote targets
+ purefa_pgsnap:
+ name: pgname
+ suffix: force
+ now: True
+ apply_retention: True
+ remote: True
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Delete and eradicate snapshot named foo.snap on offload target bar from arrayA
+ purefa_pgsnap:
+ name: "arrayA:foo"
+ suffix: snap
+ offload: bar
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+'''
+
+RETURN = r'''
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+from datetime import datetime
+
+OFFLOAD_API = '1.16'
+
+
+def _check_offload(module, array):
+ try:
+ offload = array.get_offload(module.params['offload'])
+ if offload['status'] == "connected":
+ return True
+ return False
+ except Exception:
+ return None
+
+
+def get_pgroup(module, array):
+ """Return Protection Group or None"""
+ try:
+ return array.get_pgroup(module.params['name'])
+ except Exception:
+ return None
+
+
+def get_pgroupvolume(module, array):
+ """Return Protection Group Volume or None"""
+ try:
+ pgroup = array.get_pgroup(module.params['name'])
+ if "::" in module.params['name']:
+ restore_volume = module.params['name'].split("::")[0] + "::" + module.params['restore']
+ else:
+ restore_volume = module.params['restore']
+ for volume in pgroup['volumes']:
+ if volume == restore_volume:
+ return volume
+ except Exception:
+ return None
+
+
+def get_rpgsnapshot(module, array):
+    """Return Replicated Snapshot or None"""
+ try:
+ snapname = module.params['name'] + "." + module.params['suffix'] + "." + module.params['restore']
+ for snap in array.list_volumes(snap=True):
+ if snap['name'] == snapname:
+ return snapname
+ except Exception:
+ return None
+
+
+def get_offload_snapshot(module, array):
+ """Return Snapshot (active or deleted) or None"""
+ try:
+ snapname = module.params['name'] + "." + module.params['suffix']
+ for snap in array.get_pgroup(module.params['name'], snap=True, on=module.params['offload']):
+ if snap['name'] == snapname:
+ return snapname
+ except Exception:
+ return None
+
+
+def get_pgsnapshot(module, array):
+ """Return Snapshot (active or deleted) or None"""
+ try:
+ snapname = module.params['name'] + "." + module.params['suffix']
+ for snap in array.get_pgroup(module.params['name'], pending=True, snap=True):
+ if snap['name'] == snapname:
+ return snapname
+ except Exception:
+ return None
+
+
+def create_pgsnapshot(module, array):
+ """Create Protection Group Snapshot"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params['now'] and array.get_pgroup(module.params['name'])['targets'] is not None:
+ array.create_pgroup_snapshot(source=module.params['name'],
+ suffix=module.params['suffix'],
+ snap=True,
+ apply_retention=module.params['apply_retention'],
+ replicate_now=module.params['remote'])
+ else:
+ array.create_pgroup_snapshot(source=module.params['name'],
+ suffix=module.params['suffix'],
+ snap=True,
+ apply_retention=module.params['apply_retention'])
+ except Exception:
+ module.fail_json(msg="Snapshot of pgroup {0} failed.".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def restore_pgsnapvolume(module, array):
+ """Restore a Protection Group Snapshot Volume"""
+ changed = True
+ if not module.check_mode:
+ if module.params['suffix'] == 'latest':
+ all_snaps = array.get_pgroup(module.params['name'], snap=True)
+ latest_snap = all_snaps[len(all_snaps) - 1]['name']
+ module.params['suffix'] = latest_snap.split('.')[1]
+ if ":" in module.params['name'] and "::" not in module.params['name']:
+ if get_rpgsnapshot(module, array) is None:
+ module.fail_json(msg="Selected restore snapshot {0} does not exist in the Protection Group".format(module.params['restore']))
+ else:
+ if get_pgroupvolume(module, array) is None:
+ module.fail_json(msg="Selected restore volume {0} does not exist in the Protection Group".format(module.params['restore']))
+ volume = module.params['name'] + "." + module.params['suffix'] + "." + module.params['restore']
+ try:
+ array.copy_volume(volume, module.params['target'], overwrite=module.params['overwrite'])
+ except Exception:
+ module.fail_json(msg="Failed to restore {0} from pgroup {1}".format(volume, module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def delete_offload_snapshot(module, array):
+ """ Delete Offloaded Protection Group Snapshot"""
+ changed = True
+ if not module.check_mode:
+ snapname = module.params['name'] + "." + module.params['suffix']
+ if ":" in module.params['name'] and module.params['offload']:
+ if _check_offload(module, array):
+ try:
+ array.destroy_pgroup(snapname, on=module.params['offload'])
+ if module.params['eradicate']:
+ try:
+ array.eradicate_pgroup(snapname, on=module.params['offload'])
+ except Exception:
+ module.fail_json(msg='Failed to eradicate offloaded snapshot {0} on target {1}'.format(snapname, module.params['offload']))
+ except Exception:
+ changed = False
+ else:
+ module.fail_json(msg='Offload target {0} does not exist or not connected'.format(module.params['offload']))
+ else:
+ module.fail_json(msg='Protection Group name not in the correct format')
+
+ module.exit_json(changed=changed)
+
+
+def delete_pgsnapshot(module, array):
+ """ Delete Protection Group Snapshot"""
+ changed = True
+ if not module.check_mode:
+ snapname = module.params['name'] + "." + module.params['suffix']
+ try:
+ array.destroy_pgroup(snapname)
+ if module.params['eradicate']:
+ try:
+ array.eradicate_pgroup(snapname)
+ except Exception:
+ module.fail_json(msg="Failed to eradicate pgroup {0}".format(snapname))
+ except Exception:
+ module.fail_json(msg="Failed to delete pgroup {0}".format(snapname))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str', required=True),
+ suffix=dict(type='str'),
+ restore=dict(type='str'),
+ offload=dict(type='str'),
+ overwrite=dict(type='bool', default=False),
+ target=dict(type='str'),
+ eradicate=dict(type='bool', default=False),
+ now=dict(type='bool', default=False),
+ apply_retention=dict(type='bool', default=False),
+ remote=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'copy']),
+ ))
+
+ required_if = [('state', 'copy', ['suffix', 'restore'])]
+
+ module = AnsibleModule(argument_spec,
+ required_if=required_if,
+ supports_check_mode=True)
+ pattern = re.compile("^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,63}[a-zA-Z0-9])?$")
+ if module.params['suffix'] is None:
+ suffix = "snap-" + str((datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds())
+ module.params['suffix'] = suffix.replace(".", "")
+ else:
+ if not pattern.match(module.params['suffix']):
+ module.fail_json(msg='Suffix name {0} does not conform to suffix name rules'.format(module.params['suffix']))
+
+ if not module.params['target'] and module.params['restore']:
+ module.params['target'] = module.params['restore']
+
+ state = module.params['state']
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if OFFLOAD_API not in api_version and module.params['offload']:
+ module.fail_json(msg='Minimum version {0} required for offload support'.format(OFFLOAD_API))
+ pgroup = get_pgroup(module, array)
+ if pgroup is None:
+ module.fail_json(msg="Protection Group {0} does not exist.".format(module.params['name']))
+ pgsnap = get_pgsnapshot(module, array)
+ if state != "absent" and module.params['offload']:
+ module.fail_json(msg='offload parameter not supported for state {0}'.format(state))
+ elif state == 'copy':
+ restore_pgsnapvolume(module, array)
+ elif state == 'present' and not pgsnap:
+ create_pgsnapshot(module, array)
+ elif state == 'present' and pgsnap:
+ module.exit_json(changed=False)
+ elif state == 'absent' and module.params['offload'] and get_offload_snapshot(module, array):
+ delete_offload_snapshot(module, array)
+ elif state == 'absent' and pgsnap:
+ delete_pgsnapshot(module, array)
+ elif state == 'absent' and not pgsnap:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py
new file mode 100644
index 00000000..059c6e86
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_phonehome
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashArray Phonehome
+description:
+- Enable or Disable Phonehome for a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of phonehome
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Enable Phonehome
+ purefa_phonehome:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable Phonehome
+ purefa_phonehome:
+    state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def enable_ph(module, array):
+    """Enable Phonehome"""
+ changed = False
+ if array.get_phonehome()['phonehome'] != 'enabled':
+ try:
+ if not module.check_mode:
+ array.enable_phonehome()
+ changed = True
+ except Exception:
+ module.fail_json(msg='Enabling Phonehome failed')
+ module.exit_json(changed=changed)
+
+
+def disable_ph(module, array):
+    """Disable Phonehome"""
+ changed = False
+ if array.get_phonehome()['phonehome'] == 'enabled':
+ try:
+ if not module.check_mode:
+ array.disable_phonehome()
+ changed = True
+ except Exception:
+        module.fail_json(msg='Disabling Phonehome failed')
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ array = get_system(module)
+
+ if module.params['state'] == 'present':
+ enable_ph(module, array)
+ else:
+ disable_ph(module, array)
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py
new file mode 100644
index 00000000..3f576766
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py
@@ -0,0 +1,468 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_pod
+short_description: Manage AC pods in Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+- Manage AC pods in a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the pod.
+ type: str
+ required: true
+ stretch:
+ description:
+ - The name of the array to stretch to/unstretch from. Must be synchronously replicated.
+ - To unstretch an array use state I(absent)
+ - You can only specify a remote array, ie you cannot unstretch a pod from the
+ current array and then restretch back to the current array.
+ - To restretch a pod you must perform this from the remaining array the pod
+ resides on.
+ type: str
+ failover:
+ description:
+ - The name of the array given priority to stay online if arrays lose
+ contact with each other.
+ - Options are either array in the cluster, or I(auto)
+ type: list
+ elements: str
+ state:
+ description:
+ - Define whether the pod should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the pod on delete or leave in trash.
+ type: bool
+ default: false
+ target:
+ description:
+ - Name of clone target pod.
+ type: str
+ mediator:
+ description:
+ - Name of the mediator to use for a pod
+ type: str
+ default: purestorage
+ promote:
+ description:
+ - Promote/demote any pod not in a stretched relationship.
+ - Demoting a pod will render it read-only.
+ required: false
+ type: bool
+ quiesce:
+ description:
+ - Quiesce/Skip quiesce when I(promote) is false and demoting an ActiveDR pod.
+ - Quiesce will ensure all local data has been replicated before demotion.
+ - Skipping quiesce loses all pending data to be replicated to the remote pod.
+ - Can only demote the pod if it is in an ActiveDR replica link relationship.
+ - This will default to True
+ required: false
+ type: bool
+ undo:
+ description:
+ - Use the I(undo-remote) pod when I(promote) is true and promoting an ActiveDR pod.
+ - This will default to True
+ required: false
+ type: bool
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create new pod named foo
+ purefa_pod:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate pod named foo
+ purefa_pod:
+ name: foo
+ eradicate: yes
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Set failover array for pod named foo
+ purefa_pod:
+ name: foo
+ failover:
+ - array1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set mediator for pod named foo
+ purefa_pod:
+ name: foo
+ mediator: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Stretch a pod named foo to array2
+ purefa_pod:
+ name: foo
+ stretch: array2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Unstretch a pod named foo from array2
+ purefa_pod:
+ name: foo
+ stretch: array2
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create clone of pod foo named bar
+ purefa_pod:
+ name: foo
+ target: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+POD_API_VERSION = "1.13"
+
+
def get_pod(module, array):
    """Look up the named pod; return None when it does not exist."""
    pod_name = module.params['name']
    try:
        return array.get_pod(pod_name)
    except Exception:
        return None
+
+
def get_undo_pod(module, array):
    """Return the pending '<name>.undo-demote' pod, or None if absent."""
    undo_name = '{0}.undo-demote'.format(module.params['name'])
    try:
        return array.get_pod(undo_name, pending_only=True)
    except Exception:
        return None
+
+
def get_target(module, array):
    """Look up the clone target pod; return None when it does not exist."""
    target_name = module.params['target']
    try:
        return array.get_pod(target_name)
    except Exception:
        return None
+
+
def get_destroyed_pod(module, array):
    """Return True when the named pod exists only in the destroyed state."""
    try:
        remaining = array.get_pod(module.params['name'], pending=True)['time_remaining']
    except Exception:
        return False
    return remaining != ''
+
+
def get_destroyed_target(module, array):
    """Return True when the clone target exists only in the destroyed state."""
    try:
        remaining = array.get_pod(module.params['target'], pending=True)['time_remaining']
    except Exception:
        return False
    return remaining != ''
+
+
def check_arrays(module, array):
    """Validate that failover/stretch targets are sync-replicated peers.

    Fails the module if a requested failover or stretch array is not the
    local array or a sync-replication connected array.
    """
    # The local array is always a valid member.
    valid_arrays = [array.get()['array_name']]
    for connection in array.list_array_connections():
        if connection['type'] == 'sync-replication':
            valid_arrays.append(connection['array_name'])
    failover = module.params['failover']
    # ['auto'] means "no explicit preference" and needs no validation.
    if failover is not None and failover != ['auto']:
        for candidate in failover:
            if candidate not in valid_arrays:
                module.fail_json(msg='Failover array {0} is not valid.'.format(candidate))
    stretch = module.params['stretch']
    if stretch is not None and stretch not in valid_arrays:
        module.fail_json(msg='Stretch: Array {0} is not connected.'.format(stretch))
    return None
+
+
def create_pod(module, array):
    """Create a new pod, optionally setting its mediator and stretching it.

    Fails if I(target) is set (cannot clone a pod that does not exist yet).
    Mediator failures are non-fatal and fall back to the default mediator.
    """
    changed = True
    if not module.check_mode:
        if module.params['target']:
            module.fail_json(msg='Cannot clone non-existant pod.')
        try:
            if module.params['failover']:
                array.create_pod(module.params['name'], failover_list=module.params['failover'])
            else:
                array.create_pod(module.params['name'])
        except Exception:
            module.fail_json(msg='Pod {0} creation failed.'.format(module.params['name']))
        if module.params['mediator'] != 'purestorage':
            try:
                array.set_pod(module.params['name'], mediator=module.params['mediator'])
            except Exception:
                module.warn('Failed to communicate with mediator {0}, using default value'.format(module.params['mediator']))
        if module.params['stretch']:
            current_array = array.get()['array_name']
            if module.params['stretch'] != current_array:
                try:
                    # Bug fix: the original passed module.params['rrays'] (a
                    # nonexistent key), so stretching at create time always
                    # raised KeyError and reported failure.
                    array.add_pod(module.params['name'], module.params['stretch'])
                except Exception:
                    module.fail_json(msg='Failed to stretch pod {0} to array {1}.'.format(module.params['name'],
                                                                                          module.params['stretch']))
    module.exit_json(changed=changed)
+
+
def clone_pod(module, array):
    """Clone the named pod to a new target pod."""
    changed = True
    if not module.check_mode:
        changed = False
        if get_target(module, array) is None:
            if get_destroyed_target(module, array):
                # Target name is occupied by a destroyed-but-not-eradicated pod.
                module.fail_json(msg='Target pod {0} already exists but deleted.'.format(module.params['target']))
            try:
                array.clone_pod(module.params['name'], module.params['target'])
                changed = True
            except Exception:
                module.fail_json(msg='Clone pod {0} to pod {1} failed.'.format(module.params['name'],
                                                                               module.params['target']))
    module.exit_json(changed=changed)
+
+
def update_pod(module, array):
    """Update Pod configuration

    Reconciles failover preference, mediator, and promotion state
    (promote/demote) against the requested parameters, then exits the
    module with changed=True if any setting was altered.
    """
    changed = True
    if not module.check_mode:
        changed = False
        current_config = array.get_pod(module.params['name'], failover_preference=True)
        if module.params['failover']:
            current_failover = current_config['failover_preference']
            if current_failover == [] or sorted(module.params['failover']) != sorted(current_failover):
                try:
                    # I(failover)=['auto'] maps to an empty preference list on the array.
                    if module.params['failover'] == ['auto']:
                        if current_failover != []:
                            array.set_pod(module.params['name'], failover_preference=[])
                            changed = True
                    else:
                        array.set_pod(module.params['name'], failover_preference=module.params['failover'])
                        changed = True
                except Exception:
                    module.fail_json(msg='Failed to set failover preference for pod {0}.'.format(module.params['name']))
        current_config = array.get_pod(module.params['name'], mediator=True)
        if current_config['mediator'] != module.params['mediator']:
            try:
                array.set_pod(module.params['name'], mediator=module.params['mediator'])
                changed = True
            except Exception:
                # Mediator connectivity problems are non-fatal: warn and continue.
                module.warn('Failed to communicate with mediator {0}. Setting unchanged.'.format(module.params['mediator']))
        if module.params['promote'] is not None:
            # Promotion/demotion only applies to unstretched (single-array) pods.
            if len(current_config['arrays']) > 1:
                module.fail_json(msg='Promotion/Demotion not permitted. Pod {0} is stretched'.format(module.params['name']))
            else:
                if current_config['promotion_status'] == 'demoted' and module.params['promote']:
                    try:
                        # I(undo) defaults to True when unspecified.
                        if module.params['undo'] is None:
                            module.params['undo'] = True
                        if current_config['promotion_status'] == 'quiescing':
                            module.fail_json(msg='Cannot promote pod {0} as it is still quiesing'.format(module.params['name']))
                        elif module.params['undo']:
                            if get_undo_pod(module, array):
                                # Promote from the undo-demote pod so no replicated data is lost.
                                array.promote_pod(module.params['name'], promote_from=module.params['name'] + '.undo-demote')
                            else:
                                array.promote_pod(module.params['name'])
                                module.warn('undo-demote pod remaining for {0}. Consider eradicating this.'.format(module.params['name']))
                            changed = True
                        else:
                            array.promote_pod(module.params['name'])
                            changed = True
                    except Exception:
                        module.fail_json(msg='Failed to promote pod {0}.'.format(module.params['name']))
                elif current_config['promotion_status'] != 'demoted' and not module.params['promote']:
                    try:
                        # An existing undo-demote pod blocks a new demotion.
                        if get_undo_pod(module, array):
                            module.fail_json(msg='Cannot demote pod {0} due to associated undo-demote pod not being eradicated'.format(module.params['name']))
                        # I(quiesce) defaults to True when unspecified.
                        if module.params['quiesce'] is None:
                            module.params['quiesce'] = True
                        if current_config['link_target_count'] == 0:
                            # No replica links: plain demote, quiesce is irrelevant.
                            array.demote_pod(module.params['name'])
                            changed = True
                        elif not module.params['quiesce']:
                            array.demote_pod(module.params['name'], skip_quiesce=True)
                            changed = True
                        else:
                            array.demote_pod(module.params['name'], quiesce=True)
                            changed = True
                    except Exception:
                        module.fail_json(msg='Failed to demote pod {0}.'.format(module.params['name']))
    module.exit_json(changed=changed)
+
+
def stretch_pod(module, array):
    """Stretch a pod to, or unstretch it from, a remote array."""
    changed = True
    if not module.check_mode:
        changed = False
        current_config = array.get_pod(module.params['name'], failover_preference=True)
        target = module.params['stretch']
        if target:
            members = [entry['name'] for entry in current_config['arrays']]
            if module.params['state'] == 'present' and target not in members:
                try:
                    array.add_pod(module.params['name'], target)
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to stretch pod {0} to array {1}.".format(module.params['name'],
                                                                                          target))
            if module.params['state'] == 'absent' and target in members:
                try:
                    array.remove_pod(module.params['name'], target)
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to unstretch pod {0} from array {1}.".format(module.params['name'],
                                                                                              target))
    module.exit_json(changed=changed)
+
+
def delete_pod(module, array):
    """Destroy a pod, optionally eradicating it afterwards."""
    changed = True
    pod_name = module.params['name']
    if not module.check_mode:
        try:
            array.destroy_pod(pod_name)
            if module.params['eradicate']:
                try:
                    array.eradicate_pod(pod_name)
                except Exception:
                    module.fail_json(msg='Eradicate pod {0} failed.'.format(pod_name))
        except Exception:
            module.fail_json(msg='Delete pod {0} failed.'.format(pod_name))
    module.exit_json(changed=changed)
+
+
def eradicate_pod(module, array):
    """Eradicate an already-destroyed pod when I(eradicate) is set."""
    changed = True
    pod_name = module.params['name']
    if not module.check_mode:
        if module.params['eradicate']:
            try:
                array.eradicate_pod(pod_name)
            except Exception:
                module.fail_json(msg='Eradication of pod {0} failed'.format(pod_name))
    module.exit_json(changed=changed)
+
+
def recover_pod(module, array):
    """Recover a destroyed (not yet eradicated) pod."""
    changed = True
    pod_name = module.params['name']
    if not module.check_mode:
        try:
            array.recover_pod(pod_name)
        except Exception:
            module.fail_json(msg='Recovery of pod {0} failed'.format(pod_name))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: dispatch pod create/clone/update/stretch/delete actions."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        stretch=dict(type='str'),
        target=dict(type='str'),
        mediator=dict(type='str', default='purestorage'),
        failover=dict(type='list', elements='str'),
        promote=dict(type='bool'),
        undo=dict(type='bool'),
        quiesce=dict(type='bool'),
        eradicate=dict(type='bool', default=False),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    ))

    mutually_exclusive = [['stretch', 'failover'],
                          ['stretch', 'eradicate'],
                          ['stretch', 'mediator'],
                          ['target', 'mediator'],
                          ['target', 'stretch'],
                          ['target', 'failover'],
                          ['target', 'eradicate']]

    module = AnsibleModule(argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    # Pod names are case-insensitive on the array; normalise to lower case.
    module.params['name'] = module.params['name'].lower()

    state = module.params['state']
    array = get_system(module)

    api_version = array._list_available_rest_versions()
    if POD_API_VERSION not in api_version:
        module.fail_json(msg='FlashArray REST version not supported. '
                             'Minimum version required: {0}'.format(POD_API_VERSION))

    pod = get_pod(module, array)
    destroyed = ''
    if not pod:
        destroyed = get_destroyed_pod(module, array)
    # Bug fix: the original guard `failover or failover != 'auto'` was always
    # true (failover is a list or None, never the string 'auto'), and
    # check_arrays() already handles None/['auto'] itself — call it directly.
    check_arrays(module, array)

    # Bug fix: test for a destroyed pod before attempting creation; previously
    # the `not pod` create branch always won (destroyed implies no live pod),
    # making the recover branch unreachable. Also removed a duplicated
    # clone_pod elif branch.
    if state == 'present' and destroyed:
        recover_pod(module, array)
    elif state == 'present' and not pod:
        create_pod(module, array)
    elif pod and module.params['stretch']:
        stretch_pod(module, array)
    elif state == 'present' and pod and module.params['target']:
        clone_pod(module, array)
    elif state == 'present' and pod:
        update_pod(module, array)
    elif state == 'absent' and pod and not module.params['stretch']:
        delete_pod(module, array)
    elif state == 'absent' and destroyed:
        eradicate_pod(module, array)
    elif state == 'absent' and not pod:
        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py
new file mode 100644
index 00000000..95a5c637
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: purefa_pod_replica
+short_description: Manage ActiveDR pod replica links between Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+ - This module manages ActiveDR pod replica links between Pure Storage FlashArrays.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - ActiveDR source pod name
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or modifies a pod replica link
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ target_array:
+ description:
+ - Remote array name to create replica on.
+ required: false
+ type: str
+ target_pod:
+ description:
+ - Name of target pod
+ - Must not be the same as the local pod.
+ type: str
+ required: false
+ pause:
+ description:
+ - Pause/unpause a pod replica link
+ required: false
+ type: bool
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = '''
+- name: Create new pod replica link from foo to bar on arrayB
+ purefa_pod_replica:
+ name: foo
+ target_array: arrayB
+ target_pod: bar
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Pause a pod replica link
+ purefa_pod_replica:
+ name: foo
+ pause: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete and eradicate pod replica link
+ purefa_pod_replica:
+ name: foo
+ state: absent
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = '''
+'''
+
+MIN_REQUIRED_API_VERSION = '1.19'
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
def get_local_pod(module, array):
    """Look up the local pod by name; return None when it does not exist."""
    pod_name = module.params['name']
    try:
        return array.get_pod(pod_name)
    except Exception:
        return None
+
+
def get_local_rl(module, array):
    """Return the replica link whose local pod matches I(name), or None."""
    try:
        for link in array.list_pod_replica_links():
            if link['local_pod_name'] == module.params['name']:
                return link
        return None
    except Exception:
        return None
+
+
+def _get_arrays(array):
+ """ Get Connected Arrays"""
+ arrays = []
+ array_details = array.list_array_connections()
+ for arraycnt in range(0, len(array_details)):
+ arrays.append(array_details[arraycnt]['array_name'])
+ return arrays
+
+
def update_rl(module, array, local_rl):
    """Pause or resume an existing pod replica link."""
    changed = True
    if not module.check_mode:
        changed = False
        pause = module.params['pause']
        if pause is not None:
            is_paused = local_rl['status'] == 'paused'
            if pause and not is_paused:
                try:
                    array.pause_pod_replica_link(local_pod_name=module.params['name'],
                                                 remote_pod_name=local_rl['remote_pod_name'])
                    changed = True
                except Exception:
                    module.fail_json(msg='Failed to pause replica link {0}.'.format(module.params['name']))
            elif is_paused and not pause:
                try:
                    array.resume_pod_replica_link(local_pod_name=module.params['name'],
                                                  remote_pod_name=local_rl['remote_pod_name'])
                    changed = True
                except Exception:
                    module.fail_json(msg='Failed to resume replica link {0}.'.format(module.params['name']))
    module.exit_json(changed=changed)
+
+
def create_rl(module, array):
    """Create a pod replica link to a pod on a connected target array."""
    changed = True
    if not module.check_mode:
        # Both ends of the link must be specified up front.
        if not module.params['target_pod']:
            module.fail_json(msg='target_pod required to create a new replica link.')
        if not module.params['target_array']:
            module.fail_json(msg='target_array required to create a new replica link.')
        try:
            connected_arrays = array.list_array_connections()
            if connected_arrays == []:
                module.fail_json(msg='No connected arrays.')
            else:
                usable_states = ["connected", "connecting", "partially_connected"]
                good_array = False
                for conn in connected_arrays:
                    if conn['array_name'] == module.params['target_array'] and conn['status'] in usable_states:
                        good_array = True
                        break
                if not good_array:
                    module.fail_json(msg='Target array {0} is not connected to the source array.'.format(module.params['target_array']))
                else:
                    try:
                        array.create_pod_replica_link(local_pod_name=module.params['name'],
                                                      remote_name=module.params['target_array'],
                                                      remote_pod_name=module.params['target_pod'])
                    except Exception:
                        module.fail_json(msg='Failed to create replica link {0} to target array {1}'.format(module.params['name'],
                                                                                                            module.params['target_array']))
        except Exception:
            module.fail_json(msg="Failed to create replica link for pod {0}.".format(module.params['name']))
    module.exit_json(changed=changed)
+
+
def delete_rl(module, array, local_rl):
    """Delete the pod replica link for the named local pod."""
    changed = True
    pod_name = module.params['name']
    if not module.check_mode:
        changed = False
        try:
            array.delete_pod_replica_link(pod_name,
                                          remote_pod_name=local_rl['remote_pod_name'])
            changed = True
        except Exception:
            module.fail_json(msg="Failed to delete replica link for pod {0}.".format(pod_name))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create, pause/resume, or delete a pod replica link."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            target_pod=dict(type='str'),
            target_array=dict(type='str'),
            pause=dict(type='bool'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    state = module.params['state']
    array = get_system(module)
    api_version = array._list_available_rest_versions()

    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(msg='Purity v6.0.0 or higher required.')

    local_pod = get_local_pod(module, array)
    local_replica_link = get_local_rl(module, array)

    if not local_pod:
        module.fail_json(msg='Selected local pod {0} does not exist.'.format(module.params['name']))

    # Replica links only apply to unstretched (single-array) pods.
    if len(local_pod['arrays']) > 1:
        module.fail_json(msg='Local Pod {0} is already stretched.'.format(module.params['name']))

    if local_replica_link:
        if local_replica_link['status'] == 'unhealthy':
            # Bug fix: error message previously misspelled "Replica".
            module.fail_json(msg='Replica Link unhealthy - please check remote array')
    if state == 'present' and not local_replica_link:
        create_rl(module, array)
    elif state == 'present' and local_replica_link:
        update_rl(module, array, local_replica_link)
    elif state == 'absent' and local_replica_link:
        delete_rl(module, array, local_replica_link)

    # Nothing to do (e.g. state=absent with no link present).
    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py
new file mode 100644
index 00000000..9ba0eb9c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py
@@ -0,0 +1,579 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_policy
+version_added: '1.5.0'
+short_description: Manage FlashArray File System Policies
+description:
+- Manage FlashArray file system policies for NFS, SMB and snapshot
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the policy
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the policy should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ policy:
+ description:
+ - The type of policy to use
+ choices: [ nfs, smb, snapshot ]
+ required: true
+ type: str
+ enabled:
+ description:
+ - Define if policy is enabled or not
+ type: bool
+ default: true
+ smb_anon_allowed:
+ description:
+ - Specifies whether access to information is allowed for anonymous users
+ type: bool
+ default: false
+ client:
+ description:
+ - Specifies which SMB or NFS clients are given access
+ - Accepted notation, IP, IP mask, or hostname
+ type: str
+ smb_encrypt:
+ description:
+ - Specifies whether the remote client is required to use SMB encryption
+ type: bool
+ default: False
+ nfs_access:
+ description:
+ - Specifies access control for the export
+ choices: [ root-squash, no-root-squash ]
+ type: str
+ default: root-squash
+ nfs_permission:
+ description:
+ - Specifies which read-write client access permissions are allowed for the export
+ choices: [ ro, rw ]
+ default: rw
+ type: str
+ snap_at:
+ description:
+ - Specifies the number of hours since midnight at which to take a snapshot
+ or the hour including AM/PM
+ - Can only be set on the rule with the smallest I(snap_every) value.
+ - Cannot be set if the I(snap_every) value is not measured in days.
+ - Can only be set for at most one rule in the same policy.
+ type: str
+ snap_every:
+ description:
+ - Specifies the interval between snapshots, in minutes.
+ - The value for all rules must be multiples of one another.
+ - Must be unique for each rule in the same policy.
+ - Value must be between 5 and 525600.
+ type: int
+ snap_keep_for:
+ description:
+ - Specifies the period that snapshots are retained before they are eradicated, in minutes.
+ - Cannot be less than the I(snap_every) value of the rule.
+ - Value must be unique for each rule in the same policy.
+ - Value must be between 5 and 525600.
+ type: int
+ snap_client_name:
+ description:
+ - The customizable portion of the client visible snapshot name.
+ type: str
+ rename:
+ description:
+ - New name of policy
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create an NFS policy with no rules
+ purefa_policy:
+ name: export1
+ policy: nfs
+ nfs_access: no-root-squash
+ nfs_permission: ro
+ client: client1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create an NFS policy with initial rule
+ purefa_policy:
+ name: export1
+ policy: nfs
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable a policy
+ purefa_policy:
+ name: export1
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add rule to existing NFS export policy
+ purefa_policy:
+ name: export1
+ policy: nfs
+ nfs_access: no-root-squash
+ nfs_permission: ro
+ client: client2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add rule to existing SMB export policy
+ purefa_policy:
+ name: export1
+ policy: nfs
+ smb_encrypt: yes
+ smb_anon_allowed: no
+ client: client1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete policy rule for a client
+ purefa_policy:
+ name: export1
+ policy: nfs
+ client: client2
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete policy
+ purefa_policy:
+ name: export1
+ policy: nfs
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, get_array, purefa_argument_spec
+
+MIN_REQUIRED_API_VERSION = '2.3'
+
+
+def _convert_to_millisecs(hour):
+ if hour[-2:].upper() == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:].upper() == "AM":
+ return int(hour[:-2]) * 3600000
+ elif hour[-2:].upper() == "PM" and hour[:2] == "12":
+ return 43200000
+ return (int(hour[:-2]) + 12) * 3600000
+
+
def rename_policy(module, array):
    """Rename a file system policy (NFS, SMB or snapshot)."""
    changed = True
    if not module.check_mode:
        changed = False
        new_name = module.params['rename']
        # A 200 response for the new name means it is already taken.
        if array.get_policies(names=[new_name]).status_code == 200:
            module.fail_json(msg="Rename failed - Target policy {0} already exists".format(new_name))
        else:
            patch = flasharray.PolicyPatch(name=new_name)
            policy_type = module.params['policy']
            if policy_type == 'nfs':
                res = array.patch_policies_nfs(names=[module.params['name']], policy=patch)
                label = 'NFS'
            elif policy_type == 'smb':
                res = array.patch_policies_smb(names=[module.params['name']], policy=patch)
                label = 'SMB'
            else:
                res = array.patch_policies_snapshot(names=[module.params['name']], policy=patch)
                label = 'snapshot'
            if res.status_code == 200:
                changed = True
            else:
                module.fail_json(msg="Failed to rename {0} policy {1} to {2}".format(label,
                                                                                     module.params['name'],
                                                                                     new_name))
    module.exit_json(changed=changed)
+
+
def delete_policy(module, array):
    """Delete a file system policy or rule within a policy.

    With no client (or snap_client_name for snapshot policies) the whole
    policy is deleted; otherwise only the matching client rule is removed.

    Bug fix: the original stored only ``bool(status_code == 200)`` from each
    rule deletion, then dereferenced ``.errors[0].message`` on that bool in
    the failure path, raising AttributeError instead of the intended error.
    The full API response is now kept so its error message can be reported.
    """
    changed = True
    if not module.check_mode:
        changed = False
        if module.params['policy'] == 'nfs':
            if not module.params['client']:
                res = array.delete_policies_nfs(names=[module.params['name']])
                if res.status_code == 200:
                    changed = True
                else:
                    module.fail_json(msg="Deletion of NFS policy {0} failed. Error: {1}".format(module.params['name'], res.errors[0].message))
            else:
                rules = list(array.get_policies_nfs_client_rules(policy_names=[module.params['name']]).items)
                if rules:
                    rule_name = ''
                    for rule in range(0, len(rules)):
                        if rules[rule].client == module.params['client']:
                            rule_name = rules[rule].name
                            break
                    if rule_name:
                        res = array.delete_policies_nfs_client_rules(policy_names=[module.params['name']], names=[rule_name])
                        if res.status_code == 200:
                            changed = True
                        else:
                            module.fail_json(msg="Failed to delete client {0} from NFS policy {1}. Error: {2}".format(module.params['client'],
                                                                                                                      module.params['name'],
                                                                                                                      res.errors[0].message))
        elif module.params['policy'] == 'smb':
            if not module.params['client']:
                res = array.delete_policies_smb(names=[module.params['name']])
                if res.status_code == 200:
                    changed = True
                else:
                    module.fail_json(msg="Deletion of SMB policy {0} failed. Error: {1}".format(module.params['name'], res.errors[0].message))
            else:
                rules = list(array.get_policies_smb_client_rules(policy_names=[module.params['name']]).items)
                if rules:
                    rule_name = ''
                    for rule in range(0, len(rules)):
                        if rules[rule].client == module.params['client']:
                            rule_name = rules[rule].name
                            break
                    if rule_name:
                        res = array.delete_policies_smb_client_rules(policy_names=[module.params['name']], names=[rule_name])
                        if res.status_code == 200:
                            changed = True
                        else:
                            module.fail_json(msg="Failed to delete client {0} from SMB policy {1}. Error: {2}".format(module.params['client'],
                                                                                                                      module.params['name'],
                                                                                                                      res.errors[0].message))
        else:
            if not module.params['snap_client_name']:
                res = array.delete_policies_snapshot(names=[module.params['name']])
                if res.status_code == 200:
                    changed = True
                else:
                    module.fail_json(msg="Deletion of Snapshot policy {0} failed. Error: {1}".format(module.params['name'], res.errors[0].message))
            else:
                rules = list(array.get_policies_snapshot_rules(policy_names=[module.params['name']]).items)
                if rules:
                    rule_name = ''
                    for rule in range(0, len(rules)):
                        if rules[rule].client_name == module.params['snap_client_name']:
                            rule_name = rules[rule].name
                            break
                    if rule_name:
                        res = array.delete_policies_snapshot_rules(policy_names=[module.params['name']], names=[rule_name])
                        if res.status_code == 200:
                            changed = True
                        else:
                            module.fail_json(msg="Failed to delete client {0} from Snapshot policy {1}. Error: {2}".format(module.params['snap_client_name'],
                                                                                                                           module.params['name'],
                                                                                                                           res.errors[0].message))

    module.exit_json(changed=changed)
+
+
+def create_policy(module, array):
+ """Create a file system export"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ if module.params['policy'] == 'nfs':
+ created = array.post_policies_nfs(names=[module.params['name']], policy=flasharray.PolicyPost(enabled=module.params['enabled']))
+ if created.status_code == 200:
+ if module.params['client']:
+ rules = flasharray.PolicyrulenfsclientpostRules(access=module.params['nfs_access'],
+ client=module.params['client'],
+ permission=module.params['nfs_permission'])
+ rule = flasharray.PolicyRuleNfsClientPost(rules=[rules])
+ rule_created = array.post_policies_nfs_client_rules(policy_names=[module.params['name']], rules=rule)
+ if rule_created.status_code != 200:
+ module.fail_json(msg="Failed to create rule for NFS policy {0}. Error: {1}".format(module.params['name'],
+ rule_created.errors[0].message))
+ changed = True
+ else:
+                module.fail_json(msg="Failed to create NFS policy {0}. Error: {1}".format(module.params['name'], created.errors[0].message))
+ elif module.params['policy'] == 'smb':
+ created = array.post_policies_smb(names=[module.params['name']], policy=flasharray.PolicyPost(enabled=module.params['enabled']))
+ if created.status_code == 200:
+ changed = True
+ if module.params['client']:
+ rules = flasharray.PolicyrulesmbclientpostRules(anonymous_access_allowed=module.params['smb_anon_allowed'],
+ client=module.params['client'],
+ smb_encryption_required=module.params['smb_encrypt'])
+ rule = flasharray.PolicyRuleSmbClientPost(rules=[rules])
+ rule_created = array.post_policies_smb_client_rules(policy_names=[module.params['name']], rules=rule)
+ if rule_created.status_code != 200:
+ module.fail_json(msg="Failed to create rule for SMB policy {0}. Error: {1}".format(module.params['name'],
+ rule_created.errors[0].message))
+ else:
+                module.fail_json(msg="Failed to create SMB policy {0}. Error: {1}".format(module.params['name'], created.errors[0].message))
+ else:
+ created = array.post_policies_snapshot(names=[module.params['name']], policy=flasharray.PolicyPost(enabled=module.params['enabled']))
+ if created.status_code == 200:
+ changed = True
+ if module.params['snap_client_name']:
+ if module.params['snap_keep_for'] < module.params['snap_every']:
+ module.fail_json(msg='Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every).')
+ if module.params['snap_at']:
+ if not module.params['snap_every'] % 1440 == 0:
+ module.fail_json(msg='snap_at time can only be set if snap_every is multiple of 1440')
+ rules = flasharray.PolicyrulesnapshotpostRules(at=_convert_to_millisecs(module.params['snap_at']),
+ client_name=module.params['snap_client_name'],
+ every=module.params['snap_every'] * 60000,
+ keep_for=module.params['snap_keep_for'] * 60000)
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(client_name=module.params['snap_client_name'],
+ every=module.params['snap_every'] * 60000,
+ keep_for=module.params['snap_keep_for'] * 60000)
+ rule = flasharray.PolicyRuleSnapshotPost(rules=[rules])
+ rule_created = array.post_policies_snapshot_rules(policy_names=[module.params['name']], rules=rule)
+ if rule_created.status_code != 200:
+ module.fail_json(msg="Failed to create rule for Snapshot policy {0}. Error: {1}".format(module.params['name'],
+ rule_created.errors[0].message))
+ else:
+                module.fail_json(msg="Failed to create Snapshot policy {0}. Error: {1}".format(module.params['name'], created.errors[0].message))
+ module.exit_json(changed=changed)
+
+
+def update_policy(module, array):
+ """ Update an existing policy including add/remove rules"""
+ changed = True
+ if not module.check_mode:
+ changed = changed_rule = changed_enable = False
+ if module.params['policy'] == 'nfs':
+ try:
+ current_enabled = list(array.get_policies_nfs(names=[module.params['name']]).items)[0].enabled
+ except Exception:
+ module.fail_json(msg='Incorrect policy type specified for existing policy {0}'.format(module.params['name']))
+ if current_enabled != module.params['enabled']:
+ res = array.patch_policies_nfs(names=[module.params['name']], policy=flasharray.PolicyPatch(enabled=module.params['enabled']))
+ if res.status_code != 200:
+                    module.fail_json(msg="Failed to enable/disable NFS policy {0}".format(module.params['name']))
+ else:
+ changed_enable = True
+ if module.params['client']:
+ rules = list(array.get_policies_nfs_client_rules(policy_names=[module.params['name']]).items)
+ if rules:
+ rule_name = ''
+ for rule in range(0, len(rules)):
+ if rules[rule].client == module.params['client']:
+ rule_name = rules[rule].name
+ break
+ if not rule_name:
+                        rules = flasharray.PolicyrulenfsclientpostRules(access=module.params['nfs_access'],
+                                                                        client=module.params['client'],
+                                                                        permission=module.params['nfs_permission'])
+ rule = flasharray.PolicyRuleNfsClientPost(rules=[rules])
+ rule_created = array.post_policies_nfs_client_rules(policy_names=[module.params['name']], rules=rule)
+ if rule_created.status_code != 200:
+ module.fail_json(msg="Failed to create new rule for NFS policy {0}. Error: {1}".format(module.params['name'],
+ rule_created.errors[0].message))
+ else:
+ changed_rule = True
+ else:
+                    rules = flasharray.PolicyrulenfsclientpostRules(access=module.params['nfs_access'],
+                                                                    client=module.params['client'],
+                                                                    permission=module.params['nfs_permission'])
+ rule = flasharray.PolicyRuleNfsClientPost(rules=[rules])
+ rule_created = array.post_policies_nfs_client_rules(policy_names=[module.params['name']], rules=rule)
+ if rule_created.status_code != 200:
+                        module.fail_json(msg="Failed to create new rule for NFS policy {0}. Error: {1}".format(module.params['name'],
+ rule_created.errors[0].message))
+ else:
+ changed_rule = True
+ elif module.params['policy'] == 'smb':
+ try:
+ current_enabled = list(array.get_policies_smb(names=[module.params['name']]).items)[0].enabled
+ except Exception:
+ module.fail_json(msg='Incorrect policy type specified for existing policy {0}'.format(module.params['name']))
+ if current_enabled != module.params['enabled']:
+ res = array.patch_policies_smb(names=[module.params['name']], policy=flasharray.PolicyPatch(enabled=module.params['enabled']))
+ if res.status_code != 200:
+                    module.fail_json(msg="Failed to enable/disable SMB policy {0}".format(module.params['name']))
+ else:
+ changed_enable = True
+ if module.params['client']:
+ rules = list(array.get_policies_smb_client_rules(policy_names=[module.params['name']]).items)
+ if rules:
+ rule_name = ''
+ for rule in range(0, len(rules)):
+ if rules[rule].client == module.params['client']:
+ rule_name = rules[rule].name
+ break
+ if not rule_name:
+                        rules = flasharray.PolicyrulesmbclientpostRules(anonymous_access_allowed=module.params['smb_anon_allowed'],
+                                                                        client=module.params['client'],
+                                                                        smb_encryption_required=module.params['smb_encrypt'])
+ rule = flasharray.PolicyRuleSmbClientPost(rules=[rules])
+ rule_created = array.post_policies_smb_client_rules(policy_names=[module.params['name']], rules=rule)
+ if rule_created.status_code != 200:
+ module.fail_json(msg="Failed to create new rule for SMB policy {0}. Error: {1}".format(module.params['name'],
+ rule_created.errors[0].message))
+ else:
+ changed_rule = True
+ else:
+                    rules = flasharray.PolicyrulesmbclientpostRules(anonymous_access_allowed=module.params['smb_anon_allowed'],
+                                                                    client=module.params['client'],
+                                                                    smb_encryption_required=module.params['smb_encrypt'])
+ rule = flasharray.PolicyRuleSmbClientPost(rules=[rules])
+ rule_created = array.post_policies_smb_client_rules(policy_names=[module.params['name']], rules=rule)
+ if rule_created.status_code != 200:
+ module.fail_json(msg="Failed to create new rule for SMB policy {0}. Error: {1}".format(module.params['name'],
+ rule_created.errors[0].message))
+ else:
+ changed_rule = True
+ else:
+ try:
+ current_enabled = list(array.get_policies_snapshot(names=[module.params['name']]).items)[0].enabled
+ except Exception:
+ module.fail_json(msg='Incorrect policy type specified for existing policy {0}'.format(module.params['name']))
+ if current_enabled != module.params['enabled']:
+                res = array.patch_policies_snapshot(names=[module.params['name']], policy=flasharray.PolicyPatch(enabled=module.params['enabled']))
+ if res.status_code != 200:
+                    module.fail_json(msg="Failed to enable/disable snapshot policy {0}".format(module.params['name']))
+ else:
+ changed_enable = True
+ if module.params['snap_client_name']:
+ if module.params['snap_at']:
+ if not module.params['snap_every'] % 1440 == 0:
+ module.fail_json(msg='snap_at time can only be set if snap_every is multiple of 1440')
+ if module.params['snap_keep_for'] < module.params['snap_every']:
+ module.fail_json(msg='Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every).')
+ rules = list(array.get_policies_snapshot_rules(policy_names=[module.params['name']]).items)
+ if rules:
+ rule_name = ''
+ for rule in range(0, len(rules)):
+ if rules[rule].client_name == module.params['snap_client_name']:
+ rule_name = rules[rule].name
+ break
+ if not rule_name:
+ if module.params['snap_keep_for'] < module.params['snap_every']:
+ module.fail_json(msg='Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every).')
+ if module.params['snap_at']:
+ if not module.params['snap_every'] % 1440 == 0:
+ module.fail_json(msg='snap_at time can only be set if snap_every is multiple of 1440')
+ rules = flasharray.PolicyrulesnapshotpostRules(at=_convert_to_millisecs(module.params['snap_at']),
+ client_name=module.params['snap_client_name'],
+ every=module.params['snap_every'] * 60000,
+ keep_for=module.params['snap_keep_for'] * 60000)
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(client_name=module.params['snap_client_name'],
+ every=module.params['snap_every'] * 60000,
+ keep_for=module.params['snap_keep_for'] * 60000)
+ rule = flasharray.PolicyRuleSnapshotPost(rules=[rules])
+ rule_created = array.post_policies_snapshot_rules(policy_names=[module.params['name']], rules=rule)
+ if rule_created.status_code != 200:
+ err_no = len(rule_created.errors) - 1
+ module.fail_json(msg="Failed to create new rule for Snapshot policy {0}. Error: {1}".format(module.params['name'],
+ rule_created.errors[err_no].message))
+ else:
+ changed_rule = True
+ else:
+ if module.params['snap_keep_for'] < module.params['snap_every']:
+ module.fail_json(msg='Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every).')
+ if module.params['snap_at']:
+ if not module.params['snap_every'] % 1440 == 0:
+ module.fail_json(msg='snap_at time can only be set if snap_every is multiple of 1440')
+ rules = flasharray.PolicyrulesnapshotpostRules(at=_convert_to_millisecs(module.params['snap_at']),
+ client_name=module.params['snap_client_name'],
+ every=module.params['snap_every'] * 60000,
+ keep_for=module.params['snap_keep_for'] * 60000)
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(client_name=module.params['snap_client_name'],
+ every=module.params['snap_every'] * 60000,
+ keep_for=module.params['snap_keep_for'] * 60000)
+ rule = flasharray.PolicyRuleSnapshotPost(rules=[rules])
+ rule_created = array.post_policies_snapshot_rules(policy_names=[module.params['name']], rules=rule)
+ if rule_created.status_code != 200:
+ err_no = len(rule_created.errors) - 1
+ module.fail_json(msg="Failed to create new rule for Snapshot policy {0}. Error: {1}".format(module.params['name'],
+ rule_created.errors[err_no].message))
+ else:
+ changed_rule = True
+ if changed_rule or changed_enable:
+ changed = True
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ nfs_access=dict(type='str', default='root-squash', choices=['root-squash', 'no-root-squash']),
+ nfs_permission=dict(type='str', default='rw', choices=['rw', 'ro']),
+ policy=dict(type='str', required=True, choices=['nfs', 'smb', 'snapshot']),
+ name=dict(type='str', required=True),
+ rename=dict(type='str'),
+ client=dict(type='str'),
+ enabled=dict(type='bool', default=True),
+ snap_at=dict(type='str'),
+ snap_every=dict(type='int'),
+ snap_keep_for=dict(type='int'),
+ snap_client_name=dict(type='str'),
+ smb_anon_allowed=dict(type='bool', default=False),
+ smb_encrypt=dict(type='bool', default=False),
+ ))
+
+ required_together = [['snap_keep_for', 'snap_every']]
+ module = AnsibleModule(argument_spec,
+ required_together=required_together,
+ supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg='py-pure-client sdk is required for this module')
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg='FlashArray REST version not supported. '
+ 'Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+ array = get_array(module)
+ state = module.params['state']
+
+ exists = bool(array.get_policies(names=[module.params['name']]).status_code == 200)
+
+ if state == 'present' and not exists:
+ create_policy(module, array)
+ elif state == 'present' and exists and module.params['rename']:
+ rename_policy(module, array)
+ elif state == 'present' and exists:
+ update_policy(module, array)
+ elif state == 'absent' and exists:
+ delete_policy(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py
new file mode 100644
index 00000000..c7e106d2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_proxy
+version_added: '1.0.0'
+author:
+ - Pure Storage ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+short_description: Configure FlashArray phonehome HTTPs proxy settings
+description:
+- Set or erase configuration for the HTTPS phonehome proxy settings.
+options:
+ state:
+ description:
+ - Set or delete proxy configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ host:
+ description:
+ - The proxy host name.
+ type: str
+ port:
+ description:
+ - The proxy TCP/IP port number.
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Delete existing proxy settings
+ purefa_proxy:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set proxy settings
+ purefa_proxy:
+ host: purestorage.com
+ port: 8080
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def delete_proxy(module, array):
+ """Delete proxy settings"""
+ changed = True
+ if not module.check_mode:
+ current_proxy = array.get(proxy=True)['proxy']
+ if current_proxy != '':
+ try:
+ array.set(proxy='')
+ except Exception:
+                module.fail_json(msg='Delete proxy settings failed')
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def create_proxy(module, array):
+ """Set proxy settings"""
+ changed = True
+ if not module.check_mode:
+ current_proxy = array.get(proxy=True)
+ if current_proxy is not None:
+ new_proxy = "https://" + module.params['host'] + ":" + str(module.params['port'])
+ if new_proxy != current_proxy['proxy']:
+ try:
+ array.set(proxy=new_proxy)
+ except Exception:
+ module.fail_json(msg='Set phone home proxy failed.')
+ else:
+ changed = False
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ host=dict(type='str'),
+ port=dict(type='int'),
+ ))
+
+ required_together = [['host', 'port']]
+
+ module = AnsibleModule(argument_spec,
+ required_together=required_together,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ array = get_system(module)
+
+ if state == 'absent':
+ delete_proxy(module, array)
+ elif state == 'present':
+ create_proxy(module, array)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py
new file mode 100644
index 00000000..678fbec3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_ra
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashArray Remote Assist
+description:
+- Enable or Disable Remote Assist for a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of remote assist
+ - When set to I(enable) the RA port can be exposed using the
+ I(debug) module.
+ type: str
+ default: enable
+ choices: [ enable, disable ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Enable Remote Assist port
+ purefa_ra:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: result
+
+ debug:
+ msg: "Remote Assist: {{ result['ra_facts'] }}"
+
+- name: Disable Remote Assist port
+ purefa_ra:
+ state: disable
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def enable_ra(module, array):
+ """Enable Remote Assist"""
+ changed = True
+ ra_facts = {}
+ if not module.check_mode:
+ if array.get_remote_assist_status()['status'] != 'enabled':
+ try:
+ ra_data = array.enable_remote_assist()
+ ra_facts['fa_ra'] = {'name': ra_data['name'],
+ 'port': ra_data['port']}
+ except Exception:
+ module.fail_json(msg='Enabling Remote Assist failed')
+ else:
+ try:
+ ra_data = array.get_remote_assist_status()
+ ra_facts['fa_ra'] = {'name': ra_data['name'],
+ 'port': ra_data['port']}
+ except Exception:
+ module.fail_json(msg='Getting Remote Assist failed')
+ module.exit_json(changed=changed, ra_info=ra_facts)
+
+
+def disable_ra(module, array):
+ """Disable Remote Assist"""
+ changed = True
+ if not module.check_mode:
+ if array.get_remote_assist_status()['status'] == 'enabled':
+ try:
+ array.disable_remote_assist()
+ except Exception:
+ module.fail_json(msg='Disabling Remote Assist failed')
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='enable', choices=['enable', 'disable']),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ array = get_system(module)
+
+ if module.params['state'] == 'enable':
+ enable_ra(module, array)
+ else:
+ disable_ra(module, array)
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py
new file mode 100644
index 00000000..cc464b65
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_smis
+version_added: '1.0.0'
+short_description: Enable or disable FlashArray SMI-S features
+description:
+- Enable or disable FlashArray SMI-S Provider and/or SLP
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ slp:
+ description:
+ - Enable/Disable Service Locator Protocol
+ - Ports used are TCP 427 and UDP 427
+ type: bool
+ default: true
+ smis:
+ description:
+ - Enable/Disable SMI-S Provider
+ - Port used is TCP 5989
+ type: bool
+ default: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Enable SMI-S and SLP
+ purefa_smis:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable SMI-S and SLP
+ purefa_smis:
+ smis: false
+ slp: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, get_array, purefa_argument_spec
+
+MIN_REQUIRED_API_VERSION = '2.2'
+
+
+def update_smis(module, array):
+ """Update SMI-S features"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ smis_changed = False
+ try:
+ current = list(array.get_smi_s().items)[0]
+ except Exception:
+ module.fail_json(msg='Failed to get current SMI-S settings.')
+ slp_enabled = current.slp_enabled
+ wbem_enabled = current.wbem_https_enabled
+ if slp_enabled != module.params['slp']:
+ slp_enabled = module.params['slp']
+ smis_changed = True
+ if wbem_enabled != module.params['smis']:
+ wbem_enabled = module.params['smis']
+ smis_changed = True
+ if smis_changed:
+ smi_s = flasharray.Smis(slp_enabled=slp_enabled, wbem_https_enabled=wbem_enabled)
+ try:
+ array.patch_smi_s(smi_s=smi_s)
+ changed = True
+ except Exception:
+ module.fail_json(msg='Failed to change SMI-S settings.')
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ smis=dict(type='bool', default=True),
+ slp=dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg='py-pure-client sdk is required for this module')
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg='FlashArray REST version not supported. '
+ 'Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+ array = get_array(module)
+
+ update_smis(module, array)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py
new file mode 100644
index 00000000..47b8b0ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_smtp
+version_added: '1.0.0'
+author:
+ - Pure Storage ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+short_description: Configure FlashArray SMTP settings
+description:
+- Set or erase configuration for the SMTP settings.
+- If username/password are set this will always force a change as there is
+  no way to see if the password is different from the current SMTP configuration.
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set or delete SMTP configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ password:
+ description:
+ - The SMTP password.
+ type: str
+ user:
+ description:
+ - The SMTP username.
+ type: str
+ relay_host:
+ description:
+ - IPv4 or IPv6 address or FQDN. A port number may be appended.
+ type: str
+ sender_domain:
+ description:
+ - Domain name.
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Delete existing SMTP settings
+ purefa_smtp:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Set SMTP settings
+ purefa_smtp:
+ sender_domain: purestorage.com
+ password: account_password
+ user: smtp_account
+ relay_host: 10.2.56.78:2345
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def delete_smtp(module, array):
+ """Delete SMTP settings"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_smtp(sender_domain='', username='', password='', relay_host='')
+ except Exception:
+            module.fail_json(msg='Delete SMTP settings failed')
+ module.exit_json(changed=changed)
+
+
+def create_smtp(module, array):
+ """Set SMTP settings"""
+ changed = True
+ current_smtp = array.get_smtp()
+ if not module.check_mode:
+ if module.params['sender_domain'] and current_smtp['sender_domain'] != module.params['sender_domain']:
+ try:
+ array.set_smtp(sender_domain=module.params['sender_domain'])
+ changed_sender = True
+ except Exception:
+ module.fail_json(msg='Set SMTP sender domain failed.')
+ else:
+ changed_sender = False
+ if module.params['relay_host'] and current_smtp['relay_host'] != module.params['relay_host']:
+ try:
+ array.set_smtp(relay_host=module.params['relay_host'])
+ changed_relay = True
+ except Exception:
+ module.fail_json(msg='Set SMTP relay host failed.')
+ else:
+ changed_relay = False
+ if module.params['user']:
+ try:
+ array.set_smtp(user_name=module.params['user'], password=module.params['password'])
+ changed_creds = True
+ except Exception:
+ module.fail_json(msg='Set SMTP username/password failed.')
+ else:
+ changed_creds = False
+ changed = bool(changed_sender or changed_relay or changed_creds)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ sender_domain=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ user=dict(type='str'),
+ relay_host=dict(type='str'),
+ ))
+
+ required_together = [['user', 'password']]
+
+ module = AnsibleModule(argument_spec,
+ required_together=required_together,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ array = get_system(module)
+
+ if state == 'absent':
+ delete_smtp(module, array)
+ elif state == 'present':
+ create_smtp(module, array)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py
new file mode 100644
index 00000000..c2410b4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py
@@ -0,0 +1,475 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_snap
+version_added: '1.0.0'
+short_description: Manage volume snapshots on Pure Storage FlashArrays
+description:
+- Create or delete volumes and volume snapshots on Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the source volume.
+ type: str
+ required: true
+ suffix:
+ description:
+ - Suffix of snapshot name.
+ - Not used during creation if I(offload) is provided.
+ type: str
+ target:
+ description:
+ - Name of target volume if creating from snapshot.
+ - Name of new snapshot suffix if renaming a snapshot
+ type: str
+ overwrite:
+ description:
+ - Define whether to overwrite existing volume when creating from snapshot.
+ type: bool
+ default: false
+ offload:
+ description:
+ - Only valid for Purity//FA 6.1 or higher
+ - Name of offload target for the snapshot.
+ - Target can be either another FlashArray or an Offload Target
+ - This is only applicable for creation, deletion and eradication of snapshots
+ - I(state) of I(copy) is not supported.
+ - I(suffix) is not supported for offload snapshots.
+ type: str
+ state:
+ description:
+ - Define whether the volume snapshot should exist or not.
+ choices: [ absent, copy, present, rename ]
+ type: str
+ default: present
+ eradicate:
+ description:
+ - Define whether to eradicate the snapshot on delete or leave in trash.
+ type: bool
+ default: false
+ ignore_repl:
+ description:
+ - Only valid for Purity//FA 6.1 or higher
+ - If set to true, allow destruction/eradication of snapshots in use by replication.
+ - If set to false, allow destruction/eradication of snapshots not in use by replication
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create snapshot foo.ansible
+ purefa_snap:
+ name: foo
+ suffix: ansible
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create R/W clone foo_clone from snapshot foo.snap
+ purefa_snap:
+ name: foo
+ suffix: snap
+ target: foo_clone
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Create R/W clone foo_clone from remote snapshot arrayB:foo.snap
+ purefa_snap:
+ name: arrayB:foo
+ suffix: snap
+ target: foo_clone
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Overwrite existing volume foo_clone with snapshot foo.snap
+ purefa_snap:
+ name: foo
+ suffix: snap
+ target: foo_clone
+ overwrite: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Delete and eradicate snapshot named foo.snap
+ purefa_snap:
+ name: foo
+ suffix: snap
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Rename snapshot foo.fred to foo.dave
+ purefa_snap:
+ name: foo
+ suffix: fred
+ target: dave
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: rename
+
+- name: Create a remote volume snapshot on offload device arrayB
+ purefa_snap:
+ name: foo
+ offload: arrayB
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete and eradicate a volume snapshot foo.1 on offload device arrayB
+ purefa_snap:
+ name: foo
+ suffix: 1
+ offload: arrayB
+ eradicate: true
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_array, get_system, purefa_argument_spec
+from datetime import datetime
+
+GET_SEND_API = '2.4'
+
+
+def _check_offload(module, array):
+ try:
+ offload = list(array.get_offloads(names=[module.params['offload']]).items)[0]
+ if offload.status == "connected":
+ return True
+ return False
+ except Exception:
+ return False
+
+
+def _check_target(module, array):
+ try:
+ target = list(array.get_array_connections(names=[module.params['offload']]).items)[0]
+ if target.status == "connected":
+ return True
+ return False
+ except Exception:
+ return False
+
+
+def _check_offload_snapshot(module, array):
+ """Return Remote Snapshot (active or deleted) or None"""
+ source_array = list(array.get_arrays().items)[0].name
+ snapname = source_array + ":" + module.params['name'] + "." + module.params['suffix']
+ if _check_offload(module, array):
+ res = array.get_remote_volume_snapshots(on=module.params['offload'],
+ names=[snapname],
+ destroyed=False)
+ else:
+ res = array.get_volume_snapshots(names=[snapname], destroyed=False)
+ if res.status_code != 200:
+ return None
+ return list(res.items)[0]
+
+
+def get_volume(module, array):
+ """Return Volume or None"""
+ try:
+ return array.get_volume(module.params['name'])
+ except Exception:
+ return None
+
+
+def get_target(module, array):
+ """Return Volume or None"""
+ try:
+ return array.get_volume(module.params['target'])
+ except Exception:
+ return None
+
+
+def get_deleted_snapshot(module, array, arrayv6):
+ """Return Deleted Snapshot"""
+ snapname = module.params['name'] + "." + module.params['suffix']
+ if module.params['offload']:
+ source_array = list(arrayv6.get_arrays().items)[0].name
+ snapname = module.params['name'] + "." + module.params['suffix']
+ full_snapname = source_array + ":" + snapname
+ if _check_offload(module, arrayv6):
+ res = arrayv6.get_remote_volume_snapshots(on=module.params['offload'],
+ names=[full_snapname],
+ destroyed=True)
+ else:
+ res = arrayv6.get_volume_snapshots(names=[snapname],
+ destroyed=True)
+ if res.status_code == 200:
+ return list(res.items)[0].destroyed
+ else:
+ return False
+ else:
+ try:
+ return bool(array.get_volume(snapname, snap=True, pending=True)[0]['time_remaining'] != '')
+ except Exception:
+ return False
+
+
+def get_snapshot(module, array):
+ """Return Snapshot or None"""
+ try:
+ snapname = module.params['name'] + "." + module.params['suffix']
+ for snaps in array.get_volume(module.params['name'], snap=True, pending=False):
+ if snaps['name'] == snapname:
+ return True
+ except Exception:
+ return False
+
+
+def create_snapshot(module, array, arrayv6):
+ """Create Snapshot"""
+ changed = True
+ if not module.check_mode:
+ if module.params['offload']:
+ if module.params['suffix']:
+ module.warn('Suffix not supported for Remote Volume Offload Snapshot. Using next incremental integer')
+ module.params['suffix'] = None
+ res = arrayv6.post_remote_volume_snapshots(source_names=[module.params['name']], on=module.params['offload'])
+ if res.status_code != 200:
+                module.fail_json(msg='Failed to create remote snapshot for volume {0}. Error: {1}'.format(module.params['name'],
+ res.errors[0].message))
+ else:
+ try:
+ array.create_snapshot(module.params['name'], suffix=module.params['suffix'])
+ except Exception:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def create_from_snapshot(module, array):
+ """Create Volume from Snapshot"""
+ source = module.params['name'] + "." + module.params['suffix']
+ tgt = get_target(module, array)
+ if tgt is None:
+ changed = True
+ if not module.check_mode:
+ array.copy_volume(source, module.params['target'])
+ elif tgt is not None and module.params['overwrite']:
+ changed = True
+ if not module.check_mode:
+ array.copy_volume(source,
+ module.params['target'],
+ overwrite=module.params['overwrite'])
+ elif tgt is not None and not module.params['overwrite']:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def recover_snapshot(module, array, arrayv6):
+ """Recover Snapshot"""
+ changed = False
+ snapname = module.params['name'] + "." + module.params['suffix']
+ if not module.check_mode:
+ if module.params['offload'] and _check_offload(module, arrayv6):
+ source_array = list(array.get_arrays().items)[0].name
+            snapname = source_array + ":" + module.params['name'] + "." + module.params['suffix']
+ res = arrayv6.patch_remote_volume_snapshots(names=[snapname], on=module.params['offload'],
+ remote_volume_snapshot=flasharray.DestroyedPatchPost(destroyed=False))
+ changed = True
+ if res.status_code != 200:
+                module.fail_json(msg='Failed to recover remote snapshot {0}. Error: {1}'.format(snapname, res.errors[0].message))
+ else:
+ try:
+ array.recover_volume(snapname)
+ changed = True
+ except Exception:
+ module.fail_json(msg='Recovery of snapshot {0} failed'.format(snapname))
+ module.exit_json(changed=changed)
+
+
+def update_snapshot(module, array):
+ """Update Snapshot - basically just rename..."""
+ changed = True
+ if not module.check_mode:
+ current_name = module.params['name'] + "." + module.params['suffix']
+ new_name = module.params['name'] + "." + module.params['target']
+ res = array.patch_volume_snapshots(names=[current_name], volume_snapshot=flasharray.VolumeSnapshotPatch(name=new_name))
+ if res.status_code != 200:
+ module.fail_json(msg='Failed to rename {0} to {1}. Error: {2}'.format(current_name, new_name,
+ res.errors[0].message))
+ module.exit_json(changed=changed)
+
+
+def delete_snapshot(module, array, arrayv6):
+ """Delete Snapshot"""
+ changed = True
+ snapname = module.params['name'] + "." + module.params['suffix']
+ if not module.check_mode:
+ if module.params['offload'] and _check_offload(module, arrayv6):
+ source_array = list(arrayv6.get_arrays().items)[0].name
+ full_snapname = source_array + ":" + snapname
+ res = arrayv6.patch_remote_volume_snapshots(names=[full_snapname], on=module.params['offload'],
+ volume_snapshot=flasharray.VolumeSnapshotPatch(destroyed=True),
+ replication_snapshot=module.params['ignore_repl'])
+ if res.status_code != 200:
+ module.fail_json(msg='Failed to delete remote snapshot {0}. Error: {1}'.format(snapname, res.errors[0].message))
+ if module.params['eradicate']:
+ res = arrayv6.delete_remote_volume_snapshots(names=[full_snapname], on=module.params['offload'],
+ replication_snapshot=module.params['ignore_repl'])
+ if res.status_code != 200:
+ module.fail_json(msg='Failed to eradicate remote snapshot {0}. Error: {1}'.format(snapname, res.errors[0].message))
+ elif module.params['offload'] and _check_target(module, arrayv6):
+ res = arrayv6.patch_volume_snapshots(names=[snapname],
+ volume_snapshot=flasharray.DestroyedPatchPost(destroyed=True),
+ replication_snapshot=module.params['ignore_repl'])
+ if res.status_code != 200:
+ module.fail_json(msg='Failed to delete remote snapshot {0}. Error: {1}'.format(snapname, res.errors[0].message))
+ if module.params['eradicate']:
+ res = arrayv6.delete_volume_snapshots(names=[snapname],
+ replication_snapshot=module.params['ignore_repl'])
+ if res.status_code != 200:
+ module.fail_json(msg='Failed to eradicate remote snapshot {0}. Error: {1}'.format(snapname, res.errors[0].message))
+ else:
+ try:
+ array.destroy_volume(snapname)
+ if module.params['eradicate']:
+ try:
+ array.eradicate_volume(snapname)
+ except Exception:
+ changed = False
+ except Exception:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def eradicate_snapshot(module, array, arrayv6):
+ """Eradicate snapshot"""
+ changed = True
+ snapname = module.params['name'] + "." + module.params['suffix']
+ if not module.check_mode:
+ if module.params['offload'] and _check_offload(module, arrayv6):
+ source_array = list(arrayv6.get_arrays().items)[0].name
+ full_snapname = source_array + ":" + snapname
+ res = arrayv6.delete_remote_volume_snapshots(names=[full_snapname], on=module.params['offload'],
+ replication_snapshot=module.params['ignore_repl'])
+ if res.status_code != 200:
+ module.fail_json(msg='Failed to eradicate remote snapshot {0}. Error: {1}'.format(snapname, res.errors[0].message))
+ elif module.params['offload'] and _check_target(module, arrayv6):
+ res = arrayv6.delete_volume_snapshots(names=[snapname],
+ replication_snapshot=module.params['ignore_repl'])
+ if res.status_code != 200:
+ module.fail_json(msg='Failed to eradicate remote snapshot {0}. Error: {1}'.format(snapname, res.errors[0].message))
+ else:
+ try:
+ array.eradicate_volume(snapname)
+ except Exception:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str', required=True),
+ suffix=dict(type='str'),
+ target=dict(type='str'),
+ offload=dict(type='str'),
+ ignore_repl=dict(type='bool', default=False),
+ overwrite=dict(type='bool', default=False),
+ eradicate=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'copy', 'present', 'rename']),
+ ))
+
+ required_if = [('state', 'copy', ['target', 'suffix'])]
+
+ module = AnsibleModule(argument_spec,
+ required_if=required_if,
+ supports_check_mode=True)
+ pattern = re.compile("^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,63}[a-zA-Z0-9])?$")
+
+ state = module.params['state']
+ if module.params['suffix'] is None:
+ suffix = "snap-" + str((datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds())
+ module.params['suffix'] = suffix.replace(".", "")
+ else:
+ if not module.params['offload']:
+ if not pattern.match(module.params['suffix']) and state not in ['absent', 'rename']:
+ module.fail_json(msg='Suffix name {0} does not conform to suffix name rules'.format(module.params['suffix']))
+ if state == 'rename' and module.params['target'] is not None:
+ if not pattern.match(module.params['target']):
+ module.fail_json(msg='Suffix target {0} does not conform to suffix name rules'.format(module.params['target']))
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if GET_SEND_API not in api_version:
+ arrayv6 = None
+ if module.params['offload']:
+ module.fail_json(msg="Purity 6.1, or higher, is required to support single volume offload snapshots")
+ if state == 'rename':
+ module.fail_json(msg="Purity 6.1, or higher, is required to support snapshot rename")
+ else:
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg='py-pure-client sdk is required for this module')
+ arrayv6 = get_array(module)
+ if module.params['offload']:
+ if not _check_offload(module, arrayv6) and not _check_target(module, arrayv6):
+ module.fail_json(msg='Selected offload {0} not connected.'.format(module.params['offload']))
+ if state == 'copy' and module.params['offload'] and not _check_target(module, arrayv6):
+ module.fail_json(msg="Snapshot copy is not supported when an offload target is defined")
+ destroyed = False
+ array_snap = offload_snap = False
+ volume = get_volume(module, array)
+ if module.params['offload'] and not _check_target(module, arrayv6):
+ offload_snap = _check_offload_snapshot(module, arrayv6)
+ if offload_snap is None:
+ offload_snap = False
+ else:
+ offload_snap = not offload_snap.destroyed
+ else:
+ array_snap = get_snapshot(module, array)
+ snap = array_snap or offload_snap
+ if not snap:
+ destroyed = get_deleted_snapshot(module, array, arrayv6)
+ if state == 'present' and volume and not destroyed:
+ create_snapshot(module, array, arrayv6)
+ elif state == 'present' and volume and destroyed:
+ recover_snapshot(module, array, arrayv6)
+ elif state == 'rename' and volume and snap:
+ update_snapshot(module, arrayv6)
+ elif state == 'copy' and snap:
+ create_from_snapshot(module, array)
+ elif state == 'absent' and snap and not destroyed:
+ delete_snapshot(module, array, arrayv6)
+ elif state == 'absent' and destroyed and module.params['eradicate']:
+ eradicate_snapshot(module, array, arrayv6)
+ elif state == 'absent' and not snap:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py
new file mode 100644
index 00000000..cb8779f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_snmp
+version_added: '1.0.0'
+short_description: Configure FlashArray SNMP Managers
+description:
+- Manage SNMP managers on a Pure Storage FlashArray.
+- Changing of a named SNMP managers version is not supported.
+- This module is not idempotent and will always modify an
+ existing SNMP manager due to hidden parameters that cannot
+ be compared to the play parameters.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of SNMP Manager
+ required: True
+ type: str
+ state:
+ description:
+ - Create or delete SNMP manager
+ type: str
+ default: present
+ choices: [ absent, present ]
+ auth_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase of 8 - 32 characters.
+ auth_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Hash algorithm to use
+ choices: [ MD5, SHA ]
+ community:
+ type: str
+ description:
+ - SNMP v2c only. Manager community ID. Between 1 and 32 characters long.
+ host:
+ type: str
+ description:
+ - IPv4 or IPv6 address or FQDN to send trap messages to.
+ user:
+ type: str
+ description:
+ - SNMP v3 only. User ID recognized by the specified SNMP manager.
+ Must be between 1 and 32 characters.
+ version:
+ type: str
+ description:
+ - Version of SNMP protocol to use for the manager.
+ choices: [ v2c, v3 ]
+ default: v2c
+ notification:
+ type: str
+ description:
+ - Action to perform on event.
+ default: trap
+ choices: [ inform, trap ]
+ privacy_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase to encrypt SNMP messages.
+ Must be between 8 and 63 non-space ASCII characters.
+ privacy_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Encryption protocol to use
+ choices: [ AES, DES ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Delete existing SNMP manager
+ purefa_snmp:
+ name: manager1
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Create v2c SNMP manager
+ purefa_snmp:
+ name: manager1
+ community: public
+ host: 10.21.22.23
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Create v3 SNMP manager
+ purefa_snmp:
+ name: manager2
+ version: v3
+ auth_protocol: MD5
+ auth_passphrase: password
+ host: 10.21.22.23
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Update existing SNMP manager
+ purefa_snmp:
+ name: manager1
+ community: private
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def update_manager(module, array):
+ """Update SNMP Manager"""
+ changed = True
+ if not module.check_mode:
+ try:
+ mgr = array.get_snmp_manager(module.params['name'])
+ except Exception:
+ module.fail_json(msg="Failed to get configuration for SNMP manager {0}.".format(module.params['name']))
+ if mgr['version'] != module.params['version']:
+            module.fail_json(msg="Changing an SNMP manager's version is not supported.")
+ elif module.params['version'] == "v2c":
+ try:
+ array.set_snmp_manager(module.params['name'],
+ community=module.params['community'],
+ notification=module.params['notification'],
+ host=module.params['host']
+ )
+ except Exception:
+ module.fail_json(msg="Failed to update SNMP manager {0}.".format(module.params['name']))
+ else:
+ if module.params['auth_protocol'] and module.params['privacy_protocol']:
+ try:
+ array.set_snmp_manager(module.params['name'],
+ auth_passphrase=module.params['auth_passphrase'],
+ auth_protocol=module.params['auth_protocol'],
+ privacy_passphrase=module.params['privacy_passphrase'],
+ privacy_protocol=module.params['privacy_protocol'],
+ notification=module.params['notification'],
+ user=module.params['user'],
+ host=module.params['host']
+ )
+ except Exception:
+ module.fail_json(msg="Failed to update SNMP manager {0}.".format(module.params['name']))
+ elif module.params['auth_protocol'] and not module.params['privacy_protocol']:
+ try:
+ array.set_snmp_manager(module.params['name'],
+ version=module.params['version'],
+ auth_passphrase=module.params['auth_passphrase'],
+ auth_protocol=module.params['auth_protocol'],
+ notification=module.params['notification'],
+ user=module.params['user'],
+ host=module.params['host']
+ )
+ except Exception:
+ module.fail_json(msg="Failed to update SNMP manager {0}.".format(module.params['name']))
+ elif not module.params['auth_protocol'] and module.params['privacy_protocol']:
+ try:
+ array.set_snmp_manager(module.params['name'],
+ version=module.params['version'],
+ privacy_passphrase=module.params['privacy_passphrase'],
+ privacy_protocol=module.params['privacy_protocol'],
+ notification=module.params['notification'],
+ user=module.params['user'],
+ host=module.params['host']
+ )
+ except Exception:
+ module.fail_json(msg="Failed to update SNMP manager {0}.".format(module.params['name']))
+ elif not module.params['auth_protocol'] and not module.params['privacy_protocol']:
+ try:
+ array.set_snmp_manager(module.params['name'],
+ version=module.params['version'],
+ notification=module.params['notification'],
+ user=module.params['user'],
+ host=module.params['host']
+ )
+ except Exception:
+ module.fail_json(msg="Failed to update SNMP manager {0}.".format(module.params['name']))
+ else:
+ module.fail_json(msg="Invalid parameters selected in update. Please raise issue in Ansible GitHub")
+
+ module.exit_json(changed=changed)
+
+
+def delete_manager(module, array):
+ """Delete SNMP Manager"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.delete_snmp_manager(module.params['name'])
+ except Exception:
+ module.fail_json(msg='Delete SNMP manager {0} failed'.format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def create_manager(module, array):
+ """Create SNMP Manager"""
+ changed = True
+ if not module.check_mode:
+ if module.params['version'] == "v2c":
+ try:
+ array.create_snmp_manager(module.params['name'],
+ version=module.params['version'],
+ community=module.params['community'],
+ notification=module.params['notification'],
+ host=module.params['host']
+ )
+ except Exception:
+ module.fail_json(msg="Failed to create SNMP manager {0}.".format(module.params['name']))
+ else:
+ if module.params['auth_protocol'] and module.params['privacy_protocol']:
+ try:
+ array.create_snmp_manager(module.params['name'],
+ version=module.params['version'],
+ auth_passphrase=module.params['auth_passphrase'],
+ auth_protocol=module.params['auth_protocol'],
+ privacy_passphrase=module.params['privacy_passphrase'],
+ privacy_protocol=module.params['privacy_protocol'],
+ notification=module.params['notification'],
+ user=module.params['user'],
+ host=module.params['host']
+ )
+ except Exception:
+ module.fail_json(msg="Failed to create SNMP manager {0}.".format(module.params['name']))
+ elif module.params['auth_protocol'] and not module.params['privacy_protocol']:
+ try:
+ array.create_snmp_manager(module.params['name'],
+ version=module.params['version'],
+ auth_passphrase=module.params['auth_passphrase'],
+ auth_protocol=module.params['auth_protocol'],
+ notification=module.params['notification'],
+ user=module.params['user'],
+ host=module.params['host']
+ )
+ except Exception:
+ module.fail_json(msg="Failed to create SNMP manager {0}.".format(module.params['name']))
+ elif not module.params['auth_protocol'] and module.params['privacy_protocol']:
+ try:
+ array.create_snmp_manager(module.params['name'],
+ version=module.params['version'],
+ privacy_passphrase=module.params['privacy_passphrase'],
+ privacy_protocol=module.params['privacy_protocol'],
+ notification=module.params['notification'],
+ user=module.params['user'],
+ host=module.params['host']
+ )
+ except Exception:
+ module.fail_json(msg="Failed to create SNMP manager {0}.".format(module.params['name']))
+ elif not module.params['auth_protocol'] and not module.params['privacy_protocol']:
+ try:
+ array.create_snmp_manager(module.params['name'],
+ version=module.params['version'],
+ notification=module.params['notification'],
+ user=module.params['user'],
+ host=module.params['host']
+ )
+ except Exception:
+ module.fail_json(msg="Failed to create SNMP manager {0}.".format(module.params['name']))
+ else:
+ module.fail_json(msg="Invalid parameters selected in create. Please raise issue in Ansible GitHub")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str', required=True),
+ host=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ user=dict(type='str'),
+ notification=dict(type='str', choices=['inform', 'trap'], default='trap'),
+ auth_passphrase=dict(type='str', no_log=True),
+ auth_protocol=dict(type='str', choices=['MD5', 'SHA']),
+ privacy_passphrase=dict(type='str', no_log=True),
+ privacy_protocol=dict(type='str', choices=['AES', 'DES']),
+ version=dict(type='str', default='v2c', choices=['v2c', 'v3']),
+ community=dict(type='str'),
+ ))
+
+ required_together = [['auth_passphrase', 'auth_protocol'],
+ ['privacy_passphrase', 'privacy_protocol']]
+ required_if = [['version', 'v2c', ['community', 'host']],
+ ['version', 'v3', ['host', 'user']]]
+
+ module = AnsibleModule(argument_spec,
+ required_together=required_together,
+ required_if=required_if,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ array = get_system(module)
+ mgr_configured = False
+ mgrs = array.list_snmp_managers()
+ for mgr in range(0, len(mgrs)):
+ if mgrs[mgr]['name'] == module.params['name']:
+ mgr_configured = True
+ break
+ if module.params['version'] == "v3":
+        if module.params['auth_passphrase'] and not 8 <= len(module.params['auth_passphrase']) <= 32:
+            module.fail_json(msg="auth_passphrase must be between 8 and 32 characters")
+        if module.params['privacy_passphrase'] and not 8 <= len(module.params['privacy_passphrase']) <= 63:
+            module.fail_json(msg="privacy_passphrase must be between 8 and 63 characters")
+ if state == 'absent' and mgr_configured:
+ delete_manager(module, array)
+ elif mgr_configured and state == 'present':
+ update_manager(module, array)
+ elif not mgr_configured and state == 'present':
+ create_manager(module, array)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py
new file mode 100644
index 00000000..017762f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: purefa_subnet
+version_added: '1.0.0'
+short_description: Manage network subnets in a Pure Storage FlashArray
+description:
+ - This module manages the network subnets on a Pure Storage FlashArray.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Subnet name.
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete subnet.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ enabled:
+ description:
+ - whether the subnet should be enabled or not
+ default: True
+ type: bool
+ prefix:
+ description:
+ - Set the IPv4 or IPv6 address to be associated with the subnet.
+ required: false
+ type: str
+ gateway:
+ description:
+ - IPv4 or IPv6 address of subnet gateway.
+ required: false
+ type: str
+ mtu:
+ description:
+ - MTU size of the subnet. Range is 568 to 9000.
+ required: false
+ default: 1500
+ type: int
+ vlan:
+ description:
+ - VLAN ID. Range is 0 to 4094.
+ required: false
+ type: int
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = '''
+- name: Create subnet subnet100
+ purefa_subnet:
+ name: subnet100
+ vlan: 100
+ gateway: 10.21.200.1
+ prefix: "10.21.200.0/24"
+ mtu: 9000
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Disable subnet subnet100
+ purefa_subnet:
+ name: subnet100
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Delete subnet subnet100
+ purefa_subnet:
+ name: subnet100
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40'''
+
+RETURN = '''
+'''
+
+try:
+ from netaddr import IPNetwork
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def _get_subnet(module, array):
+ """Return subnet or None"""
+ subnet = {}
+ try:
+ subnet = array.get_subnet(module.params['name'])
+ except Exception:
+ return None
+ return subnet
+
+
def update_subnet(module, array, subnet):
    """Modify subnet settings.

    Builds the desired prefix/vlan/mtu/gateway state from the module
    parameters (falling back to the subnet's current values), applies it
    with a single set_subnet() call when it differs, then reconciles the
    enabled flag.  Exits the module directly via exit_json().
    """
    changed = True
    if not module.check_mode:
        # Current values of the attributes we may modify; compared against
        # the requested state below to keep the update idempotent.
        current_state = {'mtu': subnet['mtu'],
                         'vlan': subnet['vlan'],
                         'prefix': subnet['prefix'],
                         'gateway': subnet['gateway']}
        if not module.params['prefix']:
            prefix = subnet['prefix']
        else:
            # Whichever gateway ends up in effect must fall inside the new
            # prefix (netaddr IPNetwork membership test).
            if module.params['gateway'] and module.params['gateway'] not in IPNetwork(module.params['prefix']):
                module.fail_json(msg='Gateway and subnet are not compatible.')
            elif not module.params['gateway'] and subnet['gateway'] not in IPNetwork(module.params['prefix']):
                module.fail_json(msg='Gateway and subnet are not compatible.')
            prefix = module.params['prefix']
        if not module.params['vlan']:
            # NOTE(review): a requested vlan of 0 is falsy and therefore
            # keeps the existing VLAN instead of setting 0 -- confirm
            # whether "vlan: 0" should be settable.
            vlan = subnet['vlan']
        else:
            if not 0 <= module.params['vlan'] <= 4094:
                module.fail_json(msg='VLAN {0} is out of range (0 to 4094)'.format(module.params['vlan']))
            else:
                vlan = module.params['vlan']
        if not module.params['mtu']:
            mtu = subnet['mtu']
        else:
            if not 568 <= module.params['mtu'] <= 9000:
                module.fail_json(msg='MTU {0} is out of range (568 to 9000)'.format(module.params['mtu']))
            else:
                mtu = module.params['mtu']
        if not module.params['gateway']:
            gateway = subnet['gateway']
        else:
            if module.params['gateway'] not in IPNetwork(prefix):
                module.fail_json(msg='Gateway and subnet are not compatible.')
            gateway = module.params['gateway']
        new_state = {'prefix': prefix,
                     'mtu': mtu,
                     'gateway': gateway,
                     'vlan': vlan}
        if new_state == current_state:
            changed = False
        else:
            try:
                # Apply all attribute changes in a single call.
                array.set_subnet(subnet['name'],
                                 prefix=new_state['prefix'],
                                 mtu=new_state['mtu'],
                                 vlan=new_state['vlan'],
                                 gateway=new_state['gateway'])
            except Exception:
                module.fail_json(msg="Failed to change settings for subnet {0}.".format(subnet['name']))
        # Enabled state is toggled separately from the attribute update.
        if subnet['enabled'] != module.params['enabled']:
            if module.params['enabled']:
                try:
                    array.enable_subnet(subnet['name'])
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to enable subnet {0}.".format(subnet['name']))
            else:
                try:
                    array.disable_subnet(subnet['name'])
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to disable subnet {0}.".format(subnet['name']))
    module.exit_json(changed=changed)
+
+
def create_subnet(module, array):
    """Create a subnet.

    Validates prefix/vlan/mtu/gateway, creates the subnet and then
    enables or disables it according to the ``enabled`` parameter.
    Exits the module directly via exit_json().
    """
    changed = True
    if not module.check_mode:
        if not module.params['prefix']:
            module.fail_json(msg='Prefix required when creating subnet.')
        else:
            if module.params['gateway'] and module.params['gateway'] not in IPNetwork(module.params['prefix']):
                module.fail_json(msg='Gateway and subnet are not compatible.')
            prefix = module.params['prefix']
        if module.params['vlan']:
            if not 0 <= module.params['vlan'] <= 4094:
                module.fail_json(msg='VLAN {0} is out of range (0 to 4094)'.format(module.params['vlan']))
            else:
                vlan = module.params['vlan']
        else:
            vlan = 0
        if module.params['mtu']:
            if not 568 <= module.params['mtu'] <= 9000:
                module.fail_json(msg='MTU {0} is out of range (568 to 9000)'.format(module.params['mtu']))
            else:
                mtu = module.params['mtu']
        else:
            # Fall back to the argument-spec default.  Previously a falsy
            # mtu (e.g. 0) left ``mtu`` unbound and raised NameError below.
            mtu = 1500
        if module.params['gateway']:
            if module.params['gateway'] not in IPNetwork(prefix):
                module.fail_json(msg='Gateway and subnet are not compatible.')
            gateway = module.params['gateway']
        else:
            gateway = ''
        try:
            array.create_subnet(module.params['name'],
                                prefix=prefix,
                                mtu=mtu,
                                vlan=vlan,
                                gateway=gateway)
        except Exception:
            module.fail_json(msg="Failed to create subnet {0}.".format(module.params['name']))
        if module.params['enabled']:
            try:
                array.enable_subnet(module.params['name'])
            except Exception:
                module.fail_json(msg="Failed to enable subnet {0}.".format(module.params['name']))
        else:
            try:
                array.disable_subnet(module.params['name'])
            except Exception:
                module.fail_json(msg="Failed to disable subnet {0}.".format(module.params['name']))
    module.exit_json(changed=changed)
+
+
def delete_subnet(module, array):
    """Remove the named subnet from the array and exit the module."""
    changed = True
    if not module.check_mode:
        name = module.params['name']
        try:
            array.delete_subnet(name)
        except Exception:
            module.fail_json(msg="Failed to delete subnet {0}".format(name))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create, update or delete the named subnet."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        prefix=dict(type='str'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        gateway=dict(type='str'),
        enabled=dict(type='bool', default=True),
        mtu=dict(type='int', default=1500),
        vlan=dict(type='int'),
    ))

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    if not HAS_NETADDR:
        module.fail_json(msg='netaddr module is required')

    state = module.params['state']
    array = get_system(module)
    subnet = _get_subnet(module, array)
    # Each handler exits the module itself via exit_json().
    if state == 'present':
        if subnet:
            update_subnet(module, array, subnet)
        else:
            create_subnet(module, array)
    elif subnet:
        delete_subnet(module, array)

    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py
new file mode 100644
index 00000000..c7b1e8c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_syslog
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray syslog settings
+description:
+- Configure syslog configuration for Pure Storage FlashArrays.
+- Add or delete an individual syslog server to the existing
+  list of servers.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete syslog servers configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ protocol:
+ description:
+ - Protocol which server uses
+ required: true
+ type: str
+ choices: [ tcp, tls, udp ]
+ port:
+ description:
+ - Port at which the server is listening. If no port is specified
+ the system will use 514
+ type: str
+ address:
+ description:
+ - Syslog server address.
+ This field supports IPv4, IPv6 or FQDN.
+ An invalid IP addresses will cause the module to fail.
+ No validation is performed for FQDNs.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Delete existing syslog server entries
+ purefa_syslog:
+ address: syslog1.com
+ protocol: tcp
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set array syslog servers
+ purefa_syslog:
+ state: present
+ address: syslog1.com
+ protocol: udp
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
def delete_syslog(module, array):
    """Delete a syslog server entry.

    Removes the protocol://address[:port] entry from the array's syslog
    server list.  Reports changed=False when the entry is not present
    (previously the module always reported a change).
    """
    changed = True
    if not module.check_mode:
        noport_address = module.params['protocol'] + "://" + module.params['address']

        if module.params['port']:
            full_address = noport_address + ":" + module.params['port']
        else:
            full_address = noport_address

        address_list = array.get(syslogserver=True)['syslogserver']

        if address_list and full_address in address_list:
            # Rebuild the list instead of deleting while iterating by index.
            new_list = [entry for entry in address_list if entry != full_address]
            try:
                array.set(syslogserver=new_list)
            except Exception:
                module.fail_json(msg='Failed to remove syslog server: {0}'.format(full_address))
        else:
            changed = False

    module.exit_json(changed=changed)
+
+
def add_syslog(module, array):
    """Add a syslog server entry if it is not already configured."""
    changed = True
    if not module.check_mode:
        changed = False
        noport_address = module.params['protocol'] + "://" + module.params['address']

        full_address = noport_address
        if module.params['port']:
            full_address = noport_address + ":" + module.params['port']

        address_list = array.get(syslogserver=True)['syslogserver']
        already_there = bool(address_list) and full_address in address_list

        if not already_there:
            try:
                address_list.append(full_address)
                array.set(syslogserver=address_list)
                changed = True
            except Exception:
                module.fail_json(msg='Failed to add syslog server: {0}'.format(full_address))

    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: add or remove a syslog server based on ``state``."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        address=dict(type='str', required=True),
        protocol=dict(type='str', choices=['tcp', 'tls', 'udp'], required=True),
        port=dict(type='str'),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    ))

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    array = get_system(module)

    # Both handlers exit the module themselves via exit_json().
    if module.params['state'] == 'absent':
        delete_syslog(module, array)
    else:
        add_syslog(module, array)

    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py
new file mode 100644
index 00000000..b71f2fe1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_timeout
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray GUI idle timeout
+description:
+- Configure GUI idle timeout for Pure Storage FlashArrays.
+- This does not affect existing GUI sessions.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set or disable the GUI idle timeout
+ default: present
+ type: str
+ choices: [ present, absent ]
+ timeout:
+ description:
+ - Minutes for idle timeout.
+ type: int
+ default: 30
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Set GUI idle timeout to 25 minutes
+  purefa_timeout:
+ timeout: 25
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable idle timeout
+  purefa_timeout:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
def set_timeout(module, array):
    """Set the GUI idle timeout (minutes) and exit the module."""
    changed = True
    if not module.check_mode:
        try:
            # The inner duplicate ``if not module.check_mode`` that used to
            # sit here was redundant; the outer guard already covers it.
            array.set(idle_timeout=module.params['timeout'])
        except Exception:
            module.fail_json(msg='Failed to set GUI idle timeout')

    module.exit_json(changed=changed)
+
+
def disable_timeout(module, array):
    """Turn the GUI idle timeout off (set it to zero) and exit."""
    if module.check_mode:
        module.exit_json(changed=True)
    try:
        array.set(idle_timeout=0)
    except Exception:
        module.fail_json(msg='Failed to disable GUI idle timeout')
    module.exit_json(changed=True)
+
+
def main():
    """Entry point: set or disable the GUI idle timeout.

    A timeout of 0 disables the feature; otherwise the value must lie in
    the 5-180 minute range.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        timeout=dict(type='int', default=30),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    ))

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    state = module.params['state']
    timeout = module.params['timeout']
    # Previously written as ``5 < timeout > 180`` which means
    # "timeout > 5 and timeout > 180" and therefore only rejected values
    # above 180.  Correctly reject anything outside 5..180, with 0 allowed
    # as the "disabled" sentinel.
    if not 5 <= timeout <= 180 and timeout != 0:
        module.fail_json(msg='Timeout value must be between 5 and 180 minutes')
    array = get_system(module)
    current_timeout = array.get(idle_timeout=True)['idle_timeout']
    if state == 'present' and current_timeout != timeout:
        set_timeout(module, array)
    elif state == 'absent' and current_timeout != 0:
        disable_timeout(module, array)

    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py
new file mode 100644
index 00000000..b487c961
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_user
+version_added: '1.0.0'
+short_description: Create, modify or delete FlashArray local user account
+description:
+- Create, modify or delete local users on a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create, delete or update local user account
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - The name of the local user account
+ type: str
+ required: true
+ role:
+ description:
+ - Sets the local user's access level to the array
+ type: str
+ choices: [ readonly, storage_admin, array_admin ]
+ password:
+ description:
+ - Password for the local user.
+ type: str
+ old_password:
+ description:
+ - If changing an existing password, you must provide the old password for security
+ type: str
+ api:
+ description:
+ - Define whether to create an API token for this user
+ - Token can be exposed using the I(debug) module
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create new user ansible with API token
+ purefa_user:
+ name: ansible
+ password: apassword
+ role: storage_admin
+ api: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: result
+
+ debug:
+ msg: "API Token: {{ result['user_info']['user_api'] }}"
+
+- name: Change role type for existing user
+ purefa_user:
+ name: ansible
+ role: array_admin
+ state: update
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Change password type for existing user (NOT IDEMPOTENT)
+ purefa_user:
+ name: ansible
+ password: anewpassword
+ old_password: apassword
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Change API token for existing user
+ purefa_user:
+ name: ansible
+ api: true
+ state: update
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: result
+
+ debug:
+ msg: "API Token: {{ result['user_info']['user_api'] }}"
+'''
+
+RETURN = r'''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+MIN_REQUIRED_API_VERSION = '1.14'
+
+
def get_user(module, array):
    """Return the named local user account dict, or None if absent."""
    found = None
    for account in array.list_admins():
        if account['name'] == module.params['name']:
            found = account
    return found
+
+
def create_user(module, array):
    """Create or Update Local User Account.

    Creates the account (defaulting role to 'readonly') when it does not
    exist; otherwise handles password change, API-token regeneration and
    role change.  Exits the module directly, returning any newly created
    API token under the ``user_info`` result key.
    """
    changed = True
    if not module.check_mode:
        user = get_user(module, array)
        role = module.params['role']
        api_changed = False
        role_changed = False
        passwd_changed = False
        user_token = {}
        if not user:
            try:
                if not role:
                    role = 'readonly'
                array.create_admin(module.params['name'], role=role,
                                   password=module.params['password'])
                if module.params['api']:
                    try:
                        user_token['user_api'] = array.create_api_token(module.params['name'])['api_token']
                    except Exception:
                        # Roll back the half-created account on token failure.
                        array.delete_user(module.params['name'])
                        module.fail_json(msg='Local User {0}: Creation failed'.format(module.params['name']))
            except Exception:
                module.fail_json(msg='Local User {0}: Creation failed'.format(module.params['name']))
        else:
            # Password may only be changed when the old password is supplied.
            if module.params['password'] and not module.params['old_password']:
                changed = False
                module.exit_json(changed=changed)
            if module.params['password'] and module.params['old_password']:
                if module.params['old_password'] and (module.params['password'] != module.params['old_password']):
                    try:
                        array.set_admin(module.params['name'], password=module.params['password'],
                                        old_password=module.params['old_password'])
                        passwd_changed = True
                    except Exception:
                        module.fail_json(msg='Local User {0}: Password reset failed. '
                                             'Check old password.'.format(module.params['name']))
                else:
                    module.fail_json(msg='Local User Account {0}: Password change failed - '
                                         'Check both old and new passwords'.format(module.params['name']))
            if module.params['api']:
                try:
                    # Replace any existing token with a freshly generated one.
                    if not array.get_api_token(module.params['name'])['api_token'] is None:
                        array.delete_api_token(module.params['name'])
                    user_token['user_api'] = array.create_api_token(module.params['name'])['api_token']
                    api_changed = True
                except Exception:
                    module.fail_json(msg='Local User {0}: API token change failed'.format(module.params['name']))
            # NOTE(review): when 'role' is omitted this compares None against
            # the current role and would call set_admin(role=None) -- confirm.
            if module.params['role'] != user['role']:
                try:
                    array.set_admin(module.params['name'], role=module.params['role'])
                    role_changed = True
                except Exception:
                    module.fail_json(msg='Local User {0}: Role changed failed'.format(module.params['name']))
        # NOTE(review): on the create path none of the *_changed flags are
        # set, so a successful creation reports changed=False -- verify.
        changed = bool(passwd_changed or role_changed or api_changed)
        module.exit_json(changed=changed, user_info=user_token)
+
+
def delete_user(module, array):
    """Delete a local user account.

    Reports changed=False when the account does not exist (previously the
    module always reported a change), and fails with a message that refers
    to local users rather than the copy-pasted "Object Store Account".
    """
    changed = True
    if not module.check_mode:
        if get_user(module, array):
            try:
                array.delete_admin(module.params['name'])
            except Exception:
                module.fail_json(msg='Local User Account {0}: Deletion failed'.format(module.params['name']))
        else:
            changed = False
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create, update or delete a local user account."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, type='str'),
        role=dict(type='str', choices=['readonly', 'storage_admin', 'array_admin']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        password=dict(type='str', no_log=True),
        old_password=dict(type='str', no_log=True),
        api=dict(type='bool', default=False),
    ))

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    array = get_system(module)
    api_version = array._list_available_rest_versions()
    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(msg='FlashArray REST version not supported. '
                             'Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))

    state = module.params['state']
    if state == 'absent':
        delete_user(module, array)
    elif state == 'present':
        create_user(module, array)
    else:
        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py
new file mode 100644
index 00000000..fce1bfc9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_vg
+version_added: '1.0.0'
+short_description: Manage volume groups on Pure Storage FlashArrays
+description:
+- Create, delete or modify volume groups on Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the volume group.
+ - Multi-volume-group support available from Purity//FA 6.0.0
+ B(***NOTE***) Manual deletion or eradication of individual volume groups created
+ using multi-volume-group will cause idempotency to fail
+ - Multi-volume-group support only exists for volume group creation
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the volume group should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ eradicate:
+ description:
+ - Define whether to eradicate the volume group on delete and leave in trash.
+ type : bool
+ default: 'no'
+ bw_qos:
+ description:
+ - Bandwidth limit for vgroup in M or G units.
+ M will set MB/s
+ G will set GB/s
+ To clear an existing QoS setting use 0 (zero)
+ type: str
+ iops_qos:
+ description:
+ - IOPs limit for vgroup - use value or K or M
+ K will mean 1000
+ M will mean 1000000
+ To clear an existing IOPs setting use 0 (zero)
+ type: str
+ count:
+ description:
+ - Number of volume groups to be created in a multiple volume group creation
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ start:
+ description:
+ - Number at which to start the multiple volume group creation index
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ default: 0
+ digits:
+ description:
+ - Number of digits to use for multiple volume group count. This
+ will pad the index number with zeros where necessary
+ - Only supported from Purity//FA v6.0.0 and higher
+ - Range is between 1 and 10
+ type: int
+ default: 1
+ suffix:
+ description:
+ - Suffix string, if required, for multiple volume group create
+ - Volume group names will be formed as I(<name>#I<suffix>), where
+ I(#) is a placeholder for the volume index
+ See associated descriptions
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create new volume group
+ purefa_vg:
+ name: foo
+ bw_qos: 50M
+ iops_qos: 100
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create 10 volume groups of pattern foo#bar with QoS
+ purefa_vg:
+ name: foo
+ suffix: bar
+ count: 10
+ start: 10
+ digits: 3
+ bw_qos: 50M
+ iops_qos: 100
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update volume group QoS limits
+ purefa_vg:
+ name: foo
+ bw_qos: 0
+ iops_qos: 5555
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Destroy volume group
+ purefa_vg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Recover deleted volume group
+ purefa_vg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Destroy and Eradicate volume group
+ purefa_vg:
+ name: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_array, get_system, purefa_argument_spec
+
+
+VGROUP_API_VERSION = '1.13'
+VG_IOPS_VERSION = '1.17'
+MULTI_VG_VERSION = '2.2'
+
+
def human_to_bytes(size):
    """Given a human-readable byte string (e.g. 2G, 30M),
    return the number of bytes.  Will return 0 if the argument has
    unexpected form.
    """
    # Binary multipliers keyed by the (upper-cased) unit suffix.
    multipliers = {'P': 1125899906842624,
                   'T': 1099511627776,
                   'G': 1073741824,
                   'M': 1048576,
                   'K': 1024}
    number, unit = size[:-1], size[-1].upper()
    if not number.isdigit():
        return 0
    # Unknown unit suffixes map to a multiplier of 0, matching the
    # documented "return 0 on unexpected form" contract.
    return int(number) * multipliers.get(unit, 0)
+
+
def human_to_real(iops):
    """Given a human-readable IOPs string (e.g. 2K, 30M),
    return the real number.  Will return 0 if the argument has
    unexpected form.
    """
    head, tail = iops[:-1], iops[-1].upper()
    if tail.isdigit():
        # All digits: pass the original string through unchanged.
        return iops
    if not head.isdigit():
        return 0
    value = int(head)
    if tail == 'M':
        return value * 1000000
    if tail == 'K':
        return value * 1000
    return 0
+
+
def get_multi_vgroups(module, destroyed=False):
    """Return True when every volume group of the multi-vgroup set exists."""
    params = module.params
    array = get_array(module)
    names = [
        params['name'] + str(idx).zfill(params['digits']) + params['suffix']
        for idx in range(params['start'], params['count'] + params['start'])
    ]
    return bool(array.get_volume_groups(names=names, destroyed=destroyed).status_code == 200)
+
+
def get_pending_vgroup(module, array):
    """Return the named destroyed-but-recoverable volume group, or None."""
    target = module.params['name']
    for candidate in array.list_vgroups(pending=True):
        if candidate["name"] == target and candidate['time_remaining']:
            return candidate
    return None
+
+
def get_vgroup(module, array):
    """Return the named live volume group, or None when absent."""
    target = module.params['name']
    for candidate in array.list_vgroups():
        if candidate["name"] == target:
            return candidate
    return None
+
+
def make_vgroup(module, array):
    """Create a volume group, optionally with bandwidth/IOPs QoS limits.

    QoS limits are only honoured when the REST API supports them
    (VG_IOPS_VERSION); otherwise a plain volume group is created.
    Exits the module directly via exit_json().
    """
    changed = True
    if not module.check_mode:
        api_version = array._list_available_rest_versions()
        # Parenthesised: the old form ``bw_qos or iops_qos and version``
        # parsed as ``bw_qos or (iops_qos and version)``, so a bw_qos alone
        # bypassed the API-version gate entirely.
        if (module.params['bw_qos'] or module.params['iops_qos']) and VG_IOPS_VERSION in api_version:
            if module.params['bw_qos'] and not module.params['iops_qos']:
                if int(human_to_bytes(module.params['bw_qos'])) in range(1048576, 549755813888):
                    try:
                        array.create_vgroup(module.params['name'],
                                            bandwidth_limit=module.params['bw_qos'])
                    except Exception:
                        module.fail_json(msg='Vgroup {0} creation failed.'.format(module.params['name']))
                else:
                    module.fail_json(msg='Bandwidth QoS value {0} out of range.'.format(module.params['bw_qos']))
            elif module.params['iops_qos'] and not module.params['bw_qos']:
                if int(human_to_real(module.params['iops_qos'])) in range(100, 100000000):
                    try:
                        array.create_vgroup(module.params['name'],
                                            iops_limit=module.params['iops_qos'])
                    except Exception:
                        module.fail_json(msg='Vgroup {0} creation failed.'.format(module.params['name']))
                else:
                    module.fail_json(msg='IOPs QoS value {0} out of range.'.format(module.params['iops_qos']))
            else:
                # Both limits supplied: validate each before creating.
                bw_qos_size = int(human_to_bytes(module.params['bw_qos']))
                if int(human_to_real(module.params['iops_qos'])) in range(100, 100000000) and bw_qos_size in range(1048576, 549755813888):
                    try:
                        array.create_vgroup(module.params['name'],
                                            iops_limit=module.params['iops_qos'],
                                            bandwidth_limit=module.params['bw_qos'])
                    except Exception:
                        module.fail_json(msg='Vgroup {0} creation failed.'.format(module.params['name']))
                else:
                    module.fail_json(msg='IOPs or Bandwidth QoS value out of range.')
        else:
            try:
                array.create_vgroup(module.params['name'])
            except Exception:
                module.fail_json(msg='creation of volume group {0} failed.'.format(module.params['name']))

    module.exit_json(changed=changed)
+
+
def make_multi_vgroups(module):
    """Create multiple Volume Groups.

    Builds the volume-group names from name/start/count/digits/suffix,
    validates any QoS limits, and creates them all in a single REST 2.x
    post_volume_groups() call.  Exits the module directly via exit_json().
    """
    changed = True
    if not module.check_mode:
        bw_qos_size = iops_qos_size = 0
        names = []
        array = get_array(module)
        for vg_num in range(module.params['start'], module.params['count'] + module.params['start']):
            # NOTE(review): 'suffix' has no default in the argument spec, so
            # an omitted suffix (None) would raise TypeError here -- confirm
            # callers always supply it for multi-vgroup creation.
            names.append(module.params['name'] + str(vg_num).zfill(module.params['digits']) + module.params['suffix'])
        if module.params['bw_qos']:
            bw_qos = int(human_to_bytes(module.params['bw_qos']))
            if bw_qos in range(1048576, 549755813888):
                bw_qos_size = bw_qos
            else:
                module.fail_json(msg='Bandwidth QoS value out of range.')
        if module.params['iops_qos']:
            iops_qos = int(human_to_real(module.params['iops_qos']))
            if iops_qos in range(100, 100000000):
                iops_qos_size = iops_qos
            else:
                module.fail_json(msg='IOPs QoS value out of range.')
        # Build the request body with whichever QoS limits were supplied
        # (0 means "not requested" here).
        if bw_qos_size != 0 and iops_qos_size != 0:
            volume_group = flasharray.VolumeGroupPost(qos=flasharray.Qos(bandwidth_limit=bw_qos_size,
                                                                         iops_limit=iops_qos_size))
        elif bw_qos_size == 0 and iops_qos_size == 0:
            volume_group = flasharray.VolumeGroupPost()
        elif bw_qos_size == 0 and iops_qos_size != 0:
            volume_group = flasharray.VolumeGroupPost(qos=flasharray.Qos(iops_limit=iops_qos_size))
        elif bw_qos_size != 0 and iops_qos_size == 0:
            volume_group = flasharray.VolumeGroupPost(qos=flasharray.Qos(bandwidth_limit=bw_qos_size))
        res = array.post_volume_groups(names=names, volume_group=volume_group)
        if res.status_code != 200:
            module.fail_json(msg='Multi-Vgroup {0}#{1} creation failed: {2}'.format(module.params['name'],
                                                                                    module.params['suffix'],
                                                                                    res.errors[0].message))
    module.exit_json(changed=changed)
+
+
def update_vgroup(module, array):
    """Update Volume Group QoS settings.

    Reads the current bandwidth/IOPs limits and applies any requested
    changes; a value of '0' clears the corresponding limit.  Exits the
    module directly via exit_json().
    """
    changed = True
    if not module.check_mode:
        api_version = array._list_available_rest_versions()
        if VG_IOPS_VERSION in api_version:
            try:
                vg_qos = array.get_vgroup(module.params['name'], qos=True)
            except Exception:
                module.fail_json(msg='Failed to get QoS settings for vgroup {0}.'.format(module.params['name']))
        if VG_IOPS_VERSION in api_version:
            # Normalise "no limit" (None) to 0 so the comparisons below work.
            if vg_qos['bandwidth_limit'] is None:
                vg_qos['bandwidth_limit'] = 0
            if vg_qos['iops_limit'] is None:
                vg_qos['iops_limit'] = 0
        if module.params['bw_qos'] and VG_IOPS_VERSION in api_version:
            if human_to_bytes(module.params['bw_qos']) != vg_qos['bandwidth_limit']:
                if module.params['bw_qos'] == '0':
                    try:
                        # NOTE(review): this calls set_volume() with a vgroup
                        # name -- confirm it should not be set_vgroup().
                        array.set_volume(module.params['name'], bandwidth_limit='')
                    except Exception:
                        module.fail_json(msg='Vgroup {0} Bandwidth QoS removal failed.'.format(module.params['name']))
                elif int(human_to_bytes(module.params['bw_qos'])) in range(1048576, 549755813888):
                    try:
                        array.set_volume(module.params['name'], bandwidth_limit=module.params['bw_qos'])
                    except Exception:
                        module.fail_json(msg='Vgroup {0} Bandwidth QoS change failed.'.format(module.params['name']))
                else:
                    module.fail_json(msg='Bandwidth QoS value {0} out of range.'.format(module.params['bw_qos']))
        if module.params['iops_qos'] and VG_IOPS_VERSION in api_version:
            if human_to_real(module.params['iops_qos']) != vg_qos['iops_limit']:
                if module.params['iops_qos'] == '0':
                    try:
                        array.set_volume(module.params['name'], iops_limit='')
                    except Exception:
                        module.fail_json(msg='Vgroup {0} IOPs QoS removal failed.'.format(module.params['name']))
                elif int(human_to_real(module.params['iops_qos'])) in range(100, 100000000):
                    try:
                        array.set_volume(module.params['name'], iops_limit=module.params['iops_qos'])
                    except Exception:
                        module.fail_json(msg='Vgroup {0} IOPs QoS change failed.'.format(module.params['name']))
                else:
                    # Fixed copy-paste: this is the IOPs branch, so report the
                    # IOPs value, not the bandwidth one.
                    module.fail_json(msg='IOPs QoS value {0} out of range.'.format(module.params['iops_qos']))

    module.exit_json(changed=changed)
+
+
def recover_vgroup(module, array):
    """Recover a destroyed (pending-eradication) volume group."""
    changed = True
    if not module.check_mode:
        name = module.params['name']
        try:
            array.recover_vgroup(name)
        except Exception:
            module.fail_json(msg='Recovery of volume group {0} failed.'.format(name))

    module.exit_json(changed=changed)
+
+
def eradicate_vgroup(module, array):
    """Permanently eradicate a destroyed volume group."""
    changed = True
    if not module.check_mode:
        name = module.params['name']
        try:
            array.eradicate_vgroup(name)
        except Exception:
            module.fail_json(msg='Eradicating vgroup {0} failed.'.format(name))
    module.exit_json(changed=changed)
+
+
def delete_vgroup(module, array):
    """Destroy a volume group; optionally eradicate it afterwards."""
    changed = True
    if not module.check_mode:
        name = module.params['name']
        try:
            array.destroy_vgroup(name)
        except Exception:
            module.fail_json(msg='Deleting vgroup {0} failed.'.format(name))
        if module.params['eradicate']:
            eradicate_vgroup(module, array)

    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create/update/delete single or multiple volume groups."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        bw_qos=dict(type='str'),
        iops_qos=dict(type='str'),
        count=dict(type='int'),
        start=dict(type='int', default=0),
        digits=dict(type='int', default=1),
        suffix=dict(type='str'),
        eradicate=dict(type='bool', default=False),
    ))

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    state = module.params['state']
    array = get_system(module)
    api_version = array._list_available_rest_versions()
    if VGROUP_API_VERSION not in api_version:
        module.fail_json(msg='API version does not support volume groups.')

    vgroup = get_vgroup(module, array)
    xvgroup = get_pending_vgroup(module, array)

    if module.params['count']:
        if not HAS_PURESTORAGE:
            module.fail_json(msg='py-pure-client sdk is required to support \'count\' parameter')
        if MULTI_VG_VERSION not in api_version:
            module.fail_json(msg='\'count\' parameter is not supported until Purity//FA 6.0.0 or higher')
        # range(1, 11) so the documented maximum of 10 digits is accepted;
        # the previous range(1, 10) wrongly rejected digits=10.
        if module.params['digits'] and module.params['digits'] not in range(1, 11):
            module.fail_json(msg='\'digits\' must be in the range of 1 to 10')
        if module.params['start'] < 0:
            module.fail_json(msg='\'start\' must be a positive number')
        vgroup = get_multi_vgroups(module)
        if state == 'present' and not vgroup:
            make_multi_vgroups(module)
        elif state == 'absent' and not vgroup:
            module.exit_json(changed=False)
        else:
            module.warn('Method not yet supported for multi-vgroup')
    else:
        # Each handler exits the module itself via exit_json().
        if xvgroup and state == 'present':
            recover_vgroup(module, array)
        elif vgroup and state == 'absent':
            delete_vgroup(module, array)
        elif xvgroup and state == 'absent' and module.params['eradicate']:
            eradicate_vgroup(module, array)
        elif not vgroup and not xvgroup and state == 'present':
            make_vgroup(module, array)
        elif vgroup and state == 'present':
            update_vgroup(module, array)
        elif vgroup is None and state == 'absent':
            module.exit_json(changed=False)

    module.exit_json(changed=False)
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py
new file mode 100644
index 00000000..87b3bd85
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: purefa_vlan
+version_added: '1.0.0'
+short_description: Manage network VLAN interfaces in a Pure Storage FlashArray
+description:
+ - This module manages the VLAN network interfaces on a Pure Storage FlashArray.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
    - Interface name, including controller identifier.
+ - VLANs are only supported on iSCSI physical interfaces
+ required: true
+ type: str
+ state:
+ description:
+ - State of existing interface (on/off).
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ enabled:
+ description:
+ - Define if VLAN interface is enabled or not.
+ required: false
+ default: true
+ type: bool
+ address:
+ description:
+ - IPv4 or IPv6 address of interface.
+ required: false
+ type: str
+ subnet:
+ description:
+ - Name of subnet interface associated with.
+ required: true
+ type: str
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = '''
+- name: Configure and enable VLAN interface ct0.eth8 for subnet test
+ purefa_vlan:
+ name: ct0.eth8
+ subnet: test
+ address: 10.21.200.18
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Disable VLAN interface for subnet test on ct1.eth2
+ purefa_vlan:
+ name: ct1.eth2
+ subnet: test
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
- name: Delete VLAN interface for subnet test on ct0.eth4
+ purefa_vlan:
+ name: ct0.eth4
+ subnet: test
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+def _get_subnet(module, array):
+ """Return subnet or None"""
+ subnet = {}
+ try:
+ subnet = array.get_subnet(module.params['subnet'])
+ except Exception:
+ return None
+ return subnet
+
+
+def _get_interface(module, array):
+ """Return Interface or None"""
+ interface = {}
+ if 'ct' in module.params['name']:
+ try:
+ interfaces = array.list_network_interfaces()
+ except Exception:
+ return None
+ for ints in range(0, len(interfaces)):
+ if interfaces[ints]['name'] == module.params['name']:
+ interface = interfaces[ints]
+ break
+ return interface
+
+
+def _get_vif(array, interface, subnet):
+ """Return VLAN Interface or None"""
+ vif_info = {}
+ vif_name = interface['name'] + "." + str(subnet['vlan'])
+ try:
+ interfaces = array.list_network_interfaces()
+ except Exception:
+ return None
+ for ints in range(0, len(interfaces)):
+ if interfaces[ints]['name'] == vif_name:
+ vif_info = interfaces[ints]
+ break
+ return vif_info
+
+
def create_vif(module, array, interface, subnet):
    """Create a VLAN interface on *interface* for *subnet*, honouring check mode."""
    changed = True
    if not module.check_mode:
        vif_name = interface['name'] + "." + str(subnet['vlan'])
        address = module.params['address']
        # The address keyword is only passed when an address was requested.
        try:
            if address:
                array.create_vlan_interface(vif_name, module.params['subnet'], address=address)
            else:
                array.create_vlan_interface(vif_name, module.params['subnet'])
        except Exception:
            module.fail_json(msg="Failed to create VLAN interface {0}.".format(vif_name))
        # New interfaces come up enabled; disable immediately if asked to.
        if not module.params['enabled']:
            try:
                array.set_network_interface(vif_name, enabled=False)
            except Exception:
                module.fail_json(msg="Failed to disable VLAN interface {0} on creation.".format(vif_name))
    module.exit_json(changed=changed)
+
+
def update_vif(module, array, interface, subnet):
    """Bring an existing VLAN interface's address and enabled state in line
    with the requested parameters.

    Exits the module via exit_json; ``changed`` reflects whether any
    modification was actually made (always True in check mode, matching the
    module's original behaviour).
    """
    changed = True
    if not module.check_mode:
        changed = False
        vif_info = _get_vif(array, interface, subnet)
        vif_name = vif_info['name']
        if module.params['address']:
            if module.params['address'] != vif_info['address']:
                try:
                    array.set_network_interface(vif_name, address=module.params['address'])
                    changed = True
                except Exception:
                    # BUG FIX: previously formatted the subnet dict into this
                    # message instead of the interface name.
                    module.fail_json(msg="Failed to change IP address for VLAN interface {0}.".format(vif_name))

        if module.params['enabled'] != vif_info['enabled']:
            if module.params['enabled']:
                try:
                    array.set_network_interface(vif_name, enabled=True)
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to enable VLAN interface {0}.".format(vif_name))
            else:
                try:
                    array.set_network_interface(vif_name, enabled=False)
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to disable VLAN interface {0}.".format(vif_name))

    module.exit_json(changed=changed)
+
+
def delete_vif(module, array, subnet):
    """Delete the VLAN interface belonging to *subnet* on the named interface.

    Exits the module via exit_json (changed=True) or fail_json on API error.
    """
    changed = True
    if not module.check_mode:
        vif_name = module.params['name'] + "." + str(subnet['vlan'])
        try:
            array.delete_vlan_interface(vif_name)
        except Exception:
            # BUG FIX: corrected "inerface" typo in the user-facing message.
            module.fail_json(msg="Failed to delete VLAN interface {0}".format(vif_name))
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: ensure the requested VLAN interface state.

    Validates the subnet and physical interface, then dispatches to
    create/update/delete helpers (each of which exits the module).
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            subnet=dict(type='str', required=True),
            enabled=dict(type='bool', default=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            address=dict(type='str'),
        )
    )

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    state = module.params['state']
    array = get_system(module)
    subnet = _get_subnet(module, array)
    interface = _get_interface(module, array)
    if not subnet:
        module.fail_json(msg="Invalid subnet specified.")
    if not interface:
        module.fail_json(msg="Invalid interface specified.")
    if 'iscsi' not in interface['services']:
        module.fail_json(msg="Invalid interface specified - must have the iSCSI service enabled.")
    # BUG FIX: vif must be initialised here; previously it was only assigned
    # inside the "if subnet['vlan']" branch, causing a NameError for subnets
    # without a VLAN id.
    vif = False
    if subnet['vlan']:
        vif_name = module.params['name'] + "." + str(subnet['vlan'])
        vif = bool(vif_name in subnet['interfaces'])

    if state == 'present' and not vif:
        create_vif(module, array, interface, subnet)
    elif state == 'present' and vif:
        update_vif(module, array, interface, subnet)
    elif state == 'absent' and vif:
        delete_vif(module, array, subnet)

    module.exit_json(changed=False)
+
+
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py
new file mode 100644
index 00000000..d4daa968
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_vnc
+version_added: '1.0.0'
+short_description: Enable or Disable VNC port for installed apps
+description:
- Enable or disable VNC access for installed apps
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of VNC
+ type: str
+ default: present
+ choices: [ present, absent ]
+ name:
+ description:
    - Name of app
+ type: str
+ required: True
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Enable VNC for application test
+ purefa_vnc:
+ name: test
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable VNC for application test
+ purefa_vnc:
+ name: test
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+vnc:
+ description: VNC port information for application
+ type: dict
+ returned: success
+ contains:
+ status:
+ description: Status of application
+ type: str
+ sample: 'healthy'
+ index:
+ description: Application index number
+ type: int
+ version:
+ description: Application version installed
+ type: str
+ sample: '5.2.1'
+ vnc:
+ description: IP address and port number for VNC connection
+ type: dict
+ sample: ['10.21.200.34:5900']
+ name:
+ description: Application name
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+MIN_REQUIRED_API_VERSION = "1.17"
+
+
def enable_vnc(module, array, app):
    """Enable VNC port"""
    # Idempotent: only acts (and reports changed) when VNC is currently off.
    changed = False
    vnc_fact = []
    if not app['vnc_enabled']:
        try:
            if not module.check_mode:
                array.enable_app_vnc(module.params['name'])
                # Fetch node details (address/port) to report back to the caller.
                vnc_fact = array.get_app_node(module.params['name'])
                changed = True
            # NOTE(review): in check mode this reports changed=False even when
            # enabling would occur on a real run - confirm intended.
        except Exception:
            module.fail_json(msg='Enabling VNC for {0} failed'.format(module.params['name']))
    module.exit_json(changed=changed, vnc=vnc_fact)
+
+
def disable_vnc(module, array, app):
    """Disable VNC port"""
    # Idempotent: only acts (and reports changed) when VNC is currently on.
    changed = False
    if app['vnc_enabled']:
        try:
            if not module.check_mode:
                array.disable_app_vnc(module.params['name'])
                changed = True
            # NOTE(review): in check mode this reports changed=False even when
            # disabling would occur on a real run - confirm intended.
        except Exception:
            module.fail_json(msg='Disabling VNC for {0} failed'.format(module.params['name']))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: enable or disable the VNC port for the selected app."""
    spec = purefa_argument_spec()
    spec.update(dict(
        state=dict(type='str', default='present', choices=['present', 'absent']),
        name=dict(type='str', required=True),
    ))

    module = AnsibleModule(spec,
                           supports_check_mode=True)

    array = get_system(module)
    api_version = array._list_available_rest_versions()

    # VNC control only exists from REST 1.17 onwards.
    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(msg='FlashArray REST version not supported. '
                             'Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
    try:
        app = array.get_app(module.params['name'])
    except Exception:
        module.fail_json(msg='Selected application {0} does not exist'.format(module.params['name']))
    if not app['enabled']:
        module.fail_json(msg='Application {0} is not enabled'.format(module.params['name']))
    # Each handler exits the module itself.
    action = enable_vnc if module.params['state'] == 'present' else disable_vnc
    action(module, array, app)
    module.exit_json(changed=False)
+
+
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
new file mode 100644
index 00000000..44e85043
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
@@ -0,0 +1,913 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_volume
+version_added: '1.0.0'
+short_description: Manage volumes on Pure Storage FlashArrays
+description:
+- Create, delete or extend the capacity of a volume on Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the volume.
+ - Volume could be created in a POD with this syntax POD_NAME::VOLUME_NAME.
+ - Volume could be created in a volume group with this syntax VG_NAME/VOLUME_NAME.
+ - Multi-volume support available from Purity//FA 6.0.0
+ B(***NOTE***) Manual deletion or eradication of individual volumes created
+ using multi-volume will cause idempotency to fail
+ - Multi-volume support only exists for volume creation
+ type: str
+ required: true
+ target:
+ description:
+ - The name of the target volume, if copying.
+ type: str
+ state:
+ description:
+ - Define whether the volume should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the volume on delete or leave in trash.
+ type: bool
+ default: 'no'
+ overwrite:
+ description:
    - Define whether to overwrite a target volume if it already exists.
+ type: bool
+ default: 'no'
+ size:
+ description:
+ - Volume size in M, G, T or P units.
+ type: str
+ count:
+ description:
+ - Number of volumes to be created in a multiple volume creation
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ start:
+ description:
+ - Number at which to start the multiple volume creation index
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ default: 0
+ digits:
+ description:
+ - Number of digits to use for multiple volume count. This
+ will pad the index number with zeros where necessary
+ - Only supported from Purity//FA v6.0.0 and higher
+ - Range is between 1 and 10
+ type: int
+ default: 1
+ suffix:
+ description:
+ - Suffix string, if required, for multiple volume create
+ - Volume names will be formed as I(<name>#I<suffix>), where
+ I(#) is a placeholder for the volume index
+ See associated descriptions
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: str
+ bw_qos:
+ description:
+ - Bandwidth limit for volume in M or G units.
+ M will set MB/s
+ G will set GB/s
+ To clear an existing QoS setting use 0 (zero)
+ type: str
+ aliases: [ qos ]
+ iops_qos:
+ description:
+ - IOPs limit for volume - use value or K or M
+ K will mean 1000
+ M will mean 1000000
+ To clear an existing IOPs setting use 0 (zero)
+ type: str
+ move:
+ description:
+ - Move a volume in and out of a pod or vgroup
+ - Provide the name of pod or vgroup to move the volume to
+ - Pod and Vgroup names must be unique in the array
+ - To move to the local array, specify C(local)
+ - This is not idempotent - use C(ignore_errors) in the play
+ type: str
+ rename:
+ description:
+ - Value to rename the specified volume to.
+ - Rename only applies to the container the current volumes is in.
+ - There is no requirement to specify the pod or vgroup name as this is implied.
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create new volume named foo with a QoS limit
+ purefa_volume:
+ name: foo
+ size: 1T
+ bw_qos: 58M
+ iops_qos: 23K
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create new volume named foo in pod bar
+ purefa_volume:
+ name: bar::foo
+ size: 1T
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create 10 volumes with index starting at 10 but padded with 3 digits
+ purefa_volume:
+ name: foo
+ size: 1T
+ suffix: bar
+ count: 10
+ start: 10
+ digits: 3
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Extend the size of an existing volume named foo
+ purefa_volume:
+ name: foo
+ size: 2T
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate volume named foo
+ purefa_volume:
+ name: foo
+ eradicate: yes
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Create clone of volume bar named foo
+ purefa_volume:
+ name: foo
+ target: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Overwrite volume bar with volume foo
+ purefa_volume:
+ name: foo
+ target: bar
+ overwrite: yes
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Clear volume QoS from volume foo
+ purefa_volume:
+ name: foo
+ bw_qos: 0
+ iops_qos: 0
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Move local volume foo from local array to pod bar
+ purefa_volume:
+ name: foo
+ move: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Move volume foo in pod bar to local array
+ purefa_volume:
+ name: bar::foo
+ move: local
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Move volume foo in pod bar to vgroup fin
+ purefa_volume:
+ name: bar::foo
+ move: fin
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+volume:
+ description: A dictionary describing the changed volume. Only some
+ attributes below will be returned with various actions.
+ type: dict
+ returned: success
+ contains:
+ source:
+ description: Volume name of source volume used for volume copy
+ type: str
+ serial:
+ description: Volume serial number
+ type: str
+ sample: '361019ECACE43D83000120A4'
+ created:
+ description: Volume creation time
+ type: str
+ sample: '2019-03-13T22:49:24Z'
+ name:
+ description: Volume name
+ type: str
+ size:
+ description: Volume size in bytes
+ type: int
+ bandwidth_limit:
+ description: Volume bandwidth limit in bytes/sec
+ type: int
+ iops_limit:
+ description: Volume IOPs limit
+ type: int
+'''
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_array, get_system, purefa_argument_spec
+
+
+QOS_API_VERSION = "1.14"
+VGROUPS_API_VERSION = "1.13"
+POD_API_VERSION = "1.13"
+AC_QOS_VERSION = "1.16"
+IOPS_API_VERSION = "1.17"
+MULTI_VOLUME_VERSION = "2.2"
+
+
def human_to_bytes(size):
    """Given a human-readable byte string (e.g. 2G, 30M),
    return the number of bytes.  Returns 0 if the argument has
    unexpected form (non-numeric prefix or unknown unit suffix).
    """
    # Binary multipliers: K=2**10 ... P=2**50.  An unknown unit maps to 0,
    # reproducing the original "return 0 on unexpected form" contract.
    multipliers = {
        'P': 1125899906842624,
        'T': 1099511627776,
        'G': 1073741824,
        'M': 1048576,
        'K': 1024,
    }
    digits = size[:-1]
    unit = size[-1].upper()
    if not digits.isdigit():
        return 0
    return int(digits) * multipliers.get(unit, 0)
+
+
def human_to_real(iops):
    """Given a human-readable IOPs string (e.g. 2K, 30M),
    return the real number.  Returns 0 if the argument has
    unexpected form.

    Quirk preserved from the original: an all-digit input is returned
    unchanged as a *string* - callers compare/convert it themselves.
    """
    prefix = iops[:-1]
    unit = iops[-1].upper()
    if unit.isdigit():
        # Whole string is numeric: hand it back untouched (still a str).
        return iops
    if not prefix.isdigit():
        return 0
    value = int(prefix)
    if unit == 'M':
        return value * 1000000
    if unit == 'K':
        return value * 1000
    return 0
+
+
def get_multi_volumes(module, destroyed=False):
    """Return True if every volume of the multi-volume range exists (or is
    destroyed, when *destroyed* is True)."""
    array = get_array(module)
    first = module.params['start']
    pad = module.params['digits']
    suffix = module.params['suffix']
    names = [module.params['name'] + str(index).zfill(pad) + suffix
             for index in range(first, first + module.params['count'])]
    # A 200 means the whole batch resolved on the array.
    return array.get_volumes(names=names, destroyed=destroyed).status_code == 200
+
+
def get_volume(module, array):
    """Return the volume dict for params['name'], or None when absent."""
    try:
        return array.get_volume(module.params['name'])
    except Exception:
        return None
+
+
def get_endpoint(name, array):
    """Return the protocol-endpoint object for *name*, or None when the
    lookup fails."""
    try:
        return array.get_volume(name, pending=True, protocol_endpoint=True)
    except Exception:
        return None
+
+
def get_destroyed_volume(vol, array):
    """True when *vol* is in the destroyed-but-not-eradicated state.

    Returns False (not None) on any lookup error.
    """
    try:
        # A non-empty time_remaining marks a pending-eradication volume.
        return array.get_volume(vol, pending=True)['time_remaining'] != ''
    except Exception:
        return False
+
+
def get_destroyed_endpoint(vol, array):
    """True when *vol* is a destroyed (pending-eradication) protocol endpoint.

    Returns False (not None) on any lookup error.
    """
    try:
        return array.get_volume(vol, protocol_endpoint=True, pending=True)['time_remaining'] != ''
    except Exception:
        return False
+
+
def get_target(module, array):
    """Return the volume dict for params['target'], or None when absent."""
    try:
        return array.get_volume(module.params['target'])
    except Exception:
        return None
+
+
def check_vgroup(module, array):
    """Return True when the vgroup encoded in the volume name ("vg/vol")
    exists on the array; fail the module on unsupported Purity or API error."""
    vg_exists = False
    api_version = array._list_available_rest_versions()
    if VGROUPS_API_VERSION in api_version:
        vg_name = module.params["name"].split("/")[0]
        try:
            vgs = array.list_vgroups()
        except Exception:
            module.fail_json(msg="Failed to get volume groups list. Check array.")
        vg_exists = any(vgrp['name'] == vg_name for vgrp in vgs)
    else:
        module.fail_json(msg="VG volumes are not supported. Please upgrade your FlashArray.")
    return vg_exists
+
+
def check_pod(module, array):
    """Return True when the pod encoded in the volume name ("pod::vol")
    exists on the array; fail the module on unsupported Purity or API error."""
    pod_exists = False
    api_version = array._list_available_rest_versions()
    if POD_API_VERSION in api_version:
        pod_name = module.params["name"].split("::")[0]
        try:
            pods = array.list_pods()
        except Exception:
            module.fail_json(msg="Failed to get pod list. Check array.")
        pod_exists = any(pod['name'] == pod_name for pod in pods)
    else:
        module.fail_json(msg="Pod volumes are not supported. Please upgrade your FlashArray.")
    return pod_exists
+
+
def create_volume(module, array):
    """Create Volume

    Creates a new volume (plain, vgroup "vg/vol" or pod "pod::vol"),
    optionally applying bandwidth and/or IOPs QoS limits, then exits the
    module via exit_json/fail_json.
    """
    changed = True
    volfact = []
    if not module.check_mode:
        api_version = array._list_available_rest_versions()
        # A "/" in the name means a vgroup volume - the vgroup must exist.
        if "/" in module.params['name'] and not check_vgroup(module, array):
            module.fail_json(msg="Failed to create volume {0}. Volume Group does not exist.".format(module.params["name"]))
        # A "::" in the name means a pod volume - the pod must exist and be writable.
        if "::" in module.params['name']:
            if not check_pod(module, array):
                module.fail_json(msg="Failed to create volume {0}. Pod does not exist".format(module.params["name"]))
            pod_name = module.params["name"].split("::")[0]
            if array.get_pod(pod_name)['promotion_status'] == 'demoted':
                module.fail_json(msg='Volume cannot be created in a demoted pod')
            if module.params['bw_qos'] or module.params['iops_qos']:
                # Older Purity cannot set QoS on pod volumes: drop the request.
                if AC_QOS_VERSION not in api_version:
                    module.warn("Pods cannot cannot contain volumes with QoS settings. Ignoring...")
                    module.params['bw_qos'] = module.params['iops_qos'] = None
        if not module.params['size']:
            module.fail_json(msg='Size for a new volume must be specified')
        if module.params['bw_qos'] or module.params['iops_qos']:
            if module.params['bw_qos'] and QOS_API_VERSION in api_version:
                if module.params['iops_qos'] and IOPS_API_VERSION in api_version:
                    # NOTE(review): with both outer conditions true the first two
                    # branches below can never fire; only the final (both-limits)
                    # else actually runs - confirm against upstream history.
                    if module.params['bw_qos'] and not module.params['iops_qos']:
                        # Bandwidth limit valid range: 1MB/s .. 512GB/s.
                        if int(human_to_bytes(module.params['bw_qos'])) in range(1048576, 549755813888):
                            try:
                                volfact = array.create_volume(module.params['name'],
                                                              module.params['size'],
                                                              bandwidth_limit=module.params['bw_qos'])
                            except Exception:
                                module.fail_json(msg='Volume {0} creation failed.'.format(module.params['name']))
                        else:
                            module.fail_json(msg='Bandwidth QoS value {0} out of range.'.format(module.params['bw_qos']))
                    elif module.params['iops_qos'] and not module.params['bw_qos']:
                        # IOPs limit valid range: 100 .. 100M.
                        if 100000000 >= int(human_to_real(module.params['iops_qos'])) >= 100:
                            try:
                                volfact = array.create_volume(module.params['name'],
                                                              module.params['size'],
                                                              iops_limit=module.params['iops_qos'])
                            except Exception:
                                module.fail_json(msg='Volume {0} creation failed.'.format(module.params['name']))
                        else:
                            module.fail_json(msg='IOPs QoS value {0} out of range.'.format(module.params['iops_qos']))
                    else:
                        # Both limits supplied: validate and apply together.
                        bw_qos_size = int(human_to_bytes(module.params['bw_qos']))
                        if int(human_to_real(module.params['iops_qos'])) in range(100, 100000000) and bw_qos_size in range(1048576, 549755813888):
                            try:
                                volfact = array.create_volume(module.params['name'],
                                                              module.params['size'],
                                                              iops_limit=module.params['iops_qos'],
                                                              bandwidth_limit=module.params['bw_qos'])
                            except Exception:
                                module.fail_json(msg='Volume {0} creation failed.'.format(module.params['name']))
                        else:
                            module.fail_json(msg='IOPs or Bandwidth QoS value out of range.')
                else:
                    # IOPs QoS absent or unsupported: bandwidth-only path.
                    if module.params['bw_qos']:
                        if int(human_to_bytes(module.params['bw_qos'])) in range(1048576, 549755813888):
                            try:
                                volfact = array.create_volume(module.params['name'],
                                                              module.params['size'],
                                                              bandwidth_limit=module.params['bw_qos'])
                            except Exception:
                                module.fail_json(msg='Volume {0} creation failed.'.format(module.params['name']))
                        else:
                            module.fail_json(msg='Bandwidth QoS value {0} out of range.'.format(module.params['bw_qos']))
                    else:
                        try:
                            volfact = array.create_volume(module.params['name'], module.params['size'])
                        except Exception:
                            module.fail_json(msg='Volume {0} creation failed.'.format(module.params['name']))
        else:
            # No QoS requested: plain creation.
            try:
                volfact = array.create_volume(module.params['name'], module.params['size'])
            except Exception:
                module.fail_json(msg='Volume {0} creation failed.'.format(module.params['name']))

    module.exit_json(changed=changed, volume=volfact)
+
+
def create_multi_volume(module, array):
    """Create Volume

    Creates a batch of volumes (name + zero-padded index + suffix) in one
    REST 2.x call via the py-pure-client SDK, optionally applying shared
    QoS limits, and exits with a per-volume fact dict.
    """
    changed = True
    volfact = {}
    if not module.check_mode:
        bw_qos_size = iops_qos_size = 0
        names = []
        if "/" in module.params['name'] and not check_vgroup(module, array):
            module.fail_json(msg="Multi-volume create failed. Volume Group {0} does not exist.".format(module.params["name"].split('/')[0]))
        if "::" in module.params['name']:
            if not check_pod(module, array):
                module.fail_json(msg="Multi-volume create failed. Pod {0} does not exist".format(module.params["name"].split(':')[0]))
            pod_name = module.params["name"].split("::")[0]
            if array.get_pod(pod_name)['promotion_status'] == 'demoted':
                module.fail_json(msg='Volume cannot be created in a demoted pod')
        # Switch from the 1.x REST client to the 2.x (py-pure-client) session.
        array = get_array(module)
        for vol_num in range(module.params['start'], module.params['count'] + module.params['start']):
            names.append(module.params['name'] + str(vol_num).zfill(module.params['digits']) + module.params['suffix'])
        if module.params['bw_qos']:
            bw_qos = int(human_to_bytes(module.params['bw_qos']))
            # Bandwidth limit valid range: 1MB/s .. 512GB/s.
            if bw_qos in range(1048576, 549755813888):
                bw_qos_size = bw_qos
            else:
                module.fail_json(msg='Bandwidth QoS value out of range.')
        if module.params['iops_qos']:
            iops_qos = int(human_to_real(module.params['iops_qos']))
            # IOPs limit valid range: 100 .. 100M.
            if iops_qos in range(100, 100000000):
                iops_qos_size = iops_qos
            else:
                module.fail_json(msg='IOPs QoS value out of range.')
        # Build the VolumePost payload with whichever QoS limits were requested.
        if bw_qos_size != 0 and iops_qos_size != 0:
            vols = flasharray.VolumePost(provisioned=human_to_bytes(module.params['size']),
                                         qos=flasharray.Qos(bandwidth_limit=bw_qos_size,
                                                            iops_limit=iops_qos_size),
                                         subtype='regular')
        elif bw_qos_size == 0 and iops_qos_size == 0:
            vols = flasharray.VolumePost(provisioned=human_to_bytes(module.params['size']),
                                         subtype='regular')
        elif bw_qos_size == 0 and iops_qos_size != 0:
            vols = flasharray.VolumePost(provisioned=human_to_bytes(module.params['size']),
                                         qos=flasharray.Qos(iops_limit=iops_qos_size),
                                         subtype='regular')
        elif bw_qos_size != 0 and iops_qos_size == 0:
            vols = flasharray.VolumePost(provisioned=human_to_bytes(module.params['size']),
                                         qos=flasharray.Qos(bandwidth_limit=bw_qos_size),
                                         subtype='regular')
        res = array.post_volumes(names=names, volume=vols)
        if res.status_code != 200:
            module.fail_json(msg='Multi-Volume {0}#{1} creation failed: {2}'.format(module.params['name'],
                                                                                    module.params['suffix'],
                                                                                    res.errors[0].message))
        else:
            # Collect per-volume facts; created is epoch milliseconds.
            temp = list(res.items)
            for count in range(0, len(temp)):
                vol_name = temp[count].name
                volfact[vol_name] = {
                    'size': temp[count].provisioned,
                    'serial': temp[count].serial,
                    'created': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(temp[count].created / 1000)),
                }
                if bw_qos_size != 0:
                    volfact[vol_name]['bandwidth_limit'] = temp[count].qos.bandwidth_limit
                if iops_qos_size != 0:
                    volfact[vol_name]['iops_limit'] = temp[count].qos.iops_limit

    module.exit_json(changed=changed, volfact=volfact)
+
+
def copy_from_volume(module, array):
    """Clone params['name'] onto params['target'].

    If the target already exists it is only overwritten when the
    ``overwrite`` parameter is set; otherwise nothing is copied (but the
    module still exits changed=True, matching the original behaviour).
    """
    changed = True
    volfact = []
    if not module.check_mode:
        existing_target = get_target(module, array)

        if existing_target is None:
            try:
                volfact = array.copy_volume(module.params['name'],
                                            module.params['target'])
            except Exception:
                module.fail_json(msg='Copy volume {0} to volume {1} failed.'.format(module.params['name'],
                                                                                    module.params['target']))
        elif module.params['overwrite']:
            try:
                volfact = array.copy_volume(module.params['name'],
                                            module.params['target'],
                                            overwrite=module.params['overwrite'])
            except Exception:
                module.fail_json(msg='Copy volume {0} to volume {1} failed.'.format(module.params['name'],
                                                                                    module.params['target']))

    module.exit_json(changed=changed, volume=volfact)
+
+
def update_volume(module, array):
    """Update Volume size and/or QoS.

    Grows the volume when the requested size is larger (never shrinks) and
    adjusts/clears bandwidth and IOPs limits.  Exits via exit_json with
    ``changed`` reflecting whether any modification was made.
    """
    changed = True
    volfact = []
    if not module.check_mode:
        change = False
        api_version = array._list_available_rest_versions()
        vol = array.get_volume(module.params['name'])
        vol_qos = array.get_volume(module.params['name'], qos=True)
        # Normalise "no limit" (None) to 0 so the comparisons below work.
        if QOS_API_VERSION in api_version:
            if vol_qos['bandwidth_limit'] is None:
                vol_qos['bandwidth_limit'] = 0
        if IOPS_API_VERSION in api_version:
            if vol_qos['iops_limit'] is None:
                vol_qos['iops_limit'] = 0
        if "::" in module.params['name']:
            if module.params['bw_qos'] or module.params['iops_qos']:
                # Older Purity cannot set QoS on pod volumes: drop the request.
                if AC_QOS_VERSION not in api_version:
                    module.warn("Pods cannot cannot contain volumes with QoS settings. Ignoring...")
                    module.params['bw_qos'] = module.params['iops_qos'] = None
        if module.params['size']:
            if human_to_bytes(module.params['size']) != vol['size']:
                # Only ever grow; a smaller request is silently ignored.
                if human_to_bytes(module.params['size']) > vol['size']:
                    try:
                        volfact = array.extend_volume(module.params['name'], module.params['size'])
                        change = True
                    except Exception:
                        module.fail_json(msg='Volume {0} resize failed.'.format(module.params['name']))
        if module.params['bw_qos'] and QOS_API_VERSION in api_version:
            if human_to_bytes(module.params['bw_qos']) != vol_qos['bandwidth_limit']:
                if module.params['bw_qos'] == '0':
                    # Zero clears the bandwidth limit entirely.
                    try:
                        volfact = array.set_volume(module.params['name'], bandwidth_limit='')
                        change = True
                    except Exception:
                        module.fail_json(msg='Volume {0} Bandwidth QoS removal failed.'.format(module.params['name']))
                elif int(human_to_bytes(module.params['bw_qos'])) in range(1048576, 549755813888):
                    try:
                        volfact = array.set_volume(module.params['name'],
                                                   bandwidth_limit=module.params['bw_qos'])
                        change = True
                    except Exception:
                        module.fail_json(msg='Volume {0} Bandwidth QoS change failed.'.format(module.params['name']))
                else:
                    module.fail_json(msg='Bandwidth QoS value {0} out of range.'.format(module.params['bw_qos']))
        if module.params['iops_qos'] and IOPS_API_VERSION in api_version:
            if human_to_real(module.params['iops_qos']) != vol_qos['iops_limit']:
                if module.params['iops_qos'] == '0':
                    # Zero clears the IOPs limit entirely.
                    try:
                        volfact = array.set_volume(module.params['name'], iops_limit='')
                        change = True
                    except Exception:
                        module.fail_json(msg='Volume {0} IOPs QoS removal failed.'.format(module.params['name']))
                elif int(human_to_real(module.params['iops_qos'])) in range(100, 100000000):
                    try:
                        volfact = array.set_volume(module.params['name'],
                                                   iops_limit=module.params['iops_qos'])
                        change = True
                    except Exception:
                        module.fail_json(msg='Volume {0} IOPs QoS change failed.'.format(module.params['name']))
                else:
                    # BUG FIX: this message previously reported the *bandwidth*
                    # value/label for an out-of-range IOPs request.
                    module.fail_json(msg='IOPs QoS value {0} out of range.'.format(module.params['iops_qos']))

        module.exit_json(changed=change, volume=volfact)

    module.exit_json(changed=changed)
+
+
def rename_volume(module, array):
    """Rename volume within a container, ie pod, vgroup or local array

    The target name inherits the source's container prefix (pod ``::`` or
    vgroup ``/``), so a rename can never move a volume between containers.
    Fails if the target already exists or is a (destroyed) protocol endpoint.
    Exits via module.exit_json with the changed status and volume facts.
    """
    changed = True
    volfact = []
    if not module.check_mode:
        changed = False
        pod_name = ''
        vgroup_name = ''
        target_name = module.params['rename']
        target_exists = False
        if "::" in module.params['name']:
            # Source lives in a pod - target must be in the same pod
            pod_name = module.params["name"].split("::")[0]
            target_name = pod_name + "::" + module.params['rename']
            try:
                array.get_volume(target_name, pending=True)
                target_exists = True
            except Exception:
                target_exists = False
        elif "/" in module.params['name']:
            # Source lives in a volume group - target must be in the same vgroup
            vgroup_name = module.params["name"].split("/")[0]
            target_name = vgroup_name + "/" + module.params['rename']
            try:
                array.get_volume(target_name, pending=True)
                target_exists = True
            except Exception:
                target_exists = False
        else:
            try:
                array.get_volume(target_name, pending=True)
                target_exists = True
            except Exception:
                target_exists = False
        # FIX: corrected 'protocol-endpoinnt' typo in the two messages below
        if target_exists and get_endpoint(target_name, array):
            module.fail_json(msg='Target volume {0} is a protocol-endpoint'.format(target_name))
        if not target_exists:
            if get_destroyed_endpoint(target_name, array):
                module.fail_json(msg='Target volume {0} is a destroyed protocol-endpoint'.format(target_name))
            else:
                try:
                    volfact = array.rename_volume(module.params["name"], module.params['rename'])
                    changed = True
                except Exception:
                    module.fail_json(msg='Rename volume {0} to {1} failed.'.format(module.params["name"], module.params['rename']))
        else:
            module.fail_json(msg="Target volume {0} already exists.".format(target_name))

    module.exit_json(changed=changed, volume=volfact)
+
+
def move_volume(module, array):
    """Move volume between pods, vgroups or local array

    'local' moves a pod/vgroup member back to the top-level array namespace;
    otherwise the destination must be exactly one of an existing pod or
    vgroup. Various pod states (stretched, linked, demoted) block the move.
    Exits via module.exit_json with the changed status and volume facts.
    """
    changed = True
    volfact = []
    if not module.check_mode:
        changed = False
        vgroup_exists = False
        target_exists = False
        pod_exists = False
        pod_name = ''
        vgroup_name = ''
        volume_name = module.params['name']
        if "::" in module.params['name']:
            volume_name = module.params["name"].split("::")[1]
            pod_name = module.params["name"].split("::")[0]
        if "/" in module.params['name']:
            volume_name = module.params["name"].split("/")[1]
            vgroup_name = module.params["name"].split("/")[0]
        if module.params['move'] == 'local':
            target_location = ""
            if "::" not in module.params['name']:
                if "/" not in module.params['name']:
                    module.fail_json(msg='Source and destination [local] cannot be the same.')
            try:
                target_exists = array.get_volume(volume_name, pending=True)
            except Exception:
                target_exists = False
            if target_exists:
                module.fail_json(msg='Target volume {0} already exists'.format(volume_name))
        else:
            try:
                pod_exists = array.get_pod(module.params['move'])
                if len(pod_exists['arrays']) > 1:
                    module.fail_json(msg='Volume cannot be moved into a stretched pod')
                if pod_exists['link_target_count'] != 0:
                    module.fail_json(msg='Volume cannot be moved into a linked source pod')
                if pod_exists['promotion_status'] == 'demoted':
                    module.fail_json(msg='Volume cannot be moved into a demoted pod')
                pod_exists = bool(pod_exists)
            except Exception:
                pod_exists = False
            if pod_exists:
                try:
                    target_exists = bool(array.get_volume(module.params['move'] + "::" + volume_name, pending=True))
                except Exception:
                    target_exists = False
            try:
                vgroup_exists = bool(array.get_vgroup(module.params['move']))
            except Exception:
                vgroup_exists = False
            if vgroup_exists:
                try:
                    target_exists = bool(array.get_volume(module.params['move'] + "/" + volume_name, pending=True))
                except Exception:
                    target_exists = False
            if target_exists:
                module.fail_json(msg='Volume of same name already exists in move location')
            if pod_exists and vgroup_exists:
                module.fail_json(msg='Move location {0} matches both a pod and a vgroup. Please rename one of these.'.format(module.params['move']))
            if not pod_exists and not vgroup_exists:
                module.fail_json(msg='Move location {0} does not exist.'.format(module.params['move']))
            if "::" in module.params['name']:
                pod = array.get_pod(module.params['move'])
                if len(pod['arrays']) > 1:
                    module.fail_json(msg='Volume cannot be moved out of a stretched pod')
                # FIX: key was 'linked_target_count', inconsistent with the
                # 'link_target_count' key used for the destination pod above
                if pod['link_target_count'] != 0:
                    module.fail_json(msg='Volume cannot be moved out of a linked source pod')
                if pod['promotion_status'] == 'demoted':
                    # FIX: message previously read 'Volume cannot be out of...'
                    module.fail_json(msg='Volume cannot be moved out of a demoted pod')
            if "/" in module.params['name']:
                if vgroup_name == module.params['move'] or pod_name == module.params['move']:
                    module.fail_json(msg='Source and destination cannot be the same')
            target_location = module.params['move']
        # FIX: corrected 'protocol-endpoinnt' typo in the message below
        if get_endpoint(target_location, array):
            module.fail_json(msg='Target volume {0} is a protocol-endpoint'.format(target_location))
        try:
            volfact = array.move_volume(module.params['name'], target_location)
            changed = True
        except Exception:
            if target_location == '':
                target_location = '[local]'
            module.fail_json(msg='Move of volume {0} to {1} failed.'.format(module.params['name'],
                                                                            target_location))
    module.exit_json(changed=changed, volume=volfact)
+
+
def delete_volume(module, array):
    """Destroy a volume and, when requested, eradicate it immediately.

    Exits via module.exit_json with the changed status and, when the
    volume was eradicated, the facts returned by the eradicate call.
    """
    changed = True
    volfact = []
    if not module.check_mode:
        vol_name = module.params['name']
        try:
            array.destroy_volume(vol_name)
            if module.params['eradicate']:
                # Eradication is best done inside the same guard so a
                # destroy failure is reported before we ever attempt it
                try:
                    volfact = array.eradicate_volume(vol_name)
                except Exception:
                    module.fail_json(msg='Eradicate volume {0} failed.'.format(vol_name))
        except Exception:
            module.fail_json(msg='Delete volume {0} failed.'.format(vol_name))
    module.exit_json(changed=changed, volume=volfact)
+
+
def eradicate_volume(module, array):
    """Eradicate a deleted (destroyed) volume.

    Exits via module.exit_json with the changed status and an (always
    empty) volume fact list.
    """
    changed = True
    # FIX: volfact was previously initialised inside the check_mode guard
    # but referenced in exit_json outside it, raising NameError in check mode
    volfact = []
    if not module.check_mode:
        if module.params['eradicate']:
            try:
                array.eradicate_volume(module.params['name'])
            except Exception:
                module.fail_json(msg='Eradication of volume {0} failed'.format(module.params['name']))
    module.exit_json(changed=changed, volume=volfact)
+
+
def recover_volume(module, array):
    """Recover a destroyed volume that is still pending eradication.

    Exits via module.exit_json with the changed status and an (always
    empty) volume fact list.
    """
    changed = True
    volfact = []
    if not module.check_mode:
        vol_name = module.params['name']
        try:
            array.recover_volume(vol_name)
        except Exception:
            module.fail_json(msg='Recovery of volume {0} failed'.format(vol_name))
    module.exit_json(changed=changed, volume=volfact)
+
+
+def main():
+    # Entry point: build the argument spec, connect to the array, then
+    # dispatch to the create/copy/update/move/rename/delete/eradicate/
+    # recover handlers based on 'state' and which options were supplied.
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        name=dict(type='str', required=True),
+        target=dict(type='str'),
+        move=dict(type='str'),
+        rename=dict(type='str'),
+        overwrite=dict(type='bool', default=False),
+        eradicate=dict(type='bool', default=False),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        bw_qos=dict(type='str', aliases=['qos']),
+        iops_qos=dict(type='str'),
+        count=dict(type='int'),
+        start=dict(type='int', default=0),
+        digits=dict(type='int', default=1),
+        suffix=dict(type='str'),
+        size=dict(type='str'),
+    ))
+
+    # NOTE(review): the second and third exclusion lists are the same four
+    # options in a different order - presumably one is redundant; confirm.
+    mutually_exclusive = [['size', 'target'],
+                          ['move', 'rename', 'target', 'eradicate'],
+                          ['rename', 'move', 'target', 'eradicate']]
+
+    module = AnsibleModule(argument_spec,
+                           mutually_exclusive=mutually_exclusive,
+                           supports_check_mode=True)
+
+    size = module.params['size']
+    bw_qos = module.params['bw_qos']
+    iops_qos = module.params['iops_qos']
+    state = module.params['state']
+    destroyed = False
+    array = get_system(module)
+    volume = get_volume(module, array)
+    api_version = array._list_available_rest_versions()
+    endpoint = get_endpoint(module.params['name'], array)
+
+    # Protocol endpoints are managed by a dedicated module
+    if endpoint:
+        module.fail_json(msg='Volume {0} is an endpoint. Use purefa_endpoint module.'.format(module.params['name']))
+
+    # No live volume: it may still exist in the destroyed (pending) state
+    if not volume:
+        destroyed = get_destroyed_volume(module.params['name'], array)
+    target = get_target(module, array)
+    if module.params['count']:
+        # Multi-volume (count-based) creation path: requires the
+        # py-pure-client SDK and a sufficiently recent Purity//FA
+        if not HAS_PURESTORAGE:
+            module.fail_json(msg='py-pure-client sdk is required to support \'count\' parameter')
+        if MULTI_VOLUME_VERSION not in api_version:
+            module.fail_json(msg='\'count\' parameter is not supported until Purity//FA 6.0.0 or higher')
+        # NOTE(review): range(1, 10) excludes 10, yet the message says
+        # "1 to 10" - looks like an off-by-one; confirm the intended limit.
+        if module.params['digits'] and module.params['digits'] not in range(1, 10):
+            module.fail_json(msg='\'digits\' must be in the range of 1 to 10')
+        if module.params['start'] < 0:
+            module.fail_json(msg='\'start\' must be a positive number')
+        volume = get_multi_volumes(module)
+        if state == 'present' and not volume and size:
+            create_multi_volume(module, array)
+        elif state == 'present' and not volume and not size:
+            module.fail_json(msg="Size must be specified to create a new volume")
+        elif state == 'absent' and not volume:
+            module.exit_json(changed=False)
+        else:
+            module.warn('Method not yet supported for multi-volume')
+    else:
+        # Single-volume path
+        if state == 'present' and not volume and not destroyed and size:
+            create_volume(module, array)
+        elif state == 'present' and volume and (size or bw_qos or iops_qos):
+            update_volume(module, array)
+        elif state == 'present' and not volume and module.params['move']:
+            module.fail_json(msg='Volume {0} cannot be moved - does not exist (maybe deleted)'.format(module.params['name']))
+        elif state == 'present' and volume and module.params['move']:
+            move_volume(module, array)
+        elif state == 'present' and volume and module.params['rename']:
+            rename_volume(module, array)
+        elif state == 'present' and destroyed and not module.params['move'] and not module.params['rename']:
+            recover_volume(module, array)
+        elif state == 'present' and destroyed and module.params['move']:
+            module.fail_json(msg='Volume {0} exists, but in destroyed state'.format(module.params['name']))
+        # NOTE(review): copy_from_volume is invoked both with and without a
+        # resolved target - the 'not target' branch looks suspicious; verify
+        # against the module's copy semantics before relying on it.
+        elif state == 'present' and volume and target:
+            copy_from_volume(module, array)
+        elif state == 'present' and volume and not target:
+            copy_from_volume(module, array)
+        elif state == 'absent' and volume:
+            delete_volume(module, array)
+        elif state == 'absent' and destroyed:
+            eradicate_volume(module, array)
+        elif state == 'present':
+            if not volume and not size:
+                module.fail_json(msg="Size must be specified to create a new volume")
+        elif state == 'absent' and not volume:
+            module.exit_json(changed=False)
+
+    # Fallthrough: nothing matched, report no change
+    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py
new file mode 100644
index 00000000..a038127b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefa_volume_tags
+version_added: '1.0.0'
+short_description: Manage volume tags on Pure Storage FlashArrays
+description:
+- Manage volume tags for volumes on Pure Storage FlashArray.
+- Requires a minimum of Purity 6.0.0
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the volume.
+ type: str
+ required: true
+ namespace:
+ description:
+ - The name of tag namespace
+ default: default
+ type: str
+ copyable:
+ description:
+ - Define whether the volume tags are inherited on volume copies.
+ default: true
+ type: bool
+ kvp:
+ description:
+ - List of key value pairs to assign to the volume.
+    - Separate the key from the value using a colon (:) only.
+ - All items in list will use I(namespace) and I(copyable) settings.
+ - Maximum of 5 tags per volume
+ - See examples for exact formatting requirements
+ type: list
+ elements: str
+ required: true
+ state:
+ description:
+ - Define whether the volume tag(s) should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+'''
+
+EXAMPLES = r'''
+- name: Create new tags in namespace test for volume foo
+ purefa_volume_tags:
+ name: foo
+ namespace: test
+ copyable: false
+ kvp:
+ - 'key1:value1'
+ - 'key2:value2'
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Remove an existing tag in namespace test for volume foo
+ purefa_volume_tags:
+ name: foo
+ namespace: test
+ kvp:
+ - 'key1:value1'
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Update an existing tag in namespace test for volume foo
+ purefa_volume_tags:
+ name: foo
+ namespace: test
+ kvp:
+ - 'key1:value2'
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete a tag in namespace test for volume foo
+ purefa_volume_tags:
+ name: foo
+ namespace: test
+ kvp:
+ - 'key1:value2'
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import get_system, purefa_argument_spec
+
+
+TAGS_API_VERSION = "1.19"
+
+
def get_volume(module, array):
    """Look up the named volume (including pending ones); return None if absent."""
    try:
        volume = array.get_volume(module.params['name'], pending=True)
    except Exception:
        return None
    return volume
+
+
def get_endpoint(name, array):
    """Look up *name* as a protocol endpoint; return its record, or None if absent."""
    try:
        endpoint = array.get_volume(name, pending=True, protocol_endpoint=True)
    except Exception:
        endpoint = None
    return endpoint
+
+
def create_tag(module, array):
    """Attach every ``key:value`` pair in ``kvp`` to the volume as a new tag.

    All tags share the module's ``namespace`` and ``copyable`` settings.
    Exits via module.exit_json with changed=True (check mode included).
    """
    changed = True
    if not module.check_mode:
        for pair in module.params['kvp']:
            parts = pair.split(":")
            try:
                array.add_tag_to_volume(module.params['name'],
                                        copyable=module.params['copyable'],
                                        namespace=module.params['namespace'],
                                        key=parts[0],
                                        value=parts[1])
            except Exception:
                module.fail_json(msg='Failed to add tag KVP {0} to volume {1}'.format(pair,
                                                                                      module.params['name']))

    module.exit_json(changed=changed)
+
+
def update_tag(module, array, current_tags):
    """Update Volume Tag

    For each requested ``key:value`` pair: when the key already exists in
    the namespace with a different value, re-add it (which overwrites the
    value); when the key is absent, add it as a new tag. Exits via
    module.exit_json with the changed status.
    """
    changed = True
    if not module.check_mode:
        changed = False
        for tag in range(0, len(module.params['kvp'])):
            tag_exists = False
            for current_tag in range(0, len(current_tags)):
                if module.params['kvp'][tag].split(":")[0] == current_tags[current_tag]['key'] and \
                        module.params['namespace'] == current_tags[current_tag]['namespace']:
                    tag_exists = True
                    if module.params['kvp'][tag].split(":")[1] != current_tags[current_tag]['value']:
                        # FIX: removed leftover debug call
                        # module.warn('here {0} {1}'...) that fired on every update
                        try:
                            array.add_tag_to_volume(module.params['name'],
                                                    namespace=module.params['namespace'],
                                                    key=module.params['kvp'][tag].split(":")[0],
                                                    value=module.params['kvp'][tag].split(":")[1])
                            changed = True
                        except Exception:
                            module.fail_json(msg="Failed to update tag '{0}' from volume {1}".format(module.params['kvp'][tag].split(":")[0],
                                                                                                     module.params['name']))

            if not tag_exists:
                try:
                    array.add_tag_to_volume(module.params['name'],
                                            namespace=module.params['namespace'],
                                            key=module.params['kvp'][tag].split(":")[0],
                                            value=module.params['kvp'][tag].split(":")[1]
                                            )
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to add tag KVP {0} to volume {1}".format(module.params['kvp'][tag].split(":")[0],
                                                                                          module.params['name']))
    module.exit_json(changed=changed)
+
+
def delete_tag(module, array, current_tags):
    """Remove each requested tag key from the volume when it exists.

    A pair is only removed when its key is present in the module's
    namespace among current_tags; values are not compared. Exits via
    module.exit_json with the changed status.
    """
    changed = True
    if not module.check_mode:
        changed = False
        for pair in module.params['kvp']:
            req_key = pair.split(":")[0]
            for existing in current_tags:
                if req_key == existing['key'] and \
                        module.params['namespace'] == existing['namespace']:
                    try:
                        array.remove_tag_from_volume(module.params['name'],
                                                     namespace=module.params['namespace'],
                                                     key=req_key)
                        changed = True
                    except Exception:
                        module.fail_json(msg="Failed to remove tag KVP '{0}' from volume {1}".format(pair,
                                                                                                     module.params['name']))
    module.exit_json(changed=changed)
+
+
+def main():
+    # Entry point: validate arguments, connect to the FlashArray, gather
+    # the volume's current tags and dispatch to create/update/delete.
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        name=dict(type='str', required=True),
+        copyable=dict(type='bool', default=True),
+        namespace=dict(type='str', default='default'),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        kvp=dict(type='list', elements='str', required=True),
+    ))
+
+    module = AnsibleModule(argument_spec,
+                           supports_check_mode=True)
+
+    state = module.params['state']
+    # NOTE(review): kvp is silently truncated to the first five entries
+    # ([0:5]) before sorting - presumably the per-volume tag limit, but
+    # extras are dropped without any warning; confirm intended behaviour.
+    if module.params['kvp'] is not None:
+        module.params['kvp'] = sorted(module.params['kvp'][0:5])
+    else:
+        module.fail_json(msg='No KVPs specified. Minimum of 1 is required.')
+    array = get_system(module)
+    api_version = array._list_available_rest_versions()
+
+    if TAGS_API_VERSION not in api_version:
+        module.fail_json(msg='Volume tags are not supported. Purity 6.0.0, or higher, is required.')
+
+    volume = get_volume(module, array)
+    endpoint = get_endpoint(module.params['name'], array)
+
+    if not volume:
+        module.fail_json(msg='Volume {0} does not exist.'.format(module.params['name']))
+    if endpoint:
+        module.fail_json(msg='Volume {0} is an endpoint. Tags not allowed.'.format(module.params['name']))
+    # A '.' in the name is treated as a snapshot (queried with snap=True);
+    # NOTE(review): presumably names cannot otherwise contain '.' - confirm.
+    if "." in module.params['name']:
+        current_tags = array.get_volume(module.params['name'],
+                                        snap=True,
+                                        pending=True,
+                                        tags=True,
+                                        namespace=module.params['namespace'])
+    else:
+        current_tags = array.get_volume(module.params['name'],
+                                        pending=True,
+                                        tags=True,
+                                        namespace=module.params['namespace'])
+
+    if state == 'present' and not current_tags:
+        create_tag(module, array)
+    elif state == 'absent' and not current_tags:
+        module.exit_json(changed=False)
+    elif state == 'present' and current_tags:
+        update_tag(module, array, current_tags)
+    elif state == 'absent' and current_tags:
+        delete_tag(module, array, current_tags)
+
+    # Fallthrough: nothing matched, report no change
+    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/roles/.keep b/collections-debian-merged/ansible_collections/purestorage/flasharray/roles/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/roles/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flasharray/tests/sanity/ignore-2.10.txt b/collections-debian-merged/ansible_collections/purestorage/flasharray/tests/sanity/ignore-2.10.txt
new file mode 100644
index 00000000..d5e33e6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flasharray/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefa_info.py validate-modules:return-syntax-error
+plugins/modules/purefa_inventory.py validate-modules:return-syntax-error
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/CHANGELOG.rst b/collections-debian-merged/ansible_collections/purestorage/flashblade/CHANGELOG.rst
new file mode 100644
index 00000000..28ad2bba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/CHANGELOG.rst
@@ -0,0 +1,90 @@
+====================================
+Purestorage.Flashblade Release Notes
+====================================
+
+.. contents:: Topics
+
+
+v1.4.0
+======
+
+Minor Changes
+-------------
+
+- purefb_banner - Module to manage the GUI and SSH login message
+- purefb_certgrp - Module to manage FlashBlade Certificate Groups
+- purefb_certs - Module to create and delete SSL certificates
+- purefb_connect - Support idempotency when existing connection is incoming
+- purefb_fs - Add new options for filesystem control (https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/pull/81)
+- purefb_fs - Default filesystem size on creation changes from 32G to ``unlimited``
+- purefb_fs - Fix error in deletion and eradication of filesystem
+- purefb_fs_replica - Remove condition to attach/detach policies on unhealthy replica-link
+- purefb_info - Add support to list filesystem policies
+- purefb_lifecycle - Module to manage FlashBlade Bucket Lifecycle Rules
+- purefb_s3user - Add support for imported user access keys
+- purefb_syslog - Module to manage syslog server configuration
+
+Bugfixes
+--------
+
+- purefa_policy - Resolve multiple issues related to incorrect use of timezones
+- purefb_connect - Ensure changing encryption status on array connection is performed correctly
+- purefb_connect - Fix breaking change created in purity_fb SDK 1.9.2 for deletion of array connections
+- purefb_connect - Hide target array API token
+- purefb_ds - Ensure updating directory service configurations completes correctly
+- purefb_info - Fix issue getting array info when encrypted connection exists
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_banner - Configure Pure Storage FlashBlade GUI and SSH MOTD message
+- purestorage.flashblade.purefb_certgrp - Manage FlashBlade Certificate Groups
+- purestorage.flashblade.purefb_certs - Manage FlashBlade SSL Certificates
+- purestorage.flashblade.purefb_lifecycle - Manage FlashBlade object lifecycles
+- purestorage.flashblade.purefb_syslog - Configure Pure Storage FlashBlade syslog settings
+
+v1.3.0
+======
+
+Release Summary
+---------------
+
+| Release Date: 2020-08-08
+| This changelog describes all changes made to the modules and plugins included in this collection since Ansible 2.9.0
+
+
+Major Changes
+-------------
+
+- purefb_alert - manage alert email settings on a FlashBlade
+- purefb_bladename - manage FlashBlade name
+- purefb_bucket_replica - manage bucket replica links on a FlashBlade
+- purefb_connect - manage connections between FlashBlades
+- purefb_dns - manage DNS settings on a FlashBlade
+- purefb_fs_replica - manage filesystem replica links on a FlashBlade
+- purefb_inventory - get information about the hardware inventory of a FlashBlade
+- purefb_ntp - manage the NTP settings for a FlashBlade
+- purefb_phonehome - manage the phone home settings for a FlashBlade
+- purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+- purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+- purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+- purefb_snmp_agent - modify the FlashBlade SNMP Agent
+- purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+- purefb_target - manage remote S3-capable targets for a FlashBlade
+- purefb_user - manage local ``pureuser`` account password on a FlashBlade
+
+Minor Changes
+-------------
+
+- purefb_bucket - Versioning support added
+- purefb_info - new options added for information collection
+- purefb_network - Add replication service type
+- purefb_s3user - Limit ``access_key`` recreation to 3 times
+- purefb_s3user - return dict changed from ``ansible_facts`` to ``s3user_info``
+
+Bugfixes
+--------
+
+- purefb_bucket - Add warning message if ``state`` is ``absent`` without ``eradicate:``
+- purefb_fs - Add graceful exit when ``state`` is ``absent`` and filesystem not eradicated
+- purefb_fs - Add warning message if ``state`` is ``absent`` without ``eradicate``
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/FILES.json b/collections-debian-merged/ansible_collections/purestorage/flashblade/FILES.json
new file mode 100644
index 00000000..c28c37a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/FILES.json
@@ -0,0 +1,586 @@
+{
+ "files": [
+ {
+ "format": 1,
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": ".",
+ "chksum_type": null
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "roles",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "roles/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "tests/sanity",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "name": "tests/sanity/ignore-2.9.txt",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "name": "tests/sanity/ignore-2.10.txt",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "098d26a10ec061d4cabd9d8504a138aaf16175ceca1b8c81bbe63effff052513",
+ "name": "README.md",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/roles",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "playbooks/roles/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "playbooks/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/templates",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "playbooks/templates/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/files",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "playbooks/files/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/vars",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "playbooks/vars/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "playbooks/tasks",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "name": "playbooks/tasks/.keep",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "440310120f2f25ceb0bbb0ea80d67426cf5c862f21d6bd3f039e8a665e33d850",
+ "name": "CHANGELOG.rst",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "changelogs",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "72e2d29ddbf66c657d6998587962e1710c9686ba4b977cc1390ba4add6e09009",
+ "name": "changelogs/config.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "71fca98f6027e6f9bf34a63f042bc4ec41e09cc25a93ba8adb3c8a2f4d634519",
+ "name": "changelogs/.plugin-cache.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "9b63a7241dbce0ea01bdbc8466d6d45e6273836d2011df1a910c08f841e92ed8",
+ "name": "changelogs/changelog.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "changelogs/fragments",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "abb817b52fdfa70b538ca9efce8d642282383b6961c47bde20ce0a023d2b941d",
+ "name": "changelogs/fragments/81_purefb_fs_new_options.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "2e9c5c95b8333fee22646f4e83e9034172182b1e99c084725f08df48e45d3d47",
+ "name": "changelogs/fragments/101_fix_policy_and_timezone_error.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6d8689e8f46ab7d3286b7d3ee46dfa13a8bf0585cc9b197a5ca8271c9dd9590e",
+ "name": "changelogs/fragments/76_default_fs_size.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ad1078e90875745edce8071846183eed39c3878156d14f96b5db78ab1c5be973",
+ "name": "changelogs/fragments/90_imported_keys.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "828cc0c94acf44d1d373402a0cc657527d9fce8ac744319fbe0d8035684932b4",
+ "name": "changelogs/fragments/96_fix_update_connection.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ee600c3bcae632d7450ff3447192f8ca2d1622eecd67bc87c59fdd3dd8326bc6",
+ "name": "changelogs/fragments/85_add_banner.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f3504f5e1acadaf52bd9d420373b7edce2015435232e5fa53282455361bcd440",
+ "name": "changelogs/fragments/80_support_reverse_replica_link.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "1d286bf0fe3301a898bcdcad0bf70955732608eb51468097ca6d70ae269654d8",
+ "name": "changelogs/fragments/84_add_cert.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b7513178564ee1707090e4b3df65af56f28a71119e0ebf73b074dc9d2c0e1d65",
+ "name": "changelogs/fragments/83_add_certgrp.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e140fbfc3ac4eaab3dd9c482e3beb37efd98ad4c3892b36f93ffb00d89c9283f",
+ "name": "changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8befcbbddf6fc2db62ff48b4f3a1030fe115fb7ababfc9b03c8e693628087337",
+ "name": "changelogs/fragments/92_fix_ds_update.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e42ee9ea2a2bffa465347a52a3fcf4bfaa51f377e7f33bf4a405eb46ae507442",
+ "name": "changelogs/fragments/86_add_syslog.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "64bd3d32085373ce61a414518c2ed87bfd003d163d3002d087f41f4a54b0b1a0",
+ "name": "changelogs/fragments/v1.3.0_summary.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "fdc6c425f03ffc0b4a008230f290f6ef37874a270909cb2ee311843dc08909f6",
+ "name": "changelogs/fragments/88_add_lifecycle.yml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "57a7b5ed892c4ea2f5149023b2bdde9481eb8c0a7593e4e76a4603e706971100",
+ "name": "changelogs/fragments/78_update_filesystem_replica_link.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "b4cd3cbdb65de6b71cfbe179d56a42be2afbf6486e1ce0df9fdd3a7042bd57b0",
+ "name": "changelogs/fragments/79_hide_connect_api.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8c7090d551cb59c49622a89c0ed25f12ad89104a9e2ab6708a01fc01fce9e049",
+ "name": "changelogs/fragments/77_filesystem_policies_info.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "787138033d123fa59a9d3cdb424dc093183a020eebf1e76b46cbf059006e18e5",
+ "name": "changelogs/fragments/90_delete_conn_fix.yaml",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/modules",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e2085de6051fd21ca0c15054a30fd30aa70a001a5116c2f0e21ce8af0cb737c6",
+ "name": "plugins/modules/purefb_bladename.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3a83537f84375cb5bf3dfdd8b9c1cbc342299bbfffab074d8988052976dd1076",
+ "name": "plugins/modules/purefb_proxy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "cc3b2332039bdbcdd16e7cac8b192589d6f76175d9bc3a75ebd14864a933d58b",
+ "name": "plugins/modules/purefb_user.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "219dbbcc1889d99e67d50b4b0d53334c74e285477f816a250b38b4002ab4e044",
+ "name": "plugins/modules/purefb_dns.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "11c74692eea80fd141478baf5addff6d1f374b3b76bb75287f30269f74d5b116",
+ "name": "plugins/modules/purefb_alert.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0b56bc85c621d21e05c0dc94019edbb1ea43e1bfa1d767807476331b39e5c9d0",
+ "name": "plugins/modules/purefb_remote_cred.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "d3c18977c8bb214b5f339ee4f1f8d68ad4c3b3215b0274ec18e9290de9cdb60a",
+ "name": "plugins/modules/purefb_connect.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "23f52e9de6abe2afa60cf8c2d2e0fc9034cd2caa955d63ef4131b5933bf607d8",
+ "name": "plugins/modules/purefb_fs.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "85944c3b8ca407b8e4e9a07e279268ac2c97d96904cb78c52305bbdd50d848c4",
+ "name": "plugins/modules/purefb_network.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "12912b6a31a8d0df3fbcd2bdd554076d22b6559c51a4fbe7a28f71d3bfe4b44a",
+ "name": "plugins/modules/purefb_target.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "96e2af842acc8018bab4517b00708936a922f83e57558816aa11e6018c1ba238",
+ "name": "plugins/modules/purefb_certgrp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f1ad5424beaf199be26a6a3f58c5ceb94eba7534d8e5b33ddfed2d508836d0c3",
+ "name": "plugins/modules/purefb_ds.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "f5d40480816595790d19f655a6a3ce173695acccc3a06d16ed8fb3ed6f021a71",
+ "name": "plugins/modules/purefb_certs.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3db5be15bdd4a0732d6fcaf39ab21c9f56c8436b7d6841b1668c7c1c45214da9",
+ "name": "plugins/modules/purefb_snmp_agent.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ec782d0a07640f7aa7cbad32edb509a003f4d5c59c8b94414edb3b5131da8fdb",
+ "name": "plugins/modules/purefb_subnet.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "0ac0045d8abba2bbcdbfe528beb55bc2d83ac610f2c387ea4c5b707dad4c6a79",
+ "name": "plugins/modules/purefb_inventory.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "6e5da3cd489d95a6543065d0138df4d2df16c60d2b6dc26edb958fcd964bdf70",
+ "name": "plugins/modules/purefb_syslog.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a2cc3787bb04e5f234988bb69c21a5af2b863e3b45d6afc2926fa49416c7cc6c",
+ "name": "plugins/modules/purefb_banner.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "797af2cd252e220310e50c611ff8aff0dd4007742503de46645f2d5812daaf05",
+ "name": "plugins/modules/purefb_snmp_mgr.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "298f484adc1916af94d3b0ff09fea240ff8173e407bb4660152231a75510a7c1",
+ "name": "plugins/modules/purefb_ra.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "bcb8eacb2407bedae3d5fa1b4997b2a45b7ef16847b1224c64a72355b0723b67",
+ "name": "plugins/modules/purefb_s3acc.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "260f7dbf877a474a64992ae575fd2f3595e890ab0e365ebc17ff301eff3a0066",
+ "name": "plugins/modules/purefb_info.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "269843d389a0b6a5e184f785ee7eb379e99523a12e71af1837510fbcb2ed0d8b",
+ "name": "plugins/modules/purefb_s3user.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "42ebcbe13e0b892315497bcbddd44e580f2052ef5ac1f3eddd10ef52a2def635",
+ "name": "plugins/modules/purefb_snap.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "197ffb1eacbfc83cf2274cb978c9c2770ad46d30ca29e3169041d28f7a2c3212",
+ "name": "plugins/modules/purefb_smtp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "8d130c8196f3b7ed94b6336dccad26f7f3fc9f18b9622ab1a3dcc037df74d75f",
+ "name": "plugins/modules/purefb_dsrole.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "94c0616e56fd88f432aa606b6a943959a0db9e113a6f5952977681ce276b5c2d",
+ "name": "plugins/modules/purefb_bucket.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "5c2286ffc1af45be2c5193d7c11f9369229b00ffd59c0f6448594236622d9425",
+ "name": "plugins/modules/purefb_ntp.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "86815a118728e6d5ae012f0a55a2c7e745213364957fbc75ab9e8f9182c303ba",
+ "name": "plugins/modules/purefb_policy.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "627bf1595d92e3caa55d3cef7f2abca719302771653b4fef1c6d33eab63d1ca4",
+ "name": "plugins/modules/purefb_bucket_replica.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "e4a59857a7058333217d8c543032738dd0265f2e0889bc8a7aad45328cc0d5df",
+ "name": "plugins/modules/purefb_fs_replica.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "ccff87f81fec33e66e0057e073b84ae797fd8b78213deca8859a252eab1657f6",
+ "name": "plugins/modules/purefb_phonehome.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "33f186f9b8159035d760d6a45d2cca4037abc14397c3026963118057ca083a70",
+ "name": "plugins/modules/purefb_lifecycle.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/doc_fragments",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "3048ea891e7ed4ea764faa477efbbfbd3ca3a6740adb1c0d1718d19149ee6e29",
+ "name": "plugins/doc_fragments/purestorage.py",
+ "chksum_type": "sha256",
+ "format": 1
+ },
+ {
+ "ftype": "dir",
+ "chksum_sha256": null,
+ "name": "plugins/module_utils",
+ "chksum_type": null,
+ "format": 1
+ },
+ {
+ "ftype": "file",
+ "chksum_sha256": "a9d526f8cff8e82d7b23eecd1b3817a2ffc694bb11323dc39633370bf68add2a",
+ "name": "plugins/module_utils/purefb.py",
+ "chksum_type": "sha256",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/MANIFEST.json b/collections-debian-merged/ansible_collections/purestorage/flashblade/MANIFEST.json
new file mode 100644
index 00000000..d6ece309
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/MANIFEST.json
@@ -0,0 +1,37 @@
+{
+ "collection_info": {
+ "description": "Collection of modules to manage Pure Storage FlashBlades",
+ "repository": "https://github.com/Pure-Storage-Ansible/FlashBlade-Collection",
+ "tags": [
+ "purestorage",
+ "flashblade",
+ "storage",
+ "object",
+ "nfs"
+ ],
+ "dependencies": {},
+ "authors": [
+ "Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>"
+ ],
+ "issues": "https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/issues",
+ "name": "flashblade",
+ "license": [
+ "GPL-3.0-or-later",
+ "BSD-2-Clause"
+ ],
+ "documentation": "https://docs.ansible.com/ansible/2.10/collections/purestorage/flashblade",
+ "namespace": "purestorage",
+ "version": "1.4.0",
+ "readme": "README.md",
+ "license_file": null,
+ "homepage": null
+ },
+ "file_manifest_file": {
+ "format": 1,
+ "ftype": "file",
+ "chksum_sha256": "3c5f556ad88cdf9bb97523a4f3f25285ebb6e0dbc38850578f049a4bc2bf8f8e",
+ "name": "FILES.json",
+ "chksum_type": "sha256"
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/README.md b/collections-debian-merged/ansible_collections/purestorage/flashblade/README.md
new file mode 100644
index 00000000..f8533587
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/README.md
@@ -0,0 +1,77 @@
+# Pure Storage FlashBlade Collection
+
+The Pure Storage FlashBlade collection consists of the latest versions of the FlashBlade modules.
+
+## Supported Platforms
+
+- Pure Storage FlashBlade with Purity 2.1.2 or later
+- Certain modules and functionality require higher versions of Purity. Modules will inform you if your Purity version is not high enough to use a module.
+
+## Prerequisites
+
+- Ansible 2.9 or later
+- Pure Storage FlashBlade system running Purity//FB 2.1.2 or later
+ - some modules require higher versions of Purity//FB
+- purity_fb >=v1.10
+- python >=v2.7, <3.7
+- netaddr
+
+## Idempotency
+
+All modules are idempotent with the exception of modules that change or set passwords. Due to security requirements existing passwords cannot be validated against and therefore will always be modified, even if there is no change.
+
+## Available Modules
+
+- purefb_alert - manage alert email settings on a FlashBlade
+- purefb_banner - manage FlashBlade login banner
+- purefb_bladename - manage FlashBlade name
+- purefb_bucket - manage S3 buckets on a FlashBlade
+- purefb_bucket_replica - manage bucket replica links on a FlashBlade
+- purefb_certs - manage FlashBlade SSL certificates
+- purefb_certgrp - manage FlashBlade certificate groups
+- purefb_connect - manage connections between FlashBlades
+- purefb_dns - manage DNS settings on a FlashBlade
+- purefb_ds - manage Directory Services settings on a FlashBlade
+- purefb_dsrole - manage Directory Service Roles on a FlashBlade
+- purefb_fs - manage filesystems on a FlashBlade
+- purefb_fs_replica - manage filesystem replica links on a FlashBlade
+- purefb_inventory - get information about the hardware inventory of a FlashBlade
+- purefb_info - get information about the configuration of a FlashBlade
+- purefb_lifecycle - manage FlashBlade Bucket Lifecycle Rules
+- purefb_network - manage the network settings for a FlashBlade
+- purefb_ntp - manage the NTP settings for a FlashBlade
+- purefb_phonehome - manage the phone home settings for a FlashBlade
+- purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+- purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+- purefb_ra - manage the Remote Assist connections on a FlashBlade
+- purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+- purefb_s3acc - manage the object store accounts on a FlashBlade
+- purefb_s3user - manage the object store users on a FlashBlade
+- purefb_smtp - manage SMTP settings on a FlashBlade
+- purefb_snap - manage filesystem snapshots on a FlashBlade
+- purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+- purefb_snmp_agent - modify the FlashBlade SNMP Agent
+- purefb_subnet - manage network subnets on a FlashBlade
+- purefb_syslog - manage FlashBlade syslog server configuration
+- purefb_target - manage remote S3-capable targets for a FlashBlade
+- purefb_user - manage local *pureuser* account password on a FlashBlade
+
+## Instructions
+
+Install the Pure Storage FlashBlade collection on your Ansible management host.
+
+- Using ansible-galaxy (Ansible 2.9 or later):
+```
+ansible-galaxy collection install purestorage.flashblade -p ~/.ansible/collections
+```
+
+All servers that execute the modules must have the appropriate Pure Storage Python SDK installed on the host.
+
+## License
+
+[BSD-2-Clause](https://directory.fsf.org/wiki?title=License:FreeBSD)
+[GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0.en.html)
+
+## Author
+
+This collection was created in 2019 by [Simon Dodsley](@sdodsley) for, and on behalf of, the [Pure Storage Ansible Team](pure-ansible-team@purestorage.com)
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml
new file mode 100644
index 00000000..853fad74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml
@@ -0,0 +1,180 @@
+plugins:
+ become: {}
+ cache: {}
+ callback: {}
+ cliconf: {}
+ connection: {}
+ httpapi: {}
+ inventory: {}
+ lookup: {}
+ module:
+ purefb_alert:
+ description: Configure Pure Storage FlashBlade alert email settings
+ name: purefb_alert
+ namespace: ''
+ version_added: 1.0.0
+ purefb_banner:
+ description: Configure Pure Storage FlashBlade GUI and SSH MOTD message
+ name: purefb_banner
+ namespace: ''
+ version_added: 1.4.0
+ purefb_bladename:
+ description: Configure Pure Storage FlashBlade name
+ name: purefb_bladename
+ namespace: ''
+ version_added: 1.0.0
+ purefb_bucket:
+ description: Manage Object Store Buckets on a Pure Storage FlashBlade.
+ name: purefb_bucket
+ namespace: ''
+ version_added: 1.0.0
+ purefb_bucket_replica:
+ description: Manage bucket replica links between Pure Storage FlashBlades
+ name: purefb_bucket_replica
+ namespace: ''
+ version_added: 1.0.0
+ purefb_certgrp:
+    description: Manage FlashBlade Certificate Groups
+ name: purefb_certgrp
+ namespace: ''
+ version_added: 1.4.0
+ purefb_certs:
+    description: Manage FlashBlade SSL Certificates
+ name: purefb_certs
+ namespace: ''
+ version_added: 1.4.0
+ purefb_connect:
+ description: Manage replication connections between two FlashBlades
+ name: purefb_connect
+ namespace: ''
+ version_added: 1.0.0
+ purefb_dns:
+ description: Configure Pure Storage FlashBlade DNS settings
+ name: purefb_dns
+ namespace: ''
+ version_added: 1.0.0
+ purefb_ds:
+ description: Configure FlashBlade Directory Service
+ name: purefb_ds
+ namespace: ''
+ version_added: 1.0.0
+ purefb_dsrole:
+ description: Configure FlashBlade Management Directory Service Roles
+ name: purefb_dsrole
+ namespace: ''
+ version_added: 1.0.0
+ purefb_fs:
+    description: Manage filesystems on Pure Storage FlashBlade
+ name: purefb_fs
+ namespace: ''
+ version_added: 1.0.0
+ purefb_fs_replica:
+ description: Manage filesystem replica links between Pure Storage FlashBlades
+ name: purefb_fs_replica
+ namespace: ''
+ version_added: 1.0.0
+ purefb_info:
+ description: Collect information from Pure Storage FlashBlade
+ name: purefb_info
+ namespace: ''
+ version_added: 1.0.0
+ purefb_inventory:
+ description: Collect information from Pure Storage FlashBlade
+ name: purefb_inventory
+ namespace: ''
+ version_added: 1.0.0
+ purefb_lifecycle:
+ description: Manage FlashBlade object lifecycles
+ name: purefb_lifecycle
+ namespace: ''
+ version_added: 1.4.0
+ purefb_network:
+ description: Manage network interfaces in a Pure Storage FlashBlade
+ name: purefb_network
+ namespace: ''
+ version_added: 1.0.0
+ purefb_ntp:
+ description: Configure Pure Storage FlashBlade NTP settings
+ name: purefb_ntp
+ namespace: ''
+ version_added: 1.0.0
+ purefb_phonehome:
+ description: Enable or Disable Pure Storage FlashBlade Phone Home
+ name: purefb_phonehome
+ namespace: ''
+ version_added: 1.0.0
+ purefb_policy:
+ description: Manage FlashBlade policies
+ name: purefb_policy
+ namespace: ''
+ version_added: 1.0.0
+ purefb_proxy:
+ description: Configure FlashBlade phonehome HTTPs proxy settings
+ name: purefb_proxy
+ namespace: ''
+ version_added: 1.0.0
+ purefb_ra:
+ description: Enable or Disable Pure Storage FlashBlade Remote Assist
+ name: purefb_ra
+ namespace: ''
+ version_added: 1.0.0
+ purefb_remote_cred:
+ description: Create, modify and delete FlashBlade object store remote credentials
+ name: purefb_remote_cred
+ namespace: ''
+ version_added: 1.0.0
+ purefb_s3acc:
+ description: Create or delete FlashBlade Object Store accounts
+ name: purefb_s3acc
+ namespace: ''
+ version_added: 1.0.0
+ purefb_s3user:
+ description: Create or delete FlashBlade Object Store account users
+ name: purefb_s3user
+ namespace: ''
+ version_added: 1.0.0
+ purefb_smtp:
+ description: Configure SMTP for Pure Storage FlashBlade
+ name: purefb_smtp
+ namespace: ''
+ version_added: 1.0.0
+ purefb_snap:
+ description: Manage filesystem snapshots on Pure Storage FlashBlades
+ name: purefb_snap
+ namespace: ''
+ version_added: 1.0.0
+ purefb_snmp_agent:
+ description: Configure the FlashBlade SNMP Agent
+ name: purefb_snmp_agent
+ namespace: ''
+ version_added: 1.0.0
+ purefb_snmp_mgr:
+ description: Configure FlashBlade SNMP Managers
+ name: purefb_snmp_mgr
+ namespace: ''
+ version_added: 1.0.0
+ purefb_subnet:
+ description: Manage network subnets in a Pure Storage FlashBlade
+ name: purefb_subnet
+ namespace: ''
+ version_added: 1.0.0
+ purefb_syslog:
+ description: Configure Pure Storage FlashBlade syslog settings
+ name: purefb_syslog
+ namespace: ''
+ version_added: 1.4.0
+ purefb_target:
+ description: Manage remote S3-capable targets for a FlashBlade
+ name: purefb_target
+ namespace: ''
+ version_added: 1.0.0
+ purefb_user:
+ description: Modify FlashBlade local user account password
+ name: purefb_user
+ namespace: ''
+ version_added: 1.0.0
+ netconf: {}
+ shell: {}
+ strategy: {}
+ vars: {}
+version: 1.4.0
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml
new file mode 100644
index 00000000..d584eb48
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml
@@ -0,0 +1,101 @@
+ancestor: null
+releases:
+ 1.3.0:
+ changes:
+ bugfixes:
+ - purefb_bucket - Add warning message if ``state`` is ``absent`` without ``eradicate:``
+ - purefb_fs - Add graceful exist when ``state`` is ``absent`` and filesystem
+ not eradicated
+ - purefb_fs - Add warning message if ``state`` is ``absent`` without ``eradicate``
+ major_changes:
+ - purefb_alert - manage alert email settings on a FlashBlade
+ - purefb_bladename - manage FlashBlade name
+ - purefb_bucket_replica - manage bucket replica links on a FlashBlade
+ - purefb_connect - manage connections between FlashBlades
+ - purefb_dns - manage DNS settings on a FlashBlade
+ - purefb_fs_replica - manage filesystem replica links on a FlashBlade
+ - purefb_inventory - get information about the hardware inventory of a FlashBlade
+ - purefb_ntp - manage the NTP settings for a FlashBlade
+ - purefb_phonehome - manage the phone home settings for a FlashBlade
+ - purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+ - purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+ - purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+ - purefb_snmp_agent - modify the FlashBlade SNMP Agent
+ - purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+ - purefb_target - manage remote S3-capable targets for a FlashBlade
+ - purefb_user - manage local ``pureuser`` account password on a FlashBlade
+ minor_changes:
+ - purefb_bucket - Versioning support added
+ - purefb_info - new options added for information collection
+ - purefb_network - Add replication service type
+ - purefb_s3user - Limit ``access_key`` recreation to 3 times
+ - purefb_s3user - return dict changed from ``ansible_facts`` to ``s3user_info``
+ release_summary: '| Release Date: 2020-08-08
+
+      | This changelog describes all changes made to the modules and plugins included
+ in this collection since Ansible 2.9.0
+
+'
+ fragments:
+ - v1.3.0_summary.yaml
+ release_date: '2020-08-06'
+ 1.4.0:
+ changes:
+ bugfixes:
+ - purefa_policy - Resolve multiple issues related to incorrect use of timezones
+ - purefb_connect - Ensure changing encryption status on array connection is
+ performed correctly
+ - purefb_connect - Fix breaking change created in purity_fb SDK 1.9.2 for deletion
+ of array connections
+ - purefb_connect - Hide target array API token
+ - purefb_ds - Ensure updating directory service configurations completes correctly
+ - purefb_info - Fix issue getting array info when encrypted connection exists
+ minor_changes:
+ - purefb_banner - Module to manage the GUI and SSH login message
+ - purefb_certgrp - Module to manage FlashBlade Certificate Groups
+ - purefb_certs - Module to create and delete SSL certificates
+      - purefb_connect - Support idempotency when existing connection is incoming
+ - purefb_fs - Add new options for filesystem control (https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/pull/81)
+ - purefb_fs - Default filesystem size on creation changes from 32G to ``unlimited``
+ - purefb_fs - Fix error in deletion and eradication of filesystem
+ - purefb_fs_replica - Remove condition to attach/detach policies on unhealthy
+ replica-link
+ - purefb_info - Add support to list filesystem policies
+ - purefb_lifecycle - Module to manage FlashBlade Bucket Lifecycle Rules
+ - purefb_s3user - Add support for imported user access keys
+ - purefb_syslog - Module to manage syslog server configuration
+ fragments:
+ - 101_fix_policy_and_timezone_error.yaml
+ - 76_default_fs_size.yaml
+ - 77_filesystem_policies_info.yaml
+ - 78_update_filesystem_replica_link.yaml
+ - 79_hide_connect_api.yaml
+ - 80_support_reverse_replica_link.yaml
+ - 81_purefb_fs_new_options.yaml
+ - 83_add_certgrp.yml
+ - 84_add_cert.yaml
+ - 85_add_banner.yaml
+ - 86_add_syslog.yaml
+ - 88_add_lifecycle.yml
+ - 90_delete_conn_fix.yaml
+ - 90_imported_keys.yaml
+ - 92_fix_ds_update.yaml
+ - 96_fix_update_connection.yaml
+ - 97_fix_encrpyted_array_connection_info.yaml
+ modules:
+ - description: Configure Pure Storage FlashBlade GUI and SSH MOTD message
+ name: purefb_banner
+ namespace: ''
+    - description: Manage FlashBlade Certificate Groups
+ name: purefb_certgrp
+ namespace: ''
+    - description: Manage FlashBlade SSL Certificates
+ name: purefb_certs
+ namespace: ''
+ - description: Manage FlashBlade object lifecycles
+ name: purefb_lifecycle
+ namespace: ''
+ - description: Configure Pure Storage FlashBlade syslog settings
+ name: purefb_syslog
+ namespace: ''
+ release_date: '2020-10-14'
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/config.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/config.yaml
new file mode 100644
index 00000000..c4e01b38
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/config.yaml
@@ -0,0 +1,31 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: true
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: Purestorage.Flashblade
+trivial_section_name: trivial
+use_fqcn: true
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml
new file mode 100644
index 00000000..e6c1ea64
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_policy - Resolve multiple issues related to incorrect use of timezones
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml
new file mode 100644
index 00000000..b899c31f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_fs - Default filesystem size on creation changes from 32G to ``unlimited``
+ - purefb_fs - Fix error in deletion and eradication of filesystem
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml
new file mode 100644
index 00000000..c4d84070
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Add support to list filesystem policies
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml
new file mode 100644
index 00000000..09bc6c3a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_fs_replica - Remove condition to attach/detach policies on unhealthy replica-link
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml
new file mode 100644
index 00000000..d6dcb9fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_connect - Hide target array API token
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml
new file mode 100644
index 00000000..42d8f1fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_connect - Support idempotency when existing connection is incoming
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml
new file mode 100644
index 00000000..a6eb75c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_fs - Add new options for filesystem control (https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/pull/81)
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml
new file mode 100644
index 00000000..4f87b305
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_certgrp - Module to manage FlashBlade Certificate Groups
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml
new file mode 100644
index 00000000..1470d302
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_certs - Module to create and delete SSL certificates
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml
new file mode 100644
index 00000000..279173cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_banner - Module to manage the GUI and SSH login message
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml
new file mode 100644
index 00000000..0cde34ca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_syslog - Module to manage syslog server configuration
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml
new file mode 100644
index 00000000..3caa436a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_lifecycle - Module to manage FlashBlade Bucket Lifecycle Rules
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml
new file mode 100644
index 00000000..93876fed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_connect - Fix breaking change created in purity_fb SDK 1.9.2 for deletion of array connections
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml
new file mode 100644
index 00000000..af012f74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_s3user - Add support for imported user access keys
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml
new file mode 100644
index 00000000..c4d52cab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_ds - Ensure updating directory service configurations completes correctly
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml
new file mode 100644
index 00000000..87bfbeee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_connect - Ensure changing encryption status on array connection is performed correctly
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml
new file mode 100644
index 00000000..5019c18e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_info - Fix issue getting array info when encrypted connection exists
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml
new file mode 100644
index 00000000..35cff95f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml
@@ -0,0 +1,33 @@
+release_summary: |
+ | Release Date: 2020-08-08
+ | This changelog describes all changes made to the modules and plugins included in this collection since Ansible 2.9.0
+
+major_changes:
+ - purefb_alert - manage alert email settings on a FlashBlade
+ - purefb_bladename - manage FlashBlade name
+ - purefb_bucket_replica - manage bucket replica links on a FlashBlade
+ - purefb_connect - manage connections between FlashBlades
+ - purefb_dns - manage DNS settings on a FlashBlade
+ - purefb_fs_replica - manage filesystem replica links on a FlashBlade
+ - purefb_inventory - get information about the hardware inventory of a FlashBlade
+ - purefb_ntp - manage the NTP settings for a FlashBlade
+ - purefb_phonehome - manage the phone home settings for a FlashBlade
+ - purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+ - purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+ - purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+ - purefb_snmp_agent - modify the FlashBlade SNMP Agent
+ - purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+ - purefb_target - manage remote S3-capable targets for a FlashBlade
+ - purefb_user - manage local ``pureuser`` account password on a FlashBlade
+
+minor_changes:
+ - purefb_s3user - return dict changed from ``ansible_facts`` to ``s3user_info``
+ - purefb_s3user - Limit ``access_key`` recreation to 3 times
+ - purefb_info - new options added for information collection
+ - purefb_bucket - Versioning support added
+ - purefb_network - Add replication service type
+
+bugfixes:
+ - purefb_fs - Add graceful exist when ``state`` is ``absent`` and filesystem not eradicated
+ - purefb_fs - Add warning message if ``state`` is ``absent`` without ``eradicate``
+ - purefb_bucket - Add warning message if ``state`` is ``absent`` without ``eradicate:``
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/.keep b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/files/.keep b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/files/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/files/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/roles/.keep b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/roles/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/roles/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/tasks/.keep b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/tasks/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/tasks/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/templates/.keep b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/templates/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/templates/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/vars/.keep b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/vars/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/playbooks/vars/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py
new file mode 100644
index 00000000..53ae5f81
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley <simon@purestorage.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard Pure Storage documentation fragment
+ DOCUMENTATION = r'''
+options:
+ - See separate platform section for more details
+requirements:
+ - See separate platform section for more details
+notes:
+ - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade
+'''
+
+ # Documentation fragment for FlashBlade
+ FB = r'''
+options:
+ fb_url:
+ description:
+ - FlashBlade management IP address or Hostname.
+ type: str
+ api_token:
+ description:
+ - FlashBlade API token for admin privileged user.
+ type: str
+notes:
+ - This module requires the C(purity_fb) Python library
+ - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables
+ if I(fb_url) and I(api_token) arguments are not passed to the module directly
+requirements:
+ - python >= 2.7
+ - purity_fb >= 1.9
+ - netaddr
+ - pytz
+'''
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
new file mode 100644
index 00000000..296146b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PurityFb
+except ImportError:
+ HAS_PURITY_FB = False
+
+from os import environ
+import platform
+
+VERSION = '1.3'
+USER_AGENT_BASE = 'Ansible'
+API_AGENT_VERSION = '1.5'
+
+
+def get_blade(module):
+ """Return System Object or Fail"""
+ user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
+ 'base': USER_AGENT_BASE,
+ 'class': __name__,
+ 'version': VERSION,
+ 'platform': platform.platform()
+ }
+ blade_name = module.params['fb_url']
+ api = module.params['api_token']
+
+ if HAS_PURITY_FB:
+ if blade_name and api:
+ blade = PurityFb(blade_name)
+ blade.disable_verify_ssl()
+ try:
+ blade.login(api)
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except Exception:
+ module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
+ elif environ.get('PUREFB_URL') and environ.get('PUREFB_API'):
+ blade = PurityFb(environ.get('PUREFB_URL'))
+ blade.disable_verify_ssl()
+ try:
+ blade.login(environ.get('PUREFB_API'))
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except Exception:
+ module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
+ else:
+ module.fail_json(msg="You must set PUREFB_URL and PUREFB_API environment variables "
+ "or the fb_url and api_token module arguments")
+ else:
+ module.fail_json(msg="purity_fb SDK not installed.")
+ return blade
+
+
+def purefb_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return dict(
+ fb_url=dict(),
+ api_token=dict(no_log=True),
+ )
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py
new file mode 100644
index 00000000..ea70cd63
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_alert
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade alert email settings
+description:
+- Configure alert email configuration for Pure Storage FlashBlades.
+- Add or delete an individual email address from the existing
+  list of alert watchers.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ type: str
+ description:
+ - Create or delete alert email
+ default: present
+ choices: [ absent, present ]
+ address:
+ type: str
+ description:
+ - Email address (valid format required)
+ required: true
+ enabled:
+ type: bool
+ default: true
+ description:
+ - Set specified email address to be enabled or disabled
+ severity:
+ type: str
+ description:
+ - The minimum severity that an alert must have in order for
+ emails to be sent to the array's alert watchers
+ default: info
+ choices: [ info, warning, critical ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Add new email recipient and enable, or enable existing email
+ purefb_alert:
+ address: "user@domain.com"
+ enabled: true
+ state: present
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Delete existing email recipient
+ purefb_alert:
+ state: absent
+ address: "user@domain.com"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import AlertWatcher
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.9'
+
+
+def create_alert(module, blade):
+ """Create Alert Email"""
+ changed = True
+ if not module.check_mode:
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION in api_version:
+ watcher_settings = AlertWatcher(minimum_notification_severity=module.params['severity'])
+ try:
+ blade.alert_watchers.create_alert_watchers(names=[module.params['address']],
+ watcher_settings=watcher_settings)
+ except Exception:
+ module.fail_json(msg='Failed to create alert email: {0}'.format(module.params['address']))
+ else:
+ try:
+ blade.alert_watchers.create_alert_watchers(names=[module.params['address']])
+ except Exception:
+ module.fail_json(msg='Failed to create alert email: {0}'.format(module.params['address']))
+ if not module.params['enabled']:
+ watcher_settings = AlertWatcher(enabled=module.params['enabled'])
+ try:
+ blade.alert_watchers.update_alert_watchers(names=[module.params['address']], watcher_settings=watcher_settings)
+ except Exception:
+ module.fail_json(msg='Failed to disable during create alert email: {0}'.format(module.params['address']))
+ module.exit_json(changed=changed)
+
+
+def update_alert(module, blade):
+ """Update alert Watcher"""
+ changed = True
+ if not module.check_mode:
+ api_version = blade.api_version.list_versions().versions
+ mod_alert = False
+ try:
+ alert = blade.alert_watchers.list_alert_watchers(names=[module.params['address']])
+ except Exception:
+ module.fail_json(msg='Failed to get information for alert email: {0}'.format(module.params['address']))
+ current_state = {'enabled': alert.items[0].enabled,
+ 'severity': alert.items[0].minimum_notification_severity
+ }
+ if current_state['enabled'] != module.params['enabled']:
+ mod_alert = True
+ if MIN_REQUIRED_API_VERSION in api_version:
+ if current_state['severity'] != module.params['severity']:
+ mod_alert = True
+ if mod_alert:
+ if MIN_REQUIRED_API_VERSION in api_version:
+ watcher_settings = AlertWatcher(enabled=module.params['enabled'],
+ minimum_notification_severity=module.params['severity'])
+ else:
+ watcher_settings = AlertWatcher(enabled=module.params['enabled'])
+ try:
+ blade.alert_watchers.update_alert_watchers(names=[module.params['address']], watcher_settings=watcher_settings)
+ except Exception:
+ module.fail_json(msg='Failed to update alert email: {0}'.format(module.params['address']))
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def delete_alert(module, blade):
+ """Delete Alert Email"""
+ changed = False
+ if not module.check_mode:
+ try:
+ blade.alert_watchers.delete_alert_watchers(names=[module.params['address']])
+ except Exception:
+ module.fail_json(msg='Failed to delete alert email: {0}'.format(module.params['address']))
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ address=dict(type='str', required=True),
+ enabled=dict(type='bool', default=True),
+ severity=dict(type='str', default='info', choices=['info', 'warning', 'critical']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg='purity_fb SDK is required for this module')
+
+ pattern = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$")
+ if not pattern.match(module.params['address']):
+ module.fail_json(msg='Valid email address not provided.')
+
+ blade = get_blade(module)
+
+ exists = False
+ try:
+ emails = blade.alert_watchers.list_alert_watchers()
+ except Exception:
+ module.fail_json(msg='Failed to get exisitng email list')
+ for email in range(0, len(emails.items)):
+ if emails.items[email].name == module.params['address']:
+ exists = True
+ break
+ if module.params['state'] == 'present' and not exists:
+ create_alert(module, blade)
+ elif module.params['state'] == 'present' and exists:
+ update_alert(module, blade)
+ elif module.params['state'] == 'absent' and exists:
+ delete_alert(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py
new file mode 100644
index 00000000..54b64e7c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_banner
+version_added: '1.4.0'
+short_description: Configure Pure Storage FlashBlade GUI and SSH MOTD message
+description:
+- Configure MOTD for Pure Storage FlashBlades.
+- This will be shown during an SSH or GUI login to the system.
+- Multiple line messages can be achieved using \\n.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+    - Set or delete the MOTD
+ default: present
+ type: str
+ choices: [ present, absent ]
+ banner:
+ description:
+ - Banner text, or MOTD, to use
+ type: str
+ default: "Welcome to the machine..."
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Set new banner text
+ purefb_banner:
+ banner: "Banner over\ntwo lines"
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete banner text
+ purefb_banner:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PureArray
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.10'
+
+
+def set_banner(module, blade):
+ """Set MOTD banner text"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if not module.params['banner']:
+ module.fail_json(msg='Invalid MOTD banner given')
+ blade_settings = PureArray(banner=module.params['banner'])
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(msg='Failed to set MOTD banner text')
+
+ module.exit_json(changed=changed)
+
+
+def delete_banner(module, blade):
+ """Delete MOTD banner text"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade_settings = PureArray(banner='')
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(msg='Failed to delete current MOTD banner text')
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ banner=dict(type='str', default="Welcome to the machine..."),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ ))
+
+ required_if = [('state', 'present', ['banner'])]
+
+ module = AnsibleModule(argument_spec,
+ required_if=required_if,
+ supports_check_mode=True)
+ if not HAS_PURITY_FB:
+ module.fail_json(msg='purity_fb sdk is required for this module')
+
+ state = module.params['state']
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+ current_banner = blade.login_banner.list_login_banner().login_banner
+
+# set banner if empty value or value differs
+ if state == 'present' and (not current_banner or current_banner != module.params['banner']):
+ set_banner(module, blade)
+ # clear banner if it has a value
+ elif state == 'absent' and current_banner:
+ delete_banner(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py
new file mode 100644
index 00000000..19d3fdce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_bladename
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade name
+description:
+- Configure name of Pure Storage FlashBlades.
+- Ideal for Day 0 initial configuration.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set the FlashBlade name
+ type: str
+ default: present
+ choices: [ present ]
+ name:
+ description:
+ - Name of the FlashBlade. Must conform to correct naming schema.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Set new FlashBlade name
+ purefb_bladename:
+ name: new-flashblade-name
+ state: present
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PureArray
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+def update_name(module, blade):
+ """Change aray name"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade_settings = PureArray(name=module.params['name'])
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(msg='Failed to change array name to {0}'.format(module.params['name']))
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present']),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg='purity_fb sdk is required for this module')
+
+ blade = get_blade(module)
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,54}[a-zA-Z0-9])?$")
+ if not pattern.match(module.params['name']):
+ module.fail_json(msg='FlashBlade name {0} does not conform to array name rules'.format(module.params['name']))
+ if module.params['name'] != blade.arrays.list_arrays().items[0].name:
+ update_name(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
new file mode 100644
index 00000000..cb1a37c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
@@ -0,0 +1,290 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: purefb_bucket
+version_added: "1.0.0"
+short_description: Manage Object Store Buckets on a Pure Storage FlashBlade.
+description:
+  - This module manages object store (s3) buckets on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Bucket Name.
+ required: true
+ type: str
+ account:
+ description:
+ - Object Store Account for Bucket.
+ required: true
+ type: str
+ versioning:
+ description:
+ - State of S3 bucket versioning
+ required: false
+ default: absent
+ type: str
+ choices: [ "enabled", "suspended", "absent" ]
+ state:
+ description:
+ - Create, delete or modifies a bucket.
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ eradicate:
+ description:
+ - Define whether to eradicate the bucket on delete or leave in trash.
+ required: false
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = '''
+- name: Create new bucket named foo in account bar
+ purefb_bucket:
+ name: foo
+ account: bar
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete bucket named foo in account bar
+ purefb_bucket:
+ name: foo
+ account: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Change bucket versioning state
+ purefb_bucket:
+ name: foo
+ account: bar
+ versioning: enabled
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Recover deleted bucket named foo in account bar
+ purefb_bucket:
+ name: foo
+ account: bar
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Eradicate bucket named foo in account bar
+ purefb_bucket:
+ name: foo
+ account: bar
+ state: absent
+ eradicate: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+'''
+
+RETURN = '''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Bucket, Reference, BucketPatch, BucketPost
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.5'
+VERSIONING_VERSION = '1.9'
+
+
+def get_s3acc(module, blade):
+ """Return Object Store Account or None"""
+ s3acc = None
+ accts = blade.object_store_accounts.list_object_store_accounts()
+ for acct in range(0, len(accts.items)):
+ if accts.items[acct].name == module.params['account']:
+ s3acc = accts.items[acct]
+ return s3acc
+
+
+def get_bucket(module, blade):
+ """Return Bucket or None"""
+ s3bucket = None
+ buckets = blade.buckets.list_buckets()
+ for bucket in range(0, len(buckets.items)):
+ if buckets.items[bucket].name == module.params['name']:
+ s3bucket = buckets.items[bucket]
+ return s3bucket
+
+
+def create_bucket(module, blade):
+ """Create bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = blade.api_version.list_versions().versions
+ if VERSIONING_VERSION in api_version:
+ attr = BucketPost()
+ attr.account = Reference(name=module.params['account'])
+ blade.buckets.create_buckets(names=[module.params['name']], bucket=attr)
+ else:
+ attr = Bucket()
+ attr.account = Reference(name=module.params['account'])
+ blade.buckets.create_buckets(names=[module.params['name']], account=attr)
+ if module.params['versioning'] != 'absent' and VERSIONING_VERSION in api_version:
+ try:
+ blade.buckets.update_buckets(names=[module.params['name']],
+ bucket=BucketPatch(versioning=module.params['versioning']))
+ except Exception:
+ module.fail_json(msg='Object Store Bucket {0} Created but versioning state failed'.format(module.params['name']))
+ except Exception:
+ module.fail_json(msg='Object Store Bucket {0}: Creation failed'.format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def delete_bucket(module, blade):
+ """ Delete Bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = blade.api_version.list_versions().versions
+ if VERSIONING_VERSION in api_version:
+ blade.buckets.update_buckets(names=[module.params['name']],
+ bucket=BucketPatch(destroyed=True))
+ else:
+ blade.buckets.update_buckets(names=[module.params['name']],
+ destroyed=Bucket(destroyed=True))
+ if module.params['eradicate']:
+ try:
+ blade.buckets.delete_buckets(names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg='Object Store Bucket {0}: Eradication failed'.format(module.params['name']))
+ except Exception:
+ module.fail_json(msg='Object Store Bucket {0}: Deletion failed'.format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def recover_bucket(module, blade):
+ """ Recover Bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = blade.api_version.list_versions().versions
+ if VERSIONING_VERSION in api_version:
+ blade.buckets.update_buckets(names=[module.params['name']],
+ bucket=BucketPatch(destroyed=False))
+ else:
+ blade.buckets.update_buckets(names=[module.params['name']],
+ destroyed=Bucket(destroyed=False))
+ except Exception:
+ module.fail_json(msg='Object Store Bucket {0}: Recovery failed'.format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def update_bucket(module, blade, bucket):
+ """ Update Bucket """
+ changed = True
+ if not module.check_mode:
+ changed = False
+ api_version = blade.api_version.list_versions().versions
+ if VERSIONING_VERSION in api_version:
+ module.warn('{0}'.format(bucket.versioning))
+ if bucket.versioning != 'none':
+ if module.params['versioning'] == 'absent':
+ versioning = 'suspended'
+ else:
+ versioning = module.params['versioning']
+ if bucket.versioning != versioning:
+ try:
+ blade.buckets.update_buckets(names=[module.params['name']],
+ bucket=BucketPatch(versioning=versioning))
+ changed = True
+ except Exception:
+ module.fail_json(msg='Object Store Bucket {0}: Versioning change failed'.format(module.params['name']))
+ elif module.params['versioning'] != 'absent':
+ try:
+ blade.buckets.update_buckets(names=[module.params['name']],
+ bucket=BucketPatch(versioning=module.params['versioning']))
+ changed = True
+ except Exception:
+ module.fail_json(msg='Object Store Bucket {0}: Versioning change failed'.format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def eradicate_bucket(module, blade):
+ """ Eradicate Bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.buckets.delete_buckets(names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg='Object Store Bucket {0}: Eradication failed'.format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ account=dict(required=True),
+ eradicate=dict(default='false', type='bool'),
+ versioning=dict(default='absent', choices=['enabled', 'suspended', 'absent']),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg='purity_fb sdk is required for this module')
+
+ state = module.params['state']
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+ bucket = get_bucket(module, blade)
+ if not get_s3acc(module, blade):
+ module.fail_json(msg="Object Store Account {0} does not exist.".format(module.params['account']))
+
+ if module.params['eradicate'] and state == 'present':
+ module.warn('Eradicate flag ignored without state=absent')
+
+ if state == 'present' and not bucket:
+ create_bucket(module, blade)
+ elif state == 'present' and bucket and bucket.destroyed:
+ recover_bucket(module, blade)
+ elif state == 'absent' and bucket and not bucket.destroyed:
+ delete_bucket(module, blade)
+ elif state == 'present' and bucket:
+ update_bucket(module, blade, bucket)
+ elif state == 'absent' and bucket and bucket.destroyed and module.params['eradicate']:
+ eradicate_bucket(module, blade)
+ elif state == 'absent' and not bucket:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
new file mode 100644
index 00000000..d094e039
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: purefb_bucket_replica
+version_added: '1.0.0'
+short_description: Manage bucket replica links between Pure Storage FlashBlades
+description:
+ - This module manages bucket replica links between Pure Storage FlashBlades.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Local Bucket Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or modifies a bucket replica link
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ target:
+ description:
+ - Remote array or target name to create replica on.
+ required: false
+ type: str
+ target_bucket:
+ description:
+      - Name of the target bucket.
+ - If not supplied, will default to I(name).
+ type: str
+ required: false
+ paused:
+ description:
+ - State of the bucket replica link
+ type: bool
+ default: false
+ credential:
+ description:
+ - Name of remote credential name to use.
+ required: false
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = '''
+- name: Create new bucket replica from foo to bar on arrayB
+ purefb_bucket_replica:
+ name: foo
+ target: arrayB
+ target_bucket: bar
+    credential: cred_1
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Pause existing bucket replica link
+ purefb_bucket_replica:
+ name: foo
+ paused: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete bucket replica link foo
+  purefb_bucket_replica:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641'''
+
+RETURN = '''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import BucketReplicaLink, ObjectStoreRemoteCredentials
+except ImportError:
+ HAS_PURITY_FB = False
+
+MIN_REQUIRED_API_VERSION = '1.9'
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+def get_local_bucket(module, blade):
+ """Return Bucket or None"""
+ try:
+ res = blade.buckets.list_buckets(names=[module.params['name']])
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_remote_cred(module, blade, target):
+ """Return Remote Credential or None"""
+ try:
+ res = blade.object_store_remote_credentials.list_object_store_remote_credentials(names=[target + '/' + module.params['credential']])
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_local_rl(module, blade):
+ """Return Bucket Replica Link or None"""
+ try:
+ res = blade.bucket_replica_links.list_bucket_replica_links(local_bucket_names=[module.params['name']])
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_connected(module, blade):
+ connected_blades = blade.array_connections.list_array_connections()
+ for target in range(0, len(connected_blades.items)):
+ if (connected_blades.items[target].remote.name == module.params['target'] or
+ connected_blades.items[target].management_address == module.params['target']) and \
+ connected_blades.items[target].status in ["connected", "connecting", "partially_connected"]:
+ return connected_blades.items[target].remote.name
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].name == module.params['target'] and \
+ connected_targets.items[target].status in ["connected", "connecting", "partially_connected"]:
+ return connected_targets.items[target].name
+ return None
+
+
+def create_rl(module, blade, remote_cred):
+ """Create Bucket Replica Link"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if not module.params['target_bucket']:
+ module.params['target_bucket'] = module.params['name']
+ else:
+ module.params['target_bucket'] = module.params['target_bucket'].lower()
+ blade.bucket_replica_links.create_bucket_replica_links(
+ local_bucket_names=[module.params['name']],
+ remote_bucket_names=[module.params['target_bucket']],
+ remote_credentials_names=[remote_cred.name],
+ bucket_replica_link=BucketReplicaLink(paused=module.params['paused']))
+ except Exception:
+ module.fail_json(msg="Failed to create bucket replica link {0}.".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def update_rl_policy(module, blade, local_replica_link):
+ """Update Bucket Replica Link"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ new_cred = local_replica_link.remote.name + '/' + module.params['credential']
+ if local_replica_link.paused != module.params['paused'] or local_replica_link.remote_credentials.name != new_cred:
+ try:
+ module.warn('{0}'.format(local_replica_link))
+ blade.bucket_replica_links.update_bucket_replica_links(
+ local_bucket_names=[module.params['name']],
+ remote_bucket_names=[local_replica_link.remote_bucket.name],
+ remote_names=[local_replica_link.remote.name],
+ bucket_replica_link=BucketReplicaLink(paused=module.params['paused'],
+ remote_credentials=ObjectStoreRemoteCredentials(name=new_cred)))
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to update bucket replica link {0}.".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def delete_rl_policy(module, blade, local_replica_link):
+ """ Delete Bucket Replica Link"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.bucket_replica_links.delete_bucket_replica_links(
+ remote_names=[local_replica_link.remote.name],
+ local_bucket_names=[module.params['name']],
+ remote_bucket_names=[local_replica_link.remote_bucket.name])
+ except Exception:
+ module.fail_json(msg="Failed to delete bucket replica link {0}.".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ target=dict(type='str'),
+ target_bucket=dict(type='str'),
+ paused=dict(type='bool', default=False),
+ credential=dict(type='str'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg='purity_fb sdk is required for this module')
+
+ state = module.params['state']
+ module.params['name'] = module.params['name'].lower()
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(msg='Minimum FlashBlade REST version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+ local_bucket = get_local_bucket(module, blade)
+ local_replica_link = get_local_rl(module, blade)
+ target = get_connected(module, blade)
+
+ if not target:
+ module.fail_json(msg='Selected target {0} is not connected.'.format(module.params['target']))
+
+ if local_replica_link and not module.params['credential']:
+ module.params['credential'] = local_replica_link.remote_credentials.name.split('/')[1]
+ remote_cred = get_remote_cred(module, blade, target)
+ if not remote_cred:
+ module.fail_json(msg='Selected remote credential {0} does not exist for target {1}.'.format(module.params['credential'],
+ module.params['target']))
+
+ if not local_bucket:
+ module.fail_json(msg='Selected local bucket {0} does not exist.'.format(module.params['name']))
+
+ if local_replica_link:
+ if local_replica_link.status == 'unhealthy':
+ module.fail_json(msg='Replica Link unhealthy - please check target')
+
+ if state == 'present' and not local_replica_link:
+ create_rl(module, blade, remote_cred)
+ elif state == 'present' and local_replica_link:
+ update_rl_policy(module, blade, local_replica_link)
+ elif state == 'absent' and local_replica_link:
+ delete_rl_policy(module, blade, local_replica_link)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py
new file mode 100644
index 00000000..54a638f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_certgrp
+version_added: '1.4.0'
+short_description: Manage FlashBlade Certificate Groups
+description:
+- Manage certificate groups for FlashBlades
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+    - Create or delete certificate group
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - Name of the certificate group
+ type: str
+ certificates:
+ description:
+    - List of certificates to add to a policy on creation
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Create a certificate group
+ purefb_certgrp:
+ name: test_grp
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a certificate group and add existing certificates
+ purefb_certgrp:
+ name: test_grp
+    certificates:
+ - cert1
+ - cert2
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a certificate from a group
+ purefb_certgrp:
+ name: test_grp
+ certificates:
+ - cert2
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a certificate group
+ purefb_certgrp:
+ name: test_grp
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.9'
+
+
+def delete_certgrp(module, blade):
+ """Delete certifcate group"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.delete_certificate_groups(names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg="Failed to delete certifcate group {0}.".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def create_certgrp(module, blade):
+ """Create certifcate group"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.create_certificate_groups(names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg="Failed to create certificate group {0}.".format(module.params['name']))
+ if module.params['certificates']:
+ try:
+ blade.certificate_groups.add_certificate_group_certificates(certificate_names=module.params['certificates'],
+ certificate_group_names=[module.params['name']])
+ except Exception:
+ blade.certificate_groups.delete_certificate_groups(names=[module.params['name']])
+ module.fail_json(msg="Failed to add certifcates {0}. "
+ "Please check they all exist".format(module.params['certificates']))
+ module.exit_json(changed=changed)
+
+
+def update_certgrp(module, blade):
+ """Update certificate group"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ try:
+ certs = blade.certificate_groups.list_certificate_group_certificates(certificate_group_names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg="Failed to get certifates list for group {0}.".format(module.params['name']))
+ if not certs:
+ if module.params['state'] == 'present':
+ try:
+ blade.certificate_groups.add_certificate_group_certificates(certificate_names=module.params['certificates'],
+ certificate_group_names=[module.params['name']])
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to add certifcates {0}. "
+ "Please check they all exist".format(module.params['certificates']))
+ else:
+ current = []
+ for cert in range(0, len(certs.items)):
+ current.append(certs.items[cert].member.name)
+ for new_cert in range(0, len(module.params['certificates'])):
+ certificate = module.params['certificates'][new_cert]
+ if certificate in current:
+ if module.params['state'] == 'absent':
+ try:
+ blade.certificate_groups.remove_certificate_group_certificates(certificate_names=[certificate],
+ certificate_group_names=[module.params['name']])
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to delete certifcate {0} from group {1}.".format(certificate, module.params['name']))
+ else:
+ if module.params['state'] == 'present':
+ try:
+ blade.certificate_groups.add_certificate_group_certificates(certificate_names=[certificate],
+ certificate_group_names=[module.params['name']])
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to add certifcate {0} to group {1}".format(certificate, module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str'),
+ certificates=dict(type='list', elements='str'),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(msg='Minimum FlashBlade REST version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+ try:
+ certgrp = blade.certificate_groups.list_certificate_groups(names=[module.params['name']]).items[0]
+ except Exception:
+ certgrp = None
+
+ if certgrp and state == 'present' and module.params['certificates']:
+ update_certgrp(module, blade)
+ elif state == 'present' and not certgrp:
+ create_certgrp(module, blade)
+ elif state == 'absent' and certgrp:
+ if module.params['certificates']:
+ update_certgrp(module, blade)
+ else:
+ delete_certgrp(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py
new file mode 100644
index 00000000..df270ee4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_certs
+version_added: '1.4.0'
+short_description: Manage FlashBlade SSL Certificates
+description:
+- Manage SSL certificates for FlashBlades
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+    - Create or delete certificate
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - Name of the certificate
+ type: str
+ contents:
+ description:
+    - SSL certificate text
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Create an SSL certificate
+ purefb_certs:
+ name: test_cert
+    contents: "{{lookup('file', 'certificate_file_name') }}"
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete an SSL certificate
+ purefb_certs:
+ name: test_cert
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import CertificatePost
+except ImportError:
+ HAS_PURITYFB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.9'
+
+
+def delete_cert(module, blade):
+ """Delete certifcate"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificates.delete_certificates(names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg="Failed to delete certifcate {0}.".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def create_cert(module, blade):
+ """Create certifcate"""
+ changed = True
+ if not module.check_mode:
+ try:
+ body = CertificatePost(certificate=module.params['contents'], certificate_type='external')
+ blade.certificates.create_certificates(names=[module.params['name']], certificate=body)
+ except Exception:
+ module.fail_json(msg="Failed to create certificate {0}.".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str'),
+ contents=dict(type='str', no_log=True),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(msg='Minimum FlashBlade REST version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+ try:
+ cert = blade.certificates.list_certificates(names=[module.params['name']])
+ except Exception:
+ cert = None
+
+ if not cert and state == 'present':
+ create_cert(module, blade)
+ elif state == 'absent' and cert:
+ delete_cert(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
new file mode 100644
index 00000000..dcac30c3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_connect
+version_added: '1.0.0'
+short_description: Manage replication connections between two FlashBlades
+description:
+- Manage replication connections to specified remote FlashBlade system
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete replication connection
+ default: present
+ type: str
+ choices: [ absent, present ]
+ encrypted:
+ description:
+ - Define if replication connection is encrypted
+ type: bool
+ default: False
+ target_url:
+ description:
+ - Management IP address of target FlashBlade system
+ type: str
+ required: true
+ target_api:
+ description:
+ - API token for target FlashBlade system
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Create a connection to remote FlashBlade system
+ purefb_connect:
+ target_url: 10.10.10.20
+ target_api: 9c0b56bc-f941-f7a6-9f85-dcc3e9a8f7d6
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Delete connection to target FlashBlade system
+ purefb_connect:
+ state: absent
+ target_url: 10.10.10.20
+ target_api: 9c0b56bc-f941-f7a6-9f85-dcc3e9a8f7d6
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import PurityFb, ArrayConnection, ArrayConnectionPost
+except ImportError:
+ HAS_PURITYFB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.9'
+
+
+def _check_connected(module, blade):
+ connected_blades = blade.array_connections.list_array_connections()
+ for target in range(0, len(connected_blades.items)):
+ if connected_blades.items[target].management_address is None:
+ try:
+ remote_system = PurityFb(module.params['target_url'])
+ remote_system.login(module.params['target_api'])
+ remote_array = remote_system.arrays.list_arrays().items[0].name
+ if connected_blades.items[target].remote.name == remote_array:
+ return connected_blades.items[target]
+ except Exception:
+ module.fail_json(msg="Failed to connect to remote array {0}.".format(module.params['target_url']))
+ if connected_blades.items[target].management_address == module.params['target_url'] and \
+ connected_blades.items[target].status in ["connected", "connecting", "partially_connected"]:
+ return connected_blades.items[target]
+ return None
+
+
+def break_connection(module, blade, target_blade):
+ """Break connection between arrays"""
+ changed = True
+ if not module.check_mode:
+ source_blade = blade.arrays.list_arrays().items[0].name
+ try:
+ if target_blade.management_address is None:
+ module.fail_json(msg="Disconnect can only happen from the array that formed the connection")
+ blade.array_connections.delete_array_connections(remote_names=[target_blade.remote.name])
+ except Exception:
+ module.fail_json(msg="Failed to disconnect {0} from {1}.".format(target_blade.remote.name, source_blade))
+ module.exit_json(changed=changed)
+
+
+def create_connection(module, blade):
+ """Create connection between arrays"""
+ changed = True
+ if not module.check_mode:
+ remote_array = module.params['target_url']
+ try:
+ remote_system = PurityFb(module.params['target_url'])
+ remote_system.login(module.params['target_api'])
+ remote_array = remote_system.arrays.list_arrays().items[0].name
+ remote_conn_cnt = remote_system.array_connections.list_array_connections().pagination_info.total_item_count
+ # TODO: SD - Update with new max when fan-in/fan-out is enabled for FB
+ if remote_conn_cnt == 1:
+ module.fail_json(msg="Remote array {0} already connected to another array. Fan-In not supported".format(remote_array))
+ connection_key = remote_system.array_connections.create_array_connections_connection_keys().items[0].connection_key
+ remote_array = remote_system.arrays.list_arrays().items[0].name
+ connection_info = ArrayConnectionPost(management_address=module.params['target_url'],
+ encrypted=module.params['encrypted'],
+ connection_key=connection_key)
+ blade.array_connections.create_array_connections(array_connection=connection_info)
+ except Exception:
+ module.fail_json(msg="Failed to connect to remote array {0}.".format(remote_array))
+ module.exit_json(changed=changed)
+
+
+def update_connection(module, blade, target_blade):
+ """Update array connection - only encryption currently"""
+ changed = True
+ if not module.check_mode:
+ if target_blade.management_address is None:
+ module.fail_json(msg="Update can only happen from the array that formed the connection")
+ if module.params['encrypted'] != target_blade.encrypted:
+ if module.params['encrypted'] and blade.file_system_replica_links.list_file_system_replica_links().pagination_info.total_item_count != 0:
+ module.fail_json(msg='Cannot turn array connection encryption on if file system replica links exist')
+ new_attr = ArrayConnection(encrypted=module.params['encrypted'])
+ try:
+ blade.array_connections.update_array_connections(remote_names=[target_blade.remote.name], array_connection=new_attr)
+ except Exception:
+ module.fail_json(msg='Failed to change encryption setting for array connection.')
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ encrypted=dict(type='bool', default=False),
+ target_url=dict(type='str', required=True),
+ target_api=dict(type='str', no_log=True),
+ ))
+
+ required_if = [('state', 'present', ['target_api'])]
+
+ module = AnsibleModule(argument_spec,
+ required_if=required_if,
+ supports_check_mode=True)
+
+ if not HAS_PURITYFB:
+ module.fail_json(msg='purity_fb sdk is required for this module')
+
+ state = module.params['state']
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(msg='Minimum FlashBlade REST version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+ target_blade = _check_connected(module, blade)
+ if state == 'present' and not target_blade:
+ # TODO: SD - Update with new max when fan-out is supported
+ if blade.array_connections.list_array_connections().pagination_info.total_item_count == 1:
+ module.fail_json(msg='Source FlashBlade already connected to another array. Fan-Out not supported')
+ create_connection(module, blade)
+ elif state == 'present' and target_blade:
+ update_connection(module, blade, target_blade)
+ elif state == 'absent' and target_blade:
+ break_connection(module, blade, target_blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py
new file mode 100644
index 00000000..304d7ff9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_dns
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade DNS settings
+description:
+- Set or erase DNS configuration for Pure Storage FlashBlades.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete DNS servers configuration
+ type: str
+ default: present
+ choices: [ absent, present ]
+ domain:
+ description:
+      - Domain suffix to be appended when performing DNS lookups.
+ type: str
+ nameservers:
+ description:
+ - List of up to 3 unique DNS server IP addresses. These can be
+        IPv4 or IPv6 - no validation of the addresses is performed.
+ type: list
+ elements: str
+ search:
+ description:
+ - Ordered list of domain names to search
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Delete existing DNS settings
+ purefb_dns:
+ state: absent
+    fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Set DNS settings
+ purefb_dns:
+ domain: purestorage.com
+ nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+ search:
+ - purestorage.com
+ - acme.com
+    fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Dns
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+def remove(duplicate):
+ final_list = []
+ for num in duplicate:
+ if num not in final_list:
+ final_list.append(num)
+ return final_list
+
+
+def delete_dns(module, blade):
+ """Delete DNS Settings"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ current_dns = blade.dns.list_dns()
+ if current_dns.items[0].domain or current_dns.items[0].search != [] or current_dns.items[0].nameservers != []:
+ try:
+ blade.dns.update_dns(dns_settings=Dns(domain='', search=[], nameservers=[]))
+ changed = True
+ except Exception:
+ module.fail_json(msg='Deletion of DNS settings failed')
+ module.exit_json(changed=changed)
+
+
+def update_dns(module, blade):
+ """Set DNS Settings"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ current_dns = blade.dns.list_dns()
+ if module.params['domain']:
+ if current_dns.items[0].domain != module.params['domain']:
+ try:
+ blade.dns.update_dns(dns_settings=Dns(domain=module.params['domain']))
+ changed = True
+ except Exception:
+ module.fail_json(msg='Update of DNS domain failed')
+ if module.params['nameservers']:
+ if sorted(module.params['nameservers']) != sorted(current_dns.items[0].nameservers):
+ try:
+ blade.dns.update_dns(dns_settings=Dns(nameservers=module.params['nameservers']))
+ changed = True
+ except Exception:
+ module.fail_json(msg='Update of DNS nameservers failed')
+ if module.params['search']:
+ if sorted(module.params['search']) != sorted(current_dns.items[0].search):
+ try:
+ blade.dns.update_dns(dns_settings=Dns(search=module.params['search']))
+ changed = True
+ except Exception:
+ module.fail_json(msg='Update of DNS search failed')
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ nameservers=dict(type='list', elements='str'),
+ search=dict(type='list', elements='str'),
+ domain=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg='purity_fb sdk is required for this module')
+
+ blade = get_blade(module)
+
+ if module.params['state'] == 'absent':
+ delete_dns(module, blade)
+ elif module.params['state'] == 'present':
+ if module.params['nameservers']:
+ module.params['nameservers'] = remove(module.params['nameservers'])
+ if module.params['search']:
+ module.params['search'] = remove(module.params['search'])
+ update_dns(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
new file mode 100644
index 00000000..07300f88
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
@@ -0,0 +1,390 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_ds
+version_added: '1.0.0'
+short_description: Configure FlashBlade Directory Service
+description:
+- Create, modify or erase directory services configurations. There is no
+  facility to manage SSL certificates at this time. Use the FlashBlade GUI for
+  this additional configuration work.
+- If updating a directory service and I(bind_password) is provided this
+  will always cause a change, even if the password given isn't different from
+  the current. This makes this part of the module non-idempotent.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete directory service configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ dstype:
+ description:
+ - The type of directory service to work on
+ choices: [ management, nfs, smb ]
+ type: str
+ required: true
+ enable:
+ description:
+ - Whether to enable or disable directory service support.
+ default: false
+ type: bool
+ uri:
+ description:
+ - A list of up to 30 URIs of the directory servers. Each URI must include
+ the scheme ldap:// or ldaps:// (for LDAP over SSL), a hostname, and a
+ domain name or IP address. For example, ldap://ad.company.com configures
+ the directory service with the hostname "ad" in the domain "company.com"
+ while specifying the unencrypted LDAP protocol.
+ type: list
+ elements: str
+ base_dn:
+ description:
+ - Sets the base of the Distinguished Name (DN) of the directory service
+ groups. The base should consist of only Domain Components (DCs). The
+ base_dn will populate with a default value when a URI is entered by
+ parsing domain components from the URI. The base DN should specify DC=
+ for each domain component and multiple DCs should be separated by commas.
+ type: str
+ bind_password:
+ description:
+ - Sets the password of the bind_user user name account.
+ type: str
+ bind_user:
+ description:
+ - Sets the user name that can be used to bind to and query the directory.
+ - For Active Directory, enter the username - often referred to as
+ sAMAccountName or User Logon Name - of the account that is used to
+ perform directory lookups.
+ - For OpenLDAP, enter the full DN of the user.
+ type: str
+ nis_servers:
+ description:
+ - A list of up to 30 IP addresses or FQDNs for NIS servers.
+ - This cannot be used in conjunction with LDAP configurations.
+ type: list
+ elements: str
+ nis_domain:
+ description:
+ - The NIS domain to search
+ - This cannot be used in conjunction with LDAP configurations.
+ type: str
+ join_ou:
+ description:
+ - The optional organizational unit (OU) where the machine account
+ for the directory service will be created.
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Delete existing management directory service
+ purefb_ds:
+ dstype: management
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create NFS directory service (disabled)
+ purefb_ds:
+ dstype: nfs
+ uri: "ldaps://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Enable existing SMB directory service
+ purefb_ds:
+    dstype: smb
+ enable: true
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable existing management directory service
+ purefb_ds:
+ dstype: management
+ enable: false
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create NFS directory service (enabled)
+ purefb_ds:
+ dstype: nfs
+ enable: true
+ uri: "ldaps://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+
+NIS_API_VERSION = '1.7'
+HAS_PURITY_FB = True
+try:
+ from purity_fb import DirectoryService
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
def enable_ds(module, blade):
    """Enable Directory Service.

    Sends an enabled=True update for the selected service type (skipped in
    check mode). Always reports changed=True, mirroring disable_ds().
    Exits the module via exit_json/fail_json.
    """
    changed = True
    if not module.check_mode:
        try:
            blade.directory_services.update_directory_services(names=[module.params['dstype']],
                                                               directory_service=DirectoryService(enabled=True))
            # (Removed a redundant `changed = True` reassignment here; the
            # flag is already True, matching disable_ds().)
        except Exception:
            module.fail_json(msg='Enable {0} Directory Service failed'.format(module.params['dstype']))
    module.exit_json(changed=changed)
+
+
def disable_ds(module, blade):
    """Disable Directory Service by sending an enabled=False update."""
    changed = True
    if not module.check_mode:
        dstype = module.params['dstype']
        try:
            blade.directory_services.update_directory_services(
                names=[dstype],
                directory_service=DirectoryService(enabled=False))
        except Exception:
            module.fail_json(msg='Disable {0} Directory Service failed'.format(dstype))
    module.exit_json(changed=changed)
+
+
def delete_ds(module, blade):
    """Delete Directory Service.

    The API appears to have no dedicated delete call; the configuration is
    "deleted" by blanking its attributes and disabling the service.
    Reports changed=False when the service is already unconfigured.
    Exits the module via exit_json/fail_json.
    """
    changed = True
    if not module.check_mode:
        dirserv = blade.directory_services.list_directory_services(names=[module.params['dstype']])
        try:
            if module.params['dstype'] == 'management':
                if dirserv.items[0].uris:
                    # Blank the LDAP settings and disable the service.
                    dir_service = DirectoryService(uris=[''],
                                                   base_dn="",
                                                   bind_user="",
                                                   bind_password="",
                                                   enabled=False)
                else:
                    changed = False
            elif module.params['dstype'] == 'smb':
                if dirserv.items[0].uris:
                    # SMB also carries a machine-account OU that must be cleared.
                    smb_attrs = {'join_ou': ''}
                    dir_service = DirectoryService(uris=[''],
                                                   base_dn='',
                                                   bind_user='',
                                                   bind_password='',
                                                   smb=smb_attrs,
                                                   enabled=False)
                else:
                    changed = False
            elif module.params['dstype'] == 'nfs':
                if dirserv.items[0].uris:
                    dir_service = DirectoryService(uris=[''],
                                                   base_dn='',
                                                   bind_user='',
                                                   bind_password='',
                                                   enabled=False)
                elif dirserv.items[0].nfs.nis_domains:
                    # The NFS service may be NIS-based rather than LDAP-based.
                    nfs_attrs = {'nis_domains': [],
                                 'nis_servers': []}
                    dir_service = DirectoryService(nfs=nfs_attrs,
                                                   enabled=False)
                else:
                    changed = False
            if changed:
                blade.directory_services.update_directory_services(names=[module.params['dstype']],
                                                                   directory_service=dir_service)
        except Exception:
            module.fail_json(msg='Delete {0} Directory Service failed'.format(module.params['dstype']))
    module.exit_json(changed=changed)
+
+
def update_ds(module, blade):
    """Update Directory Service.

    Compares the requested parameters against the current configuration and
    sends a single update containing only the attributes that differ.
    Supplying bind_password always triggers an update because the current
    password cannot be read back (documented non-idempotency).
    Exits the module via exit_json/fail_json.
    """
    changed = True
    if not module.check_mode:
        changed = False
        mod_ds = False
        attr = {}
        try:
            ds_now = blade.directory_services.list_directory_services(names=[module.params['dstype']]).items[0]
            if module.params['dstype'] == 'nfs' and module.params['nis_servers']:
                # NIS configuration: order-insensitive server comparison.
                if sorted(module.params['nis_servers']) != sorted(ds_now.nfs.nis_servers) or \
                        module.params['nis_domain'] != ''.join(map(str, ds_now.nfs.nis_domains)):
                    attr['nfs'] = {'nis_domains': [module.params['nis_domain']],
                                   'nis_servers': module.params['nis_servers'][0:30]}
                    mod_ds = True
            else:
                if module.params['uri']:
                    # FlashBlade supports at most 30 directory server URIs.
                    if sorted(module.params['uri'][0:30]) != sorted(ds_now.uris):
                        attr['uris'] = module.params['uri'][0:30]
                        mod_ds = True
                if module.params['base_dn']:
                    if module.params['base_dn'] != ds_now.base_dn:
                        attr['base_dn'] = module.params['base_dn']
                        mod_ds = True
                if module.params['bind_user']:
                    if module.params['bind_user'] != ds_now.bind_user:
                        attr['bind_user'] = module.params['bind_user']
                        mod_ds = True
                if module.params['enable']:
                    if module.params['enable'] != ds_now.enabled:
                        attr['enabled'] = module.params['enable']
                        mod_ds = True
                if module.params['bind_password']:
                    # Cannot compare with the stored password, so always set it.
                    attr['bind_password'] = module.params['bind_password']
                    mod_ds = True
                if module.params['dstype'] == 'smb':
                    # NOTE(review): this also fires when join_ou was not
                    # supplied (None != current value), sending join_ou=None;
                    # confirm that clearing the OU is the intended behaviour.
                    if module.params['join_ou'] != ds_now.smb.join_ou:
                        attr['smb'] = {'join_ou': module.params['join_ou']}
                        mod_ds = True
            if mod_ds:
                n_attr = DirectoryService(**attr)
                try:
                    blade.directory_services.update_directory_services(names=[module.params['dstype']],
                                                                       directory_service=n_attr)
                except Exception:
                    module.fail_json(msg='Failed to change {0} directory service.'.format(module.params['dstype']))
        except Exception:
            module.fail_json(msg='Failed to get current {0} directory service.'.format(module.params['dstype']))
        # Bug fix: this was previously hard-coded to True, so the module
        # reported a change even when no attribute was actually updated.
        changed = mod_ds
    module.exit_json(changed=changed)
+
+
def create_ds(module, blade):
    """Create Directory Service.

    Builds a DirectoryService object appropriate for the requested dstype
    (LDAP-based for management/smb, NIS- or LDAP-based for nfs) and sends it
    to the array. Exits the module via exit_json/fail_json.
    """
    changed = True
    if not module.check_mode:
        try:
            if module.params['dstype'] == 'management':
                if module.params['uri']:
                    # At most 30 directory server URIs are supported.
                    dir_service = DirectoryService(uris=module.params['uri'][0:30],
                                                   base_dn=module.params['base_dn'],
                                                   bind_user=module.params['bind_user'],
                                                   bind_password=module.params['bind_password'],
                                                   enabled=module.params['enable'])
                else:
                    module.fail_json(msg="Incorrect parameters provided for dstype {0}".format(module.params['dstype']))
            elif module.params['dstype'] == 'smb':
                if module.params['uri']:
                    # SMB additionally carries the machine-account OU.
                    smb_attrs = {'join_ou': module.params['join_ou']}
                    dir_service = DirectoryService(uris=module.params['uri'][0:30],
                                                   base_dn=module.params['base_dn'],
                                                   bind_user=module.params['bind_user'],
                                                   bind_password=module.params['bind_password'],
                                                   smb=smb_attrs,
                                                   enabled=module.params['enable'])
                else:
                    module.fail_json(msg="Incorrect parameters provided for dstype {0}".format(module.params['dstype']))
            elif module.params['dstype'] == 'nfs':
                if module.params['nis_domain']:
                    # NIS-based NFS configuration.
                    nfs_attrs = {'nis_domains': [module.params['nis_domain']],
                                 'nis_servers': module.params['nis_servers'][0:30]}
                    dir_service = DirectoryService(nfs=nfs_attrs,
                                                   enabled=module.params['enable'])
                else:
                    # LDAP-based NFS configuration.
                    dir_service = DirectoryService(uris=module.params['uri'][0:30],
                                                   base_dn=module.params['base_dn'],
                                                   bind_user=module.params['bind_user'],
                                                   bind_password=module.params['bind_password'],
                                                   enabled=module.params['enable'])
            blade.directory_services.update_directory_services(names=[module.params['dstype']],
                                                               directory_service=dir_service)
        except Exception:
            module.fail_json(msg='Create {0} Directory Service failed'.format(module.params['dstype']))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: validate arguments and dispatch to the create/update/
    enable/disable/delete handler for the selected directory service."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        uri=dict(type='list', elements='str'),
        dstype=dict(required=True, type='str', choices=['management', 'nfs', 'smb']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        enable=dict(type='bool', default=False),
        bind_password=dict(type='str', no_log=True),
        bind_user=dict(type='str'),
        base_dn=dict(type='str'),
        join_ou=dict(type='str'),
        nis_domain=dict(type='str'),
        nis_servers=dict(type='list', elements='str'),
    ))

    # LDAP settings must be supplied together; NIS settings likewise, and
    # the two configuration styles are mutually exclusive.
    required_together = [['uri', 'bind_password', 'bind_user', 'base_dn'],
                         ['nis_servers', 'nis_domain']]
    mutually_exclusive = [['uri', 'nis_domain']]

    module = AnsibleModule(argument_spec,
                           required_together=required_together,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')

    state = module.params['state']
    blade = get_blade(module)
    api_version = blade.api_version.list_versions().versions
    ds_configured = False
    dirserv = blade.directory_services.list_directory_services(names=[module.params['dstype']])
    ds_enabled = dirserv.items[0].enabled
    # A populated base_dn is treated as "service is configured".
    if dirserv.items[0].base_dn is not None:
        ds_configured = True
    # NIS/SMB OU attributes require REST API 1.7 or later.
    if (module.params['nis_domain'] or module.params['join_ou']) and (NIS_API_VERSION not in api_version):
        module.fail_json(msg="NFS or SMB directory service attributes not supported by FlashBlade Purity version")
    # Detect LDAP use in either the current or the requested configuration;
    # NIS parameters are rejected if LDAP is (or will be) in play.
    ldap_uri = False
    set_ldap = False
    for uri in range(0, len(dirserv.items[0].uris)):
        if "ldap" in dirserv.items[0].uris[uri].lower():
            ldap_uri = True
    if module.params['uri']:
        for uri in range(0, len(module.params['uri'])):
            if "ldap" in module.params['uri'][uri].lower():
                set_ldap = True
    if not module.params['uri'] and ldap_uri or \
       module.params['uri'] and set_ldap:
        if module.params['nis_servers'] or module.params['nis_domain']:
            module.fail_json(msg="NIS configuration not supported in an LDAP environment")
    if state == 'absent':
        delete_ds(module, blade)
    elif ds_configured and module.params['enable'] and ds_enabled:
        update_ds(module, blade)
    elif ds_configured and not module.params['enable'] and ds_enabled:
        disable_ds(module, blade)
    elif ds_configured and module.params['enable'] and not ds_enabled:
        enable_ds(module, blade)
# Now we have enabled the DS lets make sure there aren't any new updates...
        # NOTE(review): enable_ds() ends with module.exit_json(), which
        # raises SystemExit, so this update_ds() call looks unreachable -
        # confirm whether enable_ds should return instead of exiting.
        update_ds(module, blade)
    elif not ds_configured and state == 'present':
        create_ds(module, blade)
    else:
        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py
new file mode 100644
index 00000000..9b6680ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_dsrole
+version_added: '1.0.0'
+short_description: Configure FlashBlade Management Directory Service Roles
+description:
+- Set or erase directory services role configurations.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete directory service role
+ default: present
+ type: str
+ choices: [ absent, present ]
+ role:
+ description:
+ - The directory service role to work on
+ choices: [ array_admin, ops_admin, readonly, storage_admin ]
+ type: str
+ required: true
+ group_base:
+ description:
+ - Specifies where the configured group is located in the directory
+ tree. This field consists of Organizational Units (OUs) that combine
+ with the base DN attribute and the configured group CNs to complete
+ the full Distinguished Name of the groups. The group base should
+ specify OU= for each OU and multiple OUs should be separated by commas.
+ The order of OUs is important and should get larger in scope from left
+ to right.
+ - Each OU should not exceed 64 characters in length.
+ type: str
+ group:
+ description:
+    - Sets the Common Name (CN) of the configured directory service group
+ containing users for the FlashBlade. This name should be just the
+ Common Name of the group without the CN= specifier.
+ - Common Names should not exceed 64 characters in length.
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Delete existing array_admin directory service role
+ purefb_dsrole:
+ role: array_admin
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create array_admin directory service role
+ purefb_dsrole:
+ role: array_admin
+ group_base: "OU=PureGroups,OU=SANManagers"
+ group: pureadmins
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update ops_admin directory service role
+ purefb_dsrole:
+ role: ops_admin
+ group_base: "OU=PureGroups"
+ group: opsgroup
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import DirectoryServiceRole
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
def update_role(module, blade):
    """Update Directory Service Role when group or group_base differs."""
    changed = True
    if not module.check_mode:
        role_name = module.params['role']
        current = blade.directory_services.list_directory_services_roles(names=[role_name])
        base_matches = current.items[0].group_base == module.params['group_base']
        group_matches = current.items[0].group == module.params['group']
        if not (base_matches and group_matches):
            try:
                new_role = DirectoryServiceRole(group_base=module.params['group_base'],
                                                group=module.params['group'])
                blade.directory_services.update_directory_services_roles(names=[role_name],
                                                                         directory_service_role=new_role)
            except Exception:
                module.fail_json(msg='Update Directory Service Role {0} failed'.format(role_name))
    module.exit_json(changed=changed)
+
+
def delete_role(module, blade):
    """Delete Directory Service Role by blanking its group settings."""
    changed = True
    if not module.check_mode:
        try:
            cleared = DirectoryServiceRole(group_base='', group='')
            blade.directory_services.update_directory_services_roles(
                names=[module.params['role']],
                directory_service_role=cleared)
        except Exception:
            module.fail_json(msg='Delete Directory Service Role {0} failed'.format(module.params['role']))
    module.exit_json(changed=changed)
+
+
def create_role(module, blade):
    """Create Directory Service Role from the supplied group settings."""
    changed = True
    if not module.check_mode:
        role_name = module.params['role']
        try:
            new_role = DirectoryServiceRole(group_base=module.params['group_base'],
                                            group=module.params['group'])
            blade.directory_services.update_directory_services_roles(
                names=[role_name],
                directory_service_role=new_role)
        except Exception:
            module.fail_json(msg='Create Directory Service Role {0} failed'.format(role_name))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: parse arguments and dispatch to the role handler."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        role=dict(required=True, type='str',
                  choices=['array_admin', 'ops_admin', 'readonly', 'storage_admin']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        group_base=dict(type='str'),
        group=dict(type='str'),
    ))

    module = AnsibleModule(argument_spec,
                           required_together=[['group', 'group_base']],
                           supports_check_mode=True)

    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')

    state = module.params['state']
    blade = get_blade(module)
    # A populated group is treated as "role is configured".
    role = blade.directory_services.list_directory_services_roles(names=[module.params['role']])
    role_configured = role.items[0].group is not None

    if state == 'absent' and role_configured:
        delete_role(module, blade)
    elif state == 'present' and role_configured:
        update_role(module, blade)
    elif state == 'present' and not role_configured:
        create_role(module, blade)
    else:
        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
new file mode 100644
index 00000000..0b1a8efe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
@@ -0,0 +1,645 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: purefb_fs
+version_added: "1.0.0"
+short_description: Manage filesystems on Pure Storage FlashBlade
+description:
+ - This module manages filesystems on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Filesystem Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Create, delete or modifies a filesystem.
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ eradicate:
+ description:
+ - Define whether to eradicate the filesystem on delete or leave in trash.
+ required: false
+ type: bool
+ default: false
+ size:
+ description:
+ - Volume size in M, G, T or P units. See examples.
+ - If size is not set at filesystem creation time the filesystem size becomes unlimited.
+ type: str
+ required: false
+ nfsv3:
+ description:
+      - Define whether the NFSv3 protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: true
+ nfsv4:
+ description:
+      - Define whether the NFSv4.1 protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: true
+ nfs_rules:
+ description:
+ - Define the NFS rules in operation.
+ - If not set at filesystem creation time it defaults to I(*(rw,no_root_squash))
+ - Supported binary options are ro/rw, secure/insecure, fileid_32bit/no_fileid_32bit,
+ root_squash/no_root_squash, all_squash/no_all_squash and atime/noatime
+ - Supported non-binary options are anonuid=#, anongid=#, sec=(sys|krb5)
+ required: false
+ type: str
+ smb:
+ description:
+      - Define whether the SMB protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ smb_aclmode:
+ description:
+ - Specify the ACL mode for the SMB protocol.
+ required: false
+ type: str
+ default: shared
+ choices: [ "shared", "native" ]
+ http:
+ description:
+      - Define whether the HTTP/HTTPS protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ snapshot:
+ description:
+ - Define whether a snapshot directory is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ writable:
+ description:
+ - Define if a filesystem is writeable.
+ required: false
+ type: bool
+ promote:
+ description:
+ - Promote/demote a filesystem.
+ - Can only demote the file-system if it is in a replica-link relationship.
+ required: false
+ type: bool
+ fastremove:
+ description:
+ - Define whether the fast remove directory is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ hard_limit:
+ description:
+ - Define whether the capacity for a filesystem is a hard limit.
+ - CAUTION This will cause the filesystem to go Read-Only if the
+ capacity has already exceeded the logical size of the filesystem.
+ required: false
+ type: bool
+ default: false
+ user_quota:
+ description:
+ - Default quota in M, G, T or P units for a user under this file system.
+ required: false
+ type: str
+ group_quota:
+ description:
+ - Default quota in M, G, T or P units for a group under this file system.
+ required: false
+ type: str
+ policy:
+ description:
+ - Filesystem policy to assign to or remove from a filesystem.
+ required: false
+ type: str
+ policy_state:
+ description:
+ - Add or delete a policy from a filesystem
+ required: false
+ default: present
+ type: str
+ choices: [ "absent", "present" ]
+ delete_link:
+ description:
+ - Define if the filesystem can be deleted even if it has a replica link
+ required: false
+ default: false
+ type: bool
+ discard_snaps:
+ description:
+ - Allow a filesystem to be demoted.
+ required: false
+ default: false
+ type: bool
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = '''
+- name: Create new filesystem named foo
+ purefb_fs:
+ name: foo
+ size: 1T
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete filesystem named foo
+ purefb_fs:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Recover filesystem named foo
+ purefb_fs:
+ name: foo
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Eradicate filesystem named foo
+ purefb_fs:
+ name: foo
+ state: absent
+ eradicate: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Promote filesystem named foo ready for failover
+ purefb_fs:
+ name: foo
+ promote: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Demote filesystem named foo after failover
+ purefb_fs:
+ name: foo
+ promote: false
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Modify attributes of an existing filesystem named foo
+ purefb_fs:
+ name: foo
+ size: 2T
+ nfsv3 : false
+ nfsv4 : true
+ user_quota: 10K
+ group_quota: 25M
+ nfs_rules: '10.21.200.0/24(ro)'
+ snapshot: true
+ fastremove: true
+ hard_limit: true
+ smb: true
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641'''
+
+RETURN = '''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import FileSystem, ProtocolRule, NfsRule, SmbRule
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+HARD_LIMIT_API_VERSION = '1.4'
+NFSV4_API_VERSION = '1.6'
+REPLICATION_API_VERSION = '1.9'
+
+
def get_fs(module, blade):
    """Return the named filesystem object, or None if it does not exist."""
    try:
        listing = blade.file_systems.list_file_systems(names=[module.params['name']])
        return listing.items[0]
    except Exception:
        return None
+
+
def create_fs(module, blade):
    """Create Filesystem.

    Builds a FileSystem object matching the requested protocol options,
    selecting the richest constructor the array's REST API version supports,
    creates it, then optionally attaches a snapshot policy (REST 1.9+),
    rolling the new filesystem back if the policy step fails.
    Exits the module via exit_json/fail_json.
    """
    changed = True
    if not module.check_mode:
        try:
            # Default export rule when none was supplied.
            if not module.params['nfs_rules']:
                module.params['nfs_rules'] = '*(rw,no_root_squash)'
            if module.params['size']:
                size = human_to_bytes(module.params['size'])
            else:
                size = 0  # 0 provisions an unlimited-size filesystem

            if module.params['user_quota']:
                user_quota = human_to_bytes(module.params['user_quota'])
            else:
                user_quota = None
            if module.params['group_quota']:
                group_quota = human_to_bytes(module.params['group_quota'])
            else:
                group_quota = None

            api_version = blade.api_version.list_versions().versions
            if HARD_LIMIT_API_VERSION in api_version:
                if NFSV4_API_VERSION in api_version:
                    if REPLICATION_API_VERSION in api_version:
                        # REST 1.9+: SMB ACL mode and default quotas supported.
                        fs_obj = FileSystem(name=module.params['name'],
                                            provisioned=size,
                                            fast_remove_directory_enabled=module.params['fastremove'],
                                            hard_limit_enabled=module.params['hard_limit'],
                                            snapshot_directory_enabled=module.params['snapshot'],
                                            nfs=NfsRule(v3_enabled=module.params['nfsv3'],
                                                        v4_1_enabled=module.params['nfsv4'],
                                                        rules=module.params['nfs_rules']),
                                            smb=SmbRule(enabled=module.params['smb'],
                                                        acl_mode=module.params['smb_aclmode']),
                                            http=ProtocolRule(enabled=module.params['http']),
                                            default_user_quota=user_quota,
                                            default_group_quota=group_quota
                                            )
                    else:
                        # REST 1.6-1.8: NFSv4.1 supported, no SMB ACL mode.
                        fs_obj = FileSystem(name=module.params['name'],
                                            provisioned=size,
                                            fast_remove_directory_enabled=module.params['fastremove'],
                                            hard_limit_enabled=module.params['hard_limit'],
                                            snapshot_directory_enabled=module.params['snapshot'],
                                            nfs=NfsRule(v3_enabled=module.params['nfsv3'],
                                                        v4_1_enabled=module.params['nfsv4'],
                                                        rules=module.params['nfs_rules']),
                                            smb=ProtocolRule(enabled=module.params['smb']),
                                            http=ProtocolRule(enabled=module.params['http']),
                                            default_user_quota=user_quota,
                                            default_group_quota=group_quota
                                            )
                else:
                    # REST 1.4/1.5: single NFS enable flag only.
                    fs_obj = FileSystem(name=module.params['name'],
                                        provisioned=size,
                                        fast_remove_directory_enabled=module.params['fastremove'],
                                        hard_limit_enabled=module.params['hard_limit'],
                                        snapshot_directory_enabled=module.params['snapshot'],
                                        nfs=NfsRule(enabled=module.params['nfsv3'], rules=module.params['nfs_rules']),
                                        smb=ProtocolRule(enabled=module.params['smb']),
                                        http=ProtocolRule(enabled=module.params['http'])
                                        )
            else:
                # Pre-1.4 API: no hard-limit support.
                # Bug fix: this branch previously read module.params['nfs'],
                # a key that is not in this module's argument_spec, raising
                # KeyError (masked by the broad except below).
                fs_obj = FileSystem(name=module.params['name'],
                                    provisioned=size,
                                    fast_remove_directory_enabled=module.params['fastremove'],
                                    snapshot_directory_enabled=module.params['snapshot'],
                                    nfs=NfsRule(enabled=module.params['nfsv3'], rules=module.params['nfs_rules']),
                                    smb=ProtocolRule(enabled=module.params['smb']),
                                    http=ProtocolRule(enabled=module.params['http'])
                                    )
            blade.file_systems.create_file_systems(fs_obj)
        except Exception:
            module.fail_json(msg="Failed to create filesystem {0}.".format(module.params['name']))
        if REPLICATION_API_VERSION in api_version:
            if module.params['policy']:
                # Verify the policy exists, then attach it; on any failure
                # delete the filesystem we just created before failing.
                try:
                    blade.policies.list_policies(names=[module.params['policy']])
                except Exception:
                    _delete_fs(module, blade)
                    module.fail_json(msg="Policy {0} doesn't exist.".format(module.params['policy']))
                try:
                    blade.policies.create_policy_filesystems(policy_names=[module.params['policy']],
                                                             member_names=[module.params['name']])
                except Exception:
                    _delete_fs(module, blade)
                    module.fail_json(msg="Failed to apply policy {0} when creating filesystem {1}.".format(module.params['policy'],
                                                                                                           module.params['name']))
    module.exit_json(changed=changed)
+
+
+def modify_fs(module, blade):
+ """Modify Filesystem"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ mod_fs = False
+ attr = {}
+ if module.params['policy'] and module.params['policy_state'] == 'present':
+ try:
+ policy = blade.policies.list_policy_filesystems(policy_names=[module.params['policy']],
+ member_names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg='Policy {0} does not exist.'.format(module.params['policy']))
+ if not policy.items:
+ try:
+ blade.policies.create_policy_filesystems(policy_names=[module.params['policy']],
+ member_names=[module.params['name']])
+ mod_fs = True
+ except Exception:
+ module.fail_json(msg='Failed to add filesystem {0} to policy {1}.'.format(module.params['name'],
+ module.params['polict']))
+ if module.params['policy'] and module.params['policy_state'] == 'absent':
+ try:
+ policy = blade.policies.list_policy_filesystems(policy_names=[module.params['policy']],
+ member_names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg='Policy {0} does not exist.'.format(module.params['policy']))
+ if len(policy.items) == 1:
+ try:
+ blade.policies.delete_policy_filesystems(policy_names=[module.params['policy']],
+ member_names=[module.params['name']])
+ mod_fs = True
+ except Exception:
+ module.fail_json(msg='Failed to remove filesystem {0} to policy {1}.'.format(module.params['name'],
+ module.params['polict']))
+ if module.params['user_quota']:
+ user_quota = human_to_bytes(module.params['user_quota'])
+ if module.params['group_quota']:
+ group_quota = human_to_bytes(module.params['group_quota'])
+ fsys = get_fs(module, blade)
+ if fsys.destroyed:
+ attr['destroyed'] = False
+ mod_fs = True
+ if module.params['size']:
+ if human_to_bytes(module.params['size']) != fsys.provisioned:
+ attr['provisioned'] = human_to_bytes(module.params['size'])
+ mod_fs = True
+ api_version = blade.api_version.list_versions().versions
+ if NFSV4_API_VERSION in api_version:
+ if module.params['nfsv3'] and not fsys.nfs.v3_enabled:
+ attr['nfs'] = NfsRule(v3_enabled=module.params['nfsv3'])
+ mod_fs = True
+ if not module.params['nfsv3'] and fsys.nfs.v3_enabled:
+ attr['nfs'] = NfsRule(v3_enabled=module.params['nfsv3'])
+ mod_fs = True
+ if module.params['nfsv4'] and not fsys.nfs.v4_1_enabled:
+ attr['nfs'] = NfsRule(v4_1_enabled=module.params['nfsv4'])
+ mod_fs = True
+ if not module.params['nfsv4'] and fsys.nfs.v4_1_enabled:
+ attr['nfs'] = NfsRule(v4_1_enabled=module.params['nfsv4'])
+ mod_fs = True
+ if module.params['nfsv3'] or module.params['nfsv4'] and fsys.nfs.v3_enabled or fsys.nfs.v4_1_enabled:
+ if module.params['nfs_rules'] is not None:
+ if fsys.nfs.rules != module.params['nfs_rules']:
+ attr['nfs'] = NfsRule(rules=module.params['nfs_rules'])
+ mod_fs = True
+ if module.params['user_quota'] and user_quota != fsys.default_user_quota:
+ attr['default_user_quota'] = user_quota
+ mod_fs = True
+ if module.params['group_quota'] and group_quota != fsys.default_group_quota:
+ attr['default_group_quota'] = group_quota
+ mod_fs = True
+ else:
+ if module.params['nfsv3'] and not fsys.nfs.enabled:
+ attr['nfs'] = NfsRule(enabled=module.params['nfsv3'])
+ mod_fs = True
+ if not module.params['nfsv3'] and fsys.nfs.enabled:
+ attr['nfs'] = NfsRule(enabled=module.params['nfsv3'])
+ mod_fs = True
+ if module.params['nfsv3'] and fsys.nfs.enabled:
+ if fsys.nfs.rules != module.params['nfs_rules']:
+ attr['nfs'] = NfsRule(rules=module.params['nfs_rules'])
+ mod_fs = True
+ if REPLICATION_API_VERSION in api_version:
+ if module.params['smb'] and not fsys.smb.enabled:
+ attr['smb'] = SmbRule(enabled=module.params['smb'],
+ acl_mode=module.params['smb_aclmode'])
+ mod_fs = True
+ if not module.params['smb'] and fsys.smb.enabled:
+ attr['smb'] = ProtocolRule(enabled=module.params['smb'])
+ mod_fs = True
+ if module.params['smb'] and fsys.smb.enabled:
+ if fsys.smb.acl_mode != module.params['smb_aclmode']:
+ attr['smb'] = SmbRule(enabled=module.params['smb'],
+ acl_mode=module.params['smb_aclmode'])
+ mod_fs = True
+ else:
+ if module.params['smb'] and not fsys.smb.enabled:
+ attr['smb'] = ProtocolRule(enabled=module.params['smb'])
+ mod_fs = True
+ if not module.params['smb'] and fsys.smb.enabled:
+ attr['smb'] = ProtocolRule(enabled=module.params['smb'])
+ mod_fs = True
+ if module.params['http'] and not fsys.http.enabled:
+ attr['http'] = ProtocolRule(enabled=module.params['http'])
+ mod_fs = True
+ if not module.params['http'] and fsys.http.enabled:
+ attr['http'] = ProtocolRule(enabled=module.params['http'])
+ mod_fs = True
+ if module.params['snapshot'] and not fsys.snapshot_directory_enabled:
+ attr['snapshot_directory_enabled'] = module.params['snapshot']
+ mod_fs = True
+ if not module.params['snapshot'] and fsys.snapshot_directory_enabled:
+ attr['snapshot_directory_enabled'] = module.params['snapshot']
+ mod_fs = True
+ if module.params['fastremove'] and not fsys.fast_remove_directory_enabled:
+ attr['fast_remove_directory_enabled'] = module.params['fastremove']
+ mod_fs = True
+ if not module.params['fastremove'] and fsys.fast_remove_directory_enabled:
+ attr['fast_remove_directory_enabled'] = module.params['fastremove']
+ mod_fs = True
+ if HARD_LIMIT_API_VERSION in api_version:
+ if not module.params['hard_limit'] and fsys.hard_limit_enabled:
+ attr['hard_limit_enabled'] = module.params['hard_limit']
+ mod_fs = True
+ if module.params['hard_limit'] and not fsys.hard_limit_enabled:
+ attr['hard_limit_enabled'] = module.params['hard_limit']
+ mod_fs = True
+ if REPLICATION_API_VERSION in api_version:
+ if module.params['writable'] is not None:
+ if not module.params['writable'] and fsys.writable:
+ attr['writable'] = module.params['writable']
+ mod_fs = True
+ if module.params['writable'] and not fsys.writable and fsys.promotion_status == 'promoted':
+ attr['writable'] = module.params['writable']
+ mod_fs = True
+ if module.params['promote'] is not None:
+ if module.params['promote'] and fsys.promotion_status != 'promoted':
+ attr['requested_promotion_state'] = 'promoted'
+ mod_fs = True
+ if not module.params['promote'] and fsys.promotion_status == 'promoted':
+ # Demotion only allowed on filesystems in a replica-link
+ try:
+ blade.file_system_replica_links.list_file_system_replica_links(local_file_system_names=[module.params['name']]).items[0]
+ except Exception:
+ module.fail_json(msg='Filesystem {0} not demoted. Not in a replica-link'.format(module.params['name']))
+ attr['requested_promotion_state'] = module.params['promote']
+ mod_fs = True
+ if mod_fs:
+ n_attr = FileSystem(**attr)
+ if REPLICATION_API_VERSION in api_version:
+ try:
+ blade.file_systems.update_file_systems(name=module.params['name'], attributes=n_attr,
+ discard_non_snapshotted_data=module.params['discard_snaps'])
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to update filesystem {0}.".format(module.params['name']))
+ else:
+ try:
+ blade.file_systems.update_file_systems(name=module.params['name'], attributes=n_attr)
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to update filesystem {0}.".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
def _delete_fs(module, blade):
    """ In module Delete Filesystem"""
    # Soft-delete helper used while another operation is in flight:
    # disable every protocol (NFS, SMB, HTTP), flag the filesystem
    # destroyed, then eradicate it.  No error handling here -- callers
    # are expected to wrap this helper in their own try/except.
    fs_name = module.params['name']
    api_version = blade.api_version.list_versions().versions
    # Newer REST versions control NFSv3 and NFSv4.1 independently
    if NFSV4_API_VERSION in api_version:
        nfs_off = NfsRule(v3_enabled=False, v4_1_enabled=False)
    else:
        nfs_off = NfsRule(enabled=False)
    blade.file_systems.update_file_systems(
        name=fs_name,
        attributes=FileSystem(nfs=nfs_off,
                              smb=ProtocolRule(enabled=False),
                              http=ProtocolRule(enabled=False),
                              destroyed=True))
    blade.file_systems.delete_file_systems(fs_name)
+
+
def delete_fs(module, blade):
    """ Delete Filesystem"""
    # "Delete" is soft: every protocol is disabled and the filesystem is
    # flagged destroyed.  Only when eradicate=true is it then removed
    # permanently.  changed is reported True unconditionally (no prior
    # state comparison is done here).
    changed = True
    if not module.check_mode:
        try:
            api_version = blade.api_version.list_versions().versions
            if REPLICATION_API_VERSION in api_version:
                # This REST version also supports severing a replica link
                # as part of eradication (delete_link_on_eradication)
                if NFSV4_API_VERSION in api_version:
                    # NFSv3 and NFSv4.1 are controlled independently here
                    blade.file_systems.update_file_systems(name=module.params['name'],
                                                           attributes=FileSystem(nfs=NfsRule(v3_enabled=False,
                                                                                             v4_1_enabled=False),
                                                                                 smb=ProtocolRule(enabled=False),
                                                                                 http=ProtocolRule(enabled=False),
                                                                                 destroyed=True),
                                                           delete_link_on_eradication=module.params['delete_link']
                                                           )
                else:
                    blade.file_systems.update_file_systems(name=module.params['name'],
                                                           attributes=FileSystem(nfs=NfsRule(enabled=False),
                                                                                 smb=ProtocolRule(enabled=False),
                                                                                 http=ProtocolRule(enabled=False),
                                                                                 destroyed=True),
                                                           delete_link_on_eradication=module.params['delete_link']
                                                           )
            else:
                # Older REST: same soft-delete, without replica-link options
                if NFSV4_API_VERSION in api_version:
                    blade.file_systems.update_file_systems(name=module.params['name'],
                                                           attributes=FileSystem(nfs=NfsRule(v3_enabled=False,
                                                                                             v4_1_enabled=False),
                                                                                 smb=ProtocolRule(enabled=False),
                                                                                 http=ProtocolRule(enabled=False),
                                                                                 destroyed=True)
                                                           )
                else:
                    blade.file_systems.update_file_systems(name=module.params['name'],
                                                           attributes=FileSystem(nfs=NfsRule(enabled=False),
                                                                                 smb=ProtocolRule(enabled=False),
                                                                                 http=ProtocolRule(enabled=False),
                                                                                 destroyed=True)
                                                           )
            if module.params['eradicate']:
                # Hard delete immediately after the soft delete
                try:
                    blade.file_systems.delete_file_systems(name=module.params['name'])
                except Exception:
                    module.fail_json(msg="Failed to delete filesystem {0}.".format(module.params['name']))
        except Exception:
            module.fail_json(msg="Failed to update filesystem {0} prior to deletion.".format(module.params['name']))
    module.exit_json(changed=changed)
+
+
def eradicate_fs(module, blade):
    """ Eradicate Filesystem"""
    # Permanently remove an already-destroyed filesystem.
    changed = True
    if not module.check_mode:
        fs_name = module.params['name']
        try:
            blade.file_systems.delete_file_systems(name=fs_name)
        except Exception:
            module.fail_json(msg="Failed to eradicate filesystem {0}.".format(fs_name))
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: create, modify, soft-delete or eradicate a filesystem."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            # Boolean defaults are real booleans for consistency with
            # delete_link/discard_snaps below (Ansible also accepted the
            # previous 'false'/'true' strings, so behavior is unchanged).
            eradicate=dict(default=False, type='bool'),
            nfsv3=dict(default=True, type='bool'),
            nfsv4=dict(default=True, type='bool'),
            nfs_rules=dict(type='str'),
            smb=dict(default=False, type='bool'),
            http=dict(default=False, type='bool'),
            snapshot=dict(default=False, type='bool'),
            writable=dict(type='bool'),
            promote=dict(type='bool'),
            fastremove=dict(default=False, type='bool'),
            hard_limit=dict(default=False, type='bool'),
            user_quota=dict(type='str'),
            policy=dict(type='str'),
            group_quota=dict(type='str'),
            smb_aclmode=dict(type='str', default='shared', choices=['shared', 'native']),
            policy_state=dict(default='present', choices=['present', 'absent']),
            state=dict(default='present', choices=['present', 'absent']),
            delete_link=dict(default=False, type='bool'),
            discard_snaps=dict(default=False, type='bool'),
            size=dict(type='str')
        )
    )

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')

    state = module.params['state']
    blade = get_blade(module)
    fsys = get_fs(module, blade)

    if module.params['eradicate'] and state == 'present':
        module.warn('Eradicate flag ignored without state=absent')

    # Each handler exits the module itself via exit_json/fail_json.
    if state == 'present' and not fsys:
        create_fs(module, blade)
    elif state == 'present' and fsys:
        modify_fs(module, blade)
    elif state == 'absent' and fsys and not fsys.destroyed:
        delete_fs(module, blade)
    elif state == 'absent' and fsys and fsys.destroyed and module.params['eradicate']:
        eradicate_fs(module, blade)
    # Remaining cases (absent and already gone, or destroyed without
    # eradicate) are no-ops.
    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
new file mode 100644
index 00000000..57d876e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: purefb_fs_replica
+version_added: '1.0.0'
+short_description: Manage filesystem replica links between Pure Storage FlashBlades
+description:
+ - This module manages filesystem replica links between Pure Storage FlashBlades.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Local Filesystem Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or modifies a filesystem replica link
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ target_array:
+ description:
+ - Remote array name to create replica on.
+ required: false
+ type: str
+ target_fs:
+ description:
+ - Name of target filesystem name
+ - If not supplied, will default to I(name).
+ type: str
+ required: false
+ policy:
+ description:
+ - Name of filesystem snapshot policy to apply to the replica link.
+ required: false
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = '''
+- name: Create new filesystem replica from foo to bar on arrayB
+ purefb_fs_replica:
+ name: foo
+ target_array: arrayB
+ target_fs: bar
+ policy: daily
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Add new snapshot policy to existing filesystem replica link
+ purefb_fs_replica:
+ name: foo
+ policy: weekly
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete snapshot policy from filesystem replica foo
+ purefb_fs_replica:
+ name: foo
+ policy: weekly
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641'''
+
+RETURN = '''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import FileSystemReplicaLink, LocationReference
+except ImportError:
+ HAS_PURITY_FB = False
+
+MIN_REQUIRED_API_VERSION = '1.9'
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
def get_local_fs(module, blade):
    """Look up the local filesystem named in the module params.

    Returns the filesystem object, or None when it does not exist
    (or the lookup fails for any other reason).
    """
    try:
        return blade.file_systems.list_file_systems(
            names=[module.params['name']]).items[0]
    except Exception:
        return None
+
+
def get_local_rl(module, blade):
    """Look up the replica link whose local filesystem is the named one.

    Returns the replica-link object, or None when none exists
    (or the lookup fails for any other reason).
    """
    try:
        return blade.file_system_replica_links.list_file_system_replica_links(
            local_file_system_names=[module.params['name']]).items[0]
    except Exception:
        return None
+
+
+def _check_connected(module, blade):
+ connected_blades = blade.array_connections.list_array_connections()
+ for target in range(0, len(connected_blades.items)):
+ if (connected_blades.items[target].remote.name == module.params['target_array'] or
+ connected_blades.items[target].management_address == module.params['target_array']) and \
+ connected_blades.items[target].status in ["connected", "connecting", "partially_connected"]:
+ return connected_blades.items[target]
+ return None
+
+
def create_rl(module, blade):
    """Create Filesystem Replica Link"""
    # Creates a replica link from the local filesystem to a connected
    # remote array, optionally attaching a snapshot policy at creation
    # time.  changed is reported True unconditionally.
    changed = True
    if not module.check_mode:
        try:
            # The target must already be a connected (or connecting) array
            remote_array = _check_connected(module, blade)
            if remote_array:
                if not module.params['target_fs']:
                    # Remote filesystem name defaults to the local name
                    module.params['target_fs'] = module.params['name']
                if not module.params['policy']:
                    blade.file_system_replica_links.create_file_system_replica_links(
                        local_file_system_names=[module.params['name']],
                        remote_file_system_names=[module.params['target_fs']],
                        remote_names=[remote_array.remote.name])
                else:
                    # Attach the snapshot policy as part of link creation
                    blade.file_system_replica_links.create_file_system_replica_links(
                        local_file_system_names=[module.params['name']],
                        remote_file_system_names=[module.params['target_fs']],
                        remote_names=[remote_array.remote.name],
                        file_system_replica_link=FileSystemReplicaLink(policies=[LocationReference(name=module.params['policy'])]))
            else:
                module.fail_json(msg='Target array {0} is not connected'.format(module.params['target_array']))
        except Exception:
            module.fail_json(msg="Failed to create filesystem replica link for {0}.".format(module.params['name']))
    module.exit_json(changed=changed)
+
+
def add_rl_policy(module, blade):
    """Add Policy to Filesystem Replica Link

    Attaches the named snapshot policy to the existing replica link.
    Idempotent: reports changed=False when the policy is already linked.
    """
    changed = True
    if not module.check_mode:
        if not module.params['target_array']:
            # Derive the remote array name from the existing replica link
            module.params['target_array'] = blade.file_system_replica_links.list_file_system_replica_links(
                local_file_system_names=[module.params['name']]).items[0].remote.name
        remote_array = _check_connected(module, blade)
        if not remote_array:
            # Bug fix: a disconnected target previously fell through, and
            # the later remote_array.remote.name raised AttributeError
            # inside the broad except, producing a misleading "Failed to
            # add policy" message.  Fail early instead, with the same
            # message create_rl uses.
            module.fail_json(msg='Target array {0} is not connected'.format(module.params['target_array']))
        try:
            already_a_policy = blade.file_system_replica_links.list_file_system_replica_link_policies(
                local_file_system_names=[module.params['name']],
                policy_names=[module.params['policy']],
                remote_names=[remote_array.remote.name])
            if already_a_policy.items:
                changed = False
            else:
                blade.file_system_replica_links.create_file_system_replica_link_policies(
                    policy_names=[module.params['policy']],
                    local_file_system_names=[module.params['name']],
                    remote_names=[remote_array.remote.name])
        except Exception:
            module.fail_json(msg="Failed to add policy {0} to replica link {1}.".format(module.params['policy'], module.params['name']))
    module.exit_json(changed=changed)
+
+
def delete_rl_policy(module, blade):
    """ Delete Policy from Filesystem Replica Link"""
    # Detaches the named snapshot policy from the replica link.
    # Idempotent: reports changed=False when the policy is not attached.
    changed = True
    if not module.check_mode:
        fs_name = module.params['name']
        policy_name = module.params['policy']
        current_policy = blade.file_system_replica_links.list_file_system_replica_link_policies(
            local_file_system_names=[fs_name],
            policy_names=[policy_name])
        if not current_policy.items:
            changed = False
        else:
            try:
                blade.file_system_replica_links.delete_file_system_replica_link_policies(
                    policy_names=[policy_name],
                    local_file_system_names=[fs_name],
                    remote_names=[current_policy.items[0].link.remote.name])
            except Exception:
                module.fail_json(msg="Failed to remove policy {0} from replica link {1}.".format(policy_name, fs_name))
    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point: create a replica link or add/remove its policy."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            target_fs=dict(type='str'),
            target_array=dict(type='str'),
            policy=dict(type='str'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )

    # state=absent removes a policy from the link (not the link itself),
    # so a policy name is mandatory in that case
    required_if = [['state', 'absent', ['policy']]]

    module = AnsibleModule(argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)

    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')

    state = module.params['state']
    blade = get_blade(module)
    versions = blade.api_version.list_versions().versions

    # Replica links need at least REST 1.9
    if MIN_REQUIRED_API_VERSION not in versions:
        module.fail_json(msg='Minimum FlashBlade REST version required: {0}'.format(MIN_REQUIRED_API_VERSION))

    local_fs = get_local_fs(module, blade)
    local_replica_link = get_local_rl(module, blade)

    if not local_fs:
        module.fail_json(msg='Selected local filesystem {0} does not exist.'.format(module.params['name']))

    # Validate the policy name up front; policy also acts as the switch
    # that selects the add/delete-policy paths below
    if module.params['policy']:
        try:
            policy = blade.policies.list_policies(names=[module.params['policy']])
        except Exception:
            module.fail_json(msg='Selected policy {0} does not exist.'.format(module.params['policy']))
    else:
        policy = None
    # Each handler exits the module itself via exit_json/fail_json
    if state == 'present' and not local_replica_link:
        create_rl(module, blade)
    elif state == 'present' and local_replica_link and policy:
        add_rl_policy(module, blade)
    elif state == 'absent' and policy:
        delete_rl_policy(module, blade)

    module.exit_json(changed=False)
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
new file mode 100644
index 00000000..eeb95f7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
@@ -0,0 +1,899 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_info
+version_added: '1.0.0'
+short_description: Collect information from Pure Storage FlashBlade
+description:
+ - Collect information from a Pure Storage FlashBlade running the
+ Purity//FB operating system. By default, the module will collect basic
+ information including hosts, host groups, protection
+ groups and volume counts. Additional information can be collected
+    based on the configured set of arguments.
+author:
+ - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the information to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnets, lags, filesystems, snapshots, buckets,
+ replication, policies and arrays.
+ required: false
+ type: list
+ elements: str
+ default: minimum
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: collect default set of info
+ purefb_info:
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+ register: blade_info
+- name: show default information
+ debug:
+ msg: "{{ blade_info['purefb_info']['default'] }}"
+
+- name: collect configuration and capacity info
+ purefb_info:
+ gather_subset:
+ - config
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+ register: blade_info
+- name: show config information
+ debug:
+ msg: "{{ blade_info['purefb_info']['config'] }}"
+
+- name: collect all info
+ purefb_info:
+ gather_subset:
+ - all
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+ register: blade_info
+- name: show all information
+ debug:
+ msg: "{{ blade_info['purefb_info'] }}"
+'''
+
+RETURN = r'''
+purefb_info:
+ description: Returns the information collected from the FlashBlade
+ returned: always
+ type: complex
+ sample: {
+ "capacity": {
+ "aggregate": {
+ "data_reduction": 1.1179228,
+ "snapshots": 0,
+ "total_physical": 17519748439,
+ "unique": 17519748439,
+ "virtual": 19585726464
+ },
+ "file-system": {
+ "data_reduction": 1.3642412,
+ "snapshots": 0,
+ "total_physical": 4748219708,
+ "unique": 4748219708,
+ "virtual": 6477716992
+ },
+ "object-store": {
+ "data_reduction": 1.0263462,
+ "snapshots": 0,
+ "total_physical": 12771528731,
+ "unique": 12771528731,
+ "virtual": 6477716992
+ },
+ "total": 83359896948925
+ },
+ "config": {
+ "alert_watchers": {
+ "enabled": true,
+ "name": "notify@acmestorage.com"
+ },
+ "array_management": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "management",
+ "services": [
+ "management"
+ ],
+ "uris": []
+ },
+ "directory_service_roles": {
+ "array_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "ops_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "readonly": {
+ "group": null,
+ "group_base": null
+ },
+ "storage_admin": {
+ "group": null,
+ "group_base": null
+ }
+ },
+ "dns": {
+ "domain": "demo.acmestorage.com",
+ "name": "demo-fb-1",
+ "nameservers": [
+ "8.8.8.8"
+ ],
+ "search": [
+ "demo.acmestorage.com"
+ ]
+ },
+ "nfs_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "nfs",
+ "services": [
+ "nfs"
+ ],
+ "uris": []
+ },
+ "ntp": [
+ "0.ntp.pool.org"
+ ],
+ "smb_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "smb",
+ "services": [
+ "smb"
+ ],
+ "uris": []
+ },
+ "smtp": {
+ "name": "demo-fb-1",
+ "relay_host": null,
+ "sender_domain": "acmestorage.com"
+ },
+ "ssl_certs": {
+ "certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
+ "common_name": "Acme Storage",
+ "country": "US",
+ "email": null,
+ "intermediate_certificate": null,
+ "issued_by": "Acme Storage",
+ "issued_to": "Acme Storage",
+ "key_size": 4096,
+ "locality": null,
+ "name": "global",
+ "organization": "Acme Storage",
+ "organizational_unit": "Acme Storage",
+ "passphrase": null,
+ "private_key": null,
+ "state": null,
+ "status": "self-signed",
+ "valid_from": "1508433967000",
+ "valid_to": "2458833967000"
+ }
+ },
+ "default": {
+ "blades": 15,
+ "buckets": 7,
+ "filesystems": 2,
+ "flashblade_name": "demo-fb-1",
+ "object_store_accounts": 1,
+ "object_store_users": 1,
+ "purity_version": "2.2.0",
+ "snapshots": 1,
+ "total_capacity": 83359896948925
+ },
+ "filesystems": {
+ "k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": true,
+ "nfs_rules": "10.21.255.0/24(rw,no_root_squash)",
+ "provisioned": 21474836480,
+ "snapshot_enabled": false
+ },
+ "z": {
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": false,
+ "provisioned": 1073741824,
+ "snapshot_enabled": false
+ }
+ },
+ "lag": {
+ "uplink": {
+ "lag_speed": 0,
+ "port_speed": 40000000000,
+ "ports": [
+ {
+ "name": "CH1.FM1.ETH1.1"
+ },
+ {
+ "name": "CH1.FM1.ETH1.2"
+ },
+ ],
+ "status": "healthy"
+ }
+ },
+ "network": {
+ "fm1.admin0": {
+ "address": "10.10.100.6",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "fm2.admin0": {
+ "address": "10.10.100.7",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "nfs1": {
+ "address": "10.10.100.4",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "data"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "vir0": {
+ "address": "10.10.100.5",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ }
+ },
+ "performance": {
+ "aggregate": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "http": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "nfs": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "s3": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ }
+ },
+ "snapshots": {
+ "z.188": {
+ "destroyed": false,
+ "source": "z",
+ "source_destroyed": false,
+ "suffix": "188"
+ }
+ },
+ "subnet": {
+ "new-mgmt": {
+ "gateway": "10.10.100.1",
+ "interfaces": [
+ {
+ "name": "fm1.admin0"
+ },
+ {
+ "name": "fm2.admin0"
+ },
+ {
+ "name": "nfs1"
+ },
+ {
+ "name": "vir0"
+ }
+ ],
+ "lag": "uplink",
+ "mtu": 1500,
+ "prefix": "10.10.100.0/24",
+ "services": [
+ "data",
+ "management",
+ "support"
+ ],
+ "vlan": 2200
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.3'
+HARD_LIMIT_API_VERSION = '1.4'
+POLICIES_API_VERSION = '1.5'
+CERT_GROUPS_API_VERSION = '1.8'
+REPLICATION_API_VERSION = '1.9'
+
+
def generate_default_dict(blade):
    """Build the 'default' info subset: array name, version, object counts
    and total capacity, plus extra counts on newer REST versions."""
    array = blade.arrays.list_arrays().items[0]
    default_info = {
        'flashblade_name': array.name,
        'purity_version': array.version,
        'filesystems': len(blade.file_systems.list_file_systems().items),
        'snapshots': len(blade.file_system_snapshots.list_file_system_snapshots().items),
        'buckets': len(blade.buckets.list_buckets().items),
        'object_store_users': len(blade.object_store_users.list_object_store_users().items),
        'object_store_accounts': len(blade.object_store_accounts.list_object_store_accounts().items),
        'blades': len(blade.blade.list_blades().items),
        'certificates': len(blade.certificates.list_certificates().items),
        'total_capacity': blade.arrays.list_arrays_space().items[0].capacity,
    }
    api_version = blade.api_version.list_versions().versions
    # Later REST versions expose additional object families
    if POLICIES_API_VERSION in api_version:
        default_info['policies'] = len(blade.policies.list_policies().items)
    if CERT_GROUPS_API_VERSION in api_version:
        default_info['certificate_groups'] = len(blade.certificate_groups.list_certificate_groups().items)
    if REPLICATION_API_VERSION in api_version:
        default_info['fs_replicas'] = len(blade.file_system_replica_links.list_file_system_replica_links().items)
        default_info['remote_credentials'] = len(blade.object_store_remote_credentials.list_object_store_remote_credentials().items)
        default_info['bucket_replicas'] = len(blade.bucket_replica_links.list_bucket_replica_links().items)
        default_info['connected_arrays'] = len(blade.array_connections.list_array_connections().items)
        default_info['targets'] = len(blade.targets.list_targets().items)
        default_info['kerberos_keytabs'] = len(blade.keytabs.list_keytabs().items)
    return default_info
+
+
def generate_perf_dict(blade):
    """Build the 'performance' info subset.

    Returns one dict of throughput/latency counters for the aggregate
    workload and one per protocol (http, s3, nfs), all read from the
    array performance endpoint.
    """
    counters = (
        'bytes_per_op', 'bytes_per_read', 'bytes_per_write',
        'read_bytes_per_sec', 'reads_per_sec', 'usec_per_other_op',
        'usec_per_read_op', 'usec_per_write_op', 'write_bytes_per_sec',
        'writes_per_sec',
    )

    def _flatten(perf_result):
        # Flatten the first (array-wide) result item into a plain dict
        item = perf_result.items[0]
        return {counter: getattr(item, counter) for counter in counters}

    perf_info = {'aggregate': _flatten(blade.arrays.list_arrays_performance())}
    for protocol in ('http', 's3', 'nfs'):
        perf_info[protocol] = _flatten(
            blade.arrays.list_arrays_performance(protocol=protocol))
    # TODO (SD): Only add replication performance (file-system and
    # object-store, gated on REPLICATION_API_VERSION) once a python SDK
    # exists that supports Python 3.7.
    return perf_info
+
+
+def generate_config_dict(blade):
+ config_info = {}
+ config_info['dns'] = blade.dns.list_dns().items[0].to_dict()
+ config_info['smtp'] = blade.smtp.list_smtp().items[0].to_dict()
+ try:
+ config_info['alert_watchers'] = \
+ blade.alert_watchers.list_alert_watchers().items[0].to_dict()
+ except Exception:
+ config_info['alert_watchers'] = ''
+ api_version = blade.api_version.list_versions().versions
+ if HARD_LIMIT_API_VERSION in api_version:
+ config_info['array_management'] = \
+ blade.directory_services.list_directory_services(names=['management']).items[0].to_dict()
+ config_info['directory_service_roles'] = {}
+ roles = blade.directory_services.list_directory_services_roles()
+ for role in range(0, len(roles.items)):
+ role_name = roles.items[role].name
+ config_info['directory_service_roles'][role_name] = {
+ 'group': roles.items[role].group,
+ 'group_base': roles.items[role].group_base
+ }
+ config_info['nfs_directory_service'] = \
+ blade.directory_services.list_directory_services(names=['nfs']).items[0].to_dict()
+ config_info['smb_directory_service'] = \
+ blade.directory_services.list_directory_services(names=['smb']).items[0].to_dict()
+ config_info['ntp'] = blade.arrays.list_arrays().items[0].ntp_servers
+ config_info['ssl_certs'] = \
+ blade.certificates.list_certificates().items[0].to_dict()
+ api_version = blade.api_version.list_versions().versions
+ if CERT_GROUPS_API_VERSION in api_version:
+ try:
+ config_info['certificate_groups'] = \
+ blade.certificate_groups.list_certificate_groups().items[0].to_dict()
+ except Exception:
+ config_info['certificate_groups'] = ''
+ if REPLICATION_API_VERSION in api_version:
+ config_info['snmp_agents'] = {}
+ snmp_agents = blade.snmp_agents.list_snmp_agents()
+ for agent in range(0, len(snmp_agents.items)):
+ agent_name = snmp_agents.items[agent].name
+ config_info['snmp_agents'][agent_name] = {
+ 'version': snmp_agents.items[agent].version,
+ 'engine_id': snmp_agents.items[agent].engine_id
+ }
+ if config_info['snmp_agents'][agent_name]['version'] == 'v3':
+ config_info['snmp_agents'][agent_name]['auth_protocol'] = snmp_agents.items[agent].v3.auth_protocol
+ config_info['snmp_agents'][agent_name]['privacy_protocol'] = snmp_agents.items[agent].v3.privacy_protocol
+ config_info['snmp_agents'][agent_name]['user'] = snmp_agents.items[agent].v3.user
+ config_info['snmp_managers'] = {}
+ snmp_managers = blade.snmp_managers.list_snmp_managers()
+ for manager in range(0, len(snmp_managers.items)):
+ mgr_name = snmp_managers.items[manager].name
+ config_info['snmp_managers'][mgr_name] = {
+ 'version': snmp_managers.items[manager].version,
+ 'host': snmp_managers.items[manager].host,
+ 'notification': snmp_managers.items[manager].notification
+ }
+ if config_info['snmp_managers'][mgr_name]['version'] == 'v3':
+ config_info['snmp_managers'][mgr_name]['auth_protocol'] = snmp_managers.items[manager].v3.auth_protocol
+ config_info['snmp_managers'][mgr_name]['privacy_protocol'] = snmp_managers.items[manager].v3.privacy_protocol
+ config_info['snmp_managers'][mgr_name]['user'] = snmp_managers.items[manager].v3.user
+ return config_info
+
+
+def generate_subnet_dict(blade):
+ sub_info = {}
+ subnets = blade.subnets.list_subnets()
+ for sub in range(0, len(subnets.items)):
+ sub_name = subnets.items[sub].name
+ if subnets.items[sub].enabled:
+ sub_info[sub_name] = {
+ 'gateway': subnets.items[sub].gateway,
+ 'mtu': subnets.items[sub].mtu,
+ 'vlan': subnets.items[sub].vlan,
+ 'prefix': subnets.items[sub].prefix,
+ 'services': subnets.items[sub].services,
+ }
+ sub_info[sub_name]['lag'] = subnets.items[sub].link_aggregation_group.name
+ sub_info[sub_name]['interfaces'] = []
+ for iface in range(0, len(subnets.items[sub].interfaces)):
+ sub_info[sub_name]['interfaces'].append({'name': subnets.items[sub].interfaces[iface].name})
+ return sub_info
+
+
+def generate_lag_dict(blade):
+ lag_info = {}
+ groups = blade.link_aggregation_groups.list_link_aggregation_groups()
+ for groupcnt in range(0, len(groups.items)):
+ lag_name = groups.items[groupcnt].name
+ lag_info[lag_name] = {
+ 'lag_speed': groups.items[groupcnt].lag_speed,
+ 'port_speed': groups.items[groupcnt].port_speed,
+ 'status': groups.items[groupcnt].status,
+ }
+ lag_info[lag_name]['ports'] = []
+ for port in range(0, len(groups.items[groupcnt].ports)):
+ lag_info[lag_name]['ports'].append({'name': groups.items[groupcnt].ports[port].name})
+ return lag_info
+
+
+def generate_targets_dict(blade):
+ targets_info = {}
+ targets = blade.targets.list_targets()
+ for target in range(0, len(targets.items)):
+ target_name = targets.items[target].name
+ targets_info[target_name] = {
+ 'address': targets.items[target].address,
+ 'status': targets.items[target].status,
+ 'status_details': targets.items[target].status_details,
+ }
+ return targets_info
+
+
+def generate_remote_creds_dict(blade):
+ remote_creds_info = {}
+ remote_creds = blade.object_store_remote_credentials.list_object_store_remote_credentials()
+ for cred_cnt in range(0, len(remote_creds.items)):
+ cred_name = remote_creds.items[cred_cnt].name
+ remote_creds_info[cred_name] = {
+ 'access_key': remote_creds.items[cred_cnt].access_key_id,
+ 'remote_array': remote_creds.items[cred_cnt].remote.name,
+ }
+ return remote_creds_info
+
+
+def generate_file_repl_dict(blade):
+ file_repl_info = {}
+ file_links = blade.file_system_replica_links.list_file_system_replica_links()
+ for linkcnt in range(0, len(file_links.items)):
+ fs_name = file_links.items[linkcnt].local_file_system.name
+ file_repl_info[fs_name] = {
+ 'direction': file_links.items[linkcnt].direction,
+ 'lag': file_links.items[linkcnt].lag,
+ 'status': file_links.items[linkcnt].status,
+ 'remote_fs': file_links.items[linkcnt].remote.name + ":" + file_links.items[linkcnt].remote_file_system.name,
+ 'recovery_point': file_links.items[linkcnt].recovery_point,
+ }
+ file_repl_info[fs_name]['policies'] = []
+ for policy_cnt in range(0, len(file_links.items[linkcnt].policies)):
+ file_repl_info[fs_name]['policies'].append(file_links.items[linkcnt].policies[policy_cnt].display_name)
+ return file_repl_info
+
+
+def generate_bucket_repl_dict(blade):
+ bucket_repl_info = {}
+ bucket_links = blade.bucket_replica_links.list_bucket_replica_links()
+ for linkcnt in range(0, len(bucket_links.items)):
+ bucket_name = bucket_links.items[linkcnt].local_bucket.name
+ bucket_repl_info[bucket_name] = {
+ 'direction': bucket_links.items[linkcnt].direction,
+ 'lag': bucket_links.items[linkcnt].lag,
+ 'paused': bucket_links.items[linkcnt].paused,
+ 'status': bucket_links.items[linkcnt].status,
+ 'remote_bucket': bucket_links.items[linkcnt].remote_bucket.name,
+ 'remote_credentials': bucket_links.items[linkcnt].remote_credentials.name,
+ 'recovery_point': bucket_links.items[linkcnt].recovery_point,
+ }
+ return bucket_repl_info
+
+
+def generate_network_dict(blade):
+ net_info = {}
+ ports = blade.network_interfaces.list_network_interfaces()
+ for portcnt in range(0, len(ports.items)):
+ int_name = ports.items[portcnt].name
+ if ports.items[portcnt].enabled:
+ net_info[int_name] = {
+ 'type': ports.items[portcnt].type,
+ 'mtu': ports.items[portcnt].mtu,
+ 'vlan': ports.items[portcnt].vlan,
+ 'address': ports.items[portcnt].address,
+ 'services': ports.items[portcnt].services,
+ 'gateway': ports.items[portcnt].gateway,
+ 'netmask': ports.items[portcnt].netmask,
+ }
+ return net_info
+
+
+def generate_capacity_dict(blade):
+ capacity_info = {}
+ total_cap = blade.arrays.list_arrays_space()
+ file_cap = blade.arrays.list_arrays_space(type='file-system')
+ object_cap = blade.arrays.list_arrays_space(type='object-store')
+ capacity_info['total'] = total_cap.items[0].capacity
+ capacity_info['aggregate'] = {
+ 'data_reduction': total_cap.items[0].space.data_reduction,
+ 'snapshots': total_cap.items[0].space.snapshots,
+ 'total_physical': total_cap.items[0].space.total_physical,
+ 'unique': total_cap.items[0].space.unique,
+ 'virtual': total_cap.items[0].space.virtual,
+ }
+ capacity_info['file-system'] = {
+ 'data_reduction': file_cap.items[0].space.data_reduction,
+ 'snapshots': file_cap.items[0].space.snapshots,
+ 'total_physical': file_cap.items[0].space.total_physical,
+ 'unique': file_cap.items[0].space.unique,
+ 'virtual': file_cap.items[0].space.virtual,
+ }
+ capacity_info['object-store'] = {
+ 'data_reduction': object_cap.items[0].space.data_reduction,
+ 'snapshots': object_cap.items[0].space.snapshots,
+ 'total_physical': object_cap.items[0].space.total_physical,
+ 'unique': object_cap.items[0].space.unique,
+        'virtual': object_cap.items[0].space.virtual,
+ }
+
+ return capacity_info
+
+
+def generate_snap_dict(blade):
+ snap_info = {}
+ snaps = blade.file_system_snapshots.list_file_system_snapshots()
+ api_version = blade.api_version.list_versions().versions
+ for snap in range(0, len(snaps.items)):
+ snapshot = snaps.items[snap].name
+ snap_info[snapshot] = {
+ 'destroyed': snaps.items[snap].destroyed,
+ 'source': snaps.items[snap].source,
+ 'suffix': snaps.items[snap].suffix,
+ 'source_destroyed': snaps.items[snap].source_destroyed,
+ }
+ if REPLICATION_API_VERSION in api_version:
+ snap_info[snapshot]['owner'] = snaps.items[snap].owner.name
+ snap_info[snapshot]['owner_destroyed'] = snaps.items[snap].owner_destroyed
+ snap_info[snapshot]['source_display_name'] = snaps.items[snap].source_display_name
+ snap_info[snapshot]['source_is_local'] = snaps.items[snap].source_is_local
+ snap_info[snapshot]['source_location'] = snaps.items[snap].source_location.name
+ return snap_info
+
+
+def generate_snap_transfer_dict(blade):
+ snap_transfer_info = {}
+ snap_transfers = blade.file_system_snapshots.list_file_system_snapshots_transfer()
+ for snap_transfer in range(0, len(snap_transfers.items)):
+ transfer = snap_transfers.items[snap_transfer].name
+ snap_transfer_info[transfer] = {
+ 'completed': snap_transfers.items[snap_transfer].completed,
+ 'data_transferred': snap_transfers.items[snap_transfer].data_transferred,
+ 'progress': snap_transfers.items[snap_transfer].progress,
+ 'direction': snap_transfers.items[snap_transfer].direction,
+ 'remote': snap_transfers.items[snap_transfer].remote.name,
+ 'remote_snapshot': snap_transfers.items[snap_transfer].remote_snapshot.name,
+ 'started': snap_transfers.items[snap_transfer].started,
+ 'status': snap_transfers.items[snap_transfer].status
+ }
+ return snap_transfer_info
+
+
+def generate_array_conn_dict(blade):
+ array_conn_info = {}
+ arrays = blade.array_connections.list_array_connections()
+ for arraycnt in range(0, len(arrays.items)):
+ array = arrays.items[arraycnt].remote.name
+ array_conn_info[array] = {
+ 'encrypted': arrays.items[arraycnt].encrypted,
+ 'replication_addresses': arrays.items[arraycnt].replication_addresses,
+ 'management_address': arrays.items[arraycnt].management_address,
+ 'id': arrays.items[arraycnt].remote.id,
+ 'status': arrays.items[arraycnt].status,
+ 'version': arrays.items[arraycnt].version,
+ }
+ if arrays.items[arraycnt].encrypted:
+ array_conn_info[array]['ca_certificate_group'] = arrays.items[arraycnt].ca_certificate_group.name
+ return array_conn_info
+
+
+def generate_policies_dict(blade):
+ policies_info = {}
+ policies = blade.policies.list_policies()
+ for policycnt in range(0, len(policies.items)):
+ policy = policies.items[policycnt].name
+ policies_info[policy] = {}
+ policies_info[policy]['enabled'] = policies.items[policycnt].enabled
+ if policies.items[policycnt].rules:
+ policies_info[policy]['rules'] = policies.items[policycnt].rules[0].to_dict()
+ return policies_info
+
+
+def generate_bucket_dict(blade):
+ bucket_info = {}
+ buckets = blade.buckets.list_buckets()
+ for bckt in range(0, len(buckets.items)):
+ bucket = buckets.items[bckt].name
+ bucket_info[bucket] = {
+ 'versioning': buckets.items[bckt].versioning,
+ 'object_count': buckets.items[bckt].object_count,
+ 'id': buckets.items[bckt].id,
+ 'account_name': buckets.items[bckt].account.name,
+ 'data_reduction': buckets.items[bckt].space.data_reduction,
+ 'snapshot_space': buckets.items[bckt].space.snapshots,
+ 'total_physical_space': buckets.items[bckt].space.total_physical,
+ 'unique_space': buckets.items[bckt].space.unique,
+ 'virtual_space': buckets.items[bckt].space.virtual,
+ 'created': buckets.items[bckt].created,
+ 'destroyed': buckets.items[bckt].destroyed,
+ 'time_remaining': buckets.items[bckt].time_remaining,
+ }
+ return bucket_info
+
+
+def generate_fs_dict(blade):
+ fs_info = {}
+ fsys = blade.file_systems.list_file_systems()
+ for fsystem in range(0, len(fsys.items)):
+ share = fsys.items[fsystem].name
+ fs_info[share] = {
+ 'fast_remove': fsys.items[fsystem].fast_remove_directory_enabled,
+ 'snapshot_enabled': fsys.items[fsystem].snapshot_directory_enabled,
+ 'provisioned': fsys.items[fsystem].provisioned,
+ 'destroyed': fsys.items[fsystem].destroyed,
+ }
+ if fsys.items[fsystem].http.enabled:
+ fs_info[share]['http'] = fsys.items[fsystem].http.enabled
+ if fsys.items[fsystem].smb.enabled:
+ fs_info[share]['smb_mode'] = fsys.items[fsystem].smb.acl_mode
+ if fsys.items[fsystem].nfs.enabled:
+ fs_info[share]['nfs_rules'] = fsys.items[fsystem].nfs.rules
+ api_version = blade.api_version.list_versions().versions
+ if HARD_LIMIT_API_VERSION in api_version:
+ fs_info[share]['hard_limit'] = fsys.items[fsystem].hard_limit_enabled
+ if REPLICATION_API_VERSION in api_version:
+ fs_info[share]['promotion_status'] = fsys.items[fsystem].promotion_status
+ fs_info[share]['requested_promotion_state'] = fsys.items[fsystem].requested_promotion_state
+ fs_info[share]['writable'] = fsys.items[fsystem].writable
+ fs_info[share]['source'] = {
+ 'is_local': fsys.items[fsystem].source.is_local,
+ 'name': fsys.items[fsystem].source.name
+ }
+ return fs_info
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ gather_subset=dict(default='minimum', type='list', elements='str')
+ ))
+
+ module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(msg='Minimum FlashBlade REST version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+ subset = [test.lower() for test in module.params['gather_subset']]
+ valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
+ 'network', 'subnets', 'lags', 'filesystems', 'snapshots',
+ 'buckets', 'arrays', 'replication', 'policies')
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+        module.fail_json(msg="value of gather_subset must be one or more of: %s, got: %s"
+ % (",".join(valid_subsets), ",".join(subset)))
+
+ info = {}
+
+ if 'minimum' in subset or 'all' in subset:
+ info['default'] = generate_default_dict(blade)
+ if 'performance' in subset or 'all' in subset:
+ info['performance'] = generate_perf_dict(blade)
+ if 'config' in subset or 'all' in subset:
+ info['config'] = generate_config_dict(blade)
+ if 'capacity' in subset or 'all' in subset:
+ info['capacity'] = generate_capacity_dict(blade)
+ if 'lags' in subset or 'all' in subset:
+ info['lag'] = generate_lag_dict(blade)
+ if 'network' in subset or 'all' in subset:
+ info['network'] = generate_network_dict(blade)
+ if 'subnets' in subset or 'all' in subset:
+ info['subnet'] = generate_subnet_dict(blade)
+ if 'filesystems' in subset or 'all' in subset:
+ info['filesystems'] = generate_fs_dict(blade)
+ if 'snapshots' in subset or 'all' in subset:
+ info['snapshots'] = generate_snap_dict(blade)
+ if 'buckets' in subset or 'all' in subset:
+ info['buckets'] = generate_bucket_dict(blade)
+ api_version = blade.api_version.list_versions().versions
+ if POLICIES_API_VERSION in api_version:
+ if 'policies' in subset or 'all' in subset:
+ info['policies'] = generate_policies_dict(blade)
+ if REPLICATION_API_VERSION in api_version:
+ if 'arrays' in subset or 'all' in subset:
+ info['arrays'] = generate_array_conn_dict(blade)
+ if 'replication' in subset or 'all' in subset:
+ info['file_replication'] = generate_file_repl_dict(blade)
+ info['bucket_replication'] = generate_bucket_repl_dict(blade)
+ info['snap_transfers'] = generate_snap_transfer_dict(blade)
+ info['remote_credentials'] = generate_remote_creds_dict(blade)
+ info['targets'] = generate_targets_dict(blade)
+
+ module.exit_json(changed=False, purefb_info=info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
new file mode 100644
index 00000000..239de7f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_inventory
+version_added: '1.0.0'
+short_description: Collect information from Pure Storage FlashBlade
+description:
+ - Collect information from a Pure Storage FlashBlade running the
+ Purity//FB operating system. By default, the module will collect basic
+ information including hosts, host groups, protection
+ groups and volume counts. Additional information can be collected
+    based on the configured set of arguments.
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: collect FlashBlade inventory
+  purefb_inventory:
+    fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: show default information
+ debug:
+ msg: "{{ array_info['purefb_info'] }}"
+
+'''
+
+RETURN = r'''
+purefb_inventory:
+    description: Returns the inventory information for the FlashBlade
+ returned: always
+ type: complex
+ sample: {
+ "admins": {
+ "pureuser": {
+ "role": "array_admin",
+ "type": "local"
+ }
+ },
+ "apps": {
+ "offload": {
+ "description": "Snapshot offload to NFS or Amazon S3",
+ "status": "healthy",
+ "version": "5.2.1"
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+def generate_hardware_dict(blade):
+ hw_info = {'fans': {},
+ 'controllers': {},
+ 'blades': {},
+ 'chassis': {},
+ 'ethernet': {},
+ 'modules': {},
+ 'power': {},
+ 'switch': {},
+ }
+ components = blade.hardware.list_hardware(filter='type=\'fm\'')
+ for component in range(0, len(components.items)):
+ component_name = components.items[component].name
+ hw_info['modules'][component_name] = {'slot': components.items[component].slot,
+ 'status': components.items[component].status,
+ 'serial': components.items[component].serial,
+ 'model': components.items[component].model
+ }
+ components = blade.hardware.list_hardware(filter='type=\'eth\'')
+ for component in range(0, len(components.items)):
+ component_name = components.items[component].name
+ hw_info['ethernet'][component_name] = {'slot': components.items[component].slot,
+ 'status': components.items[component].status,
+ 'serial': components.items[component].serial,
+ 'model': components.items[component].model,
+ 'speed': components.items[component].speed
+ }
+ components = blade.hardware.list_hardware(filter='type=\'fan\'')
+ for component in range(0, len(components.items)):
+ component_name = components.items[component].name
+ hw_info['fans'][component_name] = {'slot': components.items[component].slot,
+ 'status': components.items[component].status
+ }
+ components = blade.hardware.list_hardware(filter='type=\'fb\'')
+ for component in range(0, len(components.items)):
+ component_name = components.items[component].name
+ hw_info['blades'][component_name] = {'slot': components.items[component].slot,
+ 'status': components.items[component].status,
+ 'serial': components.items[component].serial,
+ 'model': components.items[component].model
+ }
+ components = blade.hardware.list_hardware(filter='type=\'pwr\'')
+ for component in range(0, len(components.items)):
+ component_name = components.items[component].name
+ hw_info['power'][component_name] = {'slot': components.items[component].slot,
+ 'status': components.items[component].status,
+ 'serial': components.items[component].serial,
+ 'model': components.items[component].model
+ }
+ components = blade.hardware.list_hardware(filter='type=\'xfm\'')
+ for component in range(0, len(components.items)):
+ component_name = components.items[component].name
+ hw_info['switch'][component_name] = {'slot': components.items[component].slot,
+ 'status': components.items[component].status,
+ 'serial': components.items[component].serial,
+ 'model': components.items[component].model
+ }
+ components = blade.hardware.list_hardware(filter='type=\'ch\'')
+ for component in range(0, len(components.items)):
+ component_name = components.items[component].name
+ hw_info['chassis'][component_name] = {'slot': components.items[component].slot,
+ 'index': components.items[component].index,
+ 'status': components.items[component].status,
+ 'serial': components.items[component].serial,
+ 'model': components.items[component].model
+ }
+
+ return hw_info
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ blade = get_blade(module)
+
+ module.exit_json(changed=False, purefb_info=generate_hardware_dict(blade))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py
new file mode 100644
index 00000000..1ec3a7d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_lifecycle
+version_added: '1.4.0'
+short_description: Manage FlashBlade object lifecycles
+description:
+- Manage lifecycles for object buckets
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete lifecycle rule
+ default: present
+ type: str
+ choices: [ absent, present ]
+ bucket:
+ description:
+ - Bucket the lifecycle rule applies to
+ type: str
+ required: true
+ name:
+ description:
+ - Name of the lifecycle rule
+ type: str
+ required: true
+ enabled:
+ description:
+ - State of lifecycle rule
+ type: bool
+ default: True
+ keep_for:
+ description:
+ - Time after which previous versions will be marked expired.
+ - Enter as days (d) or weeks (w). Range is 1 - 2147483647 days.
+ type: str
+ prefix:
+ description:
+ - Object key prefix identifying one or more objects in the bucket
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Create a lifecycle rule called bar for bucket foo
+ purefb_lifecycle:
+ name: bar
+ bucket: foo
+ keep_for: 2d
+ prefix: test
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete lifecycle rule foo from bucket foo
+ purefb_lifecycle:
+ name: foo
+ bucket: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import LifecycleRulePost, LifecycleRulePatch, Reference
+except ImportError:
+ HAS_PURITYFB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.10'
+
+
+def _get_bucket(module, blade):
+ s3bucket = None
+ buckets = blade.buckets.list_buckets()
+ for bucket in range(0, len(buckets.items)):
+ if buckets.items[bucket].name == module.params['bucket']:
+ s3bucket = buckets.items[bucket]
+ return s3bucket
+
+
+def _convert_to_millisecs(day):
+ if day[-1:].lower() == "w":
+ return int(day[:-1]) * 7 * 86400000
+ elif day[-1:].lower() == "d":
+ return int(day[:-1]) * 86400000
+ return 0
+
+
+def _findstr(text, match):
+ for line in text.splitlines():
+ if match in line:
+ found = line
+ return found
+
+
+def delete_rule(module, blade):
+ """Delete lifecycle rule"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.lifecycle_rules.delete_lifecycle_rules(names=[module.params['bucket'] + '/' + module.params['name']])
+ except Exception:
+ module.fail_json(msg="Failed to delete lifecycle rule {0} for bucket {1}.".format(module.params['name'],
+ module.params['bucket']))
+ module.exit_json(changed=changed)
+
+
+def create_rule(module, blade):
+ """Create lifecycle policy"""
+ changed = True
+ if not module.check_mode:
+ if not module.params['keep_for']:
+ module.fail_json(msg="\'keep_for\' is required to create a new lifecycle rule")
+ if not module.params['keep_for'][-1:].lower() in ['w', 'd']:
+ module.fail_json(msg="\'keep_for\' format incorrect - specify as \'d\' or \'w\'")
+ try:
+ attr = LifecycleRulePost(rule_id=module.params['name'],
+ keep_previous_version_for=_convert_to_millisecs(module.params['keep_for']),
+ prefix=module.params['prefix'])
+ attr.bucket = Reference(name=module.params['bucket'])
+ blade.lifecycle_rules.create_lifecycle_rules(rule=attr)
+ if not module.params['enabled']:
+ attr = LifecycleRulePatch()
+ attr.enabled = False
+                blade.lifecycle_rules.update_lifecycle_rules(names=[module.params['bucket'] + '/' + module.params['name']],
+ rule=attr)
+ except Exception:
+ module.fail_json(msg="Failed to create lifecycle rule {0} for bucket {1}.".format(module.params['name'],
+ module.params['bucket']))
+ module.exit_json(changed=changed)
+
+
+def update_rule(module, blade, rule):
+ """Update snapshot policy"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ current_rule = {'prefix': rule.prefix,
+ 'keep_previous_version_for': rule.keep_previous_version_for,
+ 'enabled': rule.enabled}
+ if not module.params['prefix']:
+ prefix = current_rule['prefix']
+ else:
+ prefix = module.params['prefix']
+ if not module.params['keep_for']:
+ keep_for = current_rule['keep_previous_version_for']
+ else:
+ keep_for = _convert_to_millisecs(module.params['keep_for'])
+ new_rule = {'prefix': prefix,
+ 'keep_previous_version_for': keep_for,
+ 'enabled': module.params['enabled']}
+
+ if current_rule != new_rule:
+ try:
+ attr = LifecycleRulePatch(keep_previous_version_for=new_rule['keep_previous_version_for'],
+ prefix=new_rule['prefix'])
+ attr.enabled = module.params['enabled']
+ blade.lifecycle_rules.update_lifecycle_rules(names=[module.params['bucket'] + '/' + module.params['name']],
+ rule=attr)
+ changed = True
+ except Exception:
+ module.fail_json(msg='Failed to update lifecycle rule {0} for bucket {1}.'.format(module.params['name'],
+ module.params['bucket']))
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ enabled=dict(type='bool', default=True),
+ bucket=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ prefix=dict(type='str',),
+ keep_for=dict(type='str'),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_PURITYFB:
+ module.fail_json(msg='purity_fb sdk is required for this module')
+
+ state = module.params['state']
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(msg='Minimum FlashBlade REST version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+ if not _get_bucket(module, blade):
+ module.fail_json(msg='Specified bucket {0} does not exist'.format(module.params['bucket']))
+
+ try:
+ rule = blade.lifecycle_rules.list_lifecycle_rules(names=[module.params['bucket'] + '/' + module.params['name']])
+ except Exception:
+ rule = None
+
+ if rule and state == 'present':
+ update_rule(module, blade, rule.items[0])
+ elif state == 'present' and not rule:
+ create_rule(module, blade)
+ elif state == 'absent' and rule:
+ delete_rule(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py
new file mode 100644
index 00000000..c60dd4a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: purefb_network
+version_added: "1.0.0"
+short_description: Manage network interfaces in a Pure Storage FlashBlade
+description:
+ - This module manages network interfaces on Pure Storage FlashBlade.
+ - When creating a network interface a subnet must already exist with
+ a network prefix that covers the IP address of the interface being
+ created.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Interface Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Create, delete or modifies a network interface.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ address:
+ description:
+ - IP address of interface.
+ required: false
+ type: str
+ services:
+ description:
+ - Define which services are configured for the interfaces.
+ required: false
+ choices: [ "data", "replication" ]
+ default: data
+ type: str
+ itype:
+ description:
+ - Type of interface.
+ required: false
+ choices: [ "vip" ]
+ default: vip
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = '''
+- name: Create new network interface named foo
+ purefb_network:
+ name: foo
+ address: 10.21.200.23
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Change IP address of network interface named foo
+ purefb_network:
+ name: foo
+ state: present
+ address: 10.21.200.123
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete network interface named foo
+ purefb_network:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641'''
+
+RETURN = '''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import NetworkInterface
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MINIMUM_API_VERSION = '1.3'
+
+
+def get_iface(module, blade):
+ """Return Filesystem or None"""
+ iface = []
+ iface.append(module.params['name'])
+ try:
+ res = blade.network_interfaces.list_network_interfaces(names=iface)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_iface(module, blade):
+ """Create Network Interface"""
+ changed = True
+ if not module.check_mode:
+ iface = []
+ services = []
+ iface.append(module.params['name'])
+ services.append(module.params['services'])
+ try:
+ blade.network_interfaces.create_network_interfaces(names=iface,
+ network_interface=NetworkInterface(address=module.params['address'],
+ services=services,
+ type=module.params['itype']
+ )
+ )
+ except Exception:
+ module.fail_json(msg='Interface creation failed. Check subnet exists for {0}'.format(module.params['address']))
+ module.exit_json(changed=changed)
+
+
+def modify_iface(module, blade):
+ """Modify Network Interface IP address"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ iface = get_iface(module, blade)
+ iface_new = []
+ iface_new.append(module.params['name'])
+ if module.params['address'] != iface.address:
+ try:
+ blade.network_interfaces.update_network_interfaces(names=iface_new,
+ network_interface=NetworkInterface(address=module.params['address']))
+ changed = True
+ except Exception:
+ module.fail_json(msg='Failed to modify Interface {0}'.format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def delete_iface(module, blade):
+ """ Delete Network Interface"""
+ changed = True
+ if not module.check_mode:
+ iface = []
+ iface.append(module.params['name'])
+ try:
+ blade.network_interfaces.delete_network_interfaces(names=iface)
+ except Exception:
+ module.fail_json(msg='Failed to delete network {0}'.format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ address=dict(),
+ services=dict(default='data', choices=['data', 'replication']),
+ itype=dict(default='vip', choices=['vip']),
+ )
+ )
+
+ required_if = [["state", "present", ["address"]]]
+
+ module = AnsibleModule(argument_spec,
+ required_if=required_if,
+ supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg='purity_fb sdk is required for this module')
+
+ state = module.params['state']
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MINIMUM_API_VERSION not in api_version:
+ module.fail_json(msg='Upgrade Purity//FB to enable this module')
+ iface = get_iface(module, blade)
+
+ if state == 'present' and not iface:
+ create_iface(module, blade)
+ elif state == 'present' and iface:
+ modify_iface(module, blade)
+ elif state == 'absent' and iface:
+ delete_iface(module, blade)
+ elif state == 'absent' and not iface:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py
new file mode 100644
index 00000000..8f2eb670
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_ntp
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade NTP settings
+description:
+- Set or erase NTP configuration for Pure Storage FlashBlades.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete NTP servers configuration
+ type: str
+ default: present
+ choices: [ absent, present ]
+ ntp_servers:
+ type: list
+ elements: str
+ description:
+ - A list of up to 4 alternate NTP servers. These may include IPv4,
+ IPv6 or FQDNs. Invalid IP addresses will cause the module to fail.
+ No validation is performed for FQDNs.
+ - If more than 4 servers are provided, only the first 4 unique
+ nameservers will be used.
+ - if no servers are given a default of I(0.pool.ntp.org) will be used.
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Delete existing NTP server entries
+ purefb_ntp:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set array NTP servers
+ purefb_ntp:
+ state: present
+ ntp_servers:
+ - "0.pool.ntp.org"
+ - "1.pool.ntp.org"
+ - "2.pool.ntp.org"
+ - "3.pool.ntp.org"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PureArray
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.3'
+
+
def remove(duplicate):
    """Return *duplicate* with repeated entries dropped, keeping first-seen order."""
    # dict.fromkeys preserves insertion order (Python 3.7+), giving an O(n)
    # order-preserving de-duplication instead of the O(n^2) membership loop.
    return list(dict.fromkeys(duplicate))
+
+
def delete_ntp(module, blade):
    """Clear the NTP server list on the array.

    Always reports changed=True; in check mode no REST call is made.
    """
    changed = True
    if not module.check_mode:
        current = blade.arrays.list_arrays().items[0].ntp_servers
        if current != []:
            try:
                blade.arrays.update_arrays(array_settings=PureArray(ntp_servers=[]))
            except Exception:
                module.fail_json(msg='Deletion of NTP servers failed')
    module.exit_json(changed=changed)
+
+
def create_ntp(module, blade):
    """Apply up to four NTP servers to the array.

    Falls back to 0.pool.ntp.org when no servers were supplied.
    Always reports changed=True; in check mode no REST call is made.
    """
    changed = True
    if not module.check_mode:
        servers = module.params['ntp_servers']
        if not servers:
            servers = ['0.pool.ntp.org']
            module.params['ntp_servers'] = servers
        try:
            # only the first four entries are honoured by the array
            blade.arrays.update_arrays(array_settings=PureArray(ntp_servers=servers[0:4]))
        except Exception:
            module.fail_json(msg='Update of NTP servers failed')
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: set or erase the FlashBlade NTP configuration."""

    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        ntp_servers=dict(type='list', elements='str'),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    ))

    # NOTE(review): this makes ntp_servers mandatory for state=present, so the
    # documented fallback to 0.pool.ntp.org in create_ntp() is unreachable
    # through normal invocation — confirm which behavior is intended.
    required_if = [['state', 'present', ['ntp_servers']]]

    module = AnsibleModule(argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)

    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')

    blade = get_blade(module)

    api_version = blade.api_version.list_versions().versions
    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(msg="Purity//FB must be upgraded to support this module.")

    if module.params['state'] == 'absent':
        delete_ntp(module, blade)
    else:
        # de-duplicate, then compare the (first four) requested servers with the
        # current array state; only divergence triggers an update
        module.params['ntp_servers'] = remove(module.params['ntp_servers'])
        if sorted(blade.arrays.list_arrays().items[0].ntp_servers) != sorted(module.params['ntp_servers'][0:4]):
            create_ntp(module, blade)

    # reached only when no change was needed (the helpers exit_json themselves)
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py
new file mode 100644
index 00000000..69c8686f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_phonehome
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashBlade Phone Home
+description:
+- Enable or Disable Remote Phone Home for a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of phone home
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Enable Remote Phone Home
+ purefb_phonehome:
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Disable Remote Phone Home
+ purefb_phonehome:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Support
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
def enable_ph(module, blade):
    """Enable Phone Home on the array (no-op REST-wise under check mode)."""
    changed = True
    if not module.check_mode:
        settings = Support(phonehome_enabled=True)
        try:
            blade.support.update_support(support=settings)
        except Exception:
            module.fail_json(msg='Enabling Phone Home failed')
    module.exit_json(changed=changed)
+
+
def disable_ph(module, blade):
    """Disable Phone Home on the array (no-op REST-wise under check mode)."""
    changed = True
    if not module.check_mode:
        settings = Support(phonehome_enabled=False)
        try:
            blade.support.update_support(support=settings)
        except Exception:
            module.fail_json(msg='Disabling Phone Home failed')
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: enable or disable Phone Home based on *state*."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', default='present', choices=['present', 'absent']),
    ))

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    # Fail fast on the missing SDK BEFORE touching the array: the original
    # checked HAS_PURITY_FB only after get_blade() and the REST version probe,
    # both of which need purity_fb, producing a confusing traceback instead of
    # a clean failure. This matches the ordering used by the sibling modules.
    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb SDK is required for this module')

    blade = get_blade(module)
    api_version = blade.api_version.list_versions().versions
    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(msg="Purity//FB must be upgraded to support this module.")

    # read the current phone home state once instead of per-branch
    phonehome_on = blade.support.list_support().items[0].phonehome_enabled
    if module.params['state'] == 'present' and not phonehome_on:
        enable_ph(module, blade)
    elif module.params['state'] == 'absent' and phonehome_on:
        disable_ph(module, blade)
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
new file mode 100644
index 00000000..ae0cc579
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
@@ -0,0 +1,441 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_policy
+version_added: '1.0.0'
+short_description: Manage FlashBlade policies
+description:
+- Manage policies for filesystem and file replica links
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete policy
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - Name of the policy
+ type: str
+ enabled:
+ description:
+ - State of policy
+ type: bool
+ default: True
+ every:
+ description:
+ - Interval between snapshots in seconds
+ - Range available 300 - 31536000 (equates to 5m to 365d)
+ type: int
+ keep_for:
+ description:
+ - How long to keep snapshots for
+ - Range available 300 - 31536000 (equates to 5m to 365d)
+ - Must not be set less than I(every)
+ type: int
+ at:
+ description:
+ - Provide a time in 12-hour AM/PM format, eg. 11AM
+ type: str
+ timezone:
+ description:
+ - Time Zone used for the I(at) parameter
+ - If not provided, the module will attempt to get the current local timezone from the server
+ type: str
+ filesystem:
+ description:
+ - List of filesystems to add to a policy on creation
+ - To amend policy members use the I(purefb_fs) module
+ type: list
+ elements: str
+ replica_link:
+ description:
+ - List of filesystem replica links to add to a policy on creation
+ - To amend policy members use the I(purefb_fs_replica) module
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Create a simple policy with no rules
+ purefb_policy:
+ name: test_policy
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a policy and connect to existing filesystems and filesystem replica links
+ purefb_policy:
+ name: test_policy_with_members
+ filesystem:
+ - fs1
+ - fs2
+ replica_link:
+ - rl1
+ - rl2
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a policy with rules
+ purefb_policy:
+ name: test_policy2
+ at: 11AM
+ keep_for: 86400
+ every: 86400
+ timezone: Asia/Shanghai
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a policy
+ purefb_policy:
+ name: test_policy
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import Policy, PolicyRule, PolicyPatch
+except ImportError:
+ HAS_PURITYFB = False
+
# pytz is needed for timezone validation; record availability so main() can
# fail with a clear message instead of a NameError when it is missing.
HAS_PYTZ = True
try:
    import pytz
except ImportError:
    # BUG FIX: was 'HAS_PYTX = False' (typo), which left HAS_PYTZ True and
    # caused a NameError on pytz later instead of a clean fail_json.
    HAS_PYTZ = False
+
+import os
+import re
+import platform
+
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.9'
+
+
+def _convert_to_millisecs(hour):
+ if hour[-2:] == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:] == "AM":
+ return int(hour[:-2]) * 3600000
+ elif hour[-2:] == "PM" and hour[:2] == "12":
+ return 43200000
+ return (int(hour[:-2]) + 12) * 3600000
+
+
+def _findstr(text, match):
+ for line in text.splitlines():
+ if match in line:
+ found = line
+ return found
+
+
def _get_local_tz(module, timezone='UTC'):
    """
    We will attempt to get the local timezone of the server running the module and use that.
    If we can't get the timezone then we will set the default to be UTC

    Linux has been tested and other operating systems should be OK.
    Failures cause assumption of UTC

    Windows is not supported and will assume UTC
    """
    if platform.system() == 'Linux':
        # NOTE(review): get_bin_path raises ValueError when the binary is
        # absent rather than returning None — confirm against the installed
        # ansible.module_utils.common.process implementation.
        timedatectl = get_bin_path('timedatectl')
        if timedatectl is not None:
            rcode, stdout, stderr = module.run_command(timedatectl)
            if rcode == 0 and stdout:
                line = _findstr(stdout, 'Time zone')
                full_tz = line.split(":", 1)[1].rstrip()
                timezone = full_tz.split()[0]
                return timezone
            else:
                module.warn('Incorrect timedatectl output. Timezone will be set to UTC')
        else:
            if os.path.exists('/etc/timezone'):
                timezone = get_file_content('/etc/timezone')
            else:
                module.warn('Could not find /etc/timezone. Assuming UTC')

    elif platform.system() == 'SunOS':
        if os.path.exists('/etc/default/init'):
            for line in get_file_content('/etc/default/init', '').splitlines():
                if line.startswith('TZ='):
                    timezone = line.split('=', 1)[1]
                    return timezone
        else:
            module.warn('Could not find /etc/default/init. Assuming UTC')

    elif re.match('^Darwin', platform.platform()):
        systemsetup = get_bin_path('systemsetup')
        if systemsetup is not None:
            # BUG FIX: AnsibleModule has no 'execute' method; the original
            # called module.execute() which raised AttributeError on macOS.
            rcode, stdout, stderr = module.run_command([systemsetup, '-gettimezone'])
            if rcode == 0 and stdout:
                timezone = stdout.split(':', 1)[1].lstrip()
            else:
                module.warn('Could not run systemsetup. Assuming UTC')
        else:
            module.warn('Could not find systemsetup. Assuming UTC')

    elif re.match('^(Free|Net|Open)BSD', platform.platform()):
        if os.path.exists('/etc/timezone'):
            timezone = get_file_content('/etc/timezone')
        else:
            module.warn('Could not find /etc/timezone. Assuming UTC')

    elif platform.system() == 'AIX':
        # NOTE(review): this concatenates version and release strings before
        # int() (e.g. '7' + '1' -> 71) — looks intentional for os-level
        # comparison, but confirm ordering on real AIX.
        aix_oslevel = int(platform.version() + platform.release())
        if aix_oslevel >= 61:
            if os.path.exists('/etc/environment'):
                for line in get_file_content('/etc/environment', '').splitlines():
                    if line.startswith('TZ='):
                        timezone = line.split('=', 1)[1]
                        return timezone
            else:
                module.warn('Could not find /etc/environment. Assuming UTC')
        else:
            module.warn('Cannot determine timezone when AIX os level < 61. Assuming UTC')

    else:
        module.warn('Could not find /etc/timezone. Assuming UTC')

    return timezone
+
+
def delete_policy(module, blade):
    """Remove the named snapshot policy from the array."""
    changed = True
    policy_name = module.params['name']
    if not module.check_mode:
        try:
            blade.policies.delete_policies(names=[policy_name])
        except Exception:
            module.fail_json(msg="Failed to delete policy {0}.".format(policy_name))
    module.exit_json(changed=changed)
+
+
def create_policy(module, blade):
    """Create snapshot policy"""
    # NOTE(review): all validation below runs only when NOT in check mode, so
    # check-mode runs report changed=True without validating inputs — confirm
    # whether that is intended.
    changed = True
    if not module.check_mode:
        try:
            # 'at' only makes sense for daily schedules (multiples of 86400s)
            if module.params['at'] and module.params['every']:
                if not module.params['every'] % 86400 == 0:
                    module.fail_json(msg='At time can only be set if every value is a multiple of 86400')
                if not module.params['timezone']:
                    module.params['timezone'] = _get_local_tz(module)
                if module.params['timezone'] not in pytz.all_timezones_set:
                    module.fail_json(msg='Timezone {0} is not valid'.format(module.params['timezone']))
            # retention must cover at least one snapshot interval
            if module.params['keep_for'] < module.params['every']:
                module.fail_json(msg='Retention period cannot be less than snapshot interval.')
            if module.params['at'] and not module.params['timezone']:
                module.params['timezone'] = _get_local_tz(module)
                if module.params['timezone'] not in set(pytz.all_timezones_set):
                    module.fail_json(msg='Timezone {0} is not valid'.format(module.params['timezone']))

            if module.params['keep_for']:
                # range checks: 300s (5m) to 34560000s (400d)
                if not 300 <= module.params['keep_for'] <= 34560000:
                    module.fail_json(msg="keep_for parameter is out of range (300 to 34560000)")
                if not 300 <= module.params['every'] <= 34560000:
                    module.fail_json(msg="every parameter is out of range (300 to 34560000)")
                # the API takes milliseconds, hence the * 1000 conversions
                if module.params['at']:
                    attr = Policy(enabled=module.params['enabled'], rules=[PolicyRule(keep_for=module.params['keep_for'] * 1000,
                                                                                     every=module.params['every'] * 1000,
                                                                                     at=_convert_to_millisecs(module.params['at']),
                                                                                     time_zone=module.params['timezone'])])
                else:
                    attr = Policy(enabled=module.params['enabled'], rules=[PolicyRule(keep_for=module.params['keep_for'] * 1000,
                                                                                     every=module.params['every'] * 1000)])
            else:
                # no rules requested: create a bare (possibly disabled) policy
                attr = Policy(enabled=module.params['enabled'])
            blade.policies.create_policies(names=[module.params['name']], policy=attr)
        except Exception:
            module.fail_json(msg="Failed to create policy {0}.".format(module.params['name']))
        if module.params['filesystem']:
            try:
                # listing first validates every named filesystem exists
                blade.file_systems.list_file_systems(names=module.params['filesystem'])
                blade.policies.create_policy_filesystems(policy_names=[module.params['name']],
                                                         member_names=module.params['filesystem'])
            except Exception:
                # roll back the freshly created policy on member attach failure
                blade.policies.delete_policies(names=[module.params['name']])
                module.fail_json(msg="Failed to connect filesystems to policy {0}, "
                                     "or one of {1} doesn't exist.".format(module.params['name'],
                                                                           module.params['filesystem']))
        if module.params['replica_link']:
            for link in module.params['replica_link']:
                remote_array = blade.file_system_replica_links.list_file_system_replica_links(local_file_system_names=[link])
                try:
                    blade.policies.create_policy_file_system_replica_links(policy_names=[module.params['name']],
                                                                           member_names=[link],
                                                                           remote_names=[remote_array.items[0].remote.name])
                except Exception:
                    # roll back the policy if any replica link attach fails
                    blade.policies.delete_policies(names=[module.params['name']])
                    module.fail_json(msg="Failed to connect filesystem replicsa link {0} to policy {1}. "
                                         "Replica Link {0} does not exist.".format(link, module.params['name']))
    module.exit_json(changed=changed)
+
+
def update_policy(module, blade, policy):
    """Update snapshot policy"""
    # NOTE(review): 'changed' starts True and is only reset to False inside
    # the not-check_mode branch, so check-mode always reports changed=True
    # regardless of whether an update would occur — confirm intent.
    changed = True
    if not module.check_mode:
        changed = False
        if not policy.rules:
            # no existing rule: compare against a zeroed baseline
            current_policy = {'time_zone': None,
                              'every': 0,
                              'keep_for': 0,
                              'at': 0,
                              'enabled': policy.enabled}
        else:
            # API reports milliseconds; normalise the in-memory rule to seconds
            if policy.rules[0].keep_for != 0:
                policy.rules[0].keep_for = int(policy.rules[0].keep_for / 1000)
            if policy.rules[0].every != 0:
                policy.rules[0].every = int(policy.rules[0].every / 1000)

            current_policy = {'time_zone': policy.rules[0].time_zone,
                              'every': policy.rules[0].every,
                              'keep_for': policy.rules[0].keep_for,
                              'at': policy.rules[0].at,
                              'enabled': policy.enabled}
        # normalise requested parameters (absent -> 0 / None)
        if not module.params['every']:
            every = 0
        else:
            every = module.params['every']
        if not module.params['keep_for']:
            keep_for = 0
        else:
            keep_for = module.params['keep_for']
        if module.params['at']:
            at_time = _convert_to_millisecs(module.params['at'])
        else:
            at_time = None
        if not module.params['timezone']:
            timezone = _get_local_tz(module)
        else:
            timezone = module.params['timezone']
        # timezone is only meaningful together with an 'at' time
        if at_time:
            new_policy = {'time_zone': timezone,
                          'every': every,
                          'keep_for': keep_for,
                          'at': at_time,
                          'enabled': module.params['enabled']}
        else:
            new_policy = {'time_zone': None,
                          'every': every,
                          'keep_for': keep_for,
                          'at': None,
                          'enabled': module.params['enabled']}
        if new_policy['time_zone'] and new_policy['time_zone'] not in pytz.all_timezones_set:
            module.fail_json(msg='Timezone {0} is not valid'.format(module.params['timezone']))

        if current_policy != new_policy:
            # backfill unspecified parameters from the current rule so the
            # patch below replaces the rule wholesale
            if not module.params['at']:
                module.params['at'] = current_policy['at']
            if not module.params['keep_for']:
                module.params['keep_for'] = current_policy['keep_for']
            if not module.params['every']:
                module.params['every'] = current_policy['every']
            if module.params['at'] and module.params['every']:
                if not module.params['every'] % 86400 == 0:
                    module.fail_json(msg='At time can only be set if every value is a multiple of 86400')
            if module.params['keep_for'] < module.params['every']:
                module.fail_json(msg='Retention period cannot be less than snapshot interval.')
            if module.params['at'] and not module.params['timezone']:
                module.params['timezone'] = _get_local_tz(module)
                if module.params['timezone'] not in set(pytz.all_timezones_set):
                    module.fail_json(msg='Timezone {0} is not valid'.format(module.params['timezone']))

            try:
                # the PATCH removes the old rule and adds the new one in a
                # single call; all durations go back to milliseconds here
                attr = PolicyPatch()
                attr.enabled = module.params['enabled']
                if at_time:
                    attr.add_rules = [PolicyRule(keep_for=module.params['keep_for'] * 1000,
                                                 every=module.params['every'] * 1000,
                                                 at=at_time,
                                                 time_zone=timezone)]
                else:
                    attr.add_rules = [PolicyRule(keep_for=module.params['keep_for'] * 1000,
                                                 every=module.params['every'] * 1000)]
                attr.remove_rules = [PolicyRule(keep_for=current_policy['keep_for'] * 1000,
                                                every=current_policy['every'] * 1000,
                                                at=current_policy['at'],
                                                time_zone=current_policy['time_zone'])]
                blade.policies.update_policies(names=[module.params['name']],
                                               policy_patch=attr)
                changed = True
            except Exception:
                module.fail_json(msg='Failed to update policy {0}.'.format(module.params['name']))
        else:
            changed = False
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create, update or delete a FlashBlade snapshot policy."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', default='present', choices=['absent', 'present']),
        enabled=dict(type='bool', default=True),
        timezone=dict(type='str'),
        name=dict(type='str'),
        at=dict(type='str'),
        every=dict(type='int'),
        keep_for=dict(type='int'),
        filesystem=dict(type='list', elements='str'),
        replica_link=dict(type='list', elements='str'),
    ))

    # a retention without an interval (or vice versa) is meaningless
    required_together = [['keep_for', 'every']]

    module = AnsibleModule(argument_spec,
                           required_together=required_together,
                           supports_check_mode=True)

    if not HAS_PURITYFB:
        module.fail_json(msg='purity_fb sdk is required for this module')
    if not HAS_PYTZ:
        module.fail_json(msg='pytz is required for this module')

    state = module.params['state']
    blade = get_blade(module)
    versions = blade.api_version.list_versions().versions

    if MIN_REQUIRED_API_VERSION not in versions:
        module.fail_json(msg='Minimum FlashBlade REST version required: {0}'.format(MIN_REQUIRED_API_VERSION))

    # a missing policy raises from the SDK; treat that as "does not exist"
    try:
        policy = blade.policies.list_policies(names=[module.params['name']])
    except Exception:
        policy = None

    # each helper calls exit_json itself; only the absent+missing no-op
    # case falls through to the final exit_json below
    if policy and state == 'present':
        update_policy(module, blade, policy.items[0])
    elif state == 'present' and not policy:
        create_policy(module, blade)
    elif state == 'absent' and policy:
        delete_policy(module, blade)

    module.exit_json(changed=False)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py
new file mode 100644
index 00000000..afd31810
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_proxy
+version_added: '1.0.0'
+author:
+ - Pure Storage ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+short_description: Configure FlashBlade phonehome HTTPs proxy settings
+description:
+- Set or erase configuration for the HTTPS phonehome proxy settings.
+options:
+ state:
+ description:
+ - Set or delete proxy configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ host:
+ description:
+ - The proxy host name.
+ type: str
+ port:
+ description:
+ - The proxy TCP/IP port number.
+ type: int
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Delete existing proxy settings
+ purefb_proxy:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set proxy settings
+ purefb_proxy:
+ host: purestorage.com
+ port: 8080
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Support
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
def delete_proxy(module, blade):
    """Remove the phone home HTTPS proxy setting from the array.

    Reports changed=False when no proxy is currently configured.
    """
    changed = True
    if not module.check_mode:
        current_proxy = blade.support.list_support().items[0].proxy
        if current_proxy != '':
            try:
                proxy_settings = Support(proxy='')
                blade.support.update_support(support=proxy_settings)
            except Exception:
                # fixed typo in the user-facing message ('settigs' -> 'settings')
                module.fail_json(msg='Delete proxy settings failed')
        else:
            changed = False
    module.exit_json(changed=changed)
+
+
def create_proxy(module, blade):
    """Set the phone home HTTPS proxy from the host/port parameters.

    Reports changed=False when the desired proxy is already configured.
    """
    changed = True
    if not module.check_mode:
        current_proxy = blade.support.list_support().items[0].proxy
        if current_proxy is not None:
            desired = "https://" + module.params['host'] + ":" + str(module.params['port'])
            if desired != current_proxy:
                try:
                    blade.support.update_support(support=Support(proxy=desired))
                except Exception:
                    module.fail_json(msg='Set phone home proxy failed.')
            else:
                changed = False

    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: set or erase the phone home proxy configuration."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', default='present', choices=['absent', 'present']),
        host=dict(type='str'),
        port=dict(type='int'),
    ))

    # a proxy host is useless without a port and vice versa
    required_together = [['host', 'port']]

    module = AnsibleModule(argument_spec,
                           required_together=required_together,
                           supports_check_mode=True)

    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb SDK is required for this module')

    blade = get_blade(module)
    state = module.params['state']

    if state == 'absent':
        delete_proxy(module, blade)
    elif state == 'present':
        create_proxy(module, blade)
    else:
        # unreachable: 'state' is constrained to the two choices above
        module.exit_json(changed=False)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py
new file mode 100644
index 00000000..87287be4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_ra
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashBlade Remote Assist
+description:
+- Enable or Disable Remote Assist for a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of remote assist
+ - When set to I(enable) the RA port can be exposed using the
+ I(debug) module.
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Enable Remote Assist port
+ purefb_ra:
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Disable Remote Assist port
+ purefb_ra:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Support
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
def enable_ra(module, blade):
    """Enable Remote Assist on the array (no-op REST-wise under check mode)."""
    changed = True
    if not module.check_mode:
        settings = Support(remote_assist_active=True)
        try:
            blade.support.update_support(support=settings)
        except Exception:
            module.fail_json(msg='Enabling Remote Assist failed')
    module.exit_json(changed=changed)
+
+
def disable_ra(module, blade):
    """Disable Remote Assist on the array (no-op REST-wise under check mode)."""
    changed = True
    if not module.check_mode:
        settings = Support(remote_assist_active=False)
        try:
            blade.support.update_support(support=settings)
        except Exception:
            module.fail_json(msg='Disabling Remote Assist failed')
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: enable or disable Remote Assist based on *state*."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', default='present', choices=['present', 'absent']),
    ))

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    # Fail fast on the missing SDK BEFORE touching the array: the original
    # checked HAS_PURITY_FB only after get_blade() and the REST version probe,
    # both of which need purity_fb, producing a confusing traceback instead of
    # a clean failure. This matches the ordering used by the sibling modules.
    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb SDK is required for this module')

    blade = get_blade(module)
    api_version = blade.api_version.list_versions().versions
    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(msg="Purity//FB must be upgraded to support this module.")

    # read the current remote assist state once instead of per-branch
    ra_active = blade.support.list_support().items[0].remote_assist_active
    if module.params['state'] == 'present' and not ra_active:
        enable_ra(module, blade)
    elif module.params['state'] == 'absent' and ra_active:
        disable_ra(module, blade)
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py
new file mode 100644
index 00000000..bc0543b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_remote_cred
+version_added: '1.0.0'
+short_description: Create, modify and delete FlashBlade object store remote credentials
+description:
+- Create, modify and delete object store remote credentials
+- You must have a correctly configured remote array or target
+- This module is B(not) idempotent when updating existing remote credentials
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of remote credential
+ default: present
+ choices: [ absent, present ]
+ type: str
+ name:
+ description:
+ - The name of the credential
+ required: true
+ type: str
+ access_key:
+ description:
+ - Access Key ID of the S3 target
+ type: str
+ secret:
+ description:
+ - Secret Access Key for the S3 or Azure target
+ type: str
+ target:
+ description:
+    - Name of the connected target or remote array the credential applies to
+ required: true
+ type: str
+
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Create remote credential
+ purefb_remote_cred:
+ name: cred1
+ access_key: "3794fb12c6204e19195f"
+ secret: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+ target: target1
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete remote credential
+ purefb_remote_cred:
+ name: cred1
+ target: target1
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import ObjectStoreRemoteCredentials
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+MIN_REQUIRED_API_VERSION = '1.9'
+
+
def get_connected(module, blade):
    """Return the name of the connected remote array or target, or None.

    Matches the requested target against replica-link array connections
    (by remote name or management address) and then object-store targets.
    """
    wanted = module.params['target']
    good_states = ["connected", "connecting", "partially_connected"]

    for conn in blade.array_connections.list_array_connections().items:
        name_match = (conn.remote.name == wanted or
                      conn.management_address == wanted)
        if name_match and conn.status in good_states:
            return conn.remote.name

    for tgt in blade.targets.list_targets().items:
        if tgt.name == wanted and tgt.status in good_states:
            return tgt.name
    return None
+
+
def get_remote_cred(module, blade):
    """Look up an existing remote credential; return it or None."""
    full_name = '{0}/{1}'.format(module.params['target'], module.params['name'])
    try:
        response = blade.object_store_remote_credentials.list_object_store_remote_credentials(
            names=[full_name])
        return response.items[0]
    except Exception:
        return None
+
+
def create_credential(module, blade):
    """Create a new object store remote credential on the connected target."""
    changed = True
    if not module.check_mode:
        full_name = module.params['target'] + '/' + module.params['name']
        new_cred = ObjectStoreRemoteCredentials(access_key_id=module.params['access_key'],
                                                secret_access_key=module.params['secret'])
        try:
            blade.object_store_remote_credentials.create_object_store_remote_credentials(
                names=[full_name], remote_credentials=new_cred)
        except Exception:
            module.fail_json(msg='Failed to create remote credential {0}'.format(full_name))
    module.exit_json(changed=changed)
+
+
def update_credential(module, blade):
    """Overwrite the key/secret of an existing remote credential.

    Not idempotent: the stored secret cannot be read back for comparison.
    """
    changed = True
    if not module.check_mode:
        full_name = module.params['target'] + '/' + module.params['name']
        replacement = ObjectStoreRemoteCredentials(access_key_id=module.params['access_key'],
                                                   secret_access_key=module.params['secret'])
        try:
            blade.object_store_remote_credentials.update_object_store_remote_credentials(
                names=[full_name], remote_credentials=replacement)
        except Exception:
            module.fail_json(msg='Failed to update remote credential {0}'.format(full_name))
    module.exit_json(changed=changed)
+
+
def delete_credential(module, blade):
    """Remove an object store remote credential."""
    changed = True
    if not module.check_mode:
        full_name = '{0}/{1}'.format(module.params['target'], module.params['name'])
        try:
            blade.object_store_remote_credentials.delete_object_store_remote_credentials(
                names=[full_name])
        except Exception:
            module.fail_json(msg='Failed to delete remote credential {0}.'.format(full_name))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create, update or delete a remote credential."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', default='present', choices=['present', 'absent']),
        name=dict(type='str', required=True),
        access_key=dict(type='str'),
        secret=dict(type='str', no_log=True),
        target=dict(type='str', required=True),
    ))

    module = AnsibleModule(argument_spec,
                           required_if=[['state', 'present', ['access_key', 'secret']]],
                           supports_check_mode=True)

    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')

    blade = get_blade(module)
    versions = blade.api_version.list_versions().versions
    if MIN_REQUIRED_API_VERSION not in versions:
        module.fail_json(msg='FlashBlade REST version not supported. '
                             'Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))

    # The credential can only live on an already-connected target/array.
    if not get_connected(module, blade):
        module.fail_json(msg='Selected target {0} is not connected.'.format(module.params['target']))

    state = module.params['state']
    remote_cred = get_remote_cred(module, blade)

    if state == 'present':
        if remote_cred:
            update_credential(module, blade)
        else:
            create_credential(module, blade)
    elif remote_cred:
        delete_credential(module, blade)

    module.exit_json(changed=False)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
new file mode 100644
index 00000000..f98dfa7f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_s3acc
+version_added: '1.0.0'
+short_description: Create or delete FlashBlade Object Store accounts
+description:
+- Create or delete object store accounts on a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete object store account
+ default: present
+ choices: [ absent, present ]
+ type: str
+ name:
+ description:
+ - The name of object store account
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Create object store account foo
+ purefb_s3acc:
+ name: foo
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete object store account foo
+ purefb_s3acc:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.3'
+
+
def get_s3acc(module, blade):
    """Return the named Object Store Account, or None if absent.

    Fix: iterate the account list directly and return on the first match
    instead of an index-based loop that kept scanning after finding it.
    """
    accounts = blade.object_store_accounts.list_object_store_accounts()
    for account in accounts.items:
        if account.name == module.params['name']:
            return account
    return None
+
+
def update_s3acc(module, blade):
    """No-op "update" for an Object Store Account.

    Accounts have nothing modifiable, so a real run reports no change;
    check mode keeps the original behaviour of reporting a change.
    """
    module.exit_json(changed=bool(module.check_mode))
+
+
def create_s3acc(module, blade):
    """Create a new Object Store Account."""
    changed = True
    if not module.check_mode:
        acc_name = module.params['name']
        try:
            blade.object_store_accounts.create_object_store_accounts(names=[acc_name])
        except Exception:
            module.fail_json(msg='Object Store Account {0}: Creation failed'.format(acc_name))
    module.exit_json(changed=changed)
+
+
def delete_s3acc(module, blade):
    """Delete an Object Store Account, refusing if it still has users.

    Fix: the "remove all users" failure message was built with a backslash
    line-continuation inside the string literal, which embedded a long run
    of spaces in the user-visible text; it is now cleanly concatenated.
    """
    changed = True
    if not module.check_mode:
        acc_name = module.params['name']
        # Accounts with users cannot be deleted; make the operator clean up first.
        user_filter = 'name=\'' + acc_name + '/*\''
        count = len(blade.object_store_users.list_object_store_users(filter=user_filter).items)
        if count != 0:
            module.fail_json(msg='Remove all Users from Object Store Account {0} '
                                 'before deletion'.format(acc_name))
        else:
            try:
                blade.object_store_accounts.delete_object_store_accounts(names=[acc_name])
            except Exception:
                module.fail_json(msg='Object Store Account {0}: Deletion failed'.format(acc_name))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create or delete an object store account."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, type='str'),
        state=dict(default='present', choices=['present', 'absent']),
    ))

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    blade = get_blade(module)
    if MIN_REQUIRED_API_VERSION not in blade.api_version.list_versions().versions:
        module.fail_json(msg='Minimum FlashBlade REST version required: {0}'.format(MIN_REQUIRED_API_VERSION))

    state = module.params['state']
    s3acc = get_s3acc(module, blade)

    if s3acc:
        if state == 'absent':
            delete_s3acc(module, blade)
        else:
            update_s3acc(module, blade)
    elif state == 'present':
        create_s3acc(module, blade)

    module.exit_json(changed=False)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
new file mode 100644
index 00000000..b5298538
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_s3user
+version_added: '1.0.0'
+short_description: Create or delete FlashBlade Object Store account users
+description:
+- Create or delete object store account users on a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete object store account user
+ default: present
+ choices: [ absent, present ]
+ type: str
+ name:
+ description:
+ - The name of object store user
+ type: str
+ required: true
+ account:
+ description:
+ - The name of object store account associated with user
+ type: str
+ required: true
+ access_key:
+ description:
+ - Create secret access key.
+ - Key can be exposed using the I(debug) module
+ - If enabled this will override I(imported_key)
+ type: bool
+ default: false
+ imported_key:
+ description:
+ - Access key of imported credentials
+ type: str
+ version_added: "1.4.0"
+ imported_secret:
+ description:
+ - Access key secret for access key to import
+ type: str
+ version_added: "1.4.0"
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Create object store user (with access ID and key) foo in account bar
+ purefb_s3user:
+ name: foo
+ account: bar
+ access_key: true
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: result
+
+- debug:
+ msg: "S3 User: {{ result['s3user_info'] }}"
+
+- name: Create object store user foo using imported key/secret in account bar
+ purefb_s3user:
+ name: foo
+ account: bar
+ imported_key: "PSABSSZRHPMEDKHMAAJPJBONPJGGDDAOFABDGLBJLHO"
+ imported_secret: "BAG61F63105e0d3669/e066+5C5DFBE2c127d395LBGG"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete object store user foo in account bar
+ purefb_s3user:
+ name: foo
+ account: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+'''
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import ObjectStoreAccessKey, ObjectStoreAccessKeyPost
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.3'
+IMPORT_KEY_API_VERSION = '1.10'
+
+
def get_s3acc(module, blade):
    """Return the Object Store Account named by ``account``, or None.

    Fix: iterate the account list directly and return on the first match
    instead of an index-based loop that kept scanning after finding it.
    """
    accounts = blade.object_store_accounts.list_object_store_accounts()
    for account in accounts.items:
        if account.name == module.params['account']:
            return account
    return None
+
+
def get_s3user(module, blade):
    """Return the Object Store User ``account/name``, or None if absent.

    Fixes the copied docstring (it said "Account") and replaces the
    index-based scan with a direct iteration that returns on first match.
    """
    full_user = module.params['account'] + "/" + module.params['name']
    response = blade.object_store_users.list_object_store_users()
    for user in response.items:
        if user.name == full_user:
            return user
    return None
+
+
def update_s3user(module, blade):
    """Manage access keys for an existing Object Store User.

    A user has no other mutable attributes, so "update" means creating a
    generated access key (``access_key: true``) or importing one
    (``imported_key``/``imported_secret``), up to the two-key limit.

    Fixes two defects in the original:
    - ``s3user_facts`` was unbound in check mode, raising NameError on exit.
    - ``versions`` was only assigned inside the key loop, so it was unbound
      when the array had no access keys yet and an import was requested.
    """
    changed = True
    s3user_facts = {}
    if not module.check_mode:
        changed = False
        exists = False
        user = module.params['account'] + "/" + module.params['name']
        if module.params['access_key'] or module.params['imported_key']:
            key_count = 0
            # Fetch the REST versions once, up front (see docstring).
            versions = blade.api_version.list_versions().versions
            keys = blade.object_store_access_keys.list_object_store_access_keys()
            for key in keys.items:
                if module.params['imported_key'] and IMPORT_KEY_API_VERSION in versions:
                    if key.name == module.params['imported_key']:
                        module.warn('Imported key provided already belongs to a user')
                        exists = True
                if key.user.name == user:
                    key_count += 1
            if not exists:
                if key_count < 2:
                    try:
                        if module.params['access_key'] and module.params['imported_key']:
                            module.warn('\'access_key: true\' overrides imported keys')
                        if module.params['access_key']:
                            result = blade.object_store_access_keys.create_object_store_access_keys(
                                object_store_access_key=ObjectStoreAccessKey(user={'name': user}))
                            changed = True
                            # Expose the generated credentials to the playbook.
                            s3user_facts['fb_s3user'] = {'user': user,
                                                         'access_key': result.items[0].secret_access_key,
                                                         'access_id': result.items[0].name}
                        elif IMPORT_KEY_API_VERSION in versions:
                            blade.object_store_access_keys.create_object_store_access_keys(
                                names=[module.params['imported_key']],
                                object_store_access_key=ObjectStoreAccessKeyPost(
                                    user={'name': user}, secret_access_key=module.params['imported_secret']))
                            changed = True
                    except Exception:
                        if module.params['imported_key']:
                            module.fail_json(msg='Object Store User {0}: Access Key import failed'.format(user))
                        else:
                            module.fail_json(msg='Object Store User {0}: Access Key creation failed'.format(user))
                else:
                    module.warn('Object Store User {0}: Maximum Access Key count reached'.format(user))
    module.exit_json(changed=changed, s3user_info=s3user_facts)
+
+
def create_s3user(module, blade):
    """Create an Object Store User and, optionally, an access key.

    With ``access_key: true`` a new key pair is generated and returned in
    ``s3user_info``; otherwise, if ``imported_key``/``imported_secret`` are
    supplied (and the REST API is new enough), those credentials are
    imported. ``access_key`` takes precedence over an imported key.
    """
    s3user_facts = {}
    changed = True
    if not module.check_mode:
        # Users are namespaced by account: "<account>/<name>".
        user = module.params['account'] + "/" + module.params['name']
        try:
            blade.object_store_users.create_object_store_users(names=[user])
            if module.params['access_key'] and module.params['imported_key']:
                module.warn('\'access_key: true\' overrides imported keys')
            if module.params['access_key']:
                try:
                    result = blade.object_store_access_keys.create_object_store_access_keys(
                        object_store_access_key=ObjectStoreAccessKey(user={'name': user}))
                    # Expose the generated credentials to the playbook.
                    s3user_facts['fb_s3user'] = {'user': user,
                                                 'access_key': result.items[0].secret_access_key,
                                                 'access_id': result.items[0].name}
                except Exception:
                    # Roll back the half-created user before failing.
                    delete_s3user(module, blade)
                    module.fail_json(msg='Object Store User {0}: Creation failed'.format(user))
            else:
                if module.params['imported_key']:
                    versions = blade.api_version.list_versions().versions
                    # Key import needs a newer REST API than user creation.
                    if IMPORT_KEY_API_VERSION in versions:
                        try:
                            blade.object_store_access_keys.create_object_store_access_keys(
                                names=[module.params['imported_key']],
                                object_store_access_key=ObjectStoreAccessKeyPost(
                                    user={'name': user}, secret_access_key=module.params['imported_secret']))
                        except Exception:
                            # Roll back the half-created user before failing.
                            delete_s3user(module, blade)
                            module.fail_json(msg='Object Store User {0}: Creation failed with imported access key'.format(user))
        except Exception:
            module.fail_json(msg='Object Store User {0}: Creation failed'.format(user))
    module.exit_json(changed=changed, s3user_info=s3user_facts)
+
+
def delete_s3user(module, blade):
    """Delete an Object Store User.

    Fix: the failure message incorrectly said "Object Store Account";
    this function deletes a user.
    """
    changed = True
    if not module.check_mode:
        user = module.params['account'] + "/" + module.params['name']
        try:
            blade.object_store_users.delete_object_store_users(names=[user])
        except Exception:
            module.fail_json(msg='Object Store User {0}: Deletion failed'.format(module.params['name']))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create, update or delete an object store user.

    Fix: the ``access_key`` default was the string ``'false'`` on a bool
    option, relying on Ansible's implicit coercion; it is now a real bool.
    """
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, type='str'),
        account=dict(required=True, type='str'),
        access_key=dict(default=False, type='bool'),
        imported_key=dict(type='str'),
        imported_secret=dict(type='str', no_log=True),
        state=dict(default='present', choices=['present', 'absent']),
    ))

    # An imported key is only meaningful together with its secret.
    required_together = [['imported_key', 'imported_secret']]

    module = AnsibleModule(argument_spec,
                           required_together=required_together,
                           supports_check_mode=True)

    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')

    state = module.params['state']
    blade = get_blade(module)
    versions = blade.api_version.list_versions().versions

    if MIN_REQUIRED_API_VERSION not in versions:
        module.fail_json(msg='Minimum FlashBlade REST version required: {0}'.format(MIN_REQUIRED_API_VERSION))

    # The user's account must already exist.
    s3acc = get_s3acc(module, blade)
    if not s3acc:
        module.fail_json(msg='Object Store Account {0} does not exist'.format(module.params['account']))

    s3user = get_s3user(module, blade)

    if state == 'absent' and s3user:
        delete_s3user(module, blade)
    elif state == 'present' and s3user:
        update_s3user(module, blade)
    elif not s3user and state == 'present':
        create_s3user(module, blade)
    else:
        module.exit_json(changed=False)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py
new file mode 100644
index 00000000..f3347f26
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_smtp
+version_added: '1.0.0'
+short_description: Configure SMTP for Pure Storage FlashBlade
+description:
+- Configure SMTP for a Pure Storage FlashBlade.
+- Whilst there can be no relay host, a sender domain must be configured.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ host:
+ description:
+ - Relay server name
+ type: str
+ domain:
+ description:
+ - Domain name for alert messages
+ required: true
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Configure SMTP settings
+ purefb_smtp:
+ host: hostname
+ domain: xyz.com
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Smtp
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
def set_smtp(module, blade):
    """Apply SMTP relay-host and sender-domain settings.

    Fixes the change reporting of the original: a relay-host update with an
    unchanged sender domain was reported as ``changed=False``, and check
    mode always reported ``changed=True`` without inspecting the array.
    Now any pending difference is detected first, reported accurately in
    check mode, and applied only outside check mode.
    """
    current = blade.smtp.list_smtp().items[0]

    # Work out what (if anything) needs to change before touching the array.
    new_host = None
    if module.params['host'] and module.params['host'] != current.relay_host:
        new_host = module.params['host']
    elif current.relay_host and not module.params['host']:
        new_host = ''  # clear the relay host
    domain_differs = module.params['domain'] != current.sender_domain

    changed = new_host is not None or domain_differs
    if not module.check_mode:
        if new_host is not None:
            try:
                blade.smtp.update_smtp(smtp_settings=Smtp(relay_host=new_host))
            except Exception:
                module.fail_json(msg='Configuring SMTP relay host failed')
        if domain_differs:
            try:
                blade.smtp.update_smtp(smtp_settings=Smtp(sender_domain=module.params['domain']))
            except Exception:
                module.fail_json(msg='Configuring SMTP sender domain failed')
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: apply SMTP configuration.

    Fix: the purity_fb SDK presence check now runs BEFORE any array
    communication; the original checked it only after get_blade() and an
    API-version query.
    """
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        host=dict(type='str'),
        domain=dict(type='str', required=True),
    ))

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    # Verify the SDK is present before attempting to talk to the array.
    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb SDK is required for this module')

    blade = get_blade(module)
    api_version = blade.api_version.list_versions().versions
    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(msg="Purity//FB must be upgraded to support this module.")

    set_smtp(module, blade)
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py
new file mode 100644
index 00000000..6039a387
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_snap
+version_added: '1.0.0'
+short_description: Manage filesystem snapshots on Pure Storage FlashBlades
+description:
+- Create or delete volumes and filesystem snapshots on Pure Storage FlashBlades.
+- Restoring a filesystem from a snapshot is only supported using
+ the latest snapshot.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the source filesystem.
+ required: true
+ type: str
+ suffix:
+ description:
+ - Suffix of snapshot name.
+ type: str
+ state:
+ description:
+ - Define whether the filesystem snapshot should exist or not.
+ choices: [ absent, present, restore ]
+ default: present
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the snapshot on delete or leave in trash.
+ type: bool
+ default: 'no'
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Create snapshot foo.ansible
+ purefb_snap:
+ name: foo
+ suffix: ansible
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete snapshot named foo.snap
+ purefb_snap:
+ name: foo
+ suffix: snap
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Recover deleted snapshot foo.ansible
+ purefb_snap:
+ name: foo
+ suffix: ansible
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Restore filesystem foo (uses latest snapshot)
+ purefb_snap:
+ name: foo
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: restore
+
+- name: Eradicate snapshot named foo.snap
+ purefb_snap:
+ name: foo
+ suffix: snap
+ eradicate: true
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+from datetime import datetime
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import FileSystemSnapshot, SnapshotSuffix, FileSystem, Reference
+except ImportError:
+ HAS_PURITY_FB = False
+
+
def get_fs(module, blade):
    """Return the source filesystem object, or None if it does not exist."""
    try:
        response = blade.file_systems.list_file_systems(names=[module.params['name']])
        return response.items[0]
    except Exception:
        return None
+
+
def get_latest_fssnapshot(module, blade):
    """Return the name of the newest snapshot of the filesystem, or None.

    Fails the module if the newest snapshot has been destroyed, since it
    must be recovered or eradicated before a restore can proceed.
    """
    try:
        source_filter = 'source=\'' + module.params['name'] + '\''
        snaps = blade.file_system_snapshots.list_file_system_snapshots(filter=source_filter)
        newest = snaps.items[0]
        if not newest.destroyed:
            return newest.name
        module.fail_json(msg='Latest snapshot {0} is destroyed.'
                         ' Eradicate or recover this first.'.format(newest.name))
    except Exception:
        return None
+
+
def get_fssnapshot(module, blade):
    """Return the snapshot matching the source name and suffix, or None."""
    try:
        snap_filter = ('source=\'' + module.params['name'] +
                       '\' and suffix=\'' + module.params['suffix'] + '\'')
        response = blade.file_system_snapshots.list_file_system_snapshots(filter=snap_filter)
        return response.items[0]
    except Exception:
        return None
+
+
def create_snapshot(module, blade):
    """Create a snapshot of the filesystem using the requested suffix.

    Fix: the original never initialized ``changed``, so check mode raised
    NameError at ``exit_json``. It now defaults to True, matching the
    check-mode behaviour of the sibling FlashBlade modules.
    """
    changed = True
    if not module.check_mode:
        try:
            blade.file_system_snapshots.create_file_system_snapshots(
                sources=[module.params['name']],
                suffix=SnapshotSuffix(module.params['suffix']))
            changed = True
        except Exception:
            changed = False
    module.exit_json(changed=changed)
+
+
def restore_snapshot(module, blade):
    """Overwrite the filesystem from its latest snapshot.

    Fix: the original never initialized ``changed``, so check mode raised
    NameError at ``exit_json``. It now defaults to True.
    """
    changed = True
    if not module.check_mode:
        snapname = get_latest_fssnapshot(module, blade)
        if snapname is None:
            module.fail_json(msg='Filesystem {0} has no snapshots to restore from.'.format(module.params['name']))
        else:
            fs_attr = FileSystem(name=module.params['name'],
                                 source=Reference(name=snapname))
            try:
                # Overwrite the live filesystem with the snapshot contents.
                blade.file_systems.create_file_systems(overwrite=True,
                                                       discard_non_snapshotted_data=True,
                                                       file_system=fs_attr)
                changed = True
            except Exception:
                changed = False
    module.exit_json(changed=changed)
+
+
def recover_snapshot(module, blade):
    """Recover a destroyed snapshot (undo a pending delete).

    Fix: the original never initialized ``changed``, so check mode raised
    NameError at ``exit_json``. It now defaults to True.
    """
    changed = True
    if not module.check_mode:
        snapname = module.params['name'] + "." + module.params['suffix']
        try:
            blade.file_system_snapshots.update_file_system_snapshots(
                name=snapname, attributes=FileSystemSnapshot(destroyed=False))
            changed = True
        except Exception:
            changed = False
    module.exit_json(changed=changed)
+
+
def update_snapshot(module, blade):
    """Snapshots have no mutable attributes; always report no change."""
    module.exit_json(changed=False)
+
+
def delete_snapshot(module, blade):
    """Destroy a snapshot, optionally eradicating it as well.

    Fix: the original never initialized ``changed``, so check mode raised
    NameError at ``exit_json``. It now defaults to True.
    """
    changed = True
    if not module.check_mode:
        snapname = module.params['name'] + "." + module.params['suffix']
        try:
            # First mark the snapshot destroyed (soft delete).
            blade.file_system_snapshots.update_file_system_snapshots(
                name=snapname, attributes=FileSystemSnapshot(destroyed=True))
            changed = True
            if module.params['eradicate']:
                try:
                    # Then permanently remove it when eradication is requested.
                    blade.file_system_snapshots.delete_file_system_snapshots(name=snapname)
                    changed = True
                except Exception:
                    changed = False
        except Exception:
            changed = False
    module.exit_json(changed=changed)
+
+
def eradicate_snapshot(module, blade):
    """Permanently remove an already-destroyed snapshot.

    Fix: the original never initialized ``changed``, so check mode raised
    NameError at ``exit_json``. It now defaults to True.
    """
    changed = True
    if not module.check_mode:
        snapname = module.params['name'] + "." + module.params['suffix']
        try:
            blade.file_system_snapshots.delete_file_system_snapshots(name=snapname)
            changed = True
        except Exception:
            changed = False
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: dispatch snapshot create/recover/restore/delete/eradicate.

    State handling:
    - present: create the snapshot, or recover it if it exists but is destroyed.
    - restore: overwrite the filesystem from its latest snapshot.
    - absent:  destroy the snapshot, or eradicate it if already destroyed.
    """
    argument_spec = purefb_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            suffix=dict(type='str'),
            eradicate=dict(default='false', type='bool'),
            state=dict(default='present', choices=['present', 'absent', 'restore'])
        )
    )

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')

    # No suffix given: generate a unique one from the epoch timestamp,
    # with the decimal point stripped to keep the snapshot name valid.
    if module.params['suffix'] is None:
        suffix = "snap-" + str((datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds())
        module.params['suffix'] = suffix.replace(".", "")

    state = module.params['state']
    blade = get_blade(module)
    filesystem = get_fs(module, blade)
    snap = get_fssnapshot(module, blade)

    # NOTE(review): update_snapshot() always reports "no change", so the
    # destroyed-filesystem and missing-filesystem branches below are no-ops.
    if state == 'present' and filesystem and not filesystem.destroyed and not snap:
        create_snapshot(module, blade)
    elif state == 'present' and filesystem and not filesystem.destroyed and snap and not snap.destroyed:
        update_snapshot(module, blade)
    elif state == 'present' and filesystem and not filesystem.destroyed and snap and snap.destroyed:
        recover_snapshot(module, blade)
    elif state == 'present' and filesystem and filesystem.destroyed:
        update_snapshot(module, blade)
    elif state == 'present' and not filesystem:
        update_snapshot(module, blade)
    elif state == 'restore' and filesystem:
        restore_snapshot(module, blade)
    elif state == 'absent' and snap and not snap.destroyed:
        delete_snapshot(module, blade)
    elif state == 'absent' and snap and snap.destroyed:
        eradicate_snapshot(module, blade)
    elif state == 'absent' and not snap:
        module.exit_json(changed=False)
    else:
        module.exit_json(changed=False)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py
new file mode 100644
index 00000000..71872b32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_snmp_agent
+version_added: '1.0.0'
+short_description: Configure the FlashBlade SNMP Agent
+description:
+- Configure the management SNMP Agent on a Pure Storage FlashBlade.
+- This module is not idempotent and will always modify the
+ existing management SNMP agent due to hidden parameters that cannot
+ be compared to the play parameters.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ auth_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase of 8 - 32 characters.
+ auth_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Hash algorithm to use
+ choices: [ MD5, SHA ]
+ community:
+ type: str
+ description:
+ - SNMP v2c only. Manager community ID. Between 1 and 32 characters long.
+ user:
+ type: str
+ description:
+ - SNMP v3 only. User ID recognized by the specified SNMP agent.
+ Must be between 1 and 32 characters.
+ version:
+ type: str
+ description:
+ - Version of SNMP protocol to use for the agent.
+ choices: [ v2c, v3 ]
+ privacy_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase to encrypt SNMP messages.
+ Must be between 8 and 63 non-space ASCII characters.
+ privacy_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Encryption protocol to use
+ choices: [ AES, DES ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Update v2c SNMP agent
+ purefb_snmp_agent:
+ community: public
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Update v3 SNMP agent
+ purefb_snmp_agent:
+ version: v3
+ auth_protocol: MD5
+ auth_passphrase: password
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+'''
+
+RETURN = r'''
+'''
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import SnmpAgent, SnmpV2c, SnmpV3
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
def update_agent(module, blade):
    """Update the (single) management SNMP agent on the FlashBlade.

    Reads the current agent configuration, compares it attribute-by-attribute
    with the play parameters and, if anything differs, rewrites the agent for
    the requested SNMP version. Always exits the module via exit_json or
    fail_json; it never returns to the caller.
    """
    # In check mode we report "would change" unconditionally; the module's
    # DOCUMENTATION states it is not idempotent.
    changed = True
    if not module.check_mode:
        changed = False
        try:
            agent = blade.snmp_agents.list_snmp_agents()
        except Exception:
            module.fail_json(msg="Failed to get configuration for SNMP agent.")
        # NOTE(review): if the API does not echo passphrases back, these keys
        # can never match the play values, forcing an update on every run -
        # presumably the "hidden parameters" mentioned in DOCUMENTATION;
        # confirm against the purity_fb SDK.
        current_attr = {'community': agent.items[0].v2c.community,
                        'version': agent.items[0].version,
                        'auth_passphrase': agent.items[0].v3.auth_passphrase,
                        'auth_protocol': agent.items[0].v3.auth_protocol,
                        'privacy_passphrase': agent.items[0].v3.privacy_passphrase,
                        'privacy_protocol': agent.items[0].v3.privacy_protocol,
                        'user': agent.items[0].v3.user,
                        }
        new_attr = {'community': module.params['community'],
                    'version': module.params['version'],
                    'auth_passphrase': module.params['auth_passphrase'],
                    'auth_protocol': module.params['auth_protocol'],
                    'privacy_passphrase': module.params['privacy_passphrase'],
                    'privacy_protocol': module.params['privacy_protocol'],
                    'user': module.params['user']
                    }
        if current_attr != new_attr:
            if new_attr['version'] == 'v2c':
                # v2c agent: only the community string is configurable.
                updated_v2c_attrs = SnmpV2c(community=new_attr['community'])
                updated_v2c_agent = SnmpAgent(version='v2c', v2c=updated_v2c_attrs)
                try:
                    blade.snmp_agents.update_snmp_agents(snmp_agent=updated_v2c_agent)
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to update v2c SNMP agent.")
            else:
                # Any non-v2c request is treated as v3 (choices limit the
                # parameter to v2c/v3).
                updated_v3_attrs = SnmpV3(auth_protocol=new_attr['auth_protocol'],
                                          auth_passphrase=new_attr['auth_passphrase'],
                                          privacy_protocol=new_attr['privacy_protocol'],
                                          privacy_passphrase=new_attr['privacy_passphrase'],
                                          user=new_attr['user']
                                          )
                updated_v3_agent = SnmpAgent(version='v3', v3=updated_v3_attrs)
                try:
                    blade.snmp_agents.update_snmp_agents(snmp_agent=updated_v3_agent)
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to update v3 SNMP agent.")

    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: validate parameters and update the management SNMP agent.

    Fails early when the purity_fb SDK is missing or the array's REST API is
    too old, enforces the SNMPv3 passphrase length limits, then delegates to
    update_agent() (which exits the module itself).
    """
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        user=dict(type='str'),
        auth_passphrase=dict(type='str', no_log=True),
        auth_protocol=dict(type='str', choices=['MD5', 'SHA']),
        privacy_passphrase=dict(type='str', no_log=True),
        privacy_protocol=dict(type='str', choices=['AES', 'DES']),
        version=dict(type='str', choices=['v2c', 'v3']),
        community=dict(type='str'),
    ))

    required_together = [['auth_passphrase', 'auth_protocol'],
                         ['privacy_passphrase', 'privacy_protocol']]
    required_if = [['version', 'v2c', ['community']],
                   ['version', 'v3', ['user']]]

    module = AnsibleModule(argument_spec,
                           required_together=required_together,
                           required_if=required_if,
                           supports_check_mode=True)

    # Check for the SDK before touching the array: get_blade() already needs
    # purity_fb, so the original post-connection check could never be reached
    # when the SDK was actually missing.
    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb SDK is required for this module')

    blade = get_blade(module)
    api_version = blade.api_version.list_versions().versions

    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(msg="Purity//FB must be upgraded to support this module.")

    if module.params['version'] == "v3":
        # Bug fix: the original checks used `8 > len(x) > 32`, a chained
        # comparison that is always False, so the length limits were never
        # enforced. Enforce 8-32 (auth) and 8-63 (privacy) as documented.
        if module.params['auth_passphrase'] and not 8 <= len(module.params['auth_passphrase']) <= 32:
            module.fail_json(msg="auth_passphrase must be between 8 and 32 characters")
        if module.params['privacy_passphrase'] and not 8 <= len(module.params['privacy_passphrase']) <= 63:
            module.fail_json(msg="privacy_passphrase must be between 8 and 63 characters")

    update_agent(module, blade)

    # update_agent() exits the module; this is a defensive fallback only.
    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py
new file mode 100644
index 00000000..61e47cc4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_snmp_mgr
+version_added: '1.0.0'
+short_description: Configure FlashBlade SNMP Managers
+description:
+- Manage SNMP managers on a Pure Storage FlashBlade.
+- This module is not idempotent and will always modify an
+ existing SNMP manager due to hidden parameters that cannot
+ be compared to the play parameters.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of SNMP Manager
+ required: True
+ type: str
+ state:
+ description:
+ - Create or delete SNMP manager
+ type: str
+ default: present
+ choices: [ absent, present ]
+ auth_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase of 8 - 32 characters.
+ auth_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Hash algorithm to use
+ choices: [ MD5, SHA ]
+ community:
+ type: str
+ description:
+ - SNMP v2c only. Manager community ID. Between 1 and 32 characters long.
+ host:
+ type: str
+ description:
+ - IPv4 or IPv6 address or FQDN to send trap messages to.
+ user:
+ type: str
+ description:
+ - SNMP v3 only. User ID recognized by the specified SNMP manager.
+ Must be between 1 and 32 characters.
+ version:
+ type: str
+ description:
+ - Version of SNMP protocol to use for the manager.
+ choices: [ v2c, v3 ]
+ notification:
+ type: str
+ description:
+ - Action to perform on event.
+ default: trap
+ choices: [ inform, trap ]
+ privacy_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase to encrypt SNMP messages.
+ Must be between 8 and 63 non-space ASCII characters.
+ privacy_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Encryption protocol to use
+ choices: [ AES, DES ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Delete existing SNMP manager
+ purefb_snmp_mgr:
+ name: manager1
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create v2c SNMP manager
+ purefb_snmp_mgr:
+ name: manager1
+ community: public
+ host: 10.21.22.23
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create v3 SNMP manager
+ purefb_snmp_mgr:
+ name: manager2
+ version: v3
+ auth_protocol: MD5
+ auth_passphrase: password
+ host: 10.21.22.23
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Update existing SNMP manager
+ purefb_snmp_mgr:
+ name: manager1
+ community: private
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+'''
+
+RETURN = r'''
+'''
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import SnmpManager, SnmpV2c, SnmpV3
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
def update_manager(module, blade):
    """Update an existing SNMP manager on the FlashBlade.

    Reads the named manager's current configuration, compares it
    attribute-by-attribute with the play parameters and, if anything
    differs, rewrites the manager for the requested SNMP version.
    Always exits the module via exit_json or fail_json.
    """
    # In check mode we report "would change" unconditionally; the module's
    # DOCUMENTATION states it is not idempotent.
    changed = True
    if not module.check_mode:
        changed = False
        try:
            mgr = blade.snmp_managers.list_snmp_managers(names=[module.params['name']])
        except Exception:
            module.fail_json(msg="Failed to get configuration for SNMP manager {0}.".format(module.params['name']))
        # NOTE(review): if the API does not echo passphrases back, these keys
        # can never match the play values, forcing an update on every run -
        # presumably the "hidden parameters" mentioned in DOCUMENTATION;
        # confirm against the purity_fb SDK.
        current_attr = {'community': mgr.items[0].v2c.community,
                        'notification': mgr.items[0].notification,
                        'host': mgr.items[0].host,
                        'version': mgr.items[0].version,
                        'auth_passphrase': mgr.items[0].v3.auth_passphrase,
                        'auth_protocol': mgr.items[0].v3.auth_protocol,
                        'privacy_passphrase': mgr.items[0].v3.privacy_passphrase,
                        'privacy_protocol': mgr.items[0].v3.privacy_protocol,
                        'user': mgr.items[0].v3.user,
                        }
        new_attr = {'community': module.params['community'],
                    'notification': module.params['notification'],
                    'host': module.params['host'],
                    'version': module.params['version'],
                    'auth_passphrase': module.params['auth_passphrase'],
                    'auth_protocol': module.params['auth_protocol'],
                    'privacy_passphrase': module.params['privacy_passphrase'],
                    'privacy_protocol': module.params['privacy_protocol'],
                    'user': module.params['user']
                    }
        if current_attr != new_attr:
            if new_attr['version'] == 'v2c':
                # v2c manager: only community, host and notification apply.
                updated_v2c_attrs = SnmpV2c(community=new_attr['community'])
                updated_v2c_manager = SnmpManager(host=new_attr['host'], notification=new_attr['notification'],
                                                  version='v2c', v2c=updated_v2c_attrs
                                                  )
                try:
                    blade.snmp_managers.update_snmp_managers(names=[module.params['name']],
                                                             snmp_manager=updated_v2c_manager
                                                             )
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to update v2c SNMP manager {0}.".format(module.params['name']))
            else:
                # Any non-v2c request is treated as v3 (choices limit the
                # parameter to v2c/v3).
                updated_v3_attrs = SnmpV3(auth_protocol=new_attr['auth_protocol'],
                                          auth_passphrase=new_attr['auth_passphrase'],
                                          privacy_protocol=new_attr['privacy_protocol'],
                                          privacy_passphrase=new_attr['privacy_passphrase'],
                                          user=new_attr['user']
                                          )
                updated_v3_manager = SnmpManager(host=new_attr['host'], notification=new_attr['notification'],
                                                 version='v3', v3=updated_v3_attrs
                                                 )
                try:
                    blade.snmp_managers.update_snmp_managers(names=[module.params['name']],
                                                             snmp_manager=updated_v3_manager
                                                             )
                    changed = True
                except Exception:
                    module.fail_json(msg="Failed to update v3 SNMP manager {0}.".format(module.params['name']))

    module.exit_json(changed=changed)
+
+
def delete_manager(module, blade):
    """Remove the named SNMP manager from the FlashBlade.

    Exits the module via exit_json (or fail_json on API error).
    """
    mgr_name = module.params['name']
    changed = True
    if not module.check_mode:
        try:
            blade.snmp_managers.delete_snmp_managers(names=[mgr_name])
        except Exception:
            module.fail_json(msg='Delete SNMP manager {0} failed'.format(mgr_name))
    module.exit_json(changed=changed)
+
+
def create_manager(module, blade):
    """Create a new SNMP manager (v2c or v3) from the play parameters.

    Requires the version parameter; any value other than v2c is treated
    as v3 (argument choices restrict it to v2c/v3). Exits the module via
    exit_json or fail_json.
    """
    params = module.params
    changed = True
    if not module.check_mode:
        if not params['version']:
            module.fail_json(msg='SNMP version required to create a new manager')
        if params['version'] == "v2c":
            manager = SnmpManager(host=params['host'],
                                  notification=params['notification'],
                                  version='v2c',
                                  v2c=SnmpV2c(community=params['community']))
            try:
                blade.snmp_managers.create_snmp_managers(names=[params['name']],
                                                         snmp_manager=manager)
            except Exception:
                module.fail_json(msg="Failed to create v2c SNMP manager {0}.".format(params['name']))
        else:
            v3_settings = SnmpV3(auth_protocol=params['auth_protocol'],
                                 auth_passphrase=params['auth_passphrase'],
                                 privacy_protocol=params['privacy_protocol'],
                                 privacy_passphrase=params['privacy_passphrase'],
                                 user=params['user'])
            manager = SnmpManager(host=params['host'],
                                  notification=params['notification'],
                                  version='v3',
                                  v3=v3_settings)
            try:
                blade.snmp_managers.create_snmp_managers(names=[params['name']],
                                                         snmp_manager=manager)
            except Exception:
                module.fail_json(msg="Failed to create v3 SNMP manager {0}.".format(params['name']))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create, update or delete the named SNMP manager.

    Fails early when the purity_fb SDK is missing or the array's REST API is
    too old, enforces SNMPv3 passphrase length limits, then dispatches to the
    create/update/delete helper (each of which exits the module itself).
    """
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        host=dict(type='str'),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        user=dict(type='str'),
        notification=dict(type='str', choices=['inform', 'trap'], default='trap'),
        auth_passphrase=dict(type='str', no_log=True),
        auth_protocol=dict(type='str', choices=['MD5', 'SHA']),
        privacy_passphrase=dict(type='str', no_log=True),
        privacy_protocol=dict(type='str', choices=['AES', 'DES']),
        version=dict(type='str', choices=['v2c', 'v3']),
        community=dict(type='str'),
    ))

    required_together = [['auth_passphrase', 'auth_protocol'],
                         ['privacy_passphrase', 'privacy_protocol']]
    required_if = [['version', 'v2c', ['community', 'host']],
                   ['version', 'v3', ['host', 'user']]]

    module = AnsibleModule(argument_spec,
                           required_together=required_together,
                           required_if=required_if,
                           supports_check_mode=True)

    # Check for the SDK before touching the array: get_blade() already needs
    # purity_fb, so the original post-connection check could never be reached
    # when the SDK was actually missing.
    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb SDK is required for this module')

    state = module.params['state']
    blade = get_blade(module)
    api_version = blade.api_version.list_versions().versions

    if MIN_REQUIRED_API_VERSION not in api_version:
        module.fail_json(msg="Purity//FB must be upgraded to support this module.")

    # Does a manager with the requested name already exist?
    mgrs = blade.snmp_managers.list_snmp_managers()
    mgr_configured = any(mgr.name == module.params['name'] for mgr in mgrs.items)

    if module.params['version'] == "v3":
        # Bug fix: the original checks used `8 > len(x) > 32`, a chained
        # comparison that is always False, so the length limits were never
        # enforced. Enforce 8-32 (auth) and 8-63 (privacy) as documented.
        if module.params['auth_passphrase'] and not 8 <= len(module.params['auth_passphrase']) <= 32:
            module.fail_json(msg="auth_passphrase must be between 8 and 32 characters")
        if module.params['privacy_passphrase'] and not 8 <= len(module.params['privacy_passphrase']) <= 63:
            module.fail_json(msg="privacy_passphrase must be between 8 and 63 characters")

    if state == 'absent' and mgr_configured:
        delete_manager(module, blade)
    elif mgr_configured and state == 'present':
        update_manager(module, blade)
    elif not mgr_configured and state == 'present':
        create_manager(module, blade)
    else:
        # state == 'absent' and the manager does not exist: nothing to do.
        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py
new file mode 100644
index 00000000..5cb27b74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: purefb_subnet
+version_added: "1.0.0"
+short_description: Manage network subnets in a Pure Storage FlashBlade
+description:
+ - This module manages network subnets on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Subnet Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Create, delete or modifies a subnet.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ gateway:
+ description:
+ - IPv4 or IPv6 address of subnet gateway.
+ required: false
+ type: str
+ mtu:
+ description:
+ - MTU size of the subnet. Range is 1280 to 9216.
+ required: false
+ default: 1500
+ type: int
+ prefix:
+ description:
+ - IPv4 or IPv6 address associated with the subnet.
+ - Supply the prefix length (CIDR) as well as the IP address.
+ required: false
+ type: str
+ vlan:
+ description:
+ - VLAN ID of the subnet.
+ required: false
+ default: 0
+ type: int
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = '''
+- name: Create new network subnet named foo
+ purefb_subnet:
+ name: foo
+ prefix: "10.21.200.3/24"
+ gateway: 10.21.200.1
+ mtu: 9000
+ vlan: 2200
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Change configuration of existing subnet foo
+  purefb_subnet:
+ name: foo
+ state: present
+ prefix: "10.21.100.3/24"
+ gateway: 10.21.100.1
+ mtu: 1500
+ address: 10.21.200.123
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete network subnet named foo
+ purefb_subnet:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641'''
+
+RETURN = '''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Subnet
+except ImportError:
+ HAS_PURITY_FB = False
+
+try:
+ import netaddr
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MINIMUM_API_VERSION = '1.3'
+
+
def get_subnet(module, blade):
    """Return the named subnet object, or None when it does not exist.

    Any API error (including "not found") is treated as absence.
    """
    try:
        return blade.subnets.list_subnets(names=[module.params['name']]).items[0]
    except Exception:
        return None
+
+
def create_subnet(module, blade):
    """Create a new subnet from the play parameters.

    Exits the module via exit_json (or fail_json on API error).
    """
    changed = True
    if not module.check_mode:
        params = module.params
        new_subnet = Subnet(prefix=params['prefix'],
                            vlan=params['vlan'],
                            mtu=params['mtu'],
                            gateway=params['gateway'])
        try:
            blade.subnets.create_subnets(names=[params['name']], subnet=new_subnet)
        except Exception:
            module.fail_json(msg='Failed to create subnet {0}. Confirm supplied parameters'.format(params['name']))
    module.exit_json(changed=changed)
+
+
def modify_subnet(module, blade):
    """Apply any changed settings to an existing subnet.

    Each attribute is updated with its own API call so a failure names the
    specific setting that was rejected. Exits the module via exit_json or
    fail_json.
    """
    changed = True
    if not module.check_mode:
        changed = False
        subnet = get_subnet(module, blade)
        names = [module.params['name']]
        # (parameter/attribute name, label used in error messages), applied
        # in the same order as the original per-attribute checks.
        # NOTE(review): truthiness is tested, so a play value of 0 (e.g. an
        # mtu/vlan of 0) is silently skipped - confirm this is intended.
        for attr, label in (('prefix', 'prefix'),
                            ('vlan', 'VLAN'),
                            ('gateway', 'gateway'),
                            ('mtu', 'MTU')):
            value = module.params[attr]
            if value and value != getattr(subnet, attr):
                try:
                    blade.subnets.update_subnets(names=names,
                                                 subnet=Subnet(**{attr: value}))
                    changed = True
                except Exception:
                    module.fail_json(msg='Failed to change subnet {0} {1} to {2}'.format(module.params['name'],
                                                                                         label, value))
    module.exit_json(changed=changed)
+
+
def delete_subnet(module, blade):
    """Delete the named subnet.

    Exits the module via exit_json (or fail_json on API error).
    """
    subnet_name = module.params['name']
    changed = True
    if not module.check_mode:
        try:
            blade.subnets.delete_subnets(names=[subnet_name])
        except Exception:
            module.fail_json(msg='Failed to delete subnet {0}'.format(subnet_name))
    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: create, modify or delete a FlashBlade network subnet.

    Validates MTU/VLAN ranges, checks that the gateway lies within the
    prefix, and rejects VLAN or CIDR collisions with other subnets before
    dispatching to the create/modify/delete helper.
    """
    argument_spec = purefb_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
            gateway=dict(),
            mtu=dict(type='int', default=1500),
            prefix=dict(),
            vlan=dict(type='int', default=0),
        )
    )

    # gateway and prefix are mandatory when creating/updating a subnet.
    required_if = [["state", "present", ["gateway", 'prefix']]]

    module = AnsibleModule(argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)

    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')

    if not HAS_NETADDR:
        module.fail_json(msg='netaddr module is required')

    state = module.params['state']
    blade = get_blade(module)
    api_version = blade.api_version.list_versions().versions
    if MINIMUM_API_VERSION not in api_version:
        module.fail_json(msg='Upgrade Purity//FB to enable this module')
    subnet = get_subnet(module, blade)

    if state == 'present':
        mtu = module.params['mtu']
        vlan = module.params['vlan']
        if not (1280 <= mtu <= 9216):
            module.fail_json(msg='MTU {0} is out of range (1280 to 9216)'.format(mtu))
        if not (0 <= vlan <= 4094):
            module.fail_json(msg='VLAN ID {0} is out of range (0 to 4094)'.format(vlan))
        if netaddr.IPAddress(module.params['gateway']) not in netaddr.IPNetwork(module.params['prefix']):
            module.fail_json(msg='Gateway and subnet are not compatible.')
        # Reject collisions with every *other* subnet on the array.
        nrange = netaddr.IPSet([module.params['prefix']])
        for other in blade.subnets.list_subnets().items:
            if other.name == module.params['name']:
                continue
            if other.vlan == vlan:
                module.fail_json(msg='VLAN ID {0} is already in use.'.format(vlan))
            if nrange & netaddr.IPSet([other.prefix]):
                module.fail_json(msg='Prefix CIDR overlaps with existing subnet.')

    if state == 'present' and not subnet:
        create_subnet(module, blade)
    elif state == 'present' and subnet:
        modify_subnet(module, blade)
    elif state == 'absent' and subnet:
        delete_subnet(module, blade)
    else:
        # state == 'absent' and the subnet does not exist: nothing to do.
        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py
new file mode 100644
index 00000000..f0f5a638
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_syslog
+version_added: '1.4.0'
+short_description: Configure Pure Storage FlashBlade syslog settings
+description:
+- Configure syslog configuration for Pure Storage FlashBlades.
+- Add or delete an individual syslog server to the existing
+  list of servers.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Unique identifier for the syslog server address
+ type: str
+ required: true
+ state:
+ description:
+ - Create or delete syslog servers configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ protocol:
+ description:
+ - Protocol which server uses
+ type: str
+ choices: [ tcp, tls, udp ]
+ port:
+ description:
+ - Port at which the server is listening. If no port is specified
+ the system will use 514
+ type: str
+ address:
+ description:
+ - Syslog server address.
+ This field supports IPv4 or FQDN.
+ An invalid IP addresses will cause the module to fail.
+ No validation is performed for FQDNs.
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Delete existing syslog server entries
+ purefb_syslog:
+ name: syslog1
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Set array syslog servers
+ purefb_syslog:
+ state: present
+ name: syslog1
+ address: syslog1.com
+ protocol: udp
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+'''
+
+RETURN = r'''
+'''
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import SyslogServerPostOrPatch
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.10'
+
+
def delete_syslog(module, blade):
    """Delete the named syslog server entry, if one exists.

    A lookup failure is treated as "server not configured" (no change).
    Exits the module via exit_json or fail_json.
    """
    changed = True
    if not module.check_mode:
        changed = False
        name = module.params['name']
        try:
            existing = blade.syslog.list_syslog_servers(names=[name])
        except Exception:
            existing = None

        if existing:
            try:
                blade.syslog.delete_syslog_servers(names=[name])
                changed = True
            except Exception:
                module.fail_json(msg='Failed to remove syslog server: {0}'.format(name))

    module.exit_json(changed=changed)
+
+
def add_syslog(module, blade):
    """Add a syslog server unless one with the same name already exists.

    The server URI is built as <protocol>://<address>[:<port>]; when no
    port is given the array applies its own default. The array supports at
    most three syslog servers. Exits the module via exit_json or fail_json.
    """
    changed = True
    if not module.check_mode:
        changed = False
        noport_address = module.params['protocol'] + "://" + module.params['address']

        if module.params['port']:
            full_address = noport_address + ":" + module.params['port']
        else:
            full_address = noport_address

        address_list = blade.syslog.list_syslog_servers()
        exists = False

        if address_list:
            for address in range(0, len(address_list.items)):
                if address_list.items[address].name == module.params['name']:
                    exists = True
                    break
        if not exists:
            # Bug fix: enforce the three-server limit only when a new server
            # is actually being added. Previously the limit was checked
            # before the existence test, so a re-run with three servers
            # configured failed even though the named server already existed
            # (breaking idempotency).
            if len(address_list.items) == 3:
                module.fail_json(msg='Maximum number of syslog servers (3) already configured.')
            try:
                attr = SyslogServerPostOrPatch(uri=full_address)
                blade.syslog.create_syslog_servers(syslog=attr, names=[module.params['name']])
                changed = True
            except Exception:
                module.fail_json(msg='Failed to add syslog server {0} - {1}'.format(module.params['name'], full_address))

    module.exit_json(changed=changed)
+
+
def main():
    """Entry point: add or delete a FlashBlade syslog server entry.

    Dispatches to delete_syslog/add_syslog, both of which exit the module.
    """
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        address=dict(type='str'),
        protocol=dict(type='str', choices=['tcp', 'tls', 'udp']),
        port=dict(type='str'),
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    ))

    # Address and protocol are only needed when configuring a server.
    required_if = [['state', 'present', ['address', 'protocol']]]

    module = AnsibleModule(argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)

    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')

    blade = get_blade(module)
    versions = blade.api_version.list_versions().versions
    if MIN_REQUIRED_API_VERSION not in versions:
        module.fail_json(msg="Purity//FB must be upgraded to support this module.")

    if module.params['state'] == 'absent':
        delete_syslog(module, blade)
    else:
        add_syslog(module, blade)

    # Both helpers exit the module; this is a defensive fallback only.
    module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py
new file mode 100644
index 00000000..7c087b2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_target
+version_added: '1.0.0'
+short_description: Manage remote S3-capable targets for a FlashBlade
+description:
+- Manage remote S3-capable targets for a FlashBlade system
+- Use this for non-FlashBlade targets.
+- Use I(purefb_connect) for FlashBlade targets.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete remote target
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - Name of S3-capable target (IP or FQDN)
+ type: str
+ required: true
+ address:
+ description:
+ - Address of S3-capable target (IP or FQDN)
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Create a connection to remote S3-capable target
+ purefb_target:
+ name: target_1
+ address: 10.10.10.20
+ fb_url: 10.10.10.2
+ api_token: T-89faa581-c668-483d-b77d-23c5d88ba35c
+- name: Delete connection to remote S3-capable system
+ purefb_target:
+ state: absent
+ name: target_1
+ target_api: 9c0b56bc-f941-f7a6-9f85-dcc3e9a8f7d6
+ fb_url: 10.10.10.2
+ api_token: T-89faa581-c668-483d-b77d-23c5d88ba35c
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import TargetPost, Target
+except ImportError:
+ HAS_PURITYFB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+
+MINIMUM_API_VERSION = '1.9'
+
+
+def _check_replication_configured(module, blade):
+ interfaces = blade.network_interfaces.list_network_interfaces()
+ repl_ok = False
+ for link in range(0, len(interfaces.items)):
+ if 'replication' in interfaces.items[link].services:
+ repl_ok = True
+ if not repl_ok:
+ module.fail_json(msg='Replication network interface required to configure a target')
+
+
+def _check_connected(module, blade):
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].name == module.params['name']:
+ return connected_targets.items[target]
+ return None
+
+
+def break_connection(module, blade):
+ """Break connection to remote target"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.targets.delete_targets(names=[module.params['name']])
+ except Exception:
+ module.fail_json(msg="Failed to disconnect target {0}.".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def create_connection(module, blade):
+ """Create connection to remote target"""
+ changed = True
+ if not module.check_mode:
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].address == module.params['address']:
+ module.fail_json(msg='Target already exists with same connection address')
+ try:
+ target = TargetPost(address=module.params['address'])
+ blade.targets.create_targets(names=[module.params['name']], target=target)
+ except Exception:
+ module.fail_json(msg="Failed to connect to remote target {0}.".format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def update_connection(module, blade, connection):
+ """Update target connection address"""
+ changed = True
+ if not module.check_mode:
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].address == module.params['address'] and \
+ connected_targets.items[target].name != module.params['name']:
+ module.fail_json(msg='Target already exists with same connection address')
+ if module.params['address'] != connection.address:
+ new_address = Target(name=module.params['name'], address=module.params['address'])
+ try:
+ blade.targets.update_targets(names=[connection.name], target=new_address)
+ except Exception:
+ module.fail_json(msg='Failed to change address for target {0}.'.format(module.params['name']))
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True),
+ address=dict(type='str'),
+ ))
+
+ required_if = [['state', 'present', ['address']]]
+
+ module = AnsibleModule(argument_spec,
+ required_if=required_if,
+ supports_check_mode=True)
+
+ if not HAS_PURITYFB:
+ module.fail_json(msg='purity_fb sdk is required for this module')
+
+ state = module.params['state']
+ blade = get_blade(module)
+ _check_replication_configured(module, blade)
+ target = _check_connected(module, blade)
+ if state == 'present' and not target:
+ create_connection(module, blade)
+ elif state == 'present' and target:
+ update_connection(module, blade, target)
+ elif state == 'absent' and target:
+ break_connection(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py
new file mode 100644
index 00000000..66909ef9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: purefb_user
+version_added: '1.0.0'
+short_description: Modify FlashBlade local user account password
+description:
+- Modify local user's password on a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the local user account
+ type: str
+ default: pureuser
+ password:
+ description:
+ - Password for the local user.
+ type: str
+ required: true
+ old_password:
+ description:
+ - If changing an existing password, you must provide the old password for security
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+'''
+
+EXAMPLES = r'''
+- name: Change password for local user (NOT IDEMPOTENT)
+ purefb_user:
+ password: anewpassword
+ old_password: apassword
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+'''
+
+RETURN = r'''
+'''
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Admin
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import get_blade, purefb_argument_spec
+
+MIN_REQUIRED_API_VERSION = '1.3'
+
+
+def update_user(module, blade):
+ """Create or Update Local User Account"""
+ changed = True
+ if not module.check_mode:
+ if module.params['password']:
+ if module.params['password'] != module.params['old_password']:
+ try:
+ newAdmin = Admin()
+ newAdmin.password = module.params['password']
+ newAdmin.old_password = module.params['old_password']
+ blade.admins.update_admins(names=[module.params['name']], admin=newAdmin)
+ except Exception:
+ module.fail_json(msg='Local User {0}: Password reset failed. '
+ 'Check passwords. One of these is incorrect.'.format(module.params['name']))
+ else:
+ module.fail_json(msg='Local User Account {0}: Password change failed - '
+ 'Old and new passwords are the same'.format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str', default='pureuser'),
+ password=dict(required=True, type='str', no_log=True),
+ old_password=dict(required=True, type='str', no_log=True),
+ ))
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg='purity_fb sdk is required for this module')
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ update_user(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/roles/.keep b/collections-debian-merged/ansible_collections/purestorage/flashblade/roles/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/roles/.keep
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt b/collections-debian-merged/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt
new file mode 100644
index 00000000..771db46e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/collections-debian-merged/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt b/collections-debian-merged/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt
new file mode 100644
index 00000000..771db46e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error