Diffstat (limited to 'ansible_collections/purestorage')
-rw-r--r--  ansible_collections/purestorage/flasharray/.git-blame-ignore-revs | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/.github/CONTRIBUTING.md | 19
-rw-r--r--  ansible_collections/purestorage/flasharray/.github/ISSUE_TEMPLATE/bug_report.md | 38
-rw-r--r--  ansible_collections/purestorage/flasharray/.github/ISSUE_TEMPLATE/feature_request.md | 20
-rw-r--r--  ansible_collections/purestorage/flasharray/.github/bug_report_template.md | 58
-rw-r--r--  ansible_collections/purestorage/flasharray/.github/feature_request_template.md | 21
-rw-r--r--  ansible_collections/purestorage/flasharray/.github/pull_request_template.md | 25
-rw-r--r--  ansible_collections/purestorage/flasharray/.github/workflows/ansible-lint.yml | 10
-rw-r--r--  ansible_collections/purestorage/flasharray/.github/workflows/black.yaml | 11
-rw-r--r--  ansible_collections/purestorage/flasharray/.github/workflows/main.yml | 62
-rw-r--r--  ansible_collections/purestorage/flasharray/.github/workflows/stale.yml | 19
-rw-r--r--  ansible_collections/purestorage/flasharray/.gitignore | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/.pylintrc | 587
-rw-r--r--  ansible_collections/purestorage/flasharray/.yamllint | 7
-rw-r--r--  ansible_collections/purestorage/flasharray/CHANGELOG.rst | 509
-rw-r--r--  ansible_collections/purestorage/flasharray/COPYING.GPLv3 | 674
-rw-r--r--  ansible_collections/purestorage/flasharray/FILES.json | 1853
-rw-r--r--  ansible_collections/purestorage/flasharray/LICENSE | 674
-rw-r--r--  ansible_collections/purestorage/flasharray/MANIFEST.json | 36
-rw-r--r--  ansible_collections/purestorage/flasharray/README.md | 114
-rw-r--r--  ansible_collections/purestorage/flasharray/README.rst | 19
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml | 310
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/210_add_rename_hgroup.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/211_fix_clearing_host_inititators.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/changelog.yaml | 610
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/config.yaml | 31
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/107_host_case_clarity.yaml | 7
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/108_fix_eradicate_idempotency.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/109_fa_files_support_purefa_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/110_add_apiclient_support.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/111_add_filesystem_support.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/112_add_directory_support.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/113_add_exports_support.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/115_add_gcp_offload.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/116_add_policies.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/118_rename_host.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/121_add_multi_volume_creation.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/122_add_multi_host_creation.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/123_add_multi_vgroup_creation.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/124_sdk_handshake.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/125_dns_idempotency.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/126_fix_volume_move.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/130_info_ds_update.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/131_add_v6_ds_update.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/132_fc_replication.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/133_purefa_info_v6_replication.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/134_ac_pg_support.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/135_no_cbs_ntp.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/136_add_vol_get_send_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/137_pgsnap_regex.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/139_pgsnap_ac_support.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/140_pod_case.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/141_add_remote_snapshot.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/145_fix_missing_move_variable.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/149_volumes_demoted_pods_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/152_fix_user.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/153_syslog_update.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/156_snap_suffix_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/160_rename_pg.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/161_offline_offload_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/162_pgsnap_info_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/163_add_maintenance_windows.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/168_dsrole_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/169_add_certs.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/170_pgsnap_stretch_pod_fail.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/174_null_gateway.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/175_check_pgname.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/176_fix_promote_api_issue.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/182_allow_pgroup_with_create.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/187_add_ad.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/188_add_dirsnap.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/193_duplicate_initiators.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/194_vg_qos.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/196_fix_activedr_api_version.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/199_add_fc_port_enable.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/200_add_DAR_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/201_increase_krb_count.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/202_add_sso.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/203_add_eradication_timer.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/205_policy_protocl.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/206_add_naa_info.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/207_fix_disable_for_remote_assist.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/208_add_directory_quota_support.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/213_add_kmip.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/214_join_ou.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/220_capacity_info.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/224_add_nguid_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/226_deprecate_protocol.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/227_missing_regex.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/228_nguid_to_volfact.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/229_snapsuffix.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/230_add_pg_deleted_vols.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/231_syslog_settings.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/234_add_vol_info_on_nochange.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/235_eula.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/237_fix_network.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/238_add_dirsnap_rename.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/239_safe_mode.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/242_multi_offload.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/243_sso_to_admin.yaml | 6
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/246_python_precedence.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/247_fix_smb_policy_rules.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/249_allow_cert_reimport.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/252_add_saml2.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/254_sam2_info.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/257_fqcn.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/259_fix_gateway_check.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/261_fix_bad_arrays.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/265_fix_multiple_nfs_rules.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/268_fix_quotas_issues.yaml | 4
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/270_add_priority_info.yaml | 4
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/271_vgroup_prio.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/272_volume_prio.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/277_add_fs_repl.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/278_pgsnap_info.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/279_pg_safemode.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/280_multihost_no_suffix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/284_volfact_for_recover.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/288_zero_params.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/292_fix_ds_password.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/293_add_chassis_inventory.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/294_dns_ntp_idempotency_absent.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/294_user_map_support.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/296_ad_tls.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/299_fix_pgsched_zero_support.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/302_fix_pg_recover_and_target_update.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/304_host_vlan.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/305_fix_target_dempo.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/307_multiple_dns.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/308_add_vm.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/310_hg_vol_idempotency.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/312_pg_alias.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/315_spf_details.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/317_add_all_squash.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/318_vol_defaults.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/319_lockout.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/320_completed_snaps.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/328_policy_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/330_extend_vlan.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/334_fix_vg_qos.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/336_add_servicelist.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/337_fix_non-prod_versions.yml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/341_pg_400s.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/342_add_vol_promotion.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/343_fix_ds.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/344_fix_smtp.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/345_user_map.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/347_dns_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/348_add_default_prot.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/349_add_alerts.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/351_fix_rest_check.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/354_fix_promotion.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/360_fix_volume.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/363_overwrite_combo.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/364_fc_targets.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/365_pod_pgsched.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/366_add_nvme_types.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/367_fix_vg.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/369_fix_host.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/370_add_user_role.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/374_offload_pgsnap.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/375_fix_remote_hosts.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/379_cap_compat.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/381_change_booleans.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/383_network_idemp.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/384_update_vol_facts.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/387_no_volume_failure.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/388_remove_27.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/393_offload_recover.yaml | 4
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/394_neighbors.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/396_pod_quota.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/397_parialconnect_bug.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/398_hgoup_alias.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flasharray/changelogs/fragments/v1.4.0_summary.yaml | 37
-rw-r--r--  ansible_collections/purestorage/flasharray/docs/docsite/links.yml | 16
-rw-r--r--  ansible_collections/purestorage/flasharray/meta/execution-environment.yml | 8
-rw-r--r--  ansible_collections/purestorage/flasharray/meta/runtime.yml | 8
-rw-r--r--  ansible_collections/purestorage/flasharray/playbooks/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flasharray/playbooks/files/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flasharray/playbooks/roles/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flasharray/playbooks/tasks/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flasharray/playbooks/templates/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flasharray/playbooks/vars/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py | 46
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py | 137
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py | 323
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py | 180
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py | 208
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py | 250
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py | 103
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py | 125
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py | 524
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py | 238
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py | 107
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py | 328
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py | 234
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py | 474
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py | 349
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py | 609
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py | 200
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py | 347
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py | 117
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py | 117
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py | 251
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py | 367
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py | 433
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py | 1085
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py | 2286
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py | 368
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py | 251
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py | 166
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_maintenance.py | 133
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py | 198
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py | 437
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py | 151
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py | 443
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py | 909
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py | 527
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py | 481
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py | 106
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py | 664
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py | 279
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py | 1606
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py | 131
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py | 121
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py | 340
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py | 132
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py | 161
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py | 640
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py | 425
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py | 267
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py | 119
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py | 327
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py | 218
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py | 171
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py | 116
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py | 225
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py | 278
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py | 685
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py | 267
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py | 161
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py | 1726
-rw-r--r--  ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py | 295
-rw-r--r--  ansible_collections/purestorage/flasharray/requirements.txt | 7
-rw-r--r--  ansible_collections/purestorage/flasharray/roles/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flasharray/settings.json | 8
-rw-r--r--  ansible_collections/purestorage/flashblade/.git-blame-ignore-revs | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/CONTRIBUTING.md | 19
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/bug_report.md | 38
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/feature_request.md | 20
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/pull_request_template.md | 25
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml | 10
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/workflows/black.yaml | 11
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/workflows/main.yml | 62
-rw-r--r--  ansible_collections/purestorage/flashblade/.github/workflows/stale.yml | 19
-rw-r--r--  ansible_collections/purestorage/flashblade/.gitignore | 4
-rw-r--r--  ansible_collections/purestorage/flashblade/.pylintrc | 587
-rw-r--r--  ansible_collections/purestorage/flashblade/.yamllint | 7
-rw-r--r--  ansible_collections/purestorage/flashblade/CHANGELOG.rst | 263
-rw-r--r--  ansible_collections/purestorage/flashblade/COPYING.GPLv3 | 674
-rw-r--r--  ansible_collections/purestorage/flashblade/FILES.json | 1279
-rw-r--r--  ansible_collections/purestorage/flashblade/LICENSE | 674
-rw-r--r--  ansible_collections/purestorage/flashblade/MANIFEST.json | 37
-rw-r--r--  ansible_collections/purestorage/flashblade/README.md | 98
-rw-r--r--  ansible_collections/purestorage/flashblade/README.rst | 19
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml | 254
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/changelog.yaml | 329
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/config.yaml | 31
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/105_max_access_key.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/107_add_remove_s3user_key.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/108_dns_search_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/109_update_info.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/111_dsrole_update_idempotency.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/112_fix_check_mode.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/113_policy_cleanup.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/114_certificate_update.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/115_multiprotocol.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/121_replication_perf.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/123_lifecycle_rule_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/127_add_eula.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/128_add_32_to_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/129-virtualhost.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/132_add_timeout.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/135_add_user_policies.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/136_add_s3user_policy.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/138_add_ad_module.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/139_add_keytabs.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/140_more_32_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/147_no_gateway.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/150_fix_joint_nfs_version_change.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/152_s3acc_lowercase.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/153_add_quota.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/154_add_snap_now.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/158_support_lags.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/159_add_lag.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/161_add_lifecycle_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/162_new_lifecycle.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/163_admin_key.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/164_add_admin.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/166_lag_mac_note.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/167_fix_logins.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/169_pypureclient_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/174_access_policies.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/175_throttle_support.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/176_nfs_export_policies.yaml | 4
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/179_fqcn.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/183_v2_connections.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/184_certificate_typos.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/185_nfs_export_rule.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/186_add_tz.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/187_rename_nfs_policy.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/188_bucket_type.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/191_add_quota_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/194_lists_for_service.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/200_proxy.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/202_multiple_snap_rules.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/211_change_booleans.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/212_object_account_quota.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/213_sec_update.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/215_encrypt_sec_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/216_extra_bucket_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/217_inventory.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/218_object_account_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/220_s3user_key_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml | 3
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml | 33
-rw-r--r--  ansible_collections/purestorage/flashblade/meta/runtime.yml | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/files/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/roles/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/tasks/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/templates/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flashblade/playbooks/vars/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py | 42
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py | 148
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_ad.py | 404
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_admin.py | 137
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py | 245
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_apiclient.py | 250
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py | 143
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py | 115
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py | 398
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py | 313
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py | 249
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py | 198
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py | 574
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py | 175
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py | 470
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py | 213
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_eula.py | 131
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py | 944
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py | 308
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_groupquota.py | 321
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py | 1548
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py | 279
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_keytabs.py | 254
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py | 315
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py | 490
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_messages.py | 193
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py | 224
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py | 158
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py | 124
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_pingtrace.py | 277
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py | 2079
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py | 155
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py | 126
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py | 243
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py | 314
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py | 436
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py | 124
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py | 379
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py | 210
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py | 357
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py | 347
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py | 198
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py | 201
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py | 136
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_tz.py | 206
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py | 211
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py | 269
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_userquota.py | 315
-rw-r--r--  ansible_collections/purestorage/flashblade/plugins/modules/purefb_virtualhost.py | 142
-rw-r--r--  ansible_collections/purestorage/flashblade/requirements.txt | 5
-rw-r--r--  ansible_collections/purestorage/flashblade/roles/.keep | 0
-rw-r--r--  ansible_collections/purestorage/flashblade/settings.json | 8
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt | 2
-rw-r--r--  ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt | 2
-rw-r--r--  ansible_collections/purestorage/fusion/.github/CONTRIBUTING.md | 19
-rw-r--r--  ansible_collections/purestorage/fusion/.github/ISSUE_TEMPLATE/bug_report.md | 38
-rw-r--r--  ansible_collections/purestorage/fusion/.github/ISSUE_TEMPLATE/feature_request.md | 20
-rw-r--r--  ansible_collections/purestorage/fusion/.github/pull_request_template.md | 25
-rw-r--r--  ansible_collections/purestorage/fusion/.github/workflows/ansible-lint.yaml | 10
-rw-r--r--  ansible_collections/purestorage/fusion/.github/workflows/black.yaml | 11
-rw-r--r--  ansible_collections/purestorage/fusion/.github/workflows/create-release.yaml | 117
-rw-r--r--  ansible_collections/purestorage/fusion/.github/workflows/main.yml | 66
-rw-r--r--  ansible_collections/purestorage/fusion/.github/workflows/stale.yml | 19
-rw-r--r--  ansible_collections/purestorage/fusion/.gitignore | 12
-rw-r--r--  ansible_collections/purestorage/fusion/.yamllint | 7
-rw-r--r--  ansible_collections/purestorage/fusion/CHANGELOG.rst | 226
-rw-r--r--  ansible_collections/purestorage/fusion/COPYING.GPLv3 | 674
-rw-r--r--  ansible_collections/purestorage/fusion/FILES.json | 1209
-rw-r--r--  ansible_collections/purestorage/fusion/MANIFEST.json | 34
-rw-r--r--  ansible_collections/purestorage/fusion/README.md | 98
-rw-r--r--  ansible_collections/purestorage/fusion/changelogs/.plugin-cache.yaml | 114
-rw-r--r--  ansible_collections/purestorage/fusion/changelogs/changelog.yaml | 345
-rw-r--r--  ansible_collections/purestorage/fusion/changelogs/config.yaml | 32
-rw-r--r--  ansible_collections/purestorage/fusion/meta/runtime.yml | 12
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/create_array.yml | 17
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/create_availability_zone.yml | 13
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/create_tenant_space.yml | 12
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_all.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_api_clients.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_arrays.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_availability_zones.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_hardware_types.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_hosts.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_interfaces.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_network_interface_groups.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_placement_groups.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_protection_policies.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_roles.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_snapshots.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_storage_classes.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_storage_endpoints.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_storage_services.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_tenant_spaces.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_tenants.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_users.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/list_volumes.yml | 14
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/make_tenant_admin.yml | 12
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/remove_array.yml | 17
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/setup_infrastructure.yml | 64
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/setup_protection_policies.yml | 13
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/setup_storage_service_class.yml | 24
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/simple/setup_workloads.yml | 71
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/tasks/.keep | 0
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/templates/.keep | 0
-rw-r--r--  ansible_collections/purestorage/fusion/playbooks/vars/.keep | 0
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/doc_fragments/purestorage.py | 56
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/inventory/__init__.py | 0
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/module_utils/errors.py | 291
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/module_utils/fusion.py | 183
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/module_utils/getters.py | 99
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/module_utils/networking.py | 76
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/module_utils/operations.py | 42
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/module_utils/parsing.py | 75
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/module_utils/prerequisites.py | 162
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/module_utils/startup.py | 26
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_api_client.py | 139
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_array.py | 265
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_az.py | 162
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_hap.py | 312
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_hw.py | 88
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_info.py | 1130
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_ni.py | 244
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_nig.py | 274
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_pg.py | 278
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_pp.py | 187
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_ra.py | 281
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_region.py | 180
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_sc.py | 255
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_se.py | 507
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_ss.py | 208
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_tenant.py | 169
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_tn.py | 122
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_ts.py | 187
-rw-r--r--  ansible_collections/purestorage/fusion/plugins/modules/fusion_volume.py | 450
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/README.md | 35
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/__init__.py | 0
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_api_client.py | 361
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_array.py | 1331
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_az.py | 717
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_hap.py | 889
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_hw.py | 115
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_info.py | 2383
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_nig.py | 1239
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_pg.py | 1595
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_pp.py | 528
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_ra.py | 813
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_region.py | 798
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_sc.py | 1240
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_se.py | 1039
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_ss.py | 930
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_tenant.py | 803
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_ts.py | 922
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/test_fusion_volume.py | 715
-rw-r--r--  ansible_collections/purestorage/fusion/tests/functional/utils.py | 116
-rw-r--r--  ansible_collections/purestorage/fusion/tests/helpers.py | 29
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/README.md | 10
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/integration_config.template | 6
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_az/tasks/main.yml | 43
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_hap/tasks/main.yml | 42
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ni/tasks/main.yml | 37
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_nig/tasks/main.yml | 48
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_pg/tasks/main.yml | 95
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_pp/tasks/main.yml | 43
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_region/tasks/main.yml | 53
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_sc/tasks/main.yml | 94
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_se/tasks/main.yml | 100
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ss/tasks/main.yml | 77
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_tenant/tasks/main.yml | 41
-rw-r--r--  ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ts/tasks/main.yml | 62
-rw-r--r--  ansible_collections/purestorage/fusion/tests/unit/README.md | 15
-rw-r--r--  ansible_collections/purestorage/fusion/tests/unit/mocks/__init__.py | 0
-rw-r--r--  ansible_collections/purestorage/fusion/tests/unit/mocks/module_mock.py | 38
-rw-r--r--  ansible_collections/purestorage/fusion/tests/unit/mocks/operation_mock.py | 24
-rw-r--r--  ansible_collections/purestorage/fusion/tests/unit/module_utils/__init__.py | 0
-rw-r--r--  ansible_collections/purestorage/fusion/tests/unit/module_utils/test_networking.py | 58
-rw-r--r--  ansible_collections/purestorage/fusion/tests/unit/module_utils/test_operations.py | 230
-rw-r--r--  ansible_collections/purestorage/fusion/tests/unit/module_utils/test_parsing.py | 138
-rw-r--r--  ansible_collections/purestorage/fusion/tests/unit/module_utils/test_prerequisites.py | 116
-rw-r--r--  ansible_collections/purestorage/fusion/tests/unit/modules/__init__.py | 0
-rw-r--r--  ansible_collections/purestorage/fusion/tests/unit/modules/test_fusion_az.py | 446
537 files changed, 78327 insertions, 0 deletions
diff --git a/ansible_collections/purestorage/flasharray/.git-blame-ignore-revs b/ansible_collections/purestorage/flasharray/.git-blame-ignore-revs
new file mode 100644
index 000000000..4163f56f3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.git-blame-ignore-revs
@@ -0,0 +1,2 @@
+# Migrate code style to Black
+cd83560534efff3d1ac62b5ff6e0e498d59507e9
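This file lists commits that `git blame` should skip, so the bulk Black reformatting commit does not mask the real author of each line. A minimal sketch of how a contributor might use it, relying only on standard git options (the module path is just an illustrative pick from the diffstat above):

```bash
# One-off: skip the listed revisions for a single blame run
git blame --ignore-revs-file .git-blame-ignore-revs plugins/modules/purefa_volume.py

# Per-clone: make plain `git blame` always skip them
git config blame.ignoreRevsFile .git-blame-ignore-revs
```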
diff --git a/ansible_collections/purestorage/flasharray/.github/CONTRIBUTING.md b/ansible_collections/purestorage/flasharray/.github/CONTRIBUTING.md
new file mode 100644
index 000000000..c2c5bbfd8
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.github/CONTRIBUTING.md
@@ -0,0 +1,19 @@
+# WELCOME TO PURE STORAGE FLASH-ARRAY ANSIBLE COLLECTION GITHUB
+
+Hi! Nice to see you here!
+
+## QUESTIONS?
+
+The GitHub issue tracker is not the best place for questions for various reasons, but the [mailing list](mailto:pure-ansible-team@purestorage.com) is a very helpful place for them.
+
+## CONTRIBUTING?
+
+By contributing you agree that these contributions are your own (or approved by your employer) and you grant a full, complete, irrevocable copyright license to all users and developers of the project, present and future, pursuant to the license of the project.
+
+## BUG TO REPORT?
+
+You can report bugs or make enhancement requests at the [FlashArray Collection GitHub issue page](http://github.com/Pure-Storage-Ansible/FlashArray-Collection/issues/new/choose) by filling out the issue template that will be presented.
+
+Also please make sure you are testing on the latest released version of Ansible or the development branch; see the [Installation Guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for details.
+
+Thanks!
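Since the guide asks reporters to test against the latest released Ansible or the devel branch, here is a quick sketch of how one might check and upgrade an environment, assuming a pip-based install:

```bash
# Check what you are currently running
ansible --version
ansible-galaxy collection list purestorage.flasharray

# Upgrade to the latest released ansible-core from PyPI
python -m pip install --upgrade ansible-core
```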
diff --git a/ansible_collections/purestorage/flasharray/.github/ISSUE_TEMPLATE/bug_report.md b/ansible_collections/purestorage/flasharray/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 000000000..dd84ea782
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,38 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Browser [e.g. chrome, safari]
+ - Version [e.g. 22]
+
+**Smartphone (please complete the following information):**
+ - Device: [e.g. iPhone6]
+ - OS: [e.g. iOS8.1]
+ - Browser [e.g. stock browser, safari]
+ - Version [e.g. 22]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/ansible_collections/purestorage/flasharray/.github/ISSUE_TEMPLATE/feature_request.md b/ansible_collections/purestorage/flasharray/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 000000000..bbcbbe7d6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/ansible_collections/purestorage/flasharray/.github/bug_report_template.md b/ansible_collections/purestorage/flasharray/.github/bug_report_template.md
new file mode 100644
index 000000000..5e2749fb9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.github/bug_report_template.md
@@ -0,0 +1,58 @@
+<!--- Verify first that your issue is not already reported on GitHub -->
+<!--- Also test if the latest release and devel branch are affected too -->
+<!--- Complete *all* sections as described, this form is processed automatically -->
+
+##### SUMMARY
+<!--- Explain the problem briefly below -->
+
+##### ISSUE TYPE
+- Bug Report
+
+##### COMPONENT NAME
+<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
+
+##### ANSIBLE VERSION
+<!--- Paste verbatim output from "ansible --version" between quotes -->
+```paste below
+
+```
+
+##### COLLECTION VERSION
+<!--- Paste verbatim output from "ansible-galaxy collection list <namespace>.<collection>" between the quotes
+for example: ansible-galaxy collection list community.general
+-->
+```paste below
+
+```
+
+##### CONFIGURATION
+<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
+```paste below
+
+```
+
+##### OS / ENVIRONMENT
+<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
+
+
+##### STEPS TO REPRODUCE
+<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
+
+<!--- Paste example playbooks or commands between quotes below -->
+```yaml
+
+```
+
+<!--- HINT: You can paste gist.github.com links for larger files -->
+
+##### EXPECTED RESULTS
+<!--- Describe what you expected to happen when running the steps above -->
+
+
+##### ACTUAL RESULTS
+<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
+
+<!--- Paste verbatim command output between quotes -->
+```paste below
+
+```
diff --git a/ansible_collections/purestorage/flasharray/.github/feature_request_template.md b/ansible_collections/purestorage/flasharray/.github/feature_request_template.md
new file mode 100644
index 000000000..cec2d6dad
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.github/feature_request_template.md
@@ -0,0 +1,21 @@
+<!--- Verify first that your feature was not already discussed on GitHub -->
+<!--- Complete *all* sections as described, this form is processed automatically -->
+
+##### SUMMARY
+<!--- Describe the new feature/improvement briefly below -->
+
+##### ISSUE TYPE
+- Feature Idea
+
+##### COMPONENT NAME
+<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
+
+##### ADDITIONAL INFORMATION
+<!--- Describe how the feature would be used, why it is needed and what it would solve -->
+
+<!--- Paste example playbooks or commands between quotes below -->
+```yaml
+
+```
+
+<!--- HINT: You can also paste gist.github.com links for larger files -->
diff --git a/ansible_collections/purestorage/flasharray/.github/pull_request_template.md b/ansible_collections/purestorage/flasharray/.github/pull_request_template.md
new file mode 100644
index 000000000..27079cb18
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.github/pull_request_template.md
@@ -0,0 +1,25 @@
+##### SUMMARY
+<!--- Describe the change below, including rationale and design decisions -->
+
+<!--- HINT: Include "Fixes #nnn" if you are fixing an existing issue -->
+
+##### ISSUE TYPE
+<!--- Pick one below and delete the rest -->
+- Bugfix Pull Request
+- Docs Pull Request
+- Feature Pull Request
+- New Module Pull Request
+- New Role Pull Request
+
+##### COMPONENT NAME
+<!--- Write the short name of the module, plugin, task or feature below -->
+
+##### ADDITIONAL INFORMATION
+<!--- Include additional information to help people understand the change here -->
+<!--- A step-by-step reproduction of the problem is helpful if there is no related issue -->
+- All new PRs must include a changelog fragment (see the example sketch after this template)
+- Details of naming convention and format can be found [here](https://docs.ansible.com/ansible/latest/community/development_process.html#creating-a-changelog-fragment)
+<!--- Paste verbatim command output below, e.g. before and after your change -->
+```paste below
+
+```
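For reference, the changelog fragments required above are small YAML files added under `changelogs/fragments/` in the collection; a minimal sketch of one, with a hypothetical filename and wording, might look like:

```yaml
# changelogs/fragments/999_example_fix.yaml -- hypothetical fragment
bugfixes:
  - purefa_example - Fixed an example issue with volume handling
minor_changes:
  - purefa_example - Added an example option to illustrate the fragment format
```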
diff --git a/ansible_collections/purestorage/flasharray/.github/workflows/ansible-lint.yml b/ansible_collections/purestorage/flasharray/.github/workflows/ansible-lint.yml
new file mode 100644
index 000000000..0b2102184
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.github/workflows/ansible-lint.yml
@@ -0,0 +1,10 @@
+name: Ansible Lint
+on: [push, pull_request]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: Run ansible-lint
+ uses: ansible-community/ansible-lint-action@main
diff --git a/ansible_collections/purestorage/flasharray/.github/workflows/black.yaml b/ansible_collections/purestorage/flasharray/.github/workflows/black.yaml
new file mode 100644
index 000000000..e5f9711f6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.github/workflows/black.yaml
@@ -0,0 +1,11 @@
+name: Lint
+
+on: [push, pull_request]
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v3
+ - uses: psf/black@stable
diff --git a/ansible_collections/purestorage/flasharray/.github/workflows/main.yml b/ansible_collections/purestorage/flasharray/.github/workflows/main.yml
new file mode 100644
index 000000000..e41c0e099
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.github/workflows/main.yml
@@ -0,0 +1,62 @@
+name: Pure Storage Ansible CI
+
+on:
+ pull_request:
+ push:
+ schedule:
+ - cron: '25 10 * * *'
+
+jobs:
+ build:
+ name: Build flasharray on Ansible ${{ matrix.ansible }} (Python ${{ matrix.python-version }})
+ runs-on: ubuntu-20.04
+ strategy:
+ matrix:
+ ansible:
+ - stable-2.11
+ - stable-2.12
+ - stable-2.13
+ - stable-2.14
+ - stable-2.15
+ - devel
+ python-version:
+ - 3.8
+ - 3.9
+ - "3.10"
+ - "3.11"
+ exclude:
+ - python-version: "3.11"
+ ansible: stable-2.11
+ - python-version: "3.11"
+ ansible: stable-2.12
+ - python-version: "3.11"
+ ansible: stable-2.13
+ - python-version: "3.10"
+ ansible: stable-2.11
+ - python-version: 3.8
+ ansible: stable-2.14
+ - python-version: 3.8
+ ansible: stable-2.15
+ - python-version: 3.8
+ ansible: devel
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v3
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v3
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+          python${{ matrix.python-version }} -m pip install --upgrade pip
+          python${{ matrix.python-version }} -m pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
+
+ - name: Run sanity tests
+ run: |
+ pwd
+ mkdir -p ansible_collections/purestorage/flasharray
+          rsync -av . ansible_collections/purestorage/flasharray --exclude ansible_collections/purestorage/flasharray
+ cd ansible_collections/purestorage/flasharray
+ ansible-test sanity -v --color --python ${{ matrix.python-version }} --docker
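The rsync staging in the sanity step above exists because ansible-test expects the collection to live under an `ansible_collections/<namespace>/<name>` directory tree. The same layout could alternatively be produced at checkout time via the checkout action's `path` input; a minimal sketch (not what this workflow currently does):

```yaml
- name: Check out code directly into the required collection path
  uses: actions/checkout@v3
  with:
    path: ansible_collections/purestorage/flasharray
```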
diff --git a/ansible_collections/purestorage/flasharray/.github/workflows/stale.yml b/ansible_collections/purestorage/flasharray/.github/workflows/stale.yml
new file mode 100644
index 000000000..7bbc0505b
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.github/workflows/stale.yml
@@ -0,0 +1,19 @@
+name: Mark stale issues and pull requests
+
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+jobs:
+ stale:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/stale@v1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+        stale-issue-message: 'This issue has been marked stale because it has had no recent activity'
+        stale-pr-message: 'This pull request has been marked stale because it has had no recent activity'
+ stale-issue-label: 'no-issue-activity'
+ stale-pr-label: 'no-pr-activity'
diff --git a/ansible_collections/purestorage/flasharray/.gitignore b/ansible_collections/purestorage/flasharray/.gitignore
new file mode 100644
index 000000000..e2f41b439
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.gitignore
@@ -0,0 +1,3 @@
+*.tar.gz
+.pylintrc
+collections/ansible_collections/purestorage/flasharray/tests/output/*
diff --git a/ansible_collections/purestorage/flasharray/.pylintrc b/ansible_collections/purestorage/flasharray/.pylintrc
new file mode 100644
index 000000000..cc8d948d4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.pylintrc
@@ -0,0 +1,587 @@
+[MASTER]
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loaded into the active Python interpreter and may
+# run arbitrary code.
+extension-pkg-whitelist=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Specify a configuration file.
+#rcfile=
+
+# When enabled, pylint would attempt to guess common misconfiguration and emit
+# user-friendly hints instead of false-positive error messages
+suggestion-mode=yes
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W"
+disable=
+ abstract-method,
+ access-member-before-definition,
+ ansible-deprecated-version,
+ arguments-differ,
+ assignment-from-no-return,
+ assignment-from-none,
+ attribute-defined-outside-init,
+ bad-continuation,
+ bad-indentation,
+ bad-mcs-classmethod-argument,
+ broad-except,
+ c-extension-no-member,
+ cell-var-from-loop,
+ chained-comparison,
+ comparison-with-callable,
+ consider-iterating-dictionary,
+ consider-merging-isinstance,
+ consider-using-dict-comprehension,
+ consider-using-enumerate,
+ consider-using-get,
+ consider-using-in,
+ consider-using-set-comprehension,
+ consider-using-ternary,
+ deprecated-lambda,
+ deprecated-method,
+ deprecated-module,
+ eval-used,
+ exec-used,
+ expression-not-assigned,
+ fixme,
+ function-redefined,
+ global-statement,
+ global-variable-undefined,
+ import-error,
+ import-self,
+ inconsistent-return-statements,
+ invalid-envvar-default,
+ invalid-name,
+ invalid-sequence-index,
+ keyword-arg-before-vararg,
+ len-as-condition,
+ line-too-long,
+ literal-comparison,
+ locally-disabled,
+ method-hidden,
+ misplaced-comparison-constant,
+ missing-docstring,
+ no-else-raise,
+ no-else-return,
+ no-init,
+ no-member,
+ no-name-in-module,
+ no-self-use,
+ no-value-for-parameter,
+ non-iterator-returned,
+ not-a-mapping,
+ not-an-iterable,
+ not-callable,
+ old-style-class,
+ pointless-statement,
+ pointless-string-statement,
+ possibly-unused-variable,
+ protected-access,
+ redefined-argument-from-local,
+ redefined-builtin,
+ redefined-outer-name,
+ redefined-variable-type,
+ reimported,
+ relative-import,
+ signature-differs,
+ simplifiable-if-expression,
+ simplifiable-if-statement,
+ subprocess-popen-preexec-fn,
+ super-init-not-called,
+ superfluous-parens,
+ too-few-public-methods,
+ too-many-ancestors,
+ too-many-arguments,
+ too-many-boolean-expressions,
+ too-many-branches,
+ too-many-function-args,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-public-methods,
+ too-many-return-statements,
+ too-many-statements,
+    trailing-comma-tuple,
+ try-except-raise,
+ unbalanced-tuple-unpacking,
+ undefined-loop-variable,
+ unexpected-keyword-arg,
+ ungrouped-imports,
+ unidiomatic-typecheck,
+ unnecessary-pass,
+ unsubscriptable-object,
+ unsupported-assignment-operation,
+ unsupported-delete-operation,
+ unsupported-membership-test,
+ unused-argument,
+ unused-import,
+ unused-variable,
+ used-before-assignment,
+ useless-object-inheritance,
+ useless-return,
+ useless-super-delegation,
+ wrong-import-order,
+ wrong-import-position,
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=c-extension-no-member
+
+
+[REPORTS]
+
+# Python expression which should return a score less than or equal to 10 (10 is
+# the highest score). You have access to the variables 'error', 'warning',
+# 'refactor', 'convention' and 'statement', which respectively contain the
+# number of messages in each category and the total number of statements
+# analyzed. This is used by the global evaluation report (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio). You can also give a reporter class, e.g.
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Activate the evaluation score.
+score=yes
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+# Complete name of functions that never return. When checking for
+# inconsistent-return-statements if a never returning function is called then
+# it will be considered as an explicit return statement and no message will be
+# printed.
+never-returning-functions=optparse.Values,sys.exit
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,
+ _cb
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored. Defaults to names
+# with a leading underscore.
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,io,builtins
+
+
+[BASIC]
+
+# Naming style matching correct argument names
+argument-naming-style=snake_case
+
+# Regular expression matching correct argument names. Overrides argument-
+# naming-style
+#argument-rgx=
+
+# Naming style matching correct attribute names
+attr-naming-style=snake_case
+
+# Regular expression matching correct attribute names. Overrides attr-naming-
+# style
+#attr-rgx=
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata,
+ _,
+
+# Naming style matching correct class attribute names
+class-attribute-naming-style=any
+
+# Regular expression matching correct class attribute names. Overrides class-
+# attribute-naming-style
+#class-attribute-rgx=
+
+# Naming style matching correct class names
+class-naming-style=PascalCase
+
+# Regular expression matching correct class names. Overrides class-naming-style
+#class-rgx=
+
+# Naming style matching correct constant names
+const-naming-style=UPPER_CASE
+
+# Regular expression matching correct constant names. Overrides const-naming-
+# style
+#const-rgx=
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+# Naming style matching correct function names
+function-naming-style=snake_case
+
+# Regular expression matching correct function names. Overrides function-
+# naming-style
+#function-rgx=
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,
+ j,
+ k,
+ f,
+ e,
+ ex,
+ Run,
+ C,
+ __metaclass__,
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Naming style matching correct inline iteration names
+inlinevar-naming-style=any
+
+# Regular expression matching correct inline iteration names. Overrides
+# inlinevar-naming-style
+#inlinevar-rgx=
+
+# Naming style matching correct method names
+method-naming-style=snake_case
+
+# Regular expression matching correct method names. Overrides method-naming-
+# style
+#method-rgx=
+
+# Naming style matching correct module names
+module-naming-style=snake_case
+
+# Regular expression matching correct module names. Overrides module-naming-
+# style
+#module-rgx=
+module-rgx=[a-z_][a-z0-9_-]{2,40}$
+method-rgx=[a-z_][a-z0-9_]{2,40}$
+function-rgx=[a-z_][a-z0-9_]{2,40}$
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+property-classes=abc.abstractproperty
+
+# Naming style matching correct variable names
+variable-naming-style=snake_case
+
+# Regular expression matching correct variable names. Overrides variable-
+# naming-style
+#variable-rgx=
+
+
+[SPELLING]
+
+# Limits count of emitted suggestions for spelling mistakes
+max-spelling-suggestions=4
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to the private dictionary indicated in
+# the --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Maximum number of characters on a single line.
+max-line-length=160
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,
+ dict-separator
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference
+# can return multiple potential results while evaluating a Python object, but
+# some branches might not be evaluated, which results in partial inference. In
+# that case, it might be useful to still emit no-member and other checks for
+# the rest of the inferred objects.
+ignore-on-opaque-inference=yes
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis. It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+    _MovedItems,
+
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+
+[SIMILARITIES]
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,
+ XXX,
+ TODO
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,
+ __new__,
+ setUp
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,
+ _fields,
+ _replace,
+ _source,
+ _make
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[IMPORTS]
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,
+ TERMIOS,
+ Bastion,
+ rexec
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of every (i.e. internal and external) dependency in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/ansible_collections/purestorage/flasharray/.yamllint b/ansible_collections/purestorage/flasharray/.yamllint
new file mode 100644
index 000000000..6c19f43f7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/.yamllint
@@ -0,0 +1,7 @@
+extends: default
+
+rules:
+ document-start: disable
+ indentation: disable
+ line-length:
+ max: 200
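With `document-start` disabled, files checked by this configuration need not begin with `---`; a small hypothetical task file that passes these rules:

```yaml
# tasks/example.yml -- hypothetical file; no leading '---' required here
- name: Example task within the relaxed 200-character line limit
  ansible.builtin.debug:
    msg: "This file satisfies the yamllint configuration above"
```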
diff --git a/ansible_collections/purestorage/flasharray/CHANGELOG.rst b/ansible_collections/purestorage/flasharray/CHANGELOG.rst
new file mode 100644
index 000000000..60e168bbd
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/CHANGELOG.rst
@@ -0,0 +1,509 @@
+====================================
+Purestorage.Flasharray Release Notes
+====================================
+
+.. contents:: Topics
+
+
+v1.19.1
+=======
+
+Bugfixes
+--------
+
+- purefa_info - Fixed missing arguments for google_offload and pods
+
+v1.19.0
+=======
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_logging - Manage Pure Storage FlashArray Audit and Session logs
+
+v1.18.0
+=======
+
+Release Summary
+---------------
+
+| FlashArray Collection v1.18 removes module-side support for Python 2.7.
+| The minimum required Python version for the FlashArray Collection is Python 3.6.
+
+
+Minor Changes
+-------------
+
+- purefa_hg - Changed parameter hostgroup to name for consistency. Added hostgroup as an alias for backwards compatibility.
+- purefa_hg - Exit gracefully, rather than failing when a specified volume does not exist
+- purefa_host - Exit gracefully, rather than failing when a specified volume does not exist
+- purefa_info - Added network neighbors info to `network` subset
+- purefa_pod - Added support for pod quotas (from REST 2.23)
+- purefa_snap - Added 'suffix' to the response when a snapshot has been created.
+- purefa_volume - Added additional volume facts on volume update, or when no change has occurred
+
+Bugfixes
+--------
+
+- purefa_network - Resolves network port setting idempotency issue
+- purefa_pg - Fixed issue where volumes could not be added to a PG when one of the arrays was undergoing a failover.
+- purefa_snap - Fixed issue where system-generated suffixes were not allowed and removed an unnecessary warning message.
+
+v1.17.2
+=======
+
+v1.17.1
+=======
+
+Bugfixes
+--------
+
+- purefa_info - Fix backwards compatibility issue in the array capacity REST response
+- purefa_info - Resolves issue in AC environment where REST v2 host list mismatches REST v1 due to remote hosts.
+- purefa_info - Resolves issue with destroyed pgroup snapshot on an offload target not having a time remaining value
+- purefa_pg - Resolves issue with destroyed pgroup snapshot on an offload target not having a time remaining value
+
+v1.17.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_network - Added support for NVMe-RoCE and NVMe-TCP service types
+- purefa_user - Added Ops Admin role to choices
+- purefa_vlan - Added support for NVMe-TCP service type
+
+Bugfixes
+--------
+
+- purefa_host - Fixed parameter name
+- purefa_info - Fix missing FC target ports for host
+- purefa_pgsched - Fix error when setting schedule for pod based protection group
+- purefa_vg - Fix issue with VG creation on newer Purity versions
+- purefa_volume - Ensure promotion_status is returned correctly on creation
+- purefa_volume - Fix bug when overwriting volume using invalid parameters
+- purefa_volume - Fixed idempotency bug when creating volumes with QoS
+
+v1.16.2
+=======
+
+v1.16.1
+=======
+
+Bugfixes
+--------
+
+- purefa_volume - Fixed issue with promotion status not being called correctly
+
+v1.16.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_host - Add support for VLAN ID tagging for a host (Requires Purity//FA 6.3.5)
+- purefa_info - Add new subset alerts (see the example sketch below)
+- purefa_info - Added default protection information to `config` section
+- purefa_volume - Added support for volume promotion/demotion
+
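A minimal sketch of requesting the new `alerts` subset through purefa_info's `gather_subset` parameter; the URL and token are placeholders:

```yaml
- name: Collect only the alerts subset from a FlashArray (placeholder values)
  purestorage.flasharray.purefa_info:
    gather_subset:
      - alerts
    fa_url: 10.10.10.2
    api_token: 00000000-0000-0000-0000-000000000000
  register: array_alerts
```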
+Bugfixes
+--------
+
+- purefa - Remove unneeded REST version check as it causes issues with REST mismatches
+- purefa_ds - Fixed dict syntax error
+- purefa_info - Fixed issue with DNS reporting in Purity//FA 6.4.0 with non-FA-File system
+- purefa_info - Fixed error in policies subsection due to API issue
+- purefa_info - Fixed race condition with protection groups
+- purefa_smtp - Fix parameter name
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_snmp_agent - Configure the FlashArray SNMP Agent
+
+v1.15.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_network - Added support for servicelist updates
+- purefa_vlan - Extend VLAN support to cover NVMe-RoCE and file interfaces
+
+Bugfixes
+--------
+
+- purefa.py - Fix issue with Purity version numbers for development versions
+- purefa_policy - Fixed missing parameters in function calls
+- purefa_vg - Fix TypeError when using newer Purity versions and setting VG QoS
+
+v1.14.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_ad - Add support for TLS and joining existing AD account
+- purefa_dns - Support multiple DNS configurations from Purity//FA 6.3.3
+- purefa_info - Add NFS policy user mapping status
+- purefa_info - Add support for Virtual Machines and Snapshots
+- purefa_info - Ensure global admin lockout duration is measured in seconds
+- purefa_info - Support multiple DNS configurations
+- purefa_inventory - Add REST 2.x support and SFP details for Purity//FA 6.3.4 and higher
+- purefa_inventory - Change response dict name to `purefa_inv` so it doesn't clash with the info module response dict
+- purefa_inventory - Add chassis information to inventory
+- purefa_pg - Changed parameter `pgroup` to `name`. Allow `pgroup` as alias for backwards compatibility.
+- purefa_policy - Add ``all_squash``, ``anonuid`` and ``anongid`` to NFS client rules options
+- purefa_policy - Add support for NFS policy user mapping
+- purefa_volume - Default Protection Group support added for volume creation and copying from Purity//FA 6.3.4
+
+Bugfixes
+--------
+
+- purefa_dns - Corrects logic where API responds with an empty list rather than a list with a single empty string in it.
+- purefa_ds - Add new parameter `force_bind_password` (default = True) to allow idempotency for module
+- purefa_hg - Ensure volume disconnection from a hostgroup is idempotent
+- purefa_ntp - Corrects workflow so that the desired and current states are checked before marking the changed flag to true during an absent run
+- purefa_pg - Correct issue where the target for a protection group is not correctly amended
+- purefa_pg - Ensure deleted protection group can be correctly recovered
+- purefa_pg - Fix idempotency issue for protection group targets
+- purefa_pgsched - Allow zero as a valid value for appropriate schedule parameters
+- purefa_pgsched - Fix issue where 0 was not correctly handled for replication schedule
+- purefa_pgsnap - Resolved intermittent error where `latest` snapshot is not complete and can fail. Only select latest completed snapshot to restore from.
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_default_protection - Manage SafeMode default protection for a Pure Storage FlashArray
+- purestorage.flasharray.purefa_messages - List FlashArray Alert Messages
+
+v1.13.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_fs - Add support for replicated file systems
+- purefa_info - Add QoS information for volume groups
+- purefa_info - Add info for protection group safe mode setting (Requires Purity//FA 6.3.0 or higher)
+- purefa_info - Add info for protection group snapshots
+- purefa_info - Add priority adjustment information for volumes and volume groups
+- purefa_info - Split volume groups into live and deleted dicts
+- purefa_pg - Add support for protection group SafeMode. Requires Purity//FA 6.3.0 or higher
+- purefa_policy - Allow directories in snapshot policies to be managed
+- purefa_vg - Add DMM Priority Adjustment support
+- purefa_volume - Add support for DMM Priority Adjustment
+- purefa_volume - Provide volume facts for volume after recovery
+
+Bugfixes
+--------
+
+- purefa_host - Allow multi-host creation without requiring a suffix string
+- purefa_info - Fix issue where remote arrays are not in a valid connected state
+- purefa_policy - Fix idempotency issue with quota policy rules
+- purefa_policy - Fix issue when creating multiple rules in an NFS policy
+
+v1.12.1
+=======
+
+Minor Changes
+-------------
+
+- All modules - Change examples to use FQCN for module (see the example sketch below)
+
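A minimal sketch of what an FQCN-style example task looks like; the volume name, URL and token are placeholders:

```yaml
- name: Create a volume using the module's fully-qualified collection name
  purestorage.flasharray.purefa_volume:
    name: example-volume
    size: 1T
    fa_url: 10.10.10.2
    api_token: 00000000-0000-0000-0000-000000000000
```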
+Bugfixes
+--------
+
+- purefa_info - Fix space reporting issue
+- purefa_subnet - Fix subnet update checks when no gateway in existing subnet configuration
+
+v1.12.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_admin - New module to set global admin settings, including SSO
+- purefa_dirsnap - Add support to rename directory snapshots not managed by a snapshot policy
+- purefa_info - Add SAML2SSO configuration information
+- purefa_info - Add Safe Mode status
+- purefa_info - Fix Active Directory configuration details
+- purefa_network - Resolve bug stopping the management IP address from being changed correctly
+- purefa_offload - Add support for multiple, homogeneous, offload targets
+- purefa_saml - Add support for SAML2 SSO IdPs
+- purefa_volume - Provide volume facts in all cases, including when no change has occurred.
+
+Deprecated Features
+-------------------
+
+- purefa_sso - Deprecated in favor of M(purefa_admin). Will be removed in Collection 2.0
+
+Bugfixes
+--------
+
+- purefa_certs - Allow a certificate to be imported over an existing SSL certificate
+- purefa_eula - Resolve EULA signing issue
+- purefa_network - Fix bug introduced with management of FC ports
+- purefa_policy - Fix issue with SMB Policy creation
+
+Known Issues
+------------
+
+- purefa_admin - Once `max_login` and `lockout` have been set there is currently no way to reset these to zero except through the FlashArray GUI
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_admin - Configure Pure Storage FlashArray Global Admin settings
+- purestorage.flasharray.purefa_saml - Manage FlashArray SAML2 service and identity providers
+
+v1.11.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_host - Deprecate ``protocol`` parameter. No longer required.
+- purefa_info - Add NVMe NGUID value for volumes
+- purefa_info - Add array, volume and snapshot detailed capacity information
+- purefa_info - Add deleted members to volume protection group info
+- purefa_info - Add snapshot policy rules suffix support
+- purefa_info - Remove directory_services field. Deprecated in Collections 1.6
+- purefa_policy - Add snapshot policy rules suffix support
+- purefa_syslog_settings - Add support to manage global syslog server settings
+- purefa_volume - Add NVMe NGUID to response dict
+
+Bugfixes
+--------
+
+- purefa_subnet - Add regex to check for correct subnet name
+- purefa_user - Add regex to check for correct username
+
+v1.10.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_ds - Add ``join_ou`` parameter for AD account creation
+- purefa_kmip - Add support for KMIP server management
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_kmip - Manage FlashArray KMIP server objects
+
+v1.9.0
+======
+
+Minor Changes
+-------------
+
+- purefa_ad - Increase number of kerberos and directory servers to be 3 for each.
+- purefa_ad - New module to manage Active Directory accounts
+- purefa_dirsnap - New modules to manage FA-Files directory snapshots
+- purefa_eradication - New module to set deleted items eradication timer
+- purefa_info - Add data-at-rest and eradication timer information to default dict
+- purefa_info - Add high-level count for directory quotas and details for all FA-Files policies
+- purefa_info - Add volume Page 83 NAA information for volume details
+- purefa_network - Add support for enable/disable of FC ports
+- purefa_policy - Add support for FA-files Directory Quotas and associated rules and members
+- purefa_sso - Add support for setting FlashArray Single Sign-On from Pure1 Manage
+- purefa_volume - Add volume Page 83 NAA information to response dict
+
+Bugfixes
+--------
+
+- purefa_host - Rollback host creation if initiators already used by another host
+- purefa_policy - Fix incorrect protocol endpoint invocation
+- purefa_ra - Fix disable feature for remote assist; this didn't work due to an error in the check logic
+- purefa_vg - Correct issue when setting or changing Volume Group QoS
+- purefa_volume - Fix incorrect API version check for ActiveDR support
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_ad - Manage FlashArray Active Directory Account
+- purestorage.flasharray.purefa_dirsnap - Manage FlashArray File System Directory Snapshots
+- purestorage.flasharray.purefa_eradication - Configure Pure Storage FlashArray Eradication Timer
+- purestorage.flasharray.purefa_sso - Configure Pure Storage FlashArray Single Sign-On
+
+v1.8.0
+======
+
+Minor Changes
+-------------
+
+- purefa_certs - New module for managing SSL certificates
+- purefa_volume - New parameter pgroup to specify an existing protection group in which to put created volume(s).
+
+Bugfixes
+--------
+
+- purefa_dsrole - If using None for group or group_base an incorrect change state was applied
+- purefa_network - Allow gateway parameter to be set as None - needed for non-routing iSCSI ports
+- purefa_pg - Check to ensure protection group name meets naming convention
+- purefa_pgsnap - Fail with warning if trying to restore to a stretched ActiveCluster pod
+- purefa_volume - Ensure REST version is high enough to support promotion_status
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_certs - Manage FlashArray SSL Certificates
+
+v1.7.0
+======
+
+Minor Changes
+-------------
+
+- purefa_maintenance - New module to set maintenance windows
+- purefa_pg - Add support to rename protection groups
+- purefa_syslog - Add support for naming SYSLOG servers for Purity//FA 6.1 or higher
+
+Bugfixes
+--------
+
+- purefa_info - Fix missing protection group snapshot info for local snapshots
+- purefa_info - Resolve crash when an offload target is offline
+- purefa_pgsnap - Ensure suffix rules only implemented for state=present
+- purefa_user - Do not allow role change for breakglass user (pureuser)
+- purefa_user - Do not change role for existing user unless requested
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_maintenance - Configure Pure Storage FlashArray Maintenance Windows
+
+v1.6.2
+======
+
+Bugfixes
+--------
+
+- purefa_volume - Fix issues with moving volumes into demoted or linked pods
+
+v1.6.0
+======
+
+Minor Changes
+-------------
+
+- purefa_connect - Add support for FC-based array replication
+- purefa_ds - Add Purity v6 support for Directory Services, including Data DS and updating services
+- purefa_info - Add support for FC Replication
+- purefa_info - Add support for Remote Volume Snapshots
+- purefa_info - Update directory_services dictionary to cater for FA-Files data DS. Change DS dict format going forward. Add deprecation warning.
+- purefa_ntp - Ignore NTP configuration for CBS-based arrays
+- purefa_pg - Add support for Protection Groups in AC pods
+- purefa_snap - Add support for remote snapshot of individual volumes to offload targets
+
+Bugfixes
+--------
+
+- purefa_hg - Ensure all hostname checks are lowercase for consistency
+- purefa_pgsnap - Add check to ensure suffix name meets naming conventions
+- purefa_pgsnap - Ensure pgsnap restores work for AC PGs
+- purefa_pod - Ensure all pod names are lowercase for consistency
+- purefa_snap - Update suffix regex pattern
+- purefa_volume - Add missing variable initialization
+
+v1.5.1
+======
+
+Minor Changes
+-------------
+
+- purefa_host - Add host rename function
+- purefa_host - Add support for multi-host creation
+- purefa_vg - Add support for multiple vgroup creation
+- purefa_volume - Add support for multi-volume creation
+
+Bugfixes
+--------
+
+- purefa.py - Resolve issue when pypureclient doesn't handshake array correctly
+- purefa_dns - Fix idempotency
+- purefa_volume - Alert when volume selected for move does not exist
+
+v1.5.0
+======
+
+Minor Changes
+-------------
+
+- purefa_apiclient - New module to support API Client management
+- purefa_directory - Add support for managed directories
+- purefa_export - Add support for filesystem exports
+- purefa_fs - Add filesystem management support
+- purefa_hg - Enforce case-sensitivity rules for hostgroup objects
+- purefa_host - Enforce hostname case-sensitivity rules
+- purefa_info - Add support for FA Files features
+- purefa_offload - Add support for Google Cloud offload target
+- purefa_pg - Enforce case-sensitivity rules for protection group objects
+- purefa_policy - Add support for NFS, SMB and Snapshot policy management
+
+Bugfixes
+--------
+
+- purefa_host - Correctly remove host that is in a hostgroup
+- purefa_volume - Fix failing idempotency on eradicate volume
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_apiclient - Manage FlashArray API Clients
+- purestorage.flasharray.purefa_directory - Manage FlashArray File System Directories
+- purestorage.flasharray.purefa_export - Manage FlashArray File System Exports
+- purestorage.flasharray.purefa_fs - Manage FlashArray File Systems
+- purestorage.flasharray.purefa_policy - Manage FlashArray File System Policies
+
+v1.4.0
+======
+
+Release Summary
+---------------
+
+| Release Date: 2020-08-08
+| This changelog describes all changes made to the modules and plugins included in this collection since Ansible 2.9.0
+
+
+Major Changes
+-------------
+
+- purefa_console - manage Console Lock setting for the FlashArray
+- purefa_endpoint - manage VMware protocol-endpoints on the FlashArray
+- purefa_eula - sign, or resign, FlashArray EULA
+- purefa_inventory - get hardware inventory information from a FlashArray
+- purefa_network - manage the physical and virtual network settings on the FlashArray
+- purefa_pgsched - manage protection group snapshot and replication schedules on the FlashArray
+- purefa_pod - manage ActiveCluster pods in FlashArrays
+- purefa_pod_replica - manage ActiveDR pod replica links in FlashArrays
+- purefa_proxy - manage the phonehome HTTPS proxy setting for the FlashArray
+- purefa_smis - manage SMI-S settings on the FlashArray
+- purefa_subnet - manage network subnets on the FlashArray
+- purefa_timeout - manage the GUI idle timeout on the FlashArray
+- purefa_vlan - manage VLAN interfaces on the FlashArray
+- purefa_vnc - manage VNC for installed applications on the FlashArray
+- purefa_volume_tags - manage volume tags on the FlashArray
+
+Minor Changes
+-------------
+
+- purefa_hg - Allow LUN ID to be set for single volume
+- purefa_host - Add CHAP support
+- purefa_host - Add support for Cloud Block Store
+- purefa_host - Add volume disconnection support
+- purefa_info - Certificate times changed to human readable rather than time since epoch
+- purefa_info - new options added for information collection
+- purefa_info - return dict names changed from ``ansible_facts`` to ``ra_info`` and ``user_info`` in appropriate sections
+- purefa_offload - Add support for Azure
+- purefa_pgsnap - Add offload support
+- purefa_snap - Allow recovery of deleted snapshot
+- purefa_vg - Add QoS support
+
+Bugfixes
+--------
+
+- purefa_host - resolve hostname case inconsistencies
+- purefa_host - resolve issue found when used in Pure Storage Test Drive
diff --git a/ansible_collections/purestorage/flasharray/COPYING.GPLv3 b/ansible_collections/purestorage/flasharray/COPYING.GPLv3
new file mode 100644
index 000000000..94a9ed024
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/COPYING.GPLv3
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
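
Editor's note (not part of the license text above): as a minimal illustration of the
notice placement that the preceding "How to Apply" section describes, a hypothetical
Python module could carry the per-file notice as comments and print the short
interactive-mode notice at startup. Every name, year, and author below is a
placeholder, not anything taken from this collection:

    # example.py - a hypothetical program carrying the notices described above.
    # Copyright (C) <year> <name of author>
    #
    # This program is free software: you can redistribute it and/or modify
    # it under the terms of the GNU General Public License as published by
    # the Free Software Foundation, either version 3 of the License, or
    # (at your option) any later version.
    #
    # This program is distributed in the hope that it will be useful,
    # but WITHOUT ANY WARRANTY; without even the implied warranty of
    # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    # GNU General Public License for more details.

    def show_startup_notice():
        """Print the short interactive-mode notice suggested above."""
        print("example Copyright (C) <year> <name of author>")
        print("This program comes with ABSOLUTELY NO WARRANTY; "
              "for details type `show w'.")
        print("This is free software, and you are welcome to redistribute it")
        print("under certain conditions; type `show c' for details.")

    if __name__ == "__main__":
        show_startup_notice()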
diff --git a/ansible_collections/purestorage/flasharray/FILES.json b/ansible_collections/purestorage/flasharray/FILES.json
new file mode 100644
index 000000000..8bb40c24c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/FILES.json
@@ -0,0 +1,1853 @@
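Editor's note: the manifest below records every file in the collection with its type
and a SHA-256 checksum (directories carry null checksums; the recurring value
beginning "e3b0c442..." is the SHA-256 of empty input, which is why all of the
zero-byte ".keep" placeholders share it). A minimal sketch of how such a manifest
could be verified, assuming FILES.json sits at the collection root; the function
name and paths here are illustrative, not part of the collection:

    import hashlib
    import json
    import os

    def verify_manifest(root):
        """Check each 'file' entry in FILES.json against its recorded SHA-256."""
        with open(os.path.join(root, "FILES.json")) as fh:
            manifest = json.load(fh)
        for entry in manifest["files"]:
            if entry["ftype"] != "file" or entry["chksum_type"] != "sha256":
                continue  # skip directories, which have no checksum
            with open(os.path.join(root, entry["name"]), "rb") as fh:
                digest = hashlib.sha256(fh.read()).hexdigest()
            if digest != entry["chksum_sha256"]:
                print("MISMATCH:", entry["name"])

    # For reference: hashlib.sha256(b"").hexdigest() ==
    # "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
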
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69fb16d49892fb5d60316a051f1d27d741e71fc84f18c14ff7d388616925535e",
+ "format": 1
+ },
+ {
+ "name": ".github/feature_request_template.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4731d199ca9cbe66b2b6de02846b4860ccfb4fd0ebb2872fe6452b6cf5b73ce2",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/black.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fb3e0af2e41fb0618586a2990e6645fb9b29d1a7b64b7168c5d27af320569c8",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/ansible-lint.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c85688d98b71e3a6594530a362cd5d2cf83842ceaccd0e0fc76e233777c1cef",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/stale.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bdef4889afabcd627fc30711a0809c7468b8c9e64cbcebe1334f794a41e7bd9",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c584c3cb803d47f9b2325a565f92bea429be2f5e2a1241824ed1b2a0a99ebaf",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/feature_request.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f48c52f209a971b8e7eae4120144d28fcf8ee38a7778a7b4d8cf1ab356617d2",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/bug_report.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c8d64f29fb4536513653bf8c97da30f3340e2041b91c8952db1515d6b23a7b3",
+ "format": 1
+ },
+ {
+ "name": ".github/pull_request_template.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "565ead1b588caaa10cd6f2ed1bb6c809eb2ad93bf75da3a198690cac778432d6",
+ "format": 1
+ },
+ {
+ "name": ".github/bug_report_template.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4eb8821158c73fa62944e91e917f1d1b81fafed3adfe0e6ea373f99902bdf1d",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0b38b3ecfc8b98b6957f123c01aea90d068bc4349210b126758f8a009062a82",
+ "format": 1
+ },
+ {
+ "name": "meta/execution-environment.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6458a579fbece249677d5d5473a58da36c7c8ab0a23b136891551f96e2c9b4e",
+ "format": 1
+ },
+ {
+ "name": ".yamllint",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2970fa4875092f99825ac0da3c82d2413ce973087b9945e68fdfa7b3b1e2012e",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1de49e694e4dde079633a7fd592588252a1f37f9d9e687e9ed66acaf82248ca5",
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d19402afeb70da85f47229c6d2c91666784d4c9f4f2b3171a4d9921dc1aaa48e",
+ "format": 1
+ },
+ {
+ "name": "docs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/links.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba87531c24128b4584a8f5dc481594ff232c9c19f1324e315149e17fe685baec",
+ "format": 1
+ },
+ {
+ "name": "playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/templates/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/files/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/vars/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36c7a096434b1733c4fa08958f557f3dbbedf562d8f1bdd0ea1c469c8d5a0823",
+ "format": 1
+ },
+ {
+ "name": "settings.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02d67ecc8a46b6b4ee0955afb6bcc8e8be5739c7cc9552e0d084cb8d2dda79dd",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "301bd6ff5bc1dea2fe8e6b9295d4757ea0b569143b3ae21e3fb6cfe458e3c46d",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "787229dd578477fe1009b0d84411b5c9678bf474c0c89642bd7381d6c4803c19",
+ "format": 1
+ },
+ {
+ "name": "changelogs/210_add_rename_hgroup.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8589783011b4145d3eb5099a7d5e025e9fd2cbf50319d426f0b5b6f8e1b637af",
+ "format": 1
+ },
+ {
+ "name": "changelogs/.plugin-cache.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3df7f8ef3b35247657661745b3ed47c672699a9965e556f26aa763910b6087eb",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc5530e8f118081f497ab074144452b06f24b4771f8fa6332e0e367d86fc0f4d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/211_fix_clearing_host_inititators.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ce58291d0256cb22a7a8cb015ebfc4775474f594d5c724225875c495213d259",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/145_fix_missing_move_variable.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a212d6e202a5231066c55f1ef24f962bd544d31220b75c6820d58616b3ba3a20",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/116_add_policies.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "308c90a9b130b29db36fe30970cc4e83a86f72d909d979c251cdfa9ea37cc17d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/317_add_all_squash.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8a2830079f133a5e1f3876f720599a8b974f9337a7b7fec2d7c2957c7a0b238",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/259_fix_gateway_check.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89448cb7b64a91aaeee486714624ffa318333f43cfda73519129081df032e01a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/387_no_volume_failure.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6489276ccdaba3a849b7e24a8a5a6069fd4b62505857d2ebdd7945492b25c5ca",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/140_pod_case.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8314f833498e81e803152d5a6b8fa4992b690a8954a7b75e4f78f55f3e6281f1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/369_fix_host.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b504c0859681ad87a0b32f5d8e8671c523fdf1514ec4c46f815758a192971012",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/319_lockout.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e8e8f895f0e131b6129c80938763ba06a483e419ad5be1cf5e8e262e32d7fd4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/334_fix_vg_qos.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9963dd088bdc4526e0306809822f4137b6c60fc92df70bd59a535ccf66d70cd0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/238_add_dirsnap_rename.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7dfe5fa7c0d3f44e723d764cf28d28438255e6d92fb0351537b1b75aeb4cde37",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/320_completed_snaps.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "304300ac00c30ab4b188befe164c2d9a89bb9eb92f1ea1580d4866abdbb00c3a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/261_fix_bad_arrays.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10bcda38e7401292d7eb21bfbea276bbcdaa70279475b57ead65200878682562",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/397_parialconnect_bug.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dcb8da9aace9eaed868a579288728ec27c58fe43de3a0ac4b2f26c0f2c045749",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/239_safe_mode.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50e022d852e1bc93695d7980834f02c62fe584f160ceab7c6fde0a50909f215b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/113_add_exports_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5966655368d1bd6f13a19deb5ff00c2884e3015eea1fe054393e47c0a367343b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/284_volfact_for_recover.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cafe8f028e3c83d63d9dd1270249266c7fba52f22c193424e4feb5ae8c73c4d3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/228_nguid_to_volfact.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "286446286f7b817d37b3ecd99bf40347dc9ca9e76835dae68f5042088f0ccad0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/302_fix_pg_recover_and_target_update.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24bd01c41044777cec8a08dceab1e2adffefeef7454fa3856d9e90aac81986a2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/351_fix_rest_check.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9247247ec40002f6c1cbfa6cade93009a439529a61f88b7b3d6541f2cdf2f80",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/336_add_servicelist.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ace44d513f29d6bd520981a19df17b54f73263d675c984701f011023fbe56664",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/342_add_vol_promotion.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bcce181fe8efb221844d176b0afa0afceeec84a3fdb252c4b6e9b60d262d800",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/249_allow_cert_reimport.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1b80b8daae56e7b84f267a875b689f1ca8b1b69e9b061261a18121ebab54908",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/293_add_chassis_inventory.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4f7b2c60b0fdddb85d2c7ac76f6f71c09b0e1de21908d4945d88203e9895d30",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/227_missing_regex.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab1065f5ec052ecd0bc4d6c1416cac3f1a2783478fdcb8ca434747d099ba746a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/220_capacity_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2676555e4ab199c39f247eb8186470544e3f392bfea10b44bbdf27b59d963f9b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/202_add_sso.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2558b4d0697775d62ab7e12c6149fb1be85a2e56be9dafe561ee02aac2bf3920",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/188_add_dirsnap.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a198d79ad6e69906839c509eb8522a228d7863ac7ec186911a8c8b41ccd728e2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/134_ac_pg_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb72b8d852fda09db8bfcd0742081fecbabd6d68c97b0311a29a26765ed67307",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/246_python_precedence.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4901627b1f73f31126c2ff78bb5ddbcd40d8b5d8f6c76fde19eb39147764875",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/279_pg_safemode.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c3658161baa8290c76c8ae550c7b3e267df82d78c26f11a36fb63f5ad0e3551",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/271_vgroup_prio.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9aef1c5e381f895a7096f7f4a14d5004445a54c3e9048f9176b77615e3f98861",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/203_add_eradication_timer.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fabd383ba2343e7b5a9e4ac4818546ef8d991de7e93d14c90bd75f58d7f05a45",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/292_fix_ds_password.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c65285fc6514bcb540d9f89b7e7786e1e5c5c40dc835d459e333e87be2070b1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/135_no_cbs_ntp.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ce568cbe1255ecfcdb3e6e66146dcf52e8bcc5cfcc600b856958785b4c8a820",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/193_duplicate_initiators.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "040a717fd2d50545967a195dfefa3143d746d80b614349a18f6da93ac084398f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/247_fix_smb_policy_rules.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de2fd709d27d85d6b8e983f60b16a3aab0dd0f2bb02f44d7ccfcc99915ba3fee",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/348_add_default_prot.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "70909ac3544b7e941da1c84e9ba75ecd1e01898f0da9f5a58bef8f372222dbac",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/141_add_remote_snapshot.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "658ef54ac8bea8cb8a3737ace02ac93e301ac1195044ba8a474eab5ed9f68fe4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/396_pod_quota.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f34de9f8d9e99aa6da166beed42352e573c6ceb3cae457b78304a6f65fb9e59",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/315_spf_details.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86798075e98bf22253d023307d6cf1e8361b9a221d826c5f4a742d715a1058c9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/169_add_certs.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "355b59a19cff9aede4aab5de882b471171cda7853e50aec1089e196c7df16e28",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/124_sdk_handshake.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09dd30cee672d1bfcf0852933b8db73124d3465fe15be02d4b77cfe93df58c51",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/308_add_vm.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fde532f4cb23df09dba53e242f40d796ac82bbc0bb5e2208a3642288708cdd65",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/252_add_saml2.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca47ce440b28f64030239cdf8e3d0d246092e93b0552043bbea7c4c769ceab8f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/132_fc_replication.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31119ca85b86a8e72c64dd998590a231499f135595052c25a73df1f5b9a1965e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/354_fix_promotion.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7864f83fbfd6f9f8e5bc05b184b4fbd0a64c8c3f7c2819cabd1a0cb2cda5568",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/364_fc_targets.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa25ae0317807b32b3adf7fb3d2fb2ee28677408e8fb8fc5a5ec8ee47d217c6c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/122_add_multi_host_creation.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f330217bef8d7c34061545e4071b372538afb841b2b013e6934e6f872af35a58",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/208_add_directory_quota_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa55d46f1d02d324beff7238a2cf14255774b32bc4ff34921f682b5edc7b4061",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/370_add_user_role.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "713d2d7066e76f6967b51a67aa59e8dfdd666cd90b8538f7f00827fe3b7b2231",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/174_null_gateway.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bec4fcd5e2b2ec84d89caf3e6513843980489d6cc60c02fe8e01fbbebb658b31",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/393_offload_recover.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99d7fa900a916022865378fcc1f766e71bcf5a8f3b9a98b272eff891de04b481",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/149_volumes_demoted_pods_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8cf22b39b47e9256017c2a10f64e327db8e87d9f2aa4c75c081e3709fce3330e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/214_join_ou.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b14ab70f9bd3756c7aca4c28f7d0bf7c2e40815710275232deb7d90239108b57",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/182_allow_pgroup_with_create.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2d30f4f5efeb55578f94bd60575f7998104bedd70f9c3a4112de08918c6430a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/341_pg_400s.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f3e1a18a9d91068015a1874425c8516bd1c8b7dd82bf5f4c4db8af24ce010eb",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/v1.4.0_summary.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "876598a7a2135b855db8a38e69902920412966606847157fd97d8bb49fc479d4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/109_fa_files_support_purefa_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3523ae2ba986b989876bab0ff2182888a48c112cab18373b70e17528c330c3c5",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/133_purefa_info_v6_replication.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbb12d92a0c8c763b7e9057fe0d7e8bef92b644c016d6da016c7bda7494b6d53",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/375_fix_remote_hosts.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf689dc94a6aa93b1a5caefebd9bc03581fa83fa22ecdcc1f639c855dd1d3659",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/384_update_vol_facts.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8eb9673ceca70f3c28a1f90d394719d69bd2a0654c880a59e09fb10ea9b1bd3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/126_fix_volume_move.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0b47177157c3ab8b7f3676d9bf5f5d6dd6fecb2d7850b612067bc0dbaf457fe",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/337_fix_non-prod_versions.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9461a330780061d2f543cda7721e4818e8c7e2245439c1737a0f4ea8d095018f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/213_add_kmip.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a25a4a59e2fe39675f54af4b041187ff50f2a84419ecf0ad33597f9a870e347c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/201_increase_krb_count.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c9af9fb7823e936ee5da520bca07919e280832c868d8e808819967cc02b2776",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/199_add_fc_port_enable.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1eb08594ec67b6f68fdc33cbeba0929733a4d762ad1e5775651ce28aeb8be577",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/136_add_vol_get_send_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "901f750d427f4fdd282bdaac8ea955e84980af5ab61cb3537842552142fa7831",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/328_policy_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2241976b8065bcfaad829f6b6b682e932fe24f6750dd4467588fe2aff709a48",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/125_dns_idempotency.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27bba2cd35c66b07bbd99031704cce7e3234305d883af0e9841cb35dbefb14f0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/398_hgoup_alias.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e001322d964bedd03a888574a51c7b967a91a295b80e0f62bcca1426e58d716",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/365_pod_pgsched.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1935c18a1605977c8d6449336c30c7022829f7263ec53f61dc004e3f87f10c5a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/272_volume_prio.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5126118e5a4e014fe832b11a84fe2f0004496a45d3bcf47be4645f3fa85c11e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/229_snapsuffix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76c0cbc363953ab6384119e2b69f15be5ffb4f8b251966da6906cf397fb13c0a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/366_add_nvme_types.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e157e8e6dc8d2311fa1b7de576b4ac4a23875a76ab8faaab657b6b77b5c057f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/367_fix_vg.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0a9b281dad34acdf964d12a23ab6644168adf1104b1cf51c0af829fbebf9333",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/160_rename_pg.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "889571c492bfad0e49d4384c0ab89948ed4ef26b3838523d1e2d00b59c006b6e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/318_vol_defaults.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7542c7e94b9cd44f3f946c5a4518e0ddf23fc02ebfc48d032b483e5d6534b8e0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/107_host_case_clarity.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14cdfe46c920bce4daf2066105d43bd974d27b7398f0c6021a4c7409c53ecbe9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/296_ad_tls.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12284c1bee381d7b76410aad520b578b814182839662f5cd35ae917f720c89c7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/168_dsrole_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a952b14eb16b084e107dc1e809347680de1bbabae470751ce53c1fbe8c00f7b9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/347_dns_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83f875d82f6573107b0a3428870ea83f37a24c97b8fb0b60732530d2bcc4d09e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/163_add_maintenance_windows.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cae0c49297590fa895d7944d8ca2c9ed8ee936cfb2a7eb1e7480ddd8d363790d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/205_policy_protocl.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13fe566546b6523cbea2cdf17fd43b01886664a7e016f093b575f107cf65f400",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/230_add_pg_deleted_vols.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc3226ce03c0a602cc748c1e63b3a7a9bfa0b62dc2b455e28110f4a37588f40b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/111_add_filesystem_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e99a34a5a71e458de587f9741aadfb712c00f98ac28795c23929d97c32468550",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/175_check_pgname.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9a31f2e60103d8c62690888c0e05c6bbb7ae74faef2705783f6091996275009",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/108_fix_eradicate_idempotency.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56a1a21cbd2337826c0b74c2e0b8fdc7488726ee48fa1039e9621bc8035ae01b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/280_multihost_no_suffix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c31e72389decf9391a7ce5ea2504e6490b3afe0be780106152cb473ec3dd5d1b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/270_add_priority_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ce6bf60d3a1efd2f708490a654ec6c34e1617bb80f5114c170d683dee794f56",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/153_syslog_update.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee87138221741514acab7cee37c3f0c41772129b130a8100acdcea2ec732495f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/288_zero_params.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "630fa677c01443af17039493a324e335da583109dca296e1f5162b12e65c4014",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/176_fix_promote_api_issue.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1643ed3af96a2f4177225b32510025dad4fa8b2374279f80d960c1a8208bfa7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/187_add_ad.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bdf293cc4cf7f96dfa5e017da8988dd7dbcd5103f7d624c882870938ed92f78",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/305_fix_target_dempo.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f71d18b40505148907262f5f577ca2f50a881379a44ec5a95aa81bfb1b5b27f8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/194_vg_qos.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e9835e8e6d6dd32ec0af6c42f7d3c38fa496eb3afb3314f80c66fa189268558",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/344_fix_smtp.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb2f427e4bc4f43a9dcf4cfcb70387974e2239613d4363adb8d4d022a9b0cb6e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/330_extend_vlan.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14826f46f85309e3d52b4eeeecf757e36cb0ec71115db6aba41efdf8e8b9119d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/162_pgsnap_info_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4dbaadcb3b3f5f5cfcdac7c5a7a0434e5ee06ad88e245b22cb2857a13eea79d2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/170_pgsnap_stretch_pod_fail.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af14d8b057b9a6947e71bf6f5ccfc1a89e8cd926a2aeb2e21561e9859d074540",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/310_hg_vol_idempotency.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9aba6c636b1732e62a10fa765a6c3e7e1c5c25f4be439d1d603b5769356c4a02",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/234_add_vol_info_on_nochange.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4032b2d3fb21ea9f94f9ed246d926049d5d7ce8daf094fa22703ba73b1f26caf",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/131_add_v6_ds_update.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "688cedbf2d82b19b6d9e7ef39a9ef53e45c1371460f1b9d90dde1533275e6b23",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/343_fix_ds.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3fa3323ca7bf1ee194ad96413ad37d4dc1018d0b10dd2fff226fc233120737a6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/243_sso_to_admin.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d50baeb61cf616fa61f24d6df31a7d75e1f742cb831843548dad5863267310e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/139_pgsnap_ac_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e44ab022a764253dabc565481afd94e7a5a2cb0e37a638bfe76a8d0e59139bdf",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/237_fix_network.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f561b0d4d78d98e16c0b6dd42d3d1360fd2f0b8b57644a72578e9f294bf63b9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/304_host_vlan.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2453f6516a40120b2c06ad1672e860b14b9c0276b12ae9c97852194d07a2a1ef",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/277_add_fs_repl.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94698812175494446d89f92951b8bb0b8794eb1b8b1453aa85dbdf23e0b1522b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/118_rename_host.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5fad30e620947f3c5878545aa55f223370e4e769ec28002f662efdf9ffd1358a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/307_multiple_dns.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63b254118808a320dd69dbc885cdb3ffd20bf8a60c2fa636e5235b15bbb8fa7f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/360_fix_volume.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "214e7d076ebe88080ae3b674f9218f3fd82c3f624c12c47c1e0f5b25a25cedff",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/112_add_directory_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ea1e0a2ce1457141a4ce999d0538dce6943fd07f9d203dc46728fdd17121c77",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/137_pgsnap_regex.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48e8092e90544ebbc947d35dd0a49b09c2b73a4547705e9a1db4ffc0745550cd",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/294_dns_ntp_idempotency_absent.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3621e04c4076595ad61dbd05bbdc9810304336354c40938e82be177b89e5e029",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/388_remove_27.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79481f3b61a45b1d006a43850b16ca12b356b3157005dd866ae6b84c73279d55",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/257_fqcn.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dff369a116a9ba4014989743704667ccbafdf9dbb792bc1387642284c2df8a1b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/200_add_DAR_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d30ccf31e256808a9bfeef72c50100fed7fa0922b49c31e893f277890733dd1a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/268_fix_quotas_issues.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c59529e1bf783c3e450ce897d8285253d9e660598604e81981d2cb1417c5d728",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/312_pg_alias.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "048f69ebae940761460f18b2c0c5b84f789dd5fa127a0ff8d6d8aafa89d7f1b7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/235_eula.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f47001145eba84245d432bb58679d15acaf7065794bd486ce6f4b47d15ccc5f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/254_sam2_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83cf6a33a4429f2f17d2dc63f4b198e0a1cd7b2f4cb02f86f08a2deb5d5ccb66",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/121_add_multi_volume_creation.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d63a0ff3a88738493bf7f0afee3600f0b236b28f592694bd686d38a94bdd7d7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/130_info_ds_update.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "947189a4487b75926ef5cd535900916d0d2107159243e41b3677dc13adcbbc84",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/379_cap_compat.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c135bcff0cf73fd477bc81cce7903bc90d0ed27554b4b70f938673a10141d61",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/345_user_map.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1dfe18617cd617abc6ad45ca549ffce2fa88493506f3516deba1d9b6a4108a15",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/394_neighbors.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c60e10bb81cef310e5ac44a3faf04d192e2be272cd0e5b967dcf53a83148fd0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/383_network_idemp.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "891ed6fb13b7a9450d646736929c46d9fd657035b0a3a60858faac3c51a32cf9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/152_fix_user.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "855ffc4ffbd24286e556666da375a79f19a3ec7da829ffa46d8f1983335f96d2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/231_syslog_settings.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8554f5bb3da3ca0f967b61188806535a8d4161371ba5abcae56c3fbef98981d3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/207_fix_disable_for_remote_assist.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f3e2568e1937dcbd0ce31e3a87ce1da72af881c77995e55e8673ec487832696",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/242_multi_offload.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d92fd1123f4e28eb21c3a150a3d00b34a474896405a53fe5d7f7bcd8e8463a23",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/156_snap_suffix_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69187a1d2f2a0ceba0494eaeb883cb74ca8503f68020d29341a27e462a8dd6ea",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/381_change_booleans.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f04fd18a42e321cb3818a579e14cc50a6d27935196ff04632e2db44f7b807322",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/265_fix_multiple_nfs_rules.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92a7032c57aa30e06e3b7f3a8b14f1bc4a1dbd3c0f91e13ee862f9f91d894d30",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/115_add_gcp_offload.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "500050720f1fb56e313fcd73dc7a98b06abce63cdca08b37cf11a1f8d7d01a49",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/363_overwrite_combo.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b03125168fec5bdc6800055674a23646d2d1318f68666bf3647c9588a9934ff",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/278_pgsnap_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e067b4bfeb515242e2a2cf5018b4b3c45b6af2204f6cd13a8505e6c1b5c55445",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/161_offline_offload_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "954b6c0a6350782c3bc461de4e3813d05d2c441f20e906d73e74bc10ba2a5783",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/226_deprecate_protocol.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47123e52a081b333ae710eb005535c5fa84469d33a0d30ca7ec83e207d7d46c1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/110_add_apiclient_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a91901335be037584e59a521f7201e27431567170240b027ef9bb9f7220bf3d0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/206_add_naa_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45e9f15e85c52453f42909582a7c48be2af6c5291f470c6c11861ad6d924bfb3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/299_fix_pgsched_zero_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "794dcb8c905190621276322754f9a0c6766337e58a89e19734b8db5adc359688",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/123_add_multi_vgroup_creation.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1dee7ff90d9aa085f7c775640e522f54a17229548c57e965c8e8229f16a51ab5",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/196_fix_activedr_api_version.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da06077e7148e0a5647cbb4db66dbf57a87199b3ecd8961f7c070f0a223b49f6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/349_add_alerts.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0508cb660cfe0dab94258230a0378e2185e1bda6c0eb05ab362352215c91a800",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/294_user_map_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8303eade7f404a0454044ac907e369cf12ab8a715dae17873ef482f959b55ce",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/374_offload_pgsnap.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c57da1f0407cfd608f8dba3ab67f68abcb140d190cdf48072b7d629979492d7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/224_add_nguid_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "afee325ee45ede0ecadb3d8cfcfc8b11caeb91c257e6fc69b221e972ae80415f",
+ "format": 1
+ },
+ {
+ "name": "LICENSE",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "format": 1
+ },
+ {
+ "name": "COPYING.GPLv3",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_fs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc191939ce23b0ffb74d36022d868823f02eecdeb5b56e4702669a4ba3115747",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9c8bba3a64f111c817d53f8a83a373588fb54be24bdd34bd486151015c5df8d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_alert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1083cbcca2cc7a040f92bd85e8f13539349460ac6c8aa943bbbb7d245ff7cde2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f57e4149defcf7a109b05921be116d0fd0f109d9fe7e45ca0188679654977c7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_eradication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d590409734be78cadc78c08cf4cc5fa36c23b71208ed785cdd435e82c190673d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_saml.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30758391ec21522ff48ec934552999ef5a538747784554a84eb3cfbbeea9b020",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_vg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1466a88fec2fc2ce37f5906b38e7e9d93671f00e30ff7f7990abaa0cbc696c67",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_ntp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aca0a4d618f09f623df6064d06e33c76bd26569c3e940580c7f608c2f90e5453",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_offload.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c760847bf396a868a0b06b5ade07f19caaca54dbf1cdf7ed7ea93748e1562350",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_pg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e981db2b3ec21ae7a15078361642c74f511a335c0f7cbca9d4217cd25b96283",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_directory.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d18b300ba799994b7c608cd6315255b7336c82632616c5b565960cd52d5b19c0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_vnc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d53b0375a25ccd9894ab3ac39b7220a373220e149eaf303362e472f82d77ee92",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_pgsched.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1d6de2fe9a6f374ae371d2dc8702a7ad7228796421bc31d97b685bd5a192589",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_kmip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c9c462829b542b572d7127c31e004e4114fc10cf8bf99cb645b7455f998080b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_phonehome.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96f1d0e02842f0bc6c8f275b376ec3da02acaf2ff82967b3a16cfd418fb4fa00",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_admin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89a58a1b0c90b0eb840250eb710ab6e5fa769ad4b2ad0051de7e2d1b6c27e82f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_logging.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea585c8404999eb52ad4b8a89728f7cf578cb7fe0f1af3199bb86fdff5eb1e06",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_syslog_settings.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c065df17138b49ac114974aa45884aadb851db971f5c47b5e627aa46dfb6d778",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_subnet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88983155d06a3e6dd71f5b298c01eb65296bf95b8e0d59c0fef7d69fd70ac261",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_export.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7700653d2520ef7152ace19d13ba116b5c57c300d468bb30e54adecce0d78931",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_timeout.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6b7ead22ec616b084204ffdd2cc234410da1c76eeb04535fa4e073c699ba647",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_dirsnap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55c1a87a354f48ae3cdd27e1b61483176789a4623b8317f5b540bc02fe98be75",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6e7c15606f73a0f95a6f82a729020f397cdf49ffff31fb1ebf7d2287c08e395",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_endpoint.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8609f5e27871c167ce4bcc368b160b16bcfb1a42e2658dda003f49e5af1e692b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_ad.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3968634b5f92292c64ea18869880c7f8413e3a42c4a7fc9ab6a6515ab1de5664",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_console.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d78dd7b52fbe4576d59e70c1d6c6212727358df4bd4b42b8f5c38c67133f10cf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_default_protection.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "238d08203ef9b095069d1b4ab96fe4cddcd3485c49e3af0aea19f8c93f15d9dd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_pod.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b864c6938ecf935c3747329b69e41feef8cb1edcaf7cf317bdb1e9bb3dcc7097",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_pgsnap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e661145dabdf3380a512c6d50953a7e6291d54830f04bf962e6974330516512c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_token.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6455d9d708a74e221e29b60997836459ddde22f8f31cac745f6a348dd321b68",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_snmp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8eb07ce1ca80cba4e41c5b00de2e3c288197b773f30235326c24b74189b74151",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1409c4e1eb8eb3cb78013f3def7d96fc6922a530c0da658a370df74d8a87e8d7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_snmp_agent.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17f0434a486ed7094f00731ca23b50556a0ef9e59c40d373b3ae1983fd9b34ef",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_smtp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4eab45b866e8f0aea35287282cfb1fb2efdcf3e433a6842380e7fa0b62228c9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_apiclient.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "808b6e4d0510c8f5aca0f714785cbe38cc5dd4b3720e0f13c18395f72d7e3396",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_connect.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7eb7946c8b5b4fa077ddb1ea15b38080e04dc711ca04f9877ec51a73db062b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_maintenance.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "773f2559819b5d54f9e95aa4b29713d58c69ec0747b8ab9d194717f6032a3da1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_ds.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2469f765525af21529e903b6b082f520770f0d4fe14182a37542aec9b5203fcf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_dsrole.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f70148d126d365a5041de1cd4f51bde24c105024ca1c617621685b9d163e2ad7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_inventory.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "041b8bd3902a5b9c0736a4ab121af986588207c8ac8c15864531d6409be64c37",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_hg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f45c42a72542be609cc8c68b99b44c4947683050b113f4477881c70d9e00bad",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_vlan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "747134ba0527d5cf676606763fe7ebf1202c258d3324247419fc9f25c9b9fdf2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_volume_tags.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "470c400d52f4092667d8fbb58cf7961b978d9967524e6f0fbde852bfe6e20d9d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c1c34e351721c170b5a9e45c39cbdc6738311744b395e4058ba8346898b0b73",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_proxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "278ae5411bfdb29a2f0c684784884b749aca4de72dcc60734f59e1371514002e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35f602c303d1e241904287a39195257b54644114a1c9b565b5c8d58b7ad6a63a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dce27f771e4622e9e880365f28ff2720d1669229b712f0dd71723b6e5208e942",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_smis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67b94b93742f3aa99c5602f2ae7a22b74eeae6ceaa9655633fb5cd9f43a2a6b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_sso.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09c113c73e42683195efaeb9fe205409617330c751a1404c25ab2d942c8917d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_messages.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08fb06ea180440c13a828e36ced36f3c59d0475e3c0768d48dcbd82874deb66c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_syslog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d2140dfe303eeaf5e41dd869822baf88cddc5c97d1e21c9af3264e0ebf83662",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_ra.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14bbb8a463dbdd8da75a64df6b2fa24aa17366b3f738836345a9f06a0a239013",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_snap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d14cb9e4c31dac479497d42c2fa4d0f1519c6ad338fadb1c25ec0aa6caf4da4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_banner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "597776c8e075ce163f7c0dea404935db420e4ce2460bdb585c958011e846395e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_eula.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e1091138f4ba58dc36c4d229c3162d8cc4f06d1b1cd840805e31bd668c20963",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_certs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11c420d24ffe53d41f0ff8d3295a7d706b10c4641d2bd2c231a3fae3e5fe518f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_arrayname.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9eb4479369a95fe0f603e771c918ae77ed387f8b4602a2f0d9b070f04557ded",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_pod_replica.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3dd36f84ce48befeaddad54b3ed1226cb27f916dc6876137a5b3553323b2070",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/purestorage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf8dc8c5bef5ff629b260985f336da06abc4a129b26b1f978d14b6e1346eb393",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/purefa.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22661a8c1f12c0ff4ff416bd5df0a763dd23b33c4eb35c0145cdf68607e56346",
+ "format": 1
+ },
+ {
+ "name": "README.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f04e9c92d502e0e15adbbcf587392c385144d8b3a1fd4b124057be5de35f1b4",
+ "format": 1
+ },
+ {
+ "name": ".pylintrc",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bcdf66af9559f83da18c688dba3088f7212923b3174f6283de19860c716362e",
+ "format": 1
+ },
+ {
+ "name": ".git-blame-ignore-revs",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac6beea8907c4dec17a6c8ccf9cf7728865c8323a2fa0ef1c7e3e79a3c283433",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/purestorage/flasharray/LICENSE b/ansible_collections/purestorage/flasharray/LICENSE
new file mode 100644
index 000000000..f288702d2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/purestorage/flasharray/MANIFEST.json b/ansible_collections/purestorage/flasharray/MANIFEST.json
new file mode 100644
index 000000000..3a7fdd581
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/MANIFEST.json
@@ -0,0 +1,36 @@
+{
+ "collection_info": {
+ "namespace": "purestorage",
+ "name": "flasharray",
+ "version": "1.19.1",
+ "authors": [
+ "Pure Storage Ansible Team <pure-ansible-team@purestorage.com>"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "purestorage",
+ "flasharray",
+ "cloudblockstore",
+ "storage"
+ ],
+ "description": "Collection of modules to manage Pure Storage FlashArrays (including Cloud Block Store)",
+ "license": [
+ "GPL-3.0-or-later",
+ "BSD-2-Clause"
+ ],
+ "license_file": null,
+ "dependencies": {},
+ "repository": "https://github.com/Pure-Storage-Ansible/FlashArray-Collection",
+ "documentation": "https://github.com/Pure-Storage-Ansible/FlashArray-Collection",
+ "homepage": null,
+ "issues": "https://github.com/Pure-Storage-Ansible/FlashArray-Collection/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76a62745a3665d2ddc02ed6c76f491ad07853698b1c5b53b253084e41a21938a",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/purestorage/flasharray/README.md b/ansible_collections/purestorage/flasharray/README.md
new file mode 100644
index 000000000..43caf944a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/README.md
@@ -0,0 +1,114 @@
+<a href="https://github.com/Pure-Storage-Ansible/FlashArray-Collection/releases/latest"><img src="https://img.shields.io/github/v/tag/Pure-Storage-Ansible/FlashArray-Collection?label=release"></a>
+<a href="COPYING.GPLv3"><img src="https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg"></a>
+<img src="https://cla-assistant.io/readme/badge/Pure-Storage-Ansible/FlashArray-Collection">
+<img src="https://github.com/Pure-Storage-Ansible/FlashArray-Collection/workflows/Pure%20Storage%20Ansible%20CI/badge.svg">
+<a href="https://github.com/psf/black"><img src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
+
+# Pure Storage FlashArray Collection
+
+The Pure Storage FlashArray collection provides the latest versions of the FlashArray modules and also includes support for Cloud Block Store.
+
+## Supported Platforms
+
+- Pure Storage FlashArray with Purity 4.6 or later
+- Certain modules and functionality require higher versions of Purity; a module will inform you if your Purity version is not high enough to use it.
+
+## Prerequisites
+
+- Ansible 2.9 or later
+- Pure Storage FlashArray system running Purity 4.6 or later (some modules require higher Purity versions)
+- purestorage
+- py-pure-client
+- python >= 3.6
+- netaddr
+- requests
+- pycountry
+- packaging
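+
+The Python dependencies listed above are all available from PyPI, so a typical (environment-dependent) way to install them is:
+```
+pip install purestorage py-pure-client netaddr requests pycountry packaging
+```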
+
+## Idempotency
+
+All modules are idempotent, with the exception of modules that change or set passwords. Due to security requirements, existing passwords cannot be validated against and will therefore always be modified, even if there is no change.
+
+## Notes
+
+The Pure Storage Ansible modules force all host and volume names to use kebab-case. Any name supplied in camelCase or PascalCase will be lowercased to ensure consistency across all modules.
+
+## Available Modules
+
+- purefa_ad - manage FlashArray Active Directory accounts
+- purefa_admin - configure Pure Storage FlashArray Global Admin settings
+- purefa_alert - manage email alert settings on the FlashArray
+- purefa_apiclient - manage FlashArray API clients
+- purefa_arrayname - manage the name of the FlashArray
+- purefa_banner - manage the CLI and GUI login banner of the FlashArray
+- purefa_certs - manage FlashArray SSL certificates
+- purefa_connect - manage FlashArrays connecting for replication purposes
+- purefa_console - manage Console Lock setting for the FlashArray
+- purefa_default_protection - manage FlashArray default protections
+- purefa_directory - manage FlashArray managed file system directories
+- purefa_dirsnap - manage FlashArray managed file system directory snapshots
+- purefa_dns - manage the DNS settings of the FlashArray
+- purefa_ds - manage the Directory Services of the FlashArray
+- purefa_dsrole - manage the Directory Service Roles of the FlashArray
+- purefa_endpoint - manage VMware protocol-endpoints on the FlashArray
+- purefa_eradication - manage eradication timer for deleted items
+- purefa_eula - sign, or re-sign, the FlashArray EULA
+- purefa_export - manage FlashArray managed file system exports
+- purefa_fs - manage FlashArray managed file systems
+- purefa_hg - manage hostgroups on the FlashArray
+- purefa_host - manage hosts on the FlashArray
+- purefa_info - get information regarding the configuration of the FlashArray
+- purefa_inventory - get hardware inventory information from a FlashArray
+- purefa_logging - get audit and session logs from a FlashArray
+- purefa_maintenance - manage FlashArray maintenance windows
+- purefa_messages - list FlashArray alert messages
+- purefa_network - manage the physical and virtual network settings on the FlashArray
+- purefa_ntp - manage the NTP settings on the FlashArray
+- purefa_offload - manage the offload targets for a FlashArray
+- purefa_pg - manage protection groups on the FlashArray
+- purefa_pgsched - manage protection group snapshot and replication schedules on the FlashArray
+- purefa_pgsnap - manage protection group snapshots (local and remote) on the FlashArray
+- purefa_phonehome - manage the phonehome setting for the FlashArray
+- purefa_pod - manage ActiveCluster pods in FlashArrays
+- purefa_pod_replica - manage ActiveDR pod replica links in FlashArrays
+- purefa_policy - manage FlashArray NFS, SMB and snapshot policies
+- purefa_proxy - manage the phonehome HTTPS proxy setting for the FlashArray
+- purefa_ra - manage the Remote Assist setting for the FlashArray
+- purefa_saml - Manage FlashArray SAML2 service and identity providers
+- purefa_smis - manage SMI-S settings on the FlashArray
+- purefa_smtp - manage SMTP settings on the FlashArray
+- purefa_snap - manage local snapshots on the FlashArray
+- purefa_snmp - manage SNMP Manager settings on the FlashArray
+- purefa_snmp_agent - manage SNMP Agent settings on the FlashArray
+- purefa_sso - set Single Sign-On from Pure1 Manage state
+- purefa_subnet - manage network subnets on the FlashArray
+- purefa_syslog - manage the Syslog settings on the FlashArray
+- purefa_syslog_settings - manage the global syslog server settings on the FlashArray
+- purefa_timeout - manage the GUI idle timeout on the FlashArray
+- purefa_token - manage FlashArray user API tokens
+- purefa_user - manage local user accounts on the FlashArray
+- purefa_vg - manage volume groups on the FlashArray
+- purefa_vlan - manage VLAN interfaces on the FlashArray
+- purefa_vnc - manage VNC for installed applications on the FlashArray
+- purefa_volume - manage volumes on the FlashArray
+- purefa_volume_tags - manage volume tags on the FlashArray
+
+## Instructions
+
+Install the Pure Storage FlashArray collection on your Ansible management host.
+
+- Using ansible-galaxy (Ansible 2.9 or later):
+```
+ansible-galaxy collection install purestorage.flasharray -p ~/.ansible/collections
+```
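+
+Once installed, reference modules by their Fully Qualified Collection Name (FQCN). A minimal playbook sketch (the array URL and API token are placeholders):
+
+```
+- hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: Collect basic information from a FlashArray
+      purestorage.flasharray.purefa_info:
+        gather_subset:
+          - minimum
+        fa_url: 10.10.10.2
+        api_token: e31060a7-21fc-e277-6240-25983c6c4592
+      register: array_info
+
+    - name: Show collected information
+      ansible.builtin.debug:
+        var: array_info.purefa_info
+```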
+
+## License
+
+[BSD-2-Clause](https://directory.fsf.org/wiki?title=License:FreeBSD)
+[GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0.en.html)
+
+## Author
+
+This collection was created in 2019 by [Simon Dodsley](@sdodsley) for, and on behalf of, the [Pure Storage Ansible Team](pure-ansible-team@purestorage.com).
diff --git a/ansible_collections/purestorage/flasharray/README.rst b/ansible_collections/purestorage/flasharray/README.rst
new file mode 100644
index 000000000..49bbb8327
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/README.rst
@@ -0,0 +1,19 @@
+|License| |CLA-Assistant| |Code-style-black|
+
+|Build history for master branch|
+
+=====================
+FlashArray-Collection
+=====================
+
+Ansible Collection for Pure Storage FlashArray
+
+.. |License| image:: https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg
+ :target: COPYING.GPLv3
+ :alt: Repository License
+.. |CLA-Assistant| image:: https://cla-assistant.io/readme/badge/Pure-Storage-Ansible/FlashArray-Collection
+.. |Pure-Storage-Ansible-CI| image:: https://github.com/Pure-Storage-Ansible/FlashArray-Collection/workflows/Pure%20Storage%20Ansible%20CI/badge.svg
+.. |Code-style-black| image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+.. |Build history for master branch| image:: https://buildstats.info/github/chart/Pure-Storage-Ansible/FlashArray-Collection?branch=master&buildCount=50&includeBuildsFromPullRequest=false&showstats=false
+ :target: https://github.com/Pure-Storage-Ansible/FlashArray-Collection/actions?query=branch%3Amaster
diff --git a/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml b/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml
new file mode 100644
index 000000000..8719c5637
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml
@@ -0,0 +1,310 @@
+objects:
+ role: {}
+plugins:
+ become: {}
+ cache: {}
+ callback: {}
+ cliconf: {}
+ connection: {}
+ filter: {}
+ httpapi: {}
+ inventory: {}
+ lookup: {}
+ module:
+ purefa_ad:
+ description: Manage FlashArray Active Directory Account
+ name: purefa_ad
+ namespace: ''
+ version_added: 1.9.0
+ purefa_admin:
+ description: Configure Pure Storage FlashArray Global Admin settings
+ name: purefa_admin
+ namespace: ''
+ version_added: 1.12.0
+ purefa_alert:
+ description: Configure Pure Storage FlashArray alert email settings
+ name: purefa_alert
+ namespace: ''
+ version_added: 1.0.0
+ purefa_apiclient:
+ description: Manage FlashArray API Clients
+ name: purefa_apiclient
+ namespace: ''
+ version_added: 1.5.0
+ purefa_arrayname:
+ description: Configure Pure Storage FlashArray array name
+ name: purefa_arrayname
+ namespace: ''
+ version_added: 1.0.0
+ purefa_banner:
+ description: Configure Pure Storage FlashArray GUI and SSH MOTD message
+ name: purefa_banner
+ namespace: ''
+ version_added: 1.0.0
+ purefa_certs:
+ description: Manage FlashArray SSL Certificates
+ name: purefa_certs
+ namespace: ''
+ version_added: 1.8.0
+ purefa_connect:
+ description: Manage replication connections between two FlashArrays
+ name: purefa_connect
+ namespace: ''
+ version_added: 1.0.0
+ purefa_console:
+ description: Enable or Disable Pure Storage FlashArray Console Lock
+ name: purefa_console
+ namespace: ''
+ version_added: 1.0.0
+ purefa_default_protection:
+ description: Manage SafeMode default protection for a Pure Storage FlashArray
+ name: purefa_default_protection
+ namespace: ''
+ version_added: 1.14.0
+ purefa_directory:
+ description: Manage FlashArray File System Directories
+ name: purefa_directory
+ namespace: ''
+ version_added: 1.5.0
+ purefa_dirsnap:
+ description: Manage FlashArray File System Directory Snapshots
+ name: purefa_dirsnap
+ namespace: ''
+ version_added: 1.9.0
+ purefa_dns:
+ description: Configure FlashArray DNS settings
+ name: purefa_dns
+ namespace: ''
+ version_added: 1.0.0
+ purefa_ds:
+ description: Configure FlashArray Directory Service
+ name: purefa_ds
+ namespace: ''
+ version_added: 1.0.0
+ purefa_dsrole:
+ description: Configure FlashArray Directory Service Roles
+ name: purefa_dsrole
+ namespace: ''
+ version_added: 1.0.0
+ purefa_endpoint:
+ description: Manage VMware protocol-endpoints on Pure Storage FlashArrays
+ name: purefa_endpoint
+ namespace: ''
+ version_added: 1.0.0
+ purefa_eradication:
+ description: Configure Pure Storage FlashArray Eradication Timer
+ name: purefa_eradication
+ namespace: ''
+ version_added: 1.9.0
+ purefa_eula:
+ description: Sign Pure Storage FlashArray EULA
+ name: purefa_eula
+ namespace: ''
+ version_added: 1.0.0
+ purefa_export:
+ description: Manage FlashArray File System Exports
+ name: purefa_export
+ namespace: ''
+ version_added: 1.5.0
+ purefa_fs:
+ description: Manage FlashArray File Systems
+ name: purefa_fs
+ namespace: ''
+ version_added: 1.5.0
+ purefa_hg:
+ description: Manage hostgroups on Pure Storage FlashArrays
+ name: purefa_hg
+ namespace: ''
+ version_added: 1.0.0
+ purefa_host:
+ description: Manage hosts on Pure Storage FlashArrays
+ name: purefa_host
+ namespace: ''
+ version_added: 1.0.0
+ purefa_info:
+ description: Collect information from Pure Storage FlashArray
+ name: purefa_info
+ namespace: ''
+ version_added: 1.0.0
+ purefa_inventory:
+ description: Collect information from Pure Storage FlashArray
+ name: purefa_inventory
+ namespace: ''
+ version_added: 1.0.0
+ purefa_kmip:
+ description: Manage FlashArray KMIP server objects
+ name: purefa_kmip
+ namespace: ''
+ version_added: 1.10.0
+ purefa_logging:
+ description: Manage Pure Storage FlashArray Audit and Session logs
+ name: purefa_logging
+ namespace: ''
+ version_added: 1.19.0
+ purefa_maintenance:
+ description: Configure Pure Storage FlashArray Maintenance Windows
+ name: purefa_maintenance
+ namespace: ''
+ version_added: 1.7.0
+ purefa_messages:
+ description: List FlashArray Alert Messages
+ name: purefa_messages
+ namespace: ''
+ version_added: 1.14.0
+ purefa_network:
+ description: Manage network interfaces in a Pure Storage FlashArray
+ name: purefa_network
+ namespace: ''
+ version_added: 1.0.0
+ purefa_ntp:
+ description: Configure Pure Storage FlashArray NTP settings
+ name: purefa_ntp
+ namespace: ''
+ version_added: 1.0.0
+ purefa_offload:
+ description: Create, modify and delete NFS, S3 or Azure offload targets
+ name: purefa_offload
+ namespace: ''
+ version_added: 1.0.0
+ purefa_pg:
+ description: Manage protection groups on Pure Storage FlashArrays
+ name: purefa_pg
+ namespace: ''
+ version_added: 1.0.0
+ purefa_pgsched:
+ description: Manage protection groups replication schedules on Pure Storage
+ FlashArrays
+ name: purefa_pgsched
+ namespace: ''
+ version_added: 1.0.0
+ purefa_pgsnap:
+ description: Manage protection group snapshots on Pure Storage FlashArrays
+ name: purefa_pgsnap
+ namespace: ''
+ version_added: 1.0.0
+ purefa_phonehome:
+ description: Enable or Disable Pure Storage FlashArray Phonehome
+ name: purefa_phonehome
+ namespace: ''
+ version_added: 1.0.0
+ purefa_pod:
+ description: Manage AC pods in Pure Storage FlashArrays
+ name: purefa_pod
+ namespace: ''
+ version_added: 1.0.0
+ purefa_pod_replica:
+ description: Manage ActiveDR pod replica links between Pure Storage FlashArrays
+ name: purefa_pod_replica
+ namespace: ''
+ version_added: 1.0.0
+ purefa_policy:
+ description: Manage FlashArray File System Policies
+ name: purefa_policy
+ namespace: ''
+ version_added: 1.5.0
+ purefa_proxy:
+ description: Configure FlashArray phonehome HTTPS proxy settings
+ name: purefa_proxy
+ namespace: ''
+ version_added: 1.0.0
+ purefa_ra:
+ description: Enable or Disable Pure Storage FlashArray Remote Assist
+ name: purefa_ra
+ namespace: ''
+ version_added: 1.0.0
+ purefa_saml:
+ description: Manage FlashArray SAML2 service and identity providers
+ name: purefa_saml
+ namespace: ''
+ version_added: 1.12.0
+ purefa_smis:
+ description: Enable or disable FlashArray SMI-S features
+ name: purefa_smis
+ namespace: ''
+ version_added: 1.0.0
+ purefa_smtp:
+ description: Configure FlashArray SMTP settings
+ name: purefa_smtp
+ namespace: ''
+ version_added: 1.0.0
+ purefa_snap:
+ description: Manage volume snapshots on Pure Storage FlashArrays
+ name: purefa_snap
+ namespace: ''
+ version_added: 1.0.0
+ purefa_snmp:
+ description: Configure FlashArray SNMP Managers
+ name: purefa_snmp
+ namespace: ''
+ version_added: 1.0.0
+ purefa_snmp_agent:
+ description: Configure the FlashArray SNMP Agent
+ name: purefa_snmp_agent
+ namespace: ''
+ version_added: 1.16.0
+ purefa_sso:
+ description: Configure Pure Storage FlashArray Single Sign-On
+ name: purefa_sso
+ namespace: ''
+ version_added: 1.9.0
+ purefa_subnet:
+ description: Manage network subnets in a Pure Storage FlashArray
+ name: purefa_subnet
+ namespace: ''
+ version_added: 1.0.0
+ purefa_syslog:
+ description: Configure Pure Storage FlashArray syslog settings
+ name: purefa_syslog
+ namespace: ''
+ version_added: 1.0.0
+ purefa_syslog_settings:
+ description: Manage FlashArray syslog servers settings
+ name: purefa_syslog_settings
+ namespace: ''
+ version_added: 1.10.0
+ purefa_timeout:
+ description: Configure Pure Storage FlashArray GUI idle timeout
+ name: purefa_timeout
+ namespace: ''
+ version_added: 1.0.0
+ purefa_token:
+ description: Create or delete an API token for an existing admin user
+ name: purefa_token
+ namespace: ''
+ version_added: 1.0.0
+ purefa_user:
+ description: Create, modify or delete FlashArray local user account
+ name: purefa_user
+ namespace: ''
+ version_added: 1.0.0
+ purefa_vg:
+ description: Manage volume groups on Pure Storage FlashArrays
+ name: purefa_vg
+ namespace: ''
+ version_added: 1.0.0
+ purefa_vlan:
+ description: Manage network VLAN interfaces in a Pure Storage FlashArray
+ name: purefa_vlan
+ namespace: ''
+ version_added: 1.0.0
+ purefa_vnc:
+ description: Enable or Disable VNC port for installed apps
+ name: purefa_vnc
+ namespace: ''
+ version_added: 1.0.0
+ purefa_volume:
+ description: Manage volumes on Pure Storage FlashArrays
+ name: purefa_volume
+ namespace: ''
+ version_added: 1.0.0
+ purefa_volume_tags:
+ description: Manage volume tags on Pure Storage FlashArrays
+ name: purefa_volume_tags
+ namespace: ''
+ version_added: 1.0.0
+ netconf: {}
+ shell: {}
+ strategy: {}
+ test: {}
+ vars: {}
+version: 1.19.1
diff --git a/ansible_collections/purestorage/flasharray/changelogs/210_add_rename_hgroup.yaml b/ansible_collections/purestorage/flasharray/changelogs/210_add_rename_hgroup.yaml
new file mode 100644
index 000000000..78d1d91f9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/210_add_rename_hgroup.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_hg - Add support to rename existing hostgroup
diff --git a/ansible_collections/purestorage/flasharray/changelogs/211_fix_clearing_host_inititators.yaml b/ansible_collections/purestorage/flasharray/changelogs/211_fix_clearing_host_inititators.yaml
new file mode 100644
index 000000000..1425d12b0
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/211_fix_clearing_host_inititators.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_host - Allows all current host initiators to be correctly removed
diff --git a/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml b/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml
new file mode 100644
index 000000000..ba3ad5ba1
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml
@@ -0,0 +1,610 @@
+ancestor: null
+releases:
+ 1.10.0:
+ changes:
+ minor_changes:
+ - purefa_ds - Add ``join_ou`` parameter for AD account creation
+ - purefa_kmip - Add support for KMIP server management
+ fragments:
+ - 213_add_kmip.yaml
+ - 214_join_ou.yaml
+ modules:
+ - description: Manage FlashArray KMIP server objects
+ name: purefa_kmip
+ namespace: ''
+ release_date: '2021-08-04'
+ 1.11.0:
+ changes:
+ bugfixes:
+ - purefa_subnet - Add regex to check for correct subnet name
+ - purefa_user - Add regex to check for correct username
+ minor_changes:
+ - purefa_host - Deprecate ``protocol`` parameter. No longer required.
+ - purefa_info - Add NVMe NGUID value for volumes
+ - purefa_info - Add array, volume and snapshot detailed capacity information
+ - purefa_info - Add deleted members to volume protection group info
+ - purefa_info - Add snapshot policy rules suffix support
+ - purefa_info - Remove directory_services field. Deprecated in Collections 1.6
+ - purefa_policy - Add snapshot policy rules suffix support
+ - purefa_syslog_settings - Add support to manage global syslog server settings
+ - purefa_volume - Add NVMe NGUID to response dict
+ fragments:
+ - 220_capacity_info.yaml
+ - 224_add_nguid_info.yaml
+ - 226_deprecate_protocol.yaml
+ - 227_missing_regex.yaml
+ - 228_nguid_to_volfact.yaml
+ - 229_snapsuffix.yaml
+ - 230_add_pg_deleted_vols.yaml
+ - 231_syslog_settings.yaml
+ release_date: '2021-09-27'
+ 1.12.0:
+ changes:
+ bugfixes:
+ - purefa_certs - Allow a certificate to be imported over an existing SSL certificate
+ - purefa_eula - Resolve EULA signing issue
+ - purefa_network - Fix bug introduced with management of FC ports
+ - purefa_policy - Fix issue with SMB Policy creation
+ deprecated_features:
+ - purefa_sso - Deprecated in favor of M(purefa_admin). Will be removed in Collection
+ 2.0
+ known_issues:
+ - purefa_admin - Once `max_login` and `lockout` have been set there is currently
+ no way to reset these to zero except through the FlashArray GUI
+ minor_changes:
+ - purefa_admin - New module to set global admin settings, including SSO
+ - purefa_dirsnap - Add support to rename directory snapshots not managed by
+ a snapshot policy
+ - purefa_info - Add SAML2SSO configuration information
+ - purefa_info - Add Safe Mode status
+ - purefa_info - Fix Active Directory configuration details
+ - purefa_network - Resolve bug stopping management IP address being changed
+ correctly
+ - purefa_offload - Add support for multiple, homogeneous, offload targets
+ - purefa_saml - Add support for SAML2 SSO IdPs
+ - purefa_volume - Provide volume facts in all cases, including when no change
+ has occurred.
+ fragments:
+ - 234_add_vol_info_on_nochange.yaml
+ - 235_eula.yaml
+ - 237_fix_network.yaml
+ - 238_add_dirsnap_rename.yaml
+ - 239_safe_mode.yaml
+ - 242_multi_offload.yaml
+ - 243_sso_to_admin.yaml
+ - 246_python_precedence.yaml
+ - 247_fix_smb_policy_rules.yaml
+ - 249_allow_cert_reimport.yaml
+ - 252_add_saml2.yaml
+ - 254_sam2_info.yaml
+ modules:
+ - description: Configure Pure Storage FlashArray Global Admin settings
+ name: purefa_admin
+ namespace: ''
+ - description: Manage FlashArray SAML2 service and identity providers
+ name: purefa_saml
+ namespace: ''
+ release_date: '2022-01-05'
+ 1.12.1:
+ changes:
+ bugfixes:
+ - purefa_info - Fix space reporting issue
+ - purefa_subnet - Fix subnet update checks when no gateway in existing subnet
+ configuration
+ minor_changes:
+ - All modules - Change examples to use FQCN for module
+ fragments:
+ - 257_fqcn.yaml
+ - 259_fix_gateway_check.yaml
+ release_date: '2022-01-21'
+ 1.13.0:
+ changes:
+ bugfixes:
+ - purefa_host - Allow multi-host creation without requiring a suffix string
+ - purefa_info - Fix issue where remote arrays are not in a valid connected state
+ - purefa_policy - Fix idempotency issue with quota policy rules
+ - purefa_policy - Fix issue when creating multiple rules in an NFS policy
+ minor_changes:
+ - purefa_fs - Add support for replicated file systems
+ - purefa_info - Add QoS information for volume groups
+ - purefa_info - Add info for protection group safe mode setting (Requires Purity//FA
+ 6.3.0 or higher)
+ - purefa_info - Add info for protection group snapshots
+ - purefa_info - Add priority adjustment information for volumes and volume groups
+ - purefa_info - Split volume groups into live and deleted dicts
+ - purefa_pg - Add support for protection group SafeMode. Requires Purity//FA
+ 6.3.0 or higher
+ - purefa_policy - Allow directories in snapshot policies to be managed
+ - purefa_vg - Add DMM Priority Adjustment support
+ - purefa_volume - Add support for DMM Priority Adjustment
+ - purefa_volume - Provide volume facts for volume after recovery
+ fragments:
+ - 261_fix_bad_arrays.yaml
+ - 265_fix_multiple_nfs_rules.yaml
+ - 268_fix_quotas_issues.yaml
+ - 270_add_priority_info.yaml
+ - 271_vgroup_prio.yaml
+ - 272_volume_prio.yaml
+ - 277_add_fs_repl.yaml
+ - 278_pgsnap_info.yaml
+ - 279_pg_safemode.yaml
+ - 280_multihost_no_suffix.yaml
+ - 284_volfact_for_recover.yaml
+ release_date: '2022-05-02'
+ 1.14.0:
+ changes:
+ bugfixes:
+ - purefa_dns - Corrects logic where API responds with an empty list rather than
+ a list with a single empty string in it.
+ - purefa_ds - Add new parameter `force_bind_password` (default = True) to allow
+ idempotency for module
+ - purefa_hg - Ensure volume disconnection from a hostgroup is idempotent
+ - purefa_ntp - Corrects workflow so that the desired and current states are
+ compared before marking the changed flag true during an absent run
+ - purefa_pg - Correct issue where the target for a protection group is not correctly
+ amended
+ - purefa_pg - Ensure deleted protection group can be correctly recovered
+ - purefa_pg - Fix idempotency issue for protection group targets
+ - purefa_pgsched - Allow zero as a valid value for appropriate schedule parameters
+ - purefa_pgsched - Fix issue where 0 was not correctly handled for replication
+ schedule
+ - purefa_pgsnap - Resolved intermittent error where `latest` snapshot is not
+ complete and can fail. Only select latest completed snapshot to restore from.
+ minor_changes:
+ - purefa_ad - Add support for TLS and joining existing AD account
+ - purefa_dns - Support multiple DNS configurations from Purity//FA 6.3.3
+ - purefa_info - Add NFS policy user mapping status
+ - purefa_info - Add support for Virtual Machines and Snapshots
+ - purefa_info - Ensure global admin lockout duration is measured in seconds
+ - purefa_info - Support multiple DNS configurations
+ - purefa_inventory - Add REST 2.x support and SFP details for Purity//FA 6.3.4
+ and higher
+ - purefa_inventory - Change response dict name to `purefa_inv` so it doesn't clash
+ with the info module response dict
+ - purefa_inventory - add chassis information to inventory
+ - purefa_pg - Changed parameter `pgroup` to `name`. Allow `pgroup` as alias
+ for backwards compatibility.
+ - purefa_policy - Add ``all_squash``, ``anonuid`` and ``anongid`` to NFS client
+ rules options
+ - purefa_policy - Add support for NFS policy user mapping
+ - purefa_volume - Default Protection Group support added for volume creation
+ and copying from Purity//FA 6.3.4
+ fragments:
+ - 288_zero_params.yaml
+ - 292_fix_ds_password.yaml
+ - 293_add_chassis_inventory.yaml
+ - 294_dns_ntp_idempotency_absent.yaml
+ - 294_user_map_support.yaml
+ - 296_ad_tls.yaml
+ - 299_fix_pgsched_zero_support.yaml
+ - 302_fix_pg_recover_and_target_update.yaml
+ - 305_fix_target_dempo.yaml
+ - 307_multiple_dns.yaml
+ - 308_add_vm.yaml
+ - 310_hg_vol_idempotency.yaml
+ - 312_pg_alias.yaml
+ - 315_spf_details.yaml
+ - 317_add_all_squash.yaml
+ - 318_vol_defaults.yaml
+ - 319_lockout.yaml
+ - 320_completed_snaps.yaml
+ modules:
+ - description: Manage SafeMode default protection for a Pure Storage FlashArray
+ name: purefa_default_protection
+ namespace: ''
+ - description: List FlashArray Alert Messages
+ name: purefa_messages
+ namespace: ''
+ release_date: '2022-09-16'
+ 1.15.0:
+ changes:
+ bugfixes:
+ - purefa.py - Fix issue with Purity version numbers that are for development
+ versions
+ - purefa_policy - Fixed missing parameters in function calls
+ - purefa_vg - Fix typeerror when using newer Purity versions and setting VG
+ QoS
+ minor_changes:
+ - purefa_network - Added support for servicelist updates
+ - purefa_vlan - Extend VLAN support to cover NVMe-RoCE and file interfaces
+ fragments:
+ - 328_policy_fix.yaml
+ - 330_extend_vlan.yaml
+ - 334_fix_vg_qos.yaml
+ - 336_add_servicelist.yaml
+ - 337_fix_non-prod_versions.yml
+ release_date: '2022-11-08'
+ 1.16.0:
+ changes:
+ bugfixes:
+ - purefa - Remove unneeded REST version check as causes issues with REST mismatches
+ - purefa_ds - Fixed dict syntax error
+ - purefa_info - Fixed issue with DNS reporting in Purity//FA 6.4.0 with non-FA-File
+ system
+ - purefa_info - Fixed error in policies subsection due to API issue
+ - purefa_info - Fixed race condition with protection groups
+ - purefa_smtp - Fix parameter name
+ minor_changes:
+ - purefa_host - Add support for VLAN ID tagging for a host (Requires Purity//FA
+ 6.3.5)
+ - purefa_info - Add new subset alerts
+ - purefa_info - Added default protection information to `config` section
+ - purefa_volume - Added support for volume promotion/demotion
+ fragments:
+ - 304_host_vlan.yaml
+ - 341_pg_400s.yaml
+ - 342_add_vol_promotion.yaml
+ - 343_fix_ds.yaml
+ - 344_fix_smtp.yaml
+ - 345_user_map.yaml
+ - 347_dns_fix.yaml
+ - 348_add_default_prot.yaml
+ - 349_add_alerts.yaml
+ - 351_fix_rest_check.yaml
+ modules:
+ - description: Configure the FlashArray SNMP Agent
+ name: purefa_snmp_agent
+ namespace: ''
+ release_date: '2023-01-06'
+ 1.16.1:
+ changes:
+ bugfixes:
+ - purefa_volume - Fixed issue with promotion status not being called correctly
+ fragments:
+ - 354_fix_promotion.yaml
+ release_date: '2023-01-10'
+ 1.16.2:
+ release_date: '2023-01-11'
+ 1.17.0:
+ changes:
+ bugfixes:
+ - purefa_host - Fixed parameter name
+ - purefa_info - Fix missing FC target ports for host
+ - purefa_pgsched - Fix error when setting schedule for pod based protection
+ group
+ - purefa_vg - Fix issue with VG creation on newer Purity versions
+ - purefa_volume - Ensure promotion_status is returned correctly on creation
+ - purefa_volume - Fix bug when overwriting volume using invalid parameters
+ - purefa_volume - Fixed idempotency bug when creating volumes with QoS
+ minor_changes:
+ - purefa_network - Added support for NVMe-RoCE and NVMe-TCP service types
+ - purefa_user - Added Ops Admin role to choices
+ - purefa_vlan - Added support for NVMe-TCP service type
+ fragments:
+ - 360_fix_volume.yaml
+ - 363_overwrite_combo.yaml
+ - 364_fc_targets.yaml
+ - 365_pod_pgsched.yaml
+ - 366_add_nvme_types.yaml
+ - 367_fix_vg.yaml
+ - 369_fix_host.yaml
+ - 370_add_user_role.yaml
+ release_date: '2023-02-16'
+ 1.17.1:
+ changes:
+ bugfixes:
+ - purefa_info - Fix REST response backwards compatibility issue for array capacity
+ REST response
+ - purefa_info - Resolves issue in AC environment where REST v2 host list mismatches
+ REST v1 due to remote hosts.
+ - purefa_info - Resolves issue with destroyed pgroup snapshot on an offload
+ target not having a time remaining value
+ - purefa_pg - Resolves issue with destroyed pgroup snapshot on an offload target
+ not having a time remaining value
+ fragments:
+ - 374_offload_pgsnap.yaml
+ - 375_fix_remote_hosts.yaml
+ - 379_cap_compat.yaml
+ release_date: '2023-03-02'
+ 1.17.2:
+ release_date: '2023-03-02'
+ 1.18.0:
+ changes:
+ bugfixes:
+ - purefa_network - Resolves network port setting idempotency issue
+ - purefa_pg - Fixed issue where volumes could not be added to a PG when one
+ of the arrays was undergoing a failover.
+ - purefa_snap - Fixed issue where system-generated suffixes were not being allowed,
+ and removed an unnecessary warning message.
+ minor_changes:
+ - purefa_hg - Changed parameter hostgroup to name for consistency. Added hostgroup
+ as an alias for backwards compatibility.
+ - purefa_hg - Exit gracefully, rather than failing when a specified volume does
+ not exist
+ - purefa_host - Exit gracefully, rather than failing when a specified volume
+ does not exist
+ - purefa_info - Added network neighbors info to `network` subset
+ - purefa_pod - Added support for pod quotas (from REST 2.23)
+ - purefa_snap - New response of 'suffix' when snapshot has been created.
+ - purefa_volume - Added additional volume facts for volume update, or for no
+ change
+ release_summary: '| FlashArray Collection v1.18 removes module-side support
+ for Python 2.7.
+
+ | The minimum required Python version for the FlashArray Collection is Python
+ 3.6.
+
+ '
+ fragments:
+ - 381_change_booleans.yaml
+ - 383_network_idemp.yaml
+ - 384_update_vol_facts.yaml
+ - 387_no_volume_failure.yaml
+ - 388_remove_27.yaml
+ - 393_offload_recover.yaml
+ - 394_neighbors.yaml
+ - 396_pod_quota.yaml
+ - 397_parialconnect_bug.yaml
+ - 398_hgoup_alias.yaml
+ release_date: '2023-05-12'
+ 1.19.0:
+ modules:
+ - description: Manage Pure Storage FlashArray Audit and Session logs
+ name: purefa_logging
+ namespace: ''
+ release_date: '2023-05-19'
+ 1.19.1:
+ changes:
+ bugfixes:
+ - purefa_info - Fixed missing arguments for google_offload and pods
+ release_date: '2023-05-19'
+ 1.4.0:
+ changes:
+ bugfixes:
+ - purefa_host - resolve hostname case inconsistencies
+ - purefa_host - resolve issue found when using Pure Storage Test Drive
+ major_changes:
+ - purefa_console - manage Console Lock setting for the FlashArray
+ - purefa_endpoint - manage VMware protocol-endpoints on the FlashArray
+ - purefa_eula - sign, or resign, FlashArray EULA
+ - purefa_inventory - get hardware inventory information from a FlashArray
+ - purefa_network - manage the physical and virtual network settings on the FlashArray
+ - purefa_pgsched - manage protection group snapshot and replication schedules
+ on the FlashArray
+ - purefa_pod - manage ActiveCluster pods in FlashArrays
+ - purefa_pod_replica - manage ActiveDR pod replica links in FlashArrays
+ - purefa_proxy - manage the phonehome HTTPS proxy setting for the FlashArray
+ - purefa_smis - manage SMI-S settings on the FlashArray
+ - purefa_subnet - manage network subnets on the FlashArray
+ - purefa_timeout - manage the GUI idle timeout on the FlashArray
+ - purefa_vlan - manage VLAN interfaces on the FlashArray
+ - purefa_vnc - manage VNC for installed applications on the FlashArray
+ - purefa_volume_tags - manage volume tags on the FlashArray
+ minor_changes:
+ - purefa_hg - Allow LUN ID to be set for single volume
+ - purefa_host - Add CHAP support
+ - purefa_host - Add support for Cloud Block Store
+ - purefa_host - Add volume disconnection support
+ - purefa_info - Certificate times changed to human readable rather than time
+ since epoch
+ - purefa_info - new options added for information collection
+ - purefa_info - return dict names changed from ``ansible_facts`` to ``ra_info``
+ and ``user_info`` in appropriate sections
+ - purefa_offload - Add support for Azure
+ - purefa_pgsnap - Add offload support
+ - purefa_snap - Allow recovery of deleted snapshot
+ - purefa_vg - Add QoS support
+ release_summary: '| Release Date: 2020-08-08
+
+ | This changelog describes all changes made to the modules and plugins included
+ in this collection since Ansible 2.9.0
+
+ '
+ fragments:
+ - v1.4.0_summary.yaml
+ release_date: '2020-08-06'
+ 1.5.0:
+ changes:
+ bugfixes:
+ - purefa_host - Correctly remove host that is in a hostgroup
+ - purefa_volume - Fix failing idempotency on eradicate volume
+ minor_changes:
+ - purefa_apiclient - New module to support API Client management
+ - purefa_directory - Add support for managed directories
+ - purefa_export - Add support for filesystem exports
+ - purefa_fs - Add filesystem management support
+ - purefa_hg - Enforce case-sensitivity rules for hostgroup objects
+ - purefa_host - Enforce hostname case-sensitivity rules
+ - purefa_info - Add support for FA Files features
+ - purefa_offload - Add support for Google Cloud offload target
+ - purefa_pg - Enforce case-sensitivity rules for protection group objects
+ - purefa_policy - Add support for NFS, SMB and Snapshot policy management
+ fragments:
+ - 107_host_case_clarity.yaml
+ - 108_fix_eradicate_idempotency.yaml
+ - 109_fa_files_support_purefa_info.yaml
+ - 110_add_apiclient_support.yaml
+ - 111_add_filesystem_support.yaml
+ - 112_add_directory_support.yaml
+ - 113_add_exports_support.yaml
+ - 115_add_gcp_offload.yaml
+ - 116_add_policies.yaml
+ modules:
+ - description: Manage FlashArray API Clients
+ name: purefa_apiclient
+ namespace: ''
+ - description: Manage FlashArray File System Directories
+ name: purefa_directory
+ namespace: ''
+ - description: Manage FlashArray File System Exports
+ name: purefa_export
+ namespace: ''
+ - description: Manage FlashArray File Systems
+ name: purefa_fs
+ namespace: ''
+ - description: Manage FlashArray File System Policies
+ name: purefa_policy
+ namespace: ''
+ release_date: '2020-10-14'
+ 1.5.1:
+ changes:
+ bugfixes:
+ - purefa.py - Resolve issue when pypureclient doesn't handshake array correctly
+ - purefa_dns - Fix idempotency
+ - purefa_volume - Alert when volume selected for move does not exist
+ minor_changes:
+ - purefa_host - Add host rename function
+ - purefa_host - Add support for multi-host creation
+ - purefa_vg - Add support for multiple vgroup creation
+ - purefa_volume - Add support for multi-volume creation
+ fragments:
+ - 118_rename_host.yaml
+ - 121_add_multi_volume_creation.yaml
+ - 122_add_multi_host_creation.yaml
+ - 123_add_multi_vgroup_creation.yaml
+ - 124_sdk_handshake.yaml
+ - 125_dns_idempotency.yaml
+ - 126_fix_volume_move.yaml
+ release_date: '2020-12-11'
+ 1.6.0:
+ changes:
+ bugfixes:
+ - purefa_hg - Ensure all hostname checks are lowercase for consistency
+ - purefa_pgsnap - Add check to ensure suffix name meets naming conventions
+ - purefa_pgsnap - Ensure pgsnap restores work for AC PGs
+ - purefa_pod - Ensure all pod names are lowercase for consistency
+ - purefa_snap - Update suffix regex pattern
+ - purefa_volume - Add missing variable initialization
+ minor_changes:
+ - purefa_connect - Add support for FC-based array replication
+ - purefa_ds - Add Purity v6 support for Directory Services, including Data DS
+ and updating services
+ - purefa_info - Add support for FC Replication
+ - purefa_info - Add support for Remote Volume Snapshots
+ - purefa_info - Update directory_services dictionary to cater for FA-Files data
+ DS. Change DS dict forward. Add deprecation warning.
+ - purefa_ntp - Ignore NTP configuration for CBS-based arrays
+ - purefa_pg - Add support for Protection Groups in AC pods
+ - purefa_snap - Add support for remote snapshot of individual volumes to offload
+ targets
+ fragments:
+ - 130_info_ds_update.yaml
+ - 131_add_v6_ds_update.yaml
+ - 132_fc_replication.yaml
+ - 133_purefa_info_v6_replication.yaml
+ - 134_ac_pg_support.yaml
+ - 135_no_cbs_ntp.yaml
+ - 136_add_vol_get_send_info.yaml
+ - 137_pgsnap_regex.yaml
+ - 139_pgsnap_ac_support.yaml
+ - 140_pod_case.yaml
+ - 141_add_remote_snapshot.yaml
+ - 145_fix_missing_move_variable.yaml
+ release_date: '2021-02-02'
+ 1.6.2:
+ changes:
+ bugfixes:
+ - purefa_volume - Fix issues with moving volumes into demoted or linked pods
+ fragments:
+ - 149_volumes_demoted_pods_fix.yaml
+ release_date: '2021-02-04'
+ 1.7.0:
+ changes:
+ bugfixes:
+ - purefa_info - Fix missing protection group snapshot info for local snapshots
+ - purefa_info - Resolve crash when an offload target is offline
+ - purefa_pgsnap - Ensure suffix rules only implemented for state=present
+ - purefa_user - Do not allow role to be changed for the breakglass user (pureuser)
+ - purefa_user - Do not change role for existing user unless requested
+ minor_changes:
+ - purefa_maintenance - New module to set maintenance windows
+ - purefa_pg - Add support to rename protection groups
+ - purefa_syslog - Add support for naming SYSLOG servers for Purity//FA 6.1 or
+ higher
+ fragments:
+ - 152_fix_user.yaml
+ - 153_syslog_update.yaml
+ - 156_snap_suffix_fix.yaml
+ - 160_rename_pg.yaml
+ - 161_offline_offload_fix.yaml
+ - 162_pgsnap_info_fix.yaml
+ - 163_add_maintenance_windows.yaml
+ modules:
+ - description: Configure Pure Storage FlashArray Maintenance Windows
+ name: purefa_maintenance
+ namespace: ''
+ release_date: '2021-03-30'
+ 1.8.0:
+ changes:
+ bugfixes:
+ - purefa_dsrole - If using None for group or group_base an incorrect change state
+ is applied
+ - purefa_network - Allow gateway parameter to be set as None - needed for non-routing
+ iSCSI ports
+ - purefa_pg - Check to ensure protection group name meets naming convention
+ - purefa_pgsnap - Fail with warning if trying to restore to a stretched ActiveCluster
+ pod
+ - purefa_volume - Ensure REST version is high enough to support promotion_status
+ minor_changes:
+ - purefa_certs - New module for managing SSL certificates
+ - purefa_volume - New parameter pgroup to specify an existing protection group
+ to put created volume(s) in.
+ fragments:
+ - 168_dsrole_fix.yaml
+ - 169_add_certs.yaml
+ - 170_pgsnap_stretch_pod_fail.yaml
+ - 174_null_gateway.yaml
+ - 175_check_pgname.yaml
+ - 176_fix_promote_api_issue.yaml
+ - 182_allow_pgroup_with_create.yaml
+ modules:
+ - description: Manage FlashArray SSL Certificates
+ name: purefa_certs
+ namespace: ''
+ release_date: '2021-04-21'
+ 1.9.0:
+ changes:
+ bugfixes:
+ - purefa_host - Rollback host creation if initiators already used by another
+ host
+ - purefa_policy - Fix incorrect protocol endpoint invocation
+ - purefa_ra - Fix disable feature for Remote Assist; this didn't work due to an
+ error in the check logic
+ - purefa_vg - Correct issue when setting or changing Volume Group QoS
+ - purefa_volume - Fix incorrect API version check for ActiveDR support
+ minor_changes:
+ - purefa_ad - Increase number of kerberos and directory servers to be 3 for
+ each.
+ - purefa_ad - New module to manage Active Directory accounts
+ - purefa_dirsnap - New modules to manage FA-Files directory snapshots
+ - purefa_eradication - New module to set deleted items eradication timer
+ - purefa_info - Add data-at-rest and eradication timer information to default
+ dict
+ - purefa_info - Add high-level count for directory quotas and details for all
+ FA-Files policies
+ - purefa_info - Add volume Page 83 NAA information for volume details
+ - purefa_network - Add support for enable/disable of FC ports
+ - purefa_policy - Add support for FA-files Directory Quotas and associated rules
+ and members
+ - purefa_sso - Add support for setting FlashArray Single Sign-On from Pure1
+ Manage
+ - purefa_volume - Add volume Page 83 NAA information to response dict
+ fragments:
+ - 187_add_ad.yaml
+ - 188_add_dirsnap.yaml
+ - 193_duplicate_initiators.yaml
+ - 194_vg_qos.yaml
+ - 196_fix_activedr_api_version.yaml
+ - 199_add_fc_port_enable.yaml
+ - 200_add_DAR_info.yaml
+ - 201_increase_krb_count.yaml
+ - 202_add_sso.yaml
+ - 203_add_eradication_timer.yaml
+ - 205_policy_protocl.yaml
+ - 206_add_naa_info.yaml
+ - 207_fix_disable_for_remote_assist.yaml
+ - 208_add_directory_quota_support.yaml
+ modules:
+ - description: Manage FlashArray Active Directory Account
+ name: purefa_ad
+ namespace: ''
+ - description: Manage FlashArray File System Directory Snapshots
+ name: purefa_dirsnap
+ namespace: ''
+ - description: Configure Pure Storage FlashArray Eradication Timer
+ name: purefa_eradication
+ namespace: ''
+ - description: Configure Pure Storage FlashArray Single Sign-On
+ name: purefa_sso
+ namespace: ''
+ release_date: '2021-07-10'
diff --git a/ansible_collections/purestorage/flasharray/changelogs/config.yaml b/ansible_collections/purestorage/flasharray/changelogs/config.yaml
new file mode 100644
index 000000000..9a3c1d8a4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/config.yaml
@@ -0,0 +1,31 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: true
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+ - - major_changes
+ - Major Changes
+ - - minor_changes
+ - Minor Changes
+ - - breaking_changes
+ - Breaking Changes / Porting Guide
+ - - deprecated_features
+ - Deprecated Features
+ - - removed_features
+ - Removed Features (previously deprecated)
+ - - security_fixes
+ - Security Fixes
+ - - bugfixes
+ - Bugfixes
+ - - known_issues
+ - Known Issues
+title: Purestorage.Flasharray
+trivial_section_name: trivial
+use_fqcn: true
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/107_host_case_clarity.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/107_host_case_clarity.yaml
new file mode 100644
index 000000000..418af8e94
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/107_host_case_clarity.yaml
@@ -0,0 +1,7 @@
+minor_changes:
+ - purefa_host - Enforce hostname case-sensitivity rules
+ - purefa_hg - Enforce case-sensitivity rules for hostgroup objects
+ - purefa_pg - Enforce case-sensitivity rules for protection group objects
+
+bugfixes:
+ - purefa_host - Correctly remove host that is in a hostgroup
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/108_fix_eradicate_idempotency.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/108_fix_eradicate_idempotency.yaml
new file mode 100644
index 000000000..79a8c0c35
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/108_fix_eradicate_idempotency.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Fix failing idempotency on eradicate volume
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/109_fa_files_support_purefa_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/109_fa_files_support_purefa_info.yaml
new file mode 100644
index 000000000..2342709dd
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/109_fa_files_support_purefa_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add support for FA Files features
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/110_add_apiclient_support.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/110_add_apiclient_support.yaml
new file mode 100644
index 000000000..25496854d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/110_add_apiclient_support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_apiclient - New module to support API Client management
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/111_add_filesystem_support.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/111_add_filesystem_support.yaml
new file mode 100644
index 000000000..ed9e9f176
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/111_add_filesystem_support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_fs - Add filesystem management support
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/112_add_directory_support.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/112_add_directory_support.yaml
new file mode 100644
index 000000000..22806892f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/112_add_directory_support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_directory - Add support for managed directories
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/113_add_exports_support.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/113_add_exports_support.yaml
new file mode 100644
index 000000000..595a58662
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/113_add_exports_support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_export - Add support for filesystem exports
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/115_add_gcp_offload.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/115_add_gcp_offload.yaml
new file mode 100644
index 000000000..a36255f67
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/115_add_gcp_offload.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_offload - Add support for Google Cloud offload target
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/116_add_policies.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/116_add_policies.yaml
new file mode 100644
index 000000000..5159a8b3d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/116_add_policies.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_policy - Add support for NFS, SMB and Snapshot policy management
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/118_rename_host.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/118_rename_host.yaml
new file mode 100644
index 000000000..b34f8a820
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/118_rename_host.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_host - Add host rename function
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/121_add_multi_volume_creation.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/121_add_multi_volume_creation.yaml
new file mode 100644
index 000000000..d48336332
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/121_add_multi_volume_creation.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_volume - Add support for multi-volume creation
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/122_add_multi_host_creation.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/122_add_multi_host_creation.yaml
new file mode 100644
index 000000000..1ad523ee6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/122_add_multi_host_creation.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_host - Add support for multi-host creation
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/123_add_multi_vgroup_creation.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/123_add_multi_vgroup_creation.yaml
new file mode 100644
index 000000000..207cd97b8
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/123_add_multi_vgroup_creation.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_vg - Add support for multiple vgroup creation
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/124_sdk_handshake.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/124_sdk_handshake.yaml
new file mode 100644
index 000000000..244a77c8e
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/124_sdk_handshake.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa.py - Resolve issue when pypureclient doesn't handshake array correctly
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/125_dns_idempotency.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/125_dns_idempotency.yaml
new file mode 100644
index 000000000..cf195b0d9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/125_dns_idempotency.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_dns - Fix idempotency
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/126_fix_volume_move.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/126_fix_volume_move.yaml
new file mode 100644
index 000000000..64d22578f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/126_fix_volume_move.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Alert when volume selected for move does not exist
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/130_info_ds_update.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/130_info_ds_update.yaml
new file mode 100644
index 000000000..336c43a60
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/130_info_ds_update.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Update directory_services dictionary to cater for FA-Files data DS. Change DS dict forward. Add deprecation warning.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/131_add_v6_ds_update.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/131_add_v6_ds_update.yaml
new file mode 100644
index 000000000..7fac4905d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/131_add_v6_ds_update.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_ds - Add Purity v6 support for Directory Services, including Data DS and updating services
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/132_fc_replication.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/132_fc_replication.yaml
new file mode 100644
index 000000000..b033ed5cb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/132_fc_replication.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_connect - Add support for FC-based array replication
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/133_purefa_info_v6_replication.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/133_purefa_info_v6_replication.yaml
new file mode 100644
index 000000000..bbed50da6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/133_purefa_info_v6_replication.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add support for FC Replication
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/134_ac_pg_support.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/134_ac_pg_support.yaml
new file mode 100644
index 000000000..b1ccd2b38
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/134_ac_pg_support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_pg - Add support for Protection Groups in AC pods
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/135_no_cbs_ntp.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/135_no_cbs_ntp.yaml
new file mode 100644
index 000000000..6a0644d11
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/135_no_cbs_ntp.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_ntp - Ignore NTP configuration for CBS-based arrays
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/136_add_vol_get_send_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/136_add_vol_get_send_info.yaml
new file mode 100644
index 000000000..fa3fcc8e6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/136_add_vol_get_send_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add support for Remote Volume Snapshots
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/137_pgsnap_regex.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/137_pgsnap_regex.yaml
new file mode 100644
index 000000000..1560343c4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/137_pgsnap_regex.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_pgsnap - Add check to ensure suffix name meets naming conventions
+ - purefa_snap - Update suffix regex pattern
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/139_pgsnap_ac_support.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/139_pgsnap_ac_support.yaml
new file mode 100644
index 000000000..6004d8fac
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/139_pgsnap_ac_support.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsnap - Ensure pgsnap restores work for AC PGs
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/140_pod_case.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/140_pod_case.yaml
new file mode 100644
index 000000000..1896bd6f0
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/140_pod_case.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_pod - Ensure all pod names are lowercase for consistency
+ - purefa_hg - Ensure all hostname checks are lowercase for consistency
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/141_add_remote_snapshot.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/141_add_remote_snapshot.yaml
new file mode 100644
index 000000000..9af6598a9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/141_add_remote_snapshot.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_snap - Add support for remote snapshot of individual volumes to offload targets
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/145_fix_missing_move_variable.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/145_fix_missing_move_variable.yaml
new file mode 100644
index 000000000..a5189a0d6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/145_fix_missing_move_variable.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Add missing variable initialization
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/149_volumes_demoted_pods_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/149_volumes_demoted_pods_fix.yaml
new file mode 100644
index 000000000..812d0f3c9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/149_volumes_demoted_pods_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Fix issues with moving volumes into demoted or linked pods
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/152_fix_user.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/152_fix_user.yaml
new file mode 100644
index 000000000..d124e2ca5
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/152_fix_user.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_user - Do not allow role to be changed for the breakglass user (pureuser)
+ - purefa_user - Do not change role for existing user unless requested
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/153_syslog_update.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/153_syslog_update.yaml
new file mode 100644
index 000000000..84a435487
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/153_syslog_update.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_syslog - Add support for naming SYSLOG servers for Purity//FA 6.1 or higher
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/156_snap_suffix_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/156_snap_suffix_fix.yaml
new file mode 100644
index 000000000..73f2bd27a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/156_snap_suffix_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsnap - Ensure suffix rules only implemented for state=present
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/160_rename_pg.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/160_rename_pg.yaml
new file mode 100644
index 000000000..3e6cae673
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/160_rename_pg.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_pg - Add support to rename protection groups
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/161_offline_offload_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/161_offline_offload_fix.yaml
new file mode 100644
index 000000000..26871e705
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/161_offline_offload_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Resolve crash when an offload target is offline
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/162_pgsnap_info_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/162_pgsnap_info_fix.yaml
new file mode 100644
index 000000000..db7639b03
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/162_pgsnap_info_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Fix missing protection group snapshot info for local snapshots
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/163_add_maintenance_windows.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/163_add_maintenance_windows.yaml
new file mode 100644
index 000000000..2d0453981
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/163_add_maintenance_windows.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_maintenance - New module to set maintenance windows
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/168_dsrole_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/168_dsrole_fix.yaml
new file mode 100644
index 000000000..44c592e06
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/168_dsrole_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_dsrole - If using None for group or group_base an incorrect change state is applied
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/169_add_certs.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/169_add_certs.yaml
new file mode 100644
index 000000000..6912e0ab6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/169_add_certs.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_certs - New module for managing SSL certificates
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/170_pgsnap_stretch_pod_fail.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/170_pgsnap_stretch_pod_fail.yaml
new file mode 100644
index 000000000..8626d2425
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/170_pgsnap_stretch_pod_fail.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsnap - Fail with warning if trying to restore to a stretched ActiveCluster pod
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/174_null_gateway.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/174_null_gateway.yaml
new file mode 100644
index 000000000..776f64131
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/174_null_gateway.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_network - Allow gateway parameter to be set as None - needed for non-routing iSCSI ports
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/175_check_pgname.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/175_check_pgname.yaml
new file mode 100644
index 000000000..d9acd6495
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/175_check_pgname.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pg - Check to ensure protection group name meets naming convention
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/176_fix_promote_api_issue.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/176_fix_promote_api_issue.yaml
new file mode 100644
index 000000000..4e58705dd
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/176_fix_promote_api_issue.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Ensure REST version is high enough to support promotion_status
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/182_allow_pgroup_with_create.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/182_allow_pgroup_with_create.yaml
new file mode 100644
index 000000000..44b41b0b3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/182_allow_pgroup_with_create.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_volume - New parameter pgroup to specify an existing protection group to put created volume(s) in.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/187_add_ad.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/187_add_ad.yaml
new file mode 100644
index 000000000..2b7a7c0e4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/187_add_ad.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_ad - New module to manage Active Directory accounts
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/188_add_dirsnap.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/188_add_dirsnap.yaml
new file mode 100644
index 000000000..a7bafb1c3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/188_add_dirsnap.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_dirsnap - New module to manage FA-Files directory snapshots
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/193_duplicate_initiators.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/193_duplicate_initiators.yaml
new file mode 100644
index 000000000..f65539280
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/193_duplicate_initiators.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_host - Rollback host creation if initiators already used by another host
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/194_vg_qos.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/194_vg_qos.yaml
new file mode 100644
index 000000000..615ba254a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/194_vg_qos.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_vg - Correct issue when setting or changing Volume Group QoS
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/196_fix_activedr_api_version.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/196_fix_activedr_api_version.yaml
new file mode 100644
index 000000000..c7afd55c7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/196_fix_activedr_api_version.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Fix incorrect API version check for ActiveDR support
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/199_add_fc_port_enable.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/199_add_fc_port_enable.yaml
new file mode 100644
index 000000000..174c8ae1b
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/199_add_fc_port_enable.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_network - Add support for enabling/disabling FC ports
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/200_add_DAR_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/200_add_DAR_info.yaml
new file mode 100644
index 000000000..3df78dea7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/200_add_DAR_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add data-at-rest and eradication timer information to default dict
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/201_increase_krb_count.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/201_increase_krb_count.yaml
new file mode 100644
index 000000000..b3d4cd655
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/201_increase_krb_count.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_ad - Increase the number of Kerberos and directory servers to 3 each.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/202_add_sso.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/202_add_sso.yaml
new file mode 100644
index 000000000..ed74c67f7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/202_add_sso.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_sso - Add support for setting FlashArray Single Sign-On from Pure1 Manage
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/203_add_eradication_timer.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/203_add_eradication_timer.yaml
new file mode 100644
index 000000000..b68a6040a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/203_add_eradication_timer.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_eradication - New module to set deleted items eradication timer
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/205_policy_protocl.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/205_policy_protocl.yaml
new file mode 100644
index 000000000..e7fe96d2c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/205_policy_protocl.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_policy - Fix incorrect protocol endpoint invocation
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/206_add_naa_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/206_add_naa_info.yaml
new file mode 100644
index 000000000..b4e321ce3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/206_add_naa_info.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_volume - Add volume Page 83 NAA information to response dict
+ - purefa_info - Add volume Page 83 NAA information for volume details
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/207_fix_disable_for_remote_assist.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/207_fix_disable_for_remote_assist.yaml
new file mode 100644
index 000000000..17c10929d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/207_fix_disable_for_remote_assist.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_ra - Fix disable feature for remote assist; this didn't work due to an error in the check logic
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/208_add_directory_quota_support.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/208_add_directory_quota_support.yaml
new file mode 100644
index 000000000..8e7178185
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/208_add_directory_quota_support.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_policy - Add support for FA-files Directory Quotas and associated rules and members
+ - purefa_info - Add high-level count for directory quotas and details for all FA-Files policies
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/213_add_kmip.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/213_add_kmip.yaml
new file mode 100644
index 000000000..a6dc4ea2a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/213_add_kmip.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_kmip - Add support for KMIP server management
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/214_join_ou.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/214_join_ou.yaml
new file mode 100644
index 000000000..3f00cacf2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/214_join_ou.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_ds - Add ``join_ou`` parameter for AD account creation
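
A minimal sketch of how the new parameter might be used (server address, OU path and credentials below are hypothetical placeholders, not values from this collection):

    - name: Configure AD directory service, joining the account into a specific OU
      purestorage.flasharray.purefa_ds:
        uri: "ldap://ad.example.com"        # placeholder directory server
        base_dn: "DC=example,DC=com"        # placeholder base DN
        bind_user: svc_ansible
        bind_password: secret
        join_ou: "OU=PureStorage,OU=Servers"  # the new parameter
        fa_url: 10.10.10.2
        api_token: e31060a7-21fc-e277-6240-25983c6c4592
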
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/220_capacity_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/220_capacity_info.yaml
new file mode 100644
index 000000000..0a2e41dd1
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/220_capacity_info.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_info - Remove directory_services field. Deprecated in Collections 1.6
+ - purefa_info - Add array, volume and snapshot detailed capacity information
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/224_add_nguid_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/224_add_nguid_info.yaml
new file mode 100644
index 000000000..e99b862ec
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/224_add_nguid_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add NVMe NGUID value for volumes
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/226_deprecate_protocol.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/226_deprecate_protocol.yaml
new file mode 100644
index 000000000..dfac53d1e
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/226_deprecate_protocol.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_host - Deprecate ``protocol`` parameter. No longer required.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/227_missing_regex.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/227_missing_regex.yaml
new file mode 100644
index 000000000..d03cf2a42
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/227_missing_regex.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_subnet - Add regex to check for correct subnet name
+ - purefa_user - Add regex to check for correct username
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/228_nguid_to_volfact.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/228_nguid_to_volfact.yaml
new file mode 100644
index 000000000..972931f50
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/228_nguid_to_volfact.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_volume - Add NVMe NGUID to response dict
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/229_snapsuffix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/229_snapsuffix.yaml
new file mode 100644
index 000000000..b290950d2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/229_snapsuffix.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_info - Add snapshot policy rules suffix support
+ - purefa_policy - Add snapshot policy rules suffix support
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/230_add_pg_deleted_vols.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/230_add_pg_deleted_vols.yaml
new file mode 100644
index 000000000..bc868b24a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/230_add_pg_deleted_vols.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add deleted members to volume protection group info
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/231_syslog_settings.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/231_syslog_settings.yaml
new file mode 100644
index 000000000..065247686
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/231_syslog_settings.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_syslog_settings - Add support to manage global syslog server settings
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/234_add_vol_info_on_nochange.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/234_add_vol_info_on_nochange.yaml
new file mode 100644
index 000000000..695f3f6e3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/234_add_vol_info_on_nochange.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_volume - Provide volume facts in all cases, including when no change has occurred.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/235_eula.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/235_eula.yaml
new file mode 100644
index 000000000..f73f075c6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/235_eula.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_eula - Resolve EULA signing issue
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/237_fix_network.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/237_fix_network.yaml
new file mode 100644
index 000000000..137f27432
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/237_fix_network.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_network - Fix bug introduced with management of FC ports
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/238_add_dirsnap_rename.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/238_add_dirsnap_rename.yaml
new file mode 100644
index 000000000..b90b8d08a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/238_add_dirsnap_rename.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_dirsnap - Add support to rename directory snapshots not managed by a snapshot policy
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/239_safe_mode.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/239_safe_mode.yaml
new file mode 100644
index 000000000..42ddaacd7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/239_safe_mode.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add Safe Mode status
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/242_multi_offload.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/242_multi_offload.yaml
new file mode 100644
index 000000000..7d7b56d77
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/242_multi_offload.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_offload - Add support for multiple homogeneous offload targets
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/243_sso_to_admin.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/243_sso_to_admin.yaml
new file mode 100644
index 000000000..2eae7056c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/243_sso_to_admin.yaml
@@ -0,0 +1,6 @@
+minor_changes:
+ - purefa_admin - New module to set global admin settings, including SSO
+deprecated_features:
+ - purefa_sso - Deprecated in favor of M(purefa_admin). Will be removed in Collection 2.0
+known_issues:
+ - purefa_admin - Once `max_login` and `lockout` have been set, there is currently no way to reset these to zero except through the FlashArray GUI
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/246_python_precedence.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/246_python_precedence.yaml
new file mode 100644
index 000000000..a70c2cff4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/246_python_precedence.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_network - Resolve bug stopping management IP address from being changed correctly
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/247_fix_smb_policy_rules.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/247_fix_smb_policy_rules.yaml
new file mode 100644
index 000000000..d8bd2cc02
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/247_fix_smb_policy_rules.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_policy - Fix issue with SMB Policy creation
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/249_allow_cert_reimport.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/249_allow_cert_reimport.yaml
new file mode 100644
index 000000000..625547e06
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/249_allow_cert_reimport.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_certs - Allow a certificate to be imported over an existing SSL certificate
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/252_add_saml2.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/252_add_saml2.yaml
new file mode 100644
index 000000000..36960fe83
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/252_add_saml2.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_saml - Add support for SAML2 SSO IdPs
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/254_sam2_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/254_sam2_info.yaml
new file mode 100644
index 000000000..0d8e71612
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/254_sam2_info.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_info - Add SAML2 SSO configuration information
+ - purefa_info - Fix Active Directory configuration details
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/257_fqcn.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/257_fqcn.yaml
new file mode 100644
index 000000000..5cf04eb1f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/257_fqcn.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - All modules - Change examples to use FQCN for modules
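
For illustration (volume name and size are arbitrary), a documentation example moves from the short module name to the fully qualified collection name:

    # Before: short module name
    - name: Create volume
      purefa_volume:
        name: test_volume
        size: 1T

    # After: fully qualified collection name (FQCN)
    - name: Create volume
      purestorage.flasharray.purefa_volume:
        name: test_volume
        size: 1T
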
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/259_fix_gateway_check.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/259_fix_gateway_check.yaml
new file mode 100644
index 000000000..599e09ff9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/259_fix_gateway_check.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_subnet - Fix subnet update checks when no gateway in existing subnet configuration
+ - purefa_info - Fix space reporting issue
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/261_fix_bad_arrays.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/261_fix_bad_arrays.yaml
new file mode 100644
index 000000000..521ffd598
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/261_fix_bad_arrays.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Fix issue where remote arrays are not in a valid connected state
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/265_fix_multiple_nfs_rules.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/265_fix_multiple_nfs_rules.yaml
new file mode 100644
index 000000000..022eb48a3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/265_fix_multiple_nfs_rules.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_policy - Fix issue when creating multiple rules in an NFS policy
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/268_fix_quotas_issues.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/268_fix_quotas_issues.yaml
new file mode 100644
index 000000000..9da541a27
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/268_fix_quotas_issues.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - purefa_policy - Allow directories in snapshot policies to be managed
+bugfixes:
+ - purefa_policy - Fix idempotency issue with quota policy rules
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/270_add_priority_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/270_add_priority_info.yaml
new file mode 100644
index 000000000..6fe6f2a4b
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/270_add_priority_info.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - purefa_info - Add priority adjustment information for volumes and volume groups
+ - purefa_info - Add QoS information for volume groups
+ - purefa_info - Split volume groups into live and deleted dicts
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/271_vgroup_prio.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/271_vgroup_prio.yaml
new file mode 100644
index 000000000..a7a143eed
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/271_vgroup_prio.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_vg - Add DMM Priority Adjustment support
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/272_volume_prio.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/272_volume_prio.yaml
new file mode 100644
index 000000000..4a7ea4229
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/272_volume_prio.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_volume - Add support for DMM Priority Adjustment
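
A hedged sketch of the feature (the priority_operator/priority_value parameter names are assumptions for illustration, not confirmed by this fragment):

    # Hypothetical parameter names shown for illustration only
    - name: Apply a +10 DMM priority adjustment to a volume
      purestorage.flasharray.purefa_volume:
        name: test_volume
        priority_operator: "+"
        priority_value: 10
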
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/277_add_fs_repl.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/277_add_fs_repl.yaml
new file mode 100644
index 000000000..637552d1b
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/277_add_fs_repl.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_fs - Add support for replicated file systems
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/278_pgsnap_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/278_pgsnap_info.yaml
new file mode 100644
index 000000000..449890e1b
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/278_pgsnap_info.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_info - Add info for protection group snapshots
+ - purefa_info - Add info for protection group safe mode setting (Requires Purity//FA 6.3.0 or higher)
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/279_pg_safemode.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/279_pg_safemode.yaml
new file mode 100644
index 000000000..a8ab33780
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/279_pg_safemode.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_pg - Add support for protection group SafeMode. Requires Purity//FA 6.3.0 or higher
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/280_multihost_no_suffix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/280_multihost_no_suffix.yaml
new file mode 100644
index 000000000..b0f5883a2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/280_multihost_no_suffix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_host - Allow multi-host creation without requiring a suffix string
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/284_volfact_for_recover.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/284_volfact_for_recover.yaml
new file mode 100644
index 000000000..fb6068e33
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/284_volfact_for_recover.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_volume - Provide volume facts for volume after recovery
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/288_zero_params.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/288_zero_params.yaml
new file mode 100644
index 000000000..aab42ef97
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/288_zero_params.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsched - Allow zero as a valid value for appropriate schedule parameters
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/292_fix_ds_password.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/292_fix_ds_password.yaml
new file mode 100644
index 000000000..717c11c3c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/292_fix_ds_password.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_ds - Add new parameter `force_bind_password` (default = True) to allow idempotency for the module
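
A sketch of the idempotent workflow this enables (connection values are placeholders): with ``force_bind_password: false`` the module does not resend the bind password, so repeat runs report no change:

    - name: Update directory services without forcing a password rebind
      purestorage.flasharray.purefa_ds:
        uri: "ldap://ad.example.com"    # placeholder directory server
        base_dn: "DC=example,DC=com"
        bind_user: svc_ansible
        bind_password: secret
        force_bind_password: false      # skip rebinding the password on repeat runs
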
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/293_add_chassis_inventory.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/293_add_chassis_inventory.yaml
new file mode 100644
index 000000000..5db5513b6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/293_add_chassis_inventory.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_inventory - Add chassis information to inventory
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/294_dns_ntp_idempotency_absent.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/294_dns_ntp_idempotency_absent.yaml
new file mode 100644
index 000000000..2937d730f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/294_dns_ntp_idempotency_absent.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_dns - Corrects logic where the API responds with an empty list rather than a list containing a single empty string.
+ - purefa_ntp - Corrects workflow so that the desired and current states are compared before setting the changed flag to true during an absent run
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/294_user_map_support.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/294_user_map_support.yaml
new file mode 100644
index 000000000..5c0db8765
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/294_user_map_support.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_policy - Add support for NFS policy user mapping
+ - purefa_info - Add NFS policy user mapping status
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/296_ad_tls.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/296_ad_tls.yaml
new file mode 100644
index 000000000..770ddd3fe
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/296_ad_tls.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_ad - Add support for TLS and joining existing AD account
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/299_fix_pgsched_zero_support.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/299_fix_pgsched_zero_support.yaml
new file mode 100644
index 000000000..07a8a1301
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/299_fix_pgsched_zero_support.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsched - Fix issue where 0 was not correctly handled for replication schedule
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/302_fix_pg_recover_and_target_update.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/302_fix_pg_recover_and_target_update.yaml
new file mode 100644
index 000000000..719e99a65
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/302_fix_pg_recover_and_target_update.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_pg - Ensure deleted protection group can be correctly recovered
+ - purefa_pg - Correct issue where the target for a protection group is not correctly amended
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/304_host_vlan.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/304_host_vlan.yaml
new file mode 100644
index 000000000..d31229aef
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/304_host_vlan.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_host - Add support for VLAN ID tagging for a host (Requires Purity//FA 6.3.5)
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/305_fix_target_dempo.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/305_fix_target_dempo.yaml
new file mode 100644
index 000000000..0ef405e8e
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/305_fix_target_dempo.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pg - Fix idempotency issue for protection group targets
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/307_multiple_dns.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/307_multiple_dns.yaml
new file mode 100644
index 000000000..643ea8e06
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/307_multiple_dns.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_dns - Support multiple DNS configurations from Purity//FA 6.3.3
+ - purefa_info - Support multiple DNS configurations
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/308_add_vm.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/308_add_vm.yaml
new file mode 100644
index 000000000..00d939638
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/308_add_vm.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add support for Virtual Machines and Snapshots
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/310_hg_vol_idempotency.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/310_hg_vol_idempotency.yaml
new file mode 100644
index 000000000..9e27ce3f2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/310_hg_vol_idempotency.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_hg - Ensure volume disconnection from a hostgroup is idempotent
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/312_pg_alias.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/312_pg_alias.yaml
new file mode 100644
index 000000000..71175c85a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/312_pg_alias.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_pg - Changed parameter `pgroup` to `name`. Allow `pgroup` as alias for backwards compatibility.
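
Both spellings remain valid; a brief illustration (group and volume names are hypothetical):

    # Preferred going forward
    - purestorage.flasharray.purefa_pg:
        name: pg_database
        volume:
          - vol1
          - vol2

    # Still accepted through the alias
    - purestorage.flasharray.purefa_pg:
        pgroup: pg_database
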
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/315_spf_details.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/315_spf_details.yaml
new file mode 100644
index 000000000..ec150de3e
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/315_spf_details.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_inventory - Add REST 2.x support and SFP details for Purity//FA 6.3.4 and higher
+ - purefa_inventory - Change response dict name to `purefa_inv` so it doesn't clash with the info module response dict
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/317_add_all_squash.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/317_add_all_squash.yaml
new file mode 100644
index 000000000..6628c6fef
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/317_add_all_squash.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_policy - Add ``all_squash``, ``anonuid`` and ``anongid`` to NFS client rules options
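
As a sketch only (client subnet and anonymous IDs are placeholders, and it is assumed the rule options are exposed as same-named module parameters):

    - name: Add an NFS client rule squashing all users to an anonymous UID/GID
      purestorage.flasharray.purefa_policy:
        name: export_policy
        policy: nfs
        client: 10.0.0.0/24   # placeholder client subnet
        all_squash: true
        anonuid: "65534"      # placeholder anonymous UID
        anongid: "65534"      # placeholder anonymous GID
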
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/318_vol_defaults.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/318_vol_defaults.yaml
new file mode 100644
index 000000000..6c28de7c1
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/318_vol_defaults.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_volume - Default Protection Group support added for volume creation and copying from Purity//FA 6.3.4
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/319_lockout.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/319_lockout.yaml
new file mode 100644
index 000000000..cf17dd035
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/319_lockout.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Ensure global admin lockout duration is measured in seconds
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/320_completed_snaps.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/320_completed_snaps.yaml
new file mode 100644
index 000000000..77c0677b8
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/320_completed_snaps.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsnap - Resolved intermittent error where the `latest` snapshot is not complete and can fail. Only select the latest completed snapshot to restore from.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/328_policy_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/328_policy_fix.yaml
new file mode 100644
index 000000000..3e4ed1226
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/328_policy_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_policy - Fixed missing parameters in function calls
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/330_extend_vlan.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/330_extend_vlan.yaml
new file mode 100644
index 000000000..50435b40f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/330_extend_vlan.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_vlan - Extend VLAN support to cover NVMe-RoCE and file interfaces
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/334_fix_vg_qos.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/334_fix_vg_qos.yaml
new file mode 100644
index 000000000..bcf2d84c4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/334_fix_vg_qos.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_vg - Fix TypeError when using newer Purity versions and setting VG QoS
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/336_add_servicelist.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/336_add_servicelist.yaml
new file mode 100644
index 000000000..60223d579
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/336_add_servicelist.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_network - Added support for servicelist updates
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/337_fix_non-prod_versions.yml b/ansible_collections/purestorage/flasharray/changelogs/fragments/337_fix_non-prod_versions.yml
new file mode 100644
index 000000000..cc96ba2f1
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/337_fix_non-prod_versions.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa.py - Fix issue with Purity version numbers for development versions
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/341_pg_400s.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/341_pg_400s.yaml
new file mode 100644
index 000000000..a29e48e8a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/341_pg_400s.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Fixed race condition with protection groups
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/342_add_vol_promotion.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/342_add_vol_promotion.yaml
new file mode 100644
index 000000000..e5588e559
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/342_add_vol_promotion.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_volume - Added support for volume promotion/demotion
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/343_fix_ds.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/343_fix_ds.yaml
new file mode 100644
index 000000000..f018d6825
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/343_fix_ds.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_ds - Fixed dict syntax error
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/344_fix_smtp.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/344_fix_smtp.yaml
new file mode 100644
index 000000000..9ca2998e6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/344_fix_smtp.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_smtp - Fix parameter name
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/345_user_map.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/345_user_map.yaml
new file mode 100644
index 000000000..5fed80f16
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/345_user_map.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Fixed error in policies subsection due to API issue
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/347_dns_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/347_dns_fix.yaml
new file mode 100644
index 000000000..a9092c9dc
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/347_dns_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Fixed issue with DNS reporting in Purity//FA 6.4.0 with non-FA-Files systems
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/348_add_default_prot.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/348_add_default_prot.yaml
new file mode 100644
index 000000000..551825bae
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/348_add_default_prot.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Added default protection information to `config` section
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/349_add_alerts.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/349_add_alerts.yaml
new file mode 100644
index 000000000..905c4628f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/349_add_alerts.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add new subset alerts
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/351_fix_rest_check.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/351_fix_rest_check.yaml
new file mode 100644
index 000000000..003537f32
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/351_fix_rest_check.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa - Remove unneeded REST version check as it causes issues with REST mismatches
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/354_fix_promotion.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/354_fix_promotion.yaml
new file mode 100644
index 000000000..3d413bb70
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/354_fix_promotion.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Fixed issue with promotion status not being called correctly
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/360_fix_volume.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/360_fix_volume.yaml
new file mode 100644
index 000000000..1a76839ef
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/360_fix_volume.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_volume - Ensure promotion_status is returned correctly on creation
+ - purefa_volume - Fixed idempotency bug when creating volumes with QoS
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/363_overwrite_combo.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/363_overwrite_combo.yaml
new file mode 100644
index 000000000..de4b27d3b
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/363_overwrite_combo.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Fix bug when overwriting volume using invalid parameters
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/364_fc_targets.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/364_fc_targets.yaml
new file mode 100644
index 000000000..551208351
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/364_fc_targets.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Fix missing FC target ports for host
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/365_pod_pgsched.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/365_pod_pgsched.yaml
new file mode 100644
index 000000000..51a9e5811
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/365_pod_pgsched.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsched - Fix error when setting schedule for pod based protection group
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/366_add_nvme_types.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/366_add_nvme_types.yaml
new file mode 100644
index 000000000..902895fb4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/366_add_nvme_types.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_network - Added support for NVMe-RoCE and NVMe-TCP service types
+ - purefa_vlan - Added support for NVMe-TCP service type
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/367_fix_vg.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/367_fix_vg.yaml
new file mode 100644
index 000000000..26859df60
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/367_fix_vg.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_vg - Fix issue with VG creation on newer Purity versions
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/369_fix_host.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/369_fix_host.yaml
new file mode 100644
index 000000000..b380f4617
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/369_fix_host.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_host - Fixed parameter name
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/370_add_user_role.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/370_add_user_role.yaml
new file mode 100644
index 000000000..3fea2f1a0
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/370_add_user_role.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_user - Added Ops Admin role to choices
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/374_offload_pgsnap.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/374_offload_pgsnap.yaml
new file mode 100644
index 000000000..bb799037f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/374_offload_pgsnap.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_info - Resolves issue with a destroyed pgroup snapshot on an offload target not having a time remaining value
+ - purefa_pg - Resolves issue with a destroyed pgroup snapshot on an offload target not having a time remaining value
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/375_fix_remote_hosts.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/375_fix_remote_hosts.yaml
new file mode 100644
index 000000000..c5979c0c3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/375_fix_remote_hosts.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Resolves issue in AC environment where REST v2 host list mismatches REST v1 due to remote hosts.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/379_cap_compat.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/379_cap_compat.yaml
new file mode 100644
index 000000000..440e3f697
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/379_cap_compat.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Fix REST response backwards compatibility issue for array capacity REST response
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/381_change_booleans.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/381_change_booleans.yaml
new file mode 100644
index 000000000..84c3cb521
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/381_change_booleans.yaml
@@ -0,0 +1,2 @@
+trivial:
+ - various modules - Adjust booleans from ``yes``/``no`` to ``true``/``false`` in docs
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/383_network_idemp.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/383_network_idemp.yaml
new file mode 100644
index 000000000..c2121456e
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/383_network_idemp.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_network - Resolves network port setting idempotency issue
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/384_update_vol_facts.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/384_update_vol_facts.yaml
new file mode 100644
index 000000000..3846109e3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/384_update_vol_facts.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_volume - Added additional volume facts for volume update, or for no change
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/387_no_volume_failure.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/387_no_volume_failure.yaml
new file mode 100644
index 000000000..5d072cf06
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/387_no_volume_failure.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_hg - Exit gracefully, rather than failing when a specified volume does not exist
+ - purefa_host - Exit gracefully, rather than failing when a specified volume does not exist
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/388_remove_27.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/388_remove_27.yaml
new file mode 100644
index 000000000..f9c8d796c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/388_remove_27.yaml
@@ -0,0 +1,3 @@
+release_summary: |
+ | FlashArray Collection v1.18 removes module-side support for Python 2.7.
+ | The minimum required Python version for the FlashArray Collection is Python 3.6.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/393_offload_recover.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/393_offload_recover.yaml
new file mode 100644
index 000000000..8216abe99
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/393_offload_recover.yaml
@@ -0,0 +1,4 @@
+bugfixes:
+ - purefa_snap - Fixed issue with system-generated suffixes not being allowed and removed an unnecessary warning message.
+minor_changes:
+ - purefa_snap - New 'suffix' field in the response when a snapshot has been created.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/394_neighbors.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/394_neighbors.yaml
new file mode 100644
index 000000000..67709079d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/394_neighbors.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Added network neighbors info to `network` subset
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/396_pod_quota.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/396_pod_quota.yaml
new file mode 100644
index 000000000..4b7f84880
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/396_pod_quota.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_pod - Added support for pod quotas (from REST 2.23)
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/397_parialconnect_bug.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/397_parialconnect_bug.yaml
new file mode 100644
index 000000000..604b287f4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/397_parialconnect_bug.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pg - Fixed issue where volumes could not be added to a PG when one of the arrays was undergoing a failover.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/398_hgoup_alias.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/398_hgoup_alias.yaml
new file mode 100644
index 000000000..3039ce1c2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/398_hgoup_alias.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_hg - Changed parameter hostgroup to name for consistency. Added hostgroup as an alias for backwards compatibility.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/v1.4.0_summary.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/v1.4.0_summary.yaml
new file mode 100644
index 000000000..3a7dc7e8a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/v1.4.0_summary.yaml
@@ -0,0 +1,37 @@
+release_summary: |
+ | Release Date: 2020-08-08
+ | This changelog describes all changes made to the modules and plugins included in this collection since Ansible 2.9.0
+
+major_changes:
+ - purefa_console - manage Console Lock setting for the FlashArray
+ - purefa_endpoint - manage VMware protocol-endpoints on the FlashArray
+ - purefa_eula - sign, or resign, FlashArray EULA
+ - purefa_inventory - get hardware inventory information from a FlashArray
+ - purefa_network - manage the physical and virtual network settings on the FlashArray
+ - purefa_pgsched - manage protection group snapshot and replication schedules on the FlashArray
+ - purefa_pod - manage ActiveCluster pods in FlashArrays
+ - purefa_pod_replica - manage ActiveDR pod replica links in FlashArrays
+ - purefa_proxy - manage the phonehome HTTPS proxy setting for the FlashArray
+ - purefa_smis - manage SMI-S settings on the FlashArray
+ - purefa_subnet - manage network subnets on the FlashArray
+ - purefa_timeout - manage the GUI idle timeout on the FlashArray
+ - purefa_vlan - manage VLAN interfaces on the FlashArray
+ - purefa_vnc - manage VNC for installed applications on the FlashArray
+ - purefa_volume_tags - manage volume tags on the FlashArray
+
+minor_changes:
+ - purefa_info - return dict names changed from ``ansible_facts`` to ``ra_info`` and ``user_info`` in appropriate sections
+ - purefa_info - new options added for information collection
+ - purefa_info - Certificate times changed to human readable rather than time since epoch
+ - purefa_host - Add support for Cloud Block Store
+ - purefa_host - Add volume disconnection support
+ - purefa_host - Add CHAP support
+ - purefa_hg - Allow LUN ID to be set for single volume
+ - purefa_offload - Add support for Azure
+ - purefa_pgsnap - Add offload support
+ - purefa_snap - Allow recovery of deleted snapshot
+ - purefa_vg - Add QoS support
+
+bugfixes:
+ - purefa_host - resolve issue found when using in Pure Storage Test Drive
+ - purefa_host - resolve hostname case inconsistencies
diff --git a/ansible_collections/purestorage/flasharray/docs/docsite/links.yml b/ansible_collections/purestorage/flasharray/docs/docsite/links.yml
new file mode 100644
index 000000000..58396065d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/docs/docsite/links.yml
@@ -0,0 +1,16 @@
+---
+edit_on_github:
+ repository: Pure-Storage-Ansible/FlashArray-Collection
+ branch: master
+ path_prefix: ''
+
+extra_links:
+ - description: Submit a bug report
+ url: https://github.com/Pure-Storage-Ansible/FlashArray-Collection/issues/new?assignees=sdodsley&labels=bug&template=bug_report_template.md
+ - description: Request a feature
+ url: https://github.com/Pure-Storage-Ansible/FlashArray-Collection/issues/new?assignees=sdodsley&labels=enhancement&template=feature_request_template.md
+
+communication:
+ mailing_lists:
+ - topic: Ansible Project List
+ url: https://groups.google.com/g/ansible-project
diff --git a/ansible_collections/purestorage/flasharray/meta/execution-environment.yml b/ansible_collections/purestorage/flasharray/meta/execution-environment.yml
new file mode 100644
index 000000000..1a7cc31c8
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/meta/execution-environment.yml
@@ -0,0 +1,8 @@
+---
+version: 1
+ansible_config: ansible.cfg
+build_arg_defaults:
+ EE_BASE_IMAGE: 'registry.redhat.io/ansible-automation-platform-20-early-access/ee-minimal-rhel8:2.0.0-10'
+ EE_BUILDER_IMAGE: 'registry.redhat.io/ansible-automation-platform-20-early-access/ansible-builder-rhel8:2.0.0-10'
+dependencies:
+ galaxy: requirements.yml
diff --git a/ansible_collections/purestorage/flasharray/meta/runtime.yml b/ansible_collections/purestorage/flasharray/meta/runtime.yml
new file mode 100644
index 000000000..f664dcbd0
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/meta/runtime.yml
@@ -0,0 +1,8 @@
+---
+requires_ansible: ">=2.9.10"
+plugin_routing:
+ modules:
+ purefa_sso:
+ deprecation:
+ removal_version: "2.0.0"
+ warning_text: purefa_sso will be removed in a future release of this collection, Use purestorage.flasharray.purefa_admin instead.
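
The routing above only warns; playbooks should migrate before the 2.0.0 removal. A sketch of the change (the ``sso`` parameter on purefa_admin and the ``state`` parameter on purefa_sso are assumptions for illustration):

    # Deprecated
    - purestorage.flasharray.purefa_sso:
        state: present

    # Replacement
    - purestorage.flasharray.purefa_admin:
        sso: true
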
diff --git a/ansible_collections/purestorage/flasharray/playbooks/.keep b/ansible_collections/purestorage/flasharray/playbooks/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/playbooks/.keep
diff --git a/ansible_collections/purestorage/flasharray/playbooks/files/.keep b/ansible_collections/purestorage/flasharray/playbooks/files/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/playbooks/files/.keep
diff --git a/ansible_collections/purestorage/flasharray/playbooks/roles/.keep b/ansible_collections/purestorage/flasharray/playbooks/roles/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/playbooks/roles/.keep
diff --git a/ansible_collections/purestorage/flasharray/playbooks/tasks/.keep b/ansible_collections/purestorage/flasharray/playbooks/tasks/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/playbooks/tasks/.keep
diff --git a/ansible_collections/purestorage/flasharray/playbooks/templates/.keep b/ansible_collections/purestorage/flasharray/playbooks/templates/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/playbooks/templates/.keep
diff --git a/ansible_collections/purestorage/flasharray/playbooks/vars/.keep b/ansible_collections/purestorage/flasharray/playbooks/vars/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/playbooks/vars/.keep
diff --git a/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py b/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py
new file mode 100644
index 000000000..7c19925e6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley <simon@purestorage.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard Pure Storage documentation fragment
+ DOCUMENTATION = r"""
+options:
+ - See separate platform section for more details
+requirements:
+ - See separate platform section for more details
+notes:
+ - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade
+"""
+
+ # Documentation fragment for FlashArray
+ FA = r"""
+options:
+ fa_url:
+ description:
+ - FlashArray management IPv4 address or Hostname.
+ type: str
+ api_token:
+ description:
+ - FlashArray API token for admin privileged user.
+ type: str
+notes:
+ - This module requires the C(purestorage) and C(py-pure-client) Python libraries
+  - Additional Python libraries may be required for specific modules.
+ - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables
+ if I(fa_url) and I(api_token) arguments are not passed to the module directly
+requirements:
+ - python >= 3.3
+ - purestorage >= 1.19
+ - py-pure-client >= 1.26.0
+ - netaddr
+ - requests
+ - pycountry
+ - packaging
+"""
diff --git a/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py b/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py
new file mode 100644
index 000000000..b85ce0e29
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+HAS_PURESTORAGE = True
+try:
+ from purestorage import purestorage
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+from os import environ
+import platform
+
+VERSION = 1.4
+USER_AGENT_BASE = "Ansible"
+
+
+def get_system(module):
+ """Return System Object or Fail"""
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
+ array_name = module.params["fa_url"]
+ api = module.params["api_token"]
+ if HAS_PURESTORAGE:
+ if array_name and api:
+ system = purestorage.FlashArray(
+ array_name, api_token=api, user_agent=user_agent, verify_https=False
+ )
+ elif environ.get("PUREFA_URL") and environ.get("PUREFA_API"):
+ system = purestorage.FlashArray(
+ environ.get("PUREFA_URL"),
+ api_token=(environ.get("PUREFA_API")),
+ user_agent=user_agent,
+ verify_https=False,
+ )
+ else:
+ module.fail_json(
+ msg="You must set PUREFA_URL and PUREFA_API environment variables "
+ "or the fa_url and api_token module arguments"
+ )
+ try:
+ system.get()
+ except Exception:
+ module.fail_json(
+ msg="Pure Storage FlashArray authentication failed. Check your credentials"
+ )
+ else:
+ module.fail_json(msg="purestorage SDK is not installed.")
+ return system
+
+
+def get_array(module):
+ """Return System Object or Fail"""
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
+ array_name = module.params["fa_url"]
+ api = module.params["api_token"]
+ if HAS_PYPURECLIENT:
+ if array_name and api:
+ system = flasharray.Client(
+ target=array_name,
+ api_token=api,
+ user_agent=user_agent,
+ )
+ elif environ.get("PUREFA_URL") and environ.get("PUREFA_API"):
+ system = flasharray.Client(
+ target=(environ.get("PUREFA_URL")),
+ api_token=(environ.get("PUREFA_API")),
+ user_agent=user_agent,
+ )
+ else:
+ module.fail_json(
+ msg="You must set PUREFA_URL and PUREFA_API environment variables "
+ "or the fa_url and api_token module arguments"
+ )
+ try:
+ system.get_hardware()
+ except Exception:
+ module.fail_json(
+ msg="Pure Storage FlashArray authentication failed. Check your credentials"
+ )
+ else:
+ module.fail_json(msg="py-pure-client and/or requests are not installed.")
+ return system
+
+
+def purefa_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return dict(
+ fa_url=dict(),
+ api_token=dict(no_log=True),
+ )
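
Both get_system() and get_array() fall back to the PUREFA_URL and PUREFA_API environment variables when fa_url and api_token are omitted, so credentials can be kept out of playbooks entirely. A sketch of a task relying on that fallback (address and token are placeholders):

    - name: Add an alert recipient, authenticating via environment variables
      purestorage.flasharray.purefa_alert:
        address: ops@example.com
      environment:
        PUREFA_URL: 10.10.10.2
        PUREFA_API: e31060a7-21fc-e277-6240-25983c6c4592
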
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py
new file mode 100644
index 000000000..d9eee96ac
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py
@@ -0,0 +1,323 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_ad
+version_added: '1.9.0'
+short_description: Manage FlashArray Active Directory Account
+description:
+- Add or delete FlashArray Active Directory Account
+- FlashArray allows the creation of one AD computer account, or joining of an
+ existing AD computer account.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the AD account
+ type: str
+ required: true
+ state:
+ description:
+    - Define whether the AD account is deleted or not
+ default: present
+ choices: [ absent, present ]
+ type: str
+ computer:
+ description:
+ - The common name of the computer account to be created in the Active Directory domain.
+ - If not specified, defaults to the name of the Active Directory configuration.
+ type: str
+ domain:
+ description:
+ - The Active Directory domain to join
+ type: str
+ username:
+ description:
+ - A user capable of creating a computer account within the domain
+ type: str
+ password:
+ description:
+ - Password string for I(username)
+ type: str
+ directory_servers:
+ description:
+ - A list of directory servers that will be used for lookups related to user authorization
+ - Accepted server formats are IP address and DNS name
+    - All specified servers must be registered to the domain appropriately in the
+      array-configured DNS and are only communicated with over the secure LDAP (LDAPS)
+      protocol. If not specified, servers are resolved for the domain in DNS.
+    - The specified list can have a maximum length of 1, or 3 for Purity 6.1.6 or higher.
+      If more are provided, only the first allowed number of servers is used.
+ type: list
+ elements: str
+ kerberos_servers:
+ description:
+ - A list of key distribution servers to use for Kerberos protocol
+ - Accepted server formats are IP address and DNS name
+    - All specified servers must be registered to the domain appropriately in the
+      array-configured DNS and are only communicated with over the secure LDAP (LDAPS)
+      protocol. If not specified, servers are resolved for the domain in DNS.
+    - The specified list can have a maximum length of 1, or 3 for Purity 6.1.6 or higher.
+      If more are provided, only the first allowed number of servers is used.
+ type: list
+ elements: str
+ local_only:
+ description:
+ - Do a local-only delete of an active directory account
+ type: bool
+ default: false
+ join_ou:
+ description:
+ - Distinguished name of organization unit in which the computer account
+      should be created when joining the domain, e.g. OU=Arrays,OU=Storage.
+ - The B(DC=...) components can be omitted.
+ - If left empty, defaults to B(CN=Computers).
+ - Requires Purity//FA 6.1.8 or higher
+ type: str
+ version_added: '1.10.0'
+ tls:
+ description:
+ - TLS mode for communication with domain controllers.
+ type: str
+ choices: [ required, optional ]
+ default: required
+ version_added: '1.14.0'
+ join_existing:
+ description:
+ - If specified as I(true), the domain is searched for a pre-existing
+ computer account to join to, and no new account will be created within the domain.
+ The C(username) specified when joining a pre-existing account must have
+ permissions to 'read all properties from' and 'reset the password of'
+ the pre-existing account. C(join_ou) will be read from the pre-existing
+      account and cannot be specified when joining to an existing account.
+ type: bool
+ default: false
+ version_added: '1.14.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new AD account
+ purestorage.flasharray.purefa_ad:
+ name: ad_account
+ computer: FLASHARRAY
+ domain: acme.com
+ join_ou: "OU=Acme,OU=Dev"
+ username: Administrator
+ password: Password
+ kerberos_servers:
+ - kdc.acme.com
+ directory_servers:
+ - ldap.acme.com
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete AD account locally
+ purestorage.flasharray.purefa_ad:
+ name: ad_account
+ local_only: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Fully delete AD account. Note that correct AD permissions are required
+ purestorage.flasharray.purefa_ad:
+ name: ad_account
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import ActiveDirectoryPost, ActiveDirectoryPatch
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+SERVER_API_VERSION = "2.6"
+MIN_JOIN_OU_API_VERSION = "2.8"
+MIN_TLS_API_VERSION = "2.15"
+
+
+def delete_account(module, array):
+ """Delete Active directory Account"""
+ changed = True
+ if not module.check_mode:
+ res = array.delete_active_directory(
+ names=[module.params["name"]], local_only=module.params["local_only"]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete AD Account {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_account(module, array):
+ """Update existing AD account"""
+ changed = False
+ current_acc = list(array.get_active_directory(names=[module.params["name"]]).items)[
+ 0
+ ]
+ if current_acc.tls != module.params["tls"]:
+ changed = True
+ if not module.check_mode:
+ res = array.patch_active_directory(
+ names=[module.params["name"]],
+ active_directory=ActiveDirectoryPatch(tls=module.params["tls"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update AD Account {0} TLS setting. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_account(module, array, api_version):
+ """Create Active Directory Account"""
+ changed = True
+ if MIN_JOIN_OU_API_VERSION not in api_version:
+ ad_config = ActiveDirectoryPost(
+ computer_name=module.params["computer"],
+ directory_servers=module.params["directory_servers"],
+ kerberos_servers=module.params["kerberos_servers"],
+ domain=module.params["domain"],
+ user=module.params["username"],
+ password=module.params["password"],
+ )
+ elif MIN_TLS_API_VERSION in api_version:
+ ad_config = ActiveDirectoryPost(
+ computer_name=module.params["computer"],
+ directory_servers=module.params["directory_servers"],
+ kerberos_servers=module.params["kerberos_servers"],
+ domain=module.params["domain"],
+ user=module.params["username"],
+ join_ou=module.params["join_ou"],
+ password=module.params["password"],
+ tls=module.params["tls"],
+ )
+ else:
+ ad_config = ActiveDirectoryPost(
+ computer_name=module.params["computer"],
+ directory_servers=module.params["directory_servers"],
+ kerberos_servers=module.params["kerberos_servers"],
+ domain=module.params["domain"],
+ user=module.params["username"],
+ join_ou=module.params["join_ou"],
+ password=module.params["password"],
+ )
+ if not module.check_mode:
+ if MIN_TLS_API_VERSION in api_version:
+ res = array.post_active_directory(
+ names=[module.params["name"]],
+ join_existing_account=module.params["join_existing"],
+ active_directory=ad_config,
+ )
+ else:
+ res = array.post_active_directory(
+ names=[module.params["name"]],
+ active_directory=ad_config,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add Active Directory Account {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ username=dict(type="str"),
+ password=dict(type="str", no_log=True),
+ name=dict(type="str", required=True),
+ computer=dict(type="str"),
+ local_only=dict(type="bool", default=False),
+ domain=dict(type="str"),
+ join_ou=dict(type="str"),
+ directory_servers=dict(type="list", elements="str"),
+ kerberos_servers=dict(type="list", elements="str"),
+ tls=dict(type="str", default="required", choices=["required", "optional"]),
+ join_existing=dict(type="bool", default=False),
+ )
+ )
+
+ required_if = [["state", "present", ["username", "password", "domain"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ state = module.params["state"]
+ array = get_array(module)
+ exists = bool(
+ array.get_active_directory(names=[module.params["name"]]).status_code == 200
+ )
+
+ if not module.params["computer"]:
+ module.params["computer"] = module.params["name"].replace("_", "-")
+ if module.params["kerberos_servers"]:
+ if SERVER_API_VERSION in api_version:
+ module.params["kerberos_servers"] = module.params["kerberos_servers"][0:3]
+ else:
+ module.params["kerberos_servers"] = module.params["kerberos_servers"][0:1]
+ if module.params["directory_servers"]:
+ if SERVER_API_VERSION in api_version:
+ module.params["directory_servers"] = module.params["directory_servers"][0:3]
+ else:
+ module.params["directory_servers"] = module.params["directory_servers"][0:1]
+ if not exists and state == "present":
+ create_account(module, array, api_version)
+ elif exists and state == "present" and MIN_TLS_API_VERSION in api_version:
+ update_account(module, array)
+ elif exists and state == "absent":
+ delete_account(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
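
Beyond the bundled examples, join_existing and tls can be combined to attach the array to a pre-created computer account without creating a new one; note that join_ou cannot be supplied in this mode because it is read from the existing account. A sketch with placeholder values:

    - name: Join a pre-existing AD computer account with optional TLS
      purestorage.flasharray.purefa_ad:
        name: ad_account
        domain: acme.com
        username: svc-join
        password: Password
        join_existing: true
        tls: optional
        fa_url: 10.10.10.2
        api_token: e31060a7-21fc-e277-6240-25983c6c4592
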
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py
new file mode 100644
index 000000000..becb86893
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_admin
+version_added: '1.12.0'
+short_description: Configure Pure Storage FlashArray Global Admin settings
+description:
+- Set global admin settings for the FlashArray
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ sso:
+ description:
+    - Enable or disable array Single Sign-On from Pure1 Manage
+ default: false
+ type: bool
+ max_login:
+ description:
+ - Maximum number of failed logins before account is locked
+ type: int
+ min_password:
+ description:
+ - Minimum user password length
+ default: 1
+ type: int
+ lockout:
+ description:
+    - Account lockout duration, in seconds, after I(max_login) is exceeded
+ - Range between 1 second and 90 days (7776000 seconds)
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Set global login parameters
+ purestorage.flasharray.purefa_admin:
+ sso: false
+ max_login: 5
+ min_password: 10
+ lockout: 300
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import AdminSettings
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_API_VERSION = "2.2"
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ sso=dict(type="bool", default=False),
+ max_login=dict(type="int"),
+ min_password=dict(type="int", default=1, no_log=False),
+ lockout=dict(type="int"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ if module.params["lockout"] and not 1 <= module.params["lockout"] <= 7776000:
+ module.fail_json(msg="Lockout must be between 1 and 7776000 seconds")
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ changed = False
+ if MIN_API_VERSION in api_version:
+ array = get_array(module)
+ current_settings = list(array.get_admins_settings().items)[0]
+ if (
+ module.params["sso"]
+ and module.params["sso"] != current_settings.single_sign_on_enabled
+ ):
+ changed = True
+ sso = module.params["sso"]
+ else:
+ sso = current_settings.single_sign_on_enabled
+ if (
+ module.params["min_password"]
+ and module.params["min_password"] != current_settings.min_password_length
+ ):
+ changed = True
+ min_password = module.params["min_password"]
+ else:
+ min_password = current_settings.min_password_length
+ lockout = getattr(current_settings, "lockout_duration", None)
+ if (
+ lockout
+ and module.params["lockout"]
+ and lockout != module.params["lockout"] * 1000
+ ):
+ changed = True
+ lockout = module.params["lockout"] * 1000
+ elif not lockout and module.params["lockout"]:
+ changed = True
+ lockout = module.params["lockout"] * 1000
+ max_login = getattr(current_settings, "max_login_attempts", None)
+ if (
+ max_login
+ and module.params["max_login"]
+ and max_login != module.params["max_login"]
+ ):
+ changed = True
+ max_login = module.params["max_login"]
+ elif not max_login and module.params["max_login"]:
+ changed = True
+ max_login = module.params["max_login"]
+ if changed and not module.check_mode:
+ if max_login:
+ admin = AdminSettings(
+ single_sign_on_enabled=sso,
+ min_password_length=min_password,
+ max_login_attempts=max_login,
+ )
+ if lockout:
+ admin = AdminSettings(
+ single_sign_on_enabled=sso,
+ min_password_length=min_password,
+ lockout_duration=lockout,
+ )
+ if lockout and max_login:
+ admin = AdminSettings(
+ single_sign_on_enabled=sso,
+ min_password_length=min_password,
+ lockout_duration=lockout,
+ max_login_attempts=max_login,
+ )
+ if not lockout and not max_login:
+ admin = AdminSettings(
+ single_sign_on_enabled=sso,
+ min_password_length=min_password,
+ )
+ res = array.patch_admins_settings(admin_settings=admin)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change Global Admin settings. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(msg="Purity version does not support Global Admin settings")
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
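
Since the module supports check mode, and lockout is converted to milliseconds internally before being compared and sent to the API, a settings change can be previewed before it is applied. A sketch:

    - name: Preview global admin changes without applying them
      purestorage.flasharray.purefa_admin:
        max_login: 5
        lockout: 600
        fa_url: 10.10.10.2
        api_token: e31060a7-21fc-e277-6240-25983c6c4592
      check_mode: true
      register: admin_preview

    - name: Report whether a change would be made
      ansible.builtin.debug:
        var: admin_preview.changed
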
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py
new file mode 100644
index 000000000..1220ed560
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_alert.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_alert
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray alert email settings
+description:
+- Configure alert email configuration for Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ type: str
+ description:
+ - Create or delete alert email
+ default: present
+ choices: [ absent, present ]
+ address:
+ type: str
+ description:
+ - Email address (valid format required)
+ required: true
+ enabled:
+ type: bool
+ default: true
+ description:
+ - Set specified email address to be enabled or disabled
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Add new email recipient and enable, or enable existing email
+ purestorage.flasharray.purefa_alert:
+ address: "user@domain.com"
+ enabled: true
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Delete existing email recipient
+ purestorage.flasharray.purefa_alert:
+ state: absent
+ address: "user@domain.com"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def create_alert(module, array):
+ """Create Alert Email"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ try:
+ array.create_alert_recipient(module.params["address"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to create alert email: {0}".format(module.params["address"])
+ )
+
+ if not module.params["enabled"]:
+ try:
+ array.disable_alert_recipient(module.params["address"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to create alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def enable_alert(module, array):
+ """Enable Alert Email"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ try:
+ array.enable_alert_recipient(module.params["address"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to enable alert email: {0}".format(module.params["address"])
+ )
+
+ module.exit_json(changed=changed)
+
+
+def disable_alert(module, array):
+ """Disable Alert Email"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ try:
+ array.disable_alert_recipient(module.params["address"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to disable alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def delete_alert(module, array):
+ """Delete Alert Email"""
+ changed = True
+ if module.params["address"] == "flasharray-alerts@purestorage.com":
+ module.fail_json(
+ msg="Built-in address {0} cannot be deleted.".format(
+ module.params["address"]
+ )
+ )
+ if not module.check_mode:
+ changed = False
+ try:
+ array.delete_alert_recipient(module.params["address"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete alert email: {0}".format(module.params["address"])
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(type="str", required=True),
+ enabled=dict(type="bool", default=True),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ pattern = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$")
+ if not pattern.match(module.params["address"]):
+ module.fail_json(msg="Valid email address not provided.")
+
+ array = get_system(module)
+
+ exists = False
+ try:
+ emails = array.list_alert_recipients()
+ except Exception:
+ module.fail_json(msg="Failed to get exisitng email list")
+ for email in range(0, len(emails)):
+ if emails[email]["name"] == module.params["address"]:
+ exists = True
+ enabled = emails[email]["enabled"]
+ break
+ if module.params["state"] == "present" and not exists:
+ create_alert(module, array)
+ elif (
+ module.params["state"] == "present"
+ and exists
+ and not enabled
+ and module.params["enabled"]
+ ):
+ enable_alert(module, array)
+ elif (
+ module.params["state"] == "present"
+ and exists
+ and enabled
+ and not module.params["enabled"]
+ ):
+ disable_alert(module, array)
+ elif module.params["state"] == "absent" and exists:
+ delete_alert(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
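
The module manages a single address per invocation, so multiple recipients are most naturally handled with a loop; each iteration is independently idempotent. A sketch with placeholder addresses:

    - name: Ensure a set of alert recipients exist and are enabled
      purestorage.flasharray.purefa_alert:
        address: "{{ item }}"
        enabled: true
        state: present
        fa_url: 10.10.10.2
        api_token: e31060a7-21fc-e277-6240-25983c6c4592
      loop:
        - storage-team@example.com
        - oncall@example.com
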
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py
new file mode 100644
index 000000000..12970dddb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_apiclient
+version_added: '1.5.0'
+short_description: Manage FlashArray API Clients
+description:
+- Enable or disable FlashArray API Clients
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the API Client
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the API client should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ role:
+ description:
+ - The maximum role allowed for ID Tokens issued by this API client
+ type: str
+ choices: [readonly, ops_admin, storage_admin, array_admin]
+ issuer:
+ description:
+ - The name of the identity provider that will be issuing ID Tokens for this API client
+ - If not specified, defaults to the API client name, I(name).
+ type: str
+ public_key:
+ description:
+    - The API client's PEM-formatted (Base64-encoded) RSA public key.
+    - Include the I(-----BEGIN PUBLIC KEY-----) and I(-----END PUBLIC KEY-----) lines
+ type: str
+ token_ttl:
+ description:
+ - Time To Live length in seconds for the exchanged access token
+ - Range is 1 second to 1 day (86400 seconds)
+ type: int
+ default: 86400
+ enabled:
+ description:
+ - State of the API Client Key
+ type: bool
+ default: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create API token ansible-token
+ purestorage.flasharray.purefa_apiclient:
+ name: ansible-token
+ issuer: "Pure Storage"
+    token_ttl: 3000
+ role: array_admin
+ public_key: "{{lookup('file', 'public_pem_file') }}"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable API Client
+ purestorage.flasharray.purefa_apiclient:
+ name: ansible-token
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Enable API Client
+ purestorage.flasharray.purefa_apiclient:
+ name: ansible-token
+ enabled: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete API Client
+ purestorage.flasharray.purefa_apiclient:
+ state: absent
+ name: ansible-token
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.1"
+
+
+def delete_client(module, array):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.delete_api_clients(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete API Client {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_client(module, array, client):
+ """Update API Client"""
+ changed = False
+ if client.enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.patch_api_clients(
+ names=[module.params["name"]],
+ api_clients=flasharray.ApiClientPatch(
+ enabled=module.params["enabled"]
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update API Client {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_client(module, array):
+ """Create API Client"""
+ changed = True
+ if not 1 <= module.params["token_ttl"] <= 86400:
+ module.fail_json(msg="token_ttl parameter is out of range (1 to 86400)")
+ else:
+ token_ttl = module.params["token_ttl"] * 1000
+ if not module.params["issuer"]:
+ module.params["issuer"] = module.params["name"]
+ try:
+ client = flasharray.ApiClientPost(
+ max_role=module.params["role"],
+ issuer=module.params["issuer"],
+ access_token_ttl_in_ms=token_ttl,
+ public_key=module.params["public_key"],
+ )
+ if not module.check_mode:
+ res = array.post_api_clients(
+ names=[module.params["name"]], api_clients=client
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create API CLient {0}. Error message: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if module.params["enabled"]:
+ try:
+ array.patch_api_clients(
+ names=[module.params["name"]],
+ api_clients=flasharray.ApiClientPatch(
+ enabled=module.params["enabled"]
+ ),
+ )
+ except Exception:
+ array.delete_api_clients(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to create API Client {0}".format(
+ module.params["name"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create API Client {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ enabled=dict(type="bool", default=True),
+ name=dict(type="str", required=True),
+ role=dict(
+ type="str",
+ choices=["readonly", "ops_admin", "storage_admin", "array_admin"],
+ ),
+ public_key=dict(type="str", no_log=True),
+ token_ttl=dict(type="int", default=86400, no_log=False),
+ issuer=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ array = get_array(module)
+ state = module.params["state"]
+
+ try:
+ client = list(array.get_api_clients(names=[module.params["name"]]).items)[0]
+ exists = True
+ except Exception:
+ exists = False
+
+ if not exists and state == "present":
+ create_client(module, array)
+ elif exists and state == "present":
+ update_client(module, array, client)
+ elif exists and state == "absent":
+ delete_client(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
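
The RSA public key handed to public_key is normally generated off-array. A sketch that builds the keypair with the community.crypto collection and feeds the PEM public key in; the community.crypto module names and file paths are assumptions outside this collection:

    - name: Generate an RSA private key for the API client (assumed community.crypto module)
      community.crypto.openssl_privatekey:
        path: ./apiclient.key
        size: 2048

    - name: Derive the PEM public key (assumed community.crypto module)
      community.crypto.openssl_publickey:
        path: ./apiclient.pub
        privatekey_path: ./apiclient.key

    - name: Create the API client using the generated public key
      purestorage.flasharray.purefa_apiclient:
        name: ansible-token
        role: array_admin
        token_ttl: 3600
        public_key: "{{ lookup('file', './apiclient.pub') }}"
        fa_url: 10.10.10.2
        api_token: e31060a7-21fc-e277-6240-25983c6c4592
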
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py
new file mode 100644
index 000000000..cf5202c6f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_arrayname
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray array name
+description:
+- Configure name of array for Pure Storage FlashArrays.
+- Ideal for Day 0 initial configuration.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set the array name
+ type: str
+ default: present
+ choices: [ present ]
+ name:
+ description:
+    - Name of the array. Must conform to the correct naming schema.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Set new array name
+ purestorage.flasharray.purefa_arrayname:
+ name: new-array-name
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def update_name(module, array):
+ """Change aray name"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set(name=module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to change array name to {0}".format(module.params["name"])
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ array = get_system(module)
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,54}[a-zA-Z0-9])?$")
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="Array name {0} does not conform to array name rules".format(
+ module.params["name"]
+ )
+ )
+ if module.params["name"] != array.get()["array_name"]:
+ update_name(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py
new file mode 100644
index 000000000..bd7a367a5
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_banner
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray GUI and SSH MOTD message
+description:
+- Configure MOTD for Pure Storage FlashArrays.
+- This will be shown during an SSH or GUI login to the array.
+- Multiple line messages can be achieved using \\n.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+    - Set or delete the MOTD
+ default: present
+ type: str
+ choices: [ present, absent ]
+ banner:
+ description:
+ - Banner text, or MOTD, to use
+ type: str
+ default: "Welcome to the machine..."
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Set new banner text
+ purestorage.flasharray.purefa_banner:
+ banner: "Banner over\ntwo lines"
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete banner text
+ purestorage.flasharray.purefa_banner:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def set_banner(module, array):
+ """Set MOTD banner text"""
+ changed = True
+ if not module.params["banner"]:
+ module.fail_json(msg="Invalid MOTD banner given")
+ if not module.check_mode:
+ try:
+ array.set(banner=module.params["banner"])
+ except Exception:
+ module.fail_json(msg="Failed to set MOTD banner text")
+
+ module.exit_json(changed=changed)
+
+
+def delete_banner(module, array):
+ """Delete MOTD banner text"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set(banner="")
+ except Exception:
+ module.fail_json(msg="Failed to delete current MOTD banner text")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ banner=dict(type="str", default="Welcome to the machine..."),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ required_if = [("state", "present", ["banner"])]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ array = get_system(module)
+ current_banner = array.get(banner=True)["banner"]
+ # set banner if empty value or value differs
+ if state == "present" and (
+ not current_banner or current_banner != module.params["banner"]
+ ):
+ set_banner(module, array)
+ # clear banner if it has a value
+ elif state == "absent" and current_banner:
+ delete_banner(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py
new file mode 100644
index 000000000..33ffb60cc
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_certs
+version_added: '1.8.0'
+short_description: Manage FlashArray SSL Certificates
+description:
+- Create, delete, import and export FlashArray SSL Certificates
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the SSL Certificate
+ type: str
+ default: management
+ state:
+ description:
+ - Action for the module to perform
+ - I(present) will create or re-create an SSL certificate
+ - I(absent) will delete an existing SSL certificate
+ - I(sign) will construct a Certificate Signing request (CSR)
+    - I(export) will export the existing SSL certificate
+ - I(import) will import a CA provided certificate.
+ default: present
+ choices: [ absent, present, import, export, sign ]
+ type: str
+ country:
+ type: str
+ description:
+ - The two-letter ISO code for the country where your organization is located
+ province:
+ type: str
+ description:
+ - The full name of the state or province where your organization is located
+ locality:
+ type: str
+ description:
+ - The full name of the city where your organization is located
+ organization:
+ type: str
+ description:
+ - The full and exact legal name of your organization.
+ - The organization name should not be abbreviated and should
+ include suffixes such as Inc, Corp, or LLC.
+ org_unit:
+ type: str
+ description:
+ - The department within your organization that is managing the certificate
+ common_name:
+ type: str
+ description:
+ - The fully qualified domain name (FQDN) of the current array
+ - For example, the common name for https://purearray.example.com is
+ purearray.example.com, or *.example.com for a wildcard certificate
+ - This can also be the management IP address of the array or the
+ shortname of the current array.
+ - Maximum of 64 characters
+ - If not provided this will default to the shortname of the array
+ email:
+ type: str
+ description:
+ - The email address used to contact your organization
+ key_size:
+ type: int
+ description:
+ - The key size in bits if you generate a new private key
+ default: 2048
+ choices: [ 1024, 2048, 4096 ]
+ days:
+ default: 3650
+ type: int
+ description:
+ - The number of valid days for the self-signed certificate being generated
+ - If not specified, the self-signed certificate expires after 3650 days.
+ generate:
+ default: false
+ type: bool
+ description:
+ - Generate a new private key.
+ - If not selected, the certificate will use the existing key
+ certificate:
+ type: str
+ description:
+ - Required for I(import)
+    - A valid signed certificate in PEM format (Base64 encoded)
+ - Includes the "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----" lines
+ - Does not exceed 3000 characters in length
+ intermeadiate_cert:
+ type: str
+ description:
+    - Intermediate certificate provided by the CA
+ key:
+ type: str
+ description:
+    - If the Certificate Signing Request (CSR) was not constructed on the array,
+      or the private key has changed since constructing the CSR, provide
+      a new private key here
+ passphrase:
+ type: str
+ description:
+ - Passphrase if the private key is encrypted
+ export_file:
+ type: str
+ description:
+    - Name of file to contain the Certificate Signing Request when I(state=sign)
+    - Name of file to export the current SSL certificate to when I(state=export)
+ - File will be overwritten if it already exists
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create SSL certificate foo
+ purestorage.flasharray.purefa_certs:
+ name: foo
+ key_size: 4096
+ country: US
+ province: FL
+ locality: Miami
+ organization: "Acme Inc"
+ org_unit: "DevOps"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete SSL certificate foo
+ purestorage.flasharray.purefa_certs:
+ name: foo
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Request CSR
+  purestorage.flasharray.purefa_certs:
+    state: sign
+    export_file: csr.txt
+    fa_url: 10.10.10.2
+    api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Regenerate key for SSL foo
+ purestorage.flasharray.purefa_certs:
+ generate: true
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Import SSL Cert foo and Private Key
+ purestorage.flasharray.purefa_certs:
+ state: import
+ name: foo
+ certificate: "{{lookup('file', 'example.crt') }}"
+ key: "{{lookup('file', 'example.key') }}"
+ passphrase: password
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PYCOUNTRY = True
+try:
+ import pycountry
+except ImportError:
+ HAS_PYCOUNTRY = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.4"
+
+
+def update_cert(module, array):
+ """Update existing SSL Certificate"""
+ changed = True
+ current_cert = list(array.get_certificates(names=[module.params["name"]]).items)[0]
+ try:
+ if module.params["common_name"] != current_cert.common_name:
+ module.params["common_name"] = current_cert.common_name
+ except AttributeError:
+ pass
+ try:
+ if module.params["country"] != current_cert.country:
+ module.params["country"] = current_cert.country
+ except AttributeError:
+ pass
+ try:
+ if module.params["email"] != current_cert.email:
+ module.params["email"] = current_cert.email
+ except AttributeError:
+ pass
+ try:
+ if module.params["key_size"] != current_cert.key_size:
+ module.params["key_size"] = current_cert.key_size
+ except AttributeError:
+ pass
+ try:
+ if module.params["locality"] != current_cert.locality:
+ module.params["locality"] = current_cert.locality
+ except AttributeError:
+ pass
+ try:
+ if module.params["province"] != current_cert.state:
+ module.params["province"] = current_cert.state
+ except AttributeError:
+ pass
+ try:
+ if module.params["organization"] != current_cert.organization:
+ module.params["organization"] = current_cert.organization
+ except AttributeError:
+ pass
+ try:
+ if module.params["org_unit"] != current_cert.organizational_unit:
+ module.params["org_unit"] = current_cert.organizational_unit
+ except AttributeError:
+ pass
+ certificate = flasharray.CertificatePost(
+ common_name=module.params["common_name"],
+ country=module.params["country"],
+ email=module.params["email"],
+ key_size=module.params["key_size"],
+ locality=module.params["locality"],
+ organization=module.params["organization"],
+ organizational_unit=module.params["org_unit"],
+ state=module.params["province"],
+ days=module.params["days"],
+ )
+ if not module.check_mode:
+ res = array.patch_certificates(
+ names=[module.params["name"]],
+ certificate=certificate,
+ generate_new_key=module.params["generate"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Updating existing SSL certificate {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def create_cert(module, array):
+ changed = True
+ certificate = flasharray.CertificatePost(
+ common_name=module.params["common_name"],
+ country=module.params["country"],
+ email=module.params["email"],
+ key_size=module.params["key_size"],
+ locality=module.params["locality"],
+ organization=module.params["organization"],
+ organizational_unit=module.params["org_unit"],
+ state=module.params["province"],
+ status="self-signed",
+ days=module.params["days"],
+ )
+ if not module.check_mode:
+ res = array.post_certificates(
+ names=[module.params["name"]], certificate=certificate
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Creating SSL certificate {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def delete_cert(module, array):
+ changed = True
+ if module.params["name"] == "management":
+ module.fail_json(msg="management SSL cannot be deleted")
+ if not module.check_mode:
+ res = array.delete_certificates(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete {0} SSL certifcate. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def import_cert(module, array, reimport=False):
+ """Import a CA provided SSL certificate"""
+ changed = True
+ if len(module.params["certificate"]) > 3000:
+ module.fail_json(msg="Imported Certificate exceeds 3000 characters")
+ certificate = flasharray.CertificatePost(
+ certificate=module.params["certificate"],
+ intermediate_certificate=module.params["intermeadiate_cert"],
+ key=module.params["key"],
+ passphrase=module.params["passphrase"],
+ status="imported",
+ )
+ if not module.check_mode:
+ if reimport:
+ res = array.patch_certificates(
+ names=[module.params["name"]], certificate=certificate
+ )
+ else:
+ res = array.post_certificates(
+ names=[module.params["name"]], certificate=certificate
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Importing Certificate failed. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def export_cert(module, array):
+ """Export current SSL certificate"""
+ changed = True
+ if not module.check_mode:
+ ssl = array.get_certificates(names=[module.params["name"]])
+ if ssl.status_code != 200:
+ module.fail_json(
+ msg="Exporting Certificate failed. Error: {0}".format(
+ ssl.errors[0].message
+ )
+ )
+        with open(module.params["export_file"], "w") as ssl_file:
+            ssl_file.write(list(ssl.items)[0].certificate)
+ module.exit_json(changed=changed)
+
+
+def create_csr(module, array):
+ """Construct a Certificate Signing Request
+
+ Output the result to a specified file
+ """
+ changed = True
+ current_attr = list(array.get_certificates(names=[module.params["name"]]).items)[0]
+ try:
+ if module.params["common_name"] != current_attr.common_name:
+ module.params["common_name"] = current_attr.common_name
+ except AttributeError:
+ pass
+ try:
+ if module.params["country"] != current_attr.country:
+ module.params["country"] = current_attr.country
+ except AttributeError:
+ pass
+ try:
+ if module.params["email"] != current_attr.email:
+ module.params["email"] = current_attr.email
+ except AttributeError:
+ pass
+ try:
+ if module.params["locality"] != current_attr.locality:
+ module.params["locality"] = current_attr.locality
+ except AttributeError:
+ pass
+ try:
+ if module.params["province"] != current_attr.state:
+ module.params["province"] = current_attr.state
+ except AttributeError:
+ pass
+ try:
+ if module.params["organization"] != current_attr.organization:
+ module.params["organization"] = current_attr.organization
+ except AttributeError:
+ pass
+ try:
+ if module.params["org_unit"] != current_attr.organization_unit:
+ module.params["org_unit"] = current_attr.organization_unit
+ except AttributeError:
+ pass
+ if not module.check_mode:
+ certificate = flasharray.CertificateSigningRequestPost(
+ certificate={"name": "management"},
+ common_name=module.params["common_name"],
+ country=module.params["country"],
+ email=module.params["email"],
+ locality=module.params["locality"],
+ state=module.params["province"],
+ organization=module.params["organization"],
+ organization_unit=module.params["org_unit"],
+ )
+ csr = list(
+ array.post_certificates_certificate_signing_requests(
+ certificate=certificate
+ ).items
+ )[0].certificate_signing_request
+        with open(module.params["export_file"], "w") as csr_file:
+            csr_file.write(csr)
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type="str",
+ default="present",
+ choices=["absent", "present", "import", "export", "sign"],
+ ),
+ generate=dict(type="bool", default=False),
+ name=dict(type="str", default="management"),
+ country=dict(type="str"),
+ province=dict(type="str"),
+ locality=dict(type="str"),
+ organization=dict(type="str"),
+ org_unit=dict(type="str"),
+ common_name=dict(type="str"),
+ email=dict(type="str"),
+ key_size=dict(type="int", default=2048, choices=[1024, 2048, 4096]),
+ certificate=dict(type="str", no_log=True),
+ intermeadiate_cert=dict(type="str", no_log=True),
+ key=dict(type="str", no_log=True),
+ export_file=dict(type="str"),
+ passphrase=dict(type="str", no_log=True),
+ days=dict(type="int", default=3650),
+ )
+ )
+
+ mutually_exclusive = [["certificate", "key_size"]]
+ required_if = [
+ ["state", "import", ["certificate"]],
+ ["state", "export", ["export_file"]],
+ ]
+
+ module = AnsibleModule(
+ argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ if not HAS_PYCOUNTRY:
+ module.fail_json(msg="pycountry sdk is required for this module")
+
+ email_pattern = r"^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$"
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ array = get_array(module)
+ if module.params["email"]:
+ if not re.search(email_pattern, module.params["email"]):
+ module.fail_json(
+ msg="Email {0} is not valid".format(module.params["email"])
+ )
+ if module.params["country"]:
+ if len(module.params["country"]) != 2:
+ module.fail_json(msg="Country must be a two-letter country (ISO) code")
+ if not pycountry.countries.get(alpha_2=module.params["country"].upper()):
+ module.fail_json(
+ msg="Country code {0} is not an assigned ISO 3166-1 code".format(
+ module.params["country"].upper()
+ )
+ )
+ state = module.params["state"]
+ if state in ["present", "sign"]:
+ if not module.params["common_name"]:
+ module.params["common_name"] = list(array.get_arrays().items)[0].name
+ module.params["common_name"] = module.params["common_name"][:64]
+
+ exists = bool(
+ array.get_certificates(names=[module.params["name"]]).status_code == 200
+ )
+
+ if not exists and state == "present":
+ create_cert(module, array)
+ elif exists and state == "present":
+ update_cert(module, array)
+ elif state == "sign":
+ create_csr(module, array)
+ elif not exists and state == "import":
+ import_cert(module, array)
+ elif exists and state == "import":
+ import_cert(module, array, reimport=True)
+ elif state == "export":
+ export_cert(module, array)
+ elif exists and state == "absent":
+ delete_cert(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
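
A typical CA-signed replacement of the management certificate chains the states together: sign writes a CSR to export_file, the CA signs it out of band, and import loads the signed result. A sketch with placeholder file names; note the import parameter is spelled intermeadiate_cert, matching the module's option name:

    - name: Generate a CSR for the management certificate
      purestorage.flasharray.purefa_certs:
        state: sign
        name: management
        export_file: management.csr
        fa_url: 10.10.10.2
        api_token: e31060a7-21fc-e277-6240-25983c6c4592

    # ... have the CA sign management.csr out of band ...

    - name: Import the CA-signed certificate and intermediate certificate
      purestorage.flasharray.purefa_certs:
        state: import
        name: management
        certificate: "{{ lookup('file', 'management.crt') }}"
        intermeadiate_cert: "{{ lookup('file', 'ca-intermediate.crt') }}"
        fa_url: 10.10.10.2
        api_token: e31060a7-21fc-e277-6240-25983c6c4592
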
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py
new file mode 100644
index 000000000..3148ea482
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_connect
+version_added: '1.0.0'
+short_description: Manage replication connections between two FlashArrays
+description:
+- Manage array connections to specified target array
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete array connection
+ default: present
+ type: str
+ choices: [ absent, present ]
+ target_url:
+ description:
+ - Management IP address of remote array.
+ type: str
+ required: true
+ target_api:
+ description:
+ - API token for target array
+ type: str
+ connection:
+ description:
+ - Type of connection between arrays.
+ type: str
+ choices: [ sync, async ]
+ default: async
+ transport:
+ description:
+ - Type of transport protocol to use for replication
+ type: str
+ choices: [ ip, fc ]
+ default: ip
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create an async connection to remote array
+ purestorage.flasharray.purefa_connect:
+ target_url: 10.10.10.20
+ target_api: 9c0b56bc-f941-f7a6-9f85-dcc3e9a8f7d6
+ connection: async
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Delete connection to remote array
+ purestorage.flasharray.purefa_connect:
+ state: absent
+ target_url: 10.10.10.20
+ target_api: 9c0b56bc-f941-f7a6-9f85-dcc3e9a8f7d6
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from purestorage import FlashArray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+import platform
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
+P53_API_VERSION = "1.17"
+FC_REPL_VERSION = "2.4"
+
+
+def _check_connected(module, array):
+ connected_arrays = array.list_array_connections()
+ api_version = array._list_available_rest_versions()
+ for target in range(0, len(connected_arrays)):
+ if P53_API_VERSION in api_version:
+ if (
+ connected_arrays[target]["management_address"]
+ == module.params["target_url"]
+ and "connected" in connected_arrays[target]["status"]
+ ):
+ return connected_arrays[target]
+ else:
+ if (
+ connected_arrays[target]["management_address"]
+ == module.params["target_url"]
+ and connected_arrays[target]["connected"]
+ ):
+ return connected_arrays[target]
+ return None
+
+
+def break_connection(module, array, target_array):
+ """Break connection between arrays"""
+ changed = True
+ source_array = array.get()["array_name"]
+ if target_array["management_address"] is None:
+ module.fail_json(
+ msg="disconnect can only happen from the array that formed the connection"
+ )
+ if not module.check_mode:
+ try:
+ array.disconnect_array(target_array["array_name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to disconnect {0} from {1}.".format(
+ target_array["array_name"], source_array
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_connection(module, array):
+ """Create connection between arrays"""
+ changed = True
+ remote_array = module.params["target_url"]
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": "Ansible",
+ "class": __name__,
+ "version": 1.2,
+ "platform": platform.platform(),
+ }
+ try:
+ remote_system = FlashArray(
+ module.params["target_url"],
+ api_token=module.params["target_api"],
+ user_agent=user_agent,
+ )
+ connection_key = remote_system.get(connection_key=True)["connection_key"]
+ remote_array = remote_system.get()["array_name"]
+ api_version = array._list_available_rest_versions()
+ # TODO: Refactor when FC async is supported
+ if (
+ FC_REPL_VERSION in api_version
+ and module.params["transport"].lower() == "fc"
+ ):
+ if module.params["connection"].lower() == "async":
+ module.fail_json(
+ msg="Asynchronous replication not supported using FC transport"
+ )
+ array_connection = flasharray.ArrayConnectionPost(
+ type="sync-replication",
+ management_address=module.params["target_url"],
+ replication_transport="fc",
+ connection_key=connection_key,
+ )
+ array = get_array(module)
+ if not module.check_mode:
+ res = array.post_array_connections(array_connection=array_connection)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Array Connection failed. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ if not module.check_mode:
+ array.connect_array(
+ module.params["target_url"],
+ connection_key,
+ [module.params["connection"]],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to connect to remote array {0}.".format(remote_array)
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ connection=dict(type="str", default="async", choices=["async", "sync"]),
+ transport=dict(type="str", default="ip", choices=["ip", "fc"]),
+ target_url=dict(type="str", required=True),
+ target_api=dict(type="str"),
+ )
+ )
+
+ required_if = [("state", "present", ["target_api"])]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="purestorage sdk is required for this module")
+
+ if module.params["transport"] == "fc" and not HAS_PYPURECLIENT:
+ module.fail_json(msg="pypureclient sdk is required for this module")
+
+ state = module.params["state"]
+ array = get_system(module)
+ target_array = _check_connected(module, array)
+
+ if state == "present" and target_array is None:
+ create_connection(module, array)
+ elif state == "absent" and target_array is not None:
+ break_connection(module, array, target_array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
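
The matching rule in _check_connected can be exercised on its own; the sketch
below uses hand-written dictionaries whose field names follow the two REST
schema variants handled above, with made-up addresses.

def find_connected(connected_arrays, target_url, modern_api=True):
    """Return the first connection record matching target_url, else None."""
    for conn in connected_arrays:
        if modern_api:
            # REST 1.17 and later report status as a string
            if (
                conn["management_address"] == target_url
                and "connected" in conn["status"]
            ):
                return conn
        elif conn["management_address"] == target_url and conn["connected"]:
            # Earlier REST versions expose a boolean flag instead
            return conn
    return None


sample = [{"management_address": "10.10.10.20", "status": "connected"}]
print(find_connected(sample, "10.10.10.20"))  # the matching record
print(find_connected(sample, "10.10.10.99"))  # None
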
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py
new file mode 100644
index 000000000..f3c4df429
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_console.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_console
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashArray Console Lock
+description:
+- Enable or disable root lockout from the array at the physical console for a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of console lockout
+ - When set to I(enable) the console port is locked from root login.
+ type: str
+ default: disable
+ choices: [ enable, disable ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable Console Lockout
+ purestorage.flasharray.purefa_console:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable Console Lockout
+ purestorage.flasharray.purefa_console:
+ state: disable
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def enable_console(module, array):
+ """Enable Console Lockout"""
+ changed = False
+ if array.get_console_lock_status()["console_lock"] != "enabled":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.enable_console_lock()
+ except Exception:
+ module.fail_json(msg="Enabling Console Lock failed")
+ module.exit_json(changed=changed)
+
+
+def disable_console(module, array):
+ """Disable Console Lock"""
+ changed = False
+ if array.get_console_lock_status()["console_lock"] == "enabled":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.disable_console_lock()
+ except Exception:
+ module.fail_json(msg="Disabling Console Lock failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="disable", choices=["enable", "disable"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ array = get_system(module)
+
+ if module.params["state"] == "enable":
+ enable_console(module, array)
+ else:
+ disable_console(module, array)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
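
The console module is the smallest example of the check-then-act idempotency
pattern used throughout this collection: only report changed when the current
state differs from the desired one, and skip the mutating call in check mode.
A minimal sketch with a stand-in for the SDK client:

class FakeArray:
    """Stand-in for the FlashArray client; not the real SDK."""

    def __init__(self, locked):
        self.locked = locked

    def get_console_lock_status(self):
        return {"console_lock": "enabled" if self.locked else "disabled"}

    def enable_console_lock(self):
        self.locked = True


def ensure_console_locked(array, check_mode=False):
    changed = array.get_console_lock_status()["console_lock"] != "enabled"
    if changed and not check_mode:
        array.enable_console_lock()
    return changed


array = FakeArray(locked=False)
print(ensure_console_locked(array, check_mode=True))  # True, nothing changed
print(ensure_console_locked(array))  # True, lock applied
print(ensure_console_locked(array))  # False, already locked
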
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py
new file mode 100644
index 000000000..5038de423
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_default_protection
+version_added: '1.14.0'
+short_description: Manage SafeMode default protection for a Pure Storage FlashArray
+description:
+- Configure automatic protection group membership for new volumes and copied volumes
+  array-wide, or at the pod level.
+- Requires a minimum of Purity 6.3.4
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ scope:
+ description:
+ - The scope of the default protection group
+ type: str
+ choices: [ array, pod ]
+ default: array
+ name:
+ description:
+ - The name of the protection group to assign or remove as default for the scope.
+    - If I(scope) is I(pod), only the short name of the pod protection group is needed.
+      See examples.
+ elements: str
+ type: list
+ required: true
+ pod:
+ description:
+    - Name of the pod to apply the default protection to.
+    - Only required when I(scope) is I(pod)
+ type: str
+ state:
+ description:
+ - Define whether to add or delete the protection group to the default list
+ default: present
+ choices: [ absent, present ]
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Add protection group foo::bar as default for pod foo
+ purestorage.flasharray.purefa_default_protection:
+ name: bar
+ pod: foo
+ scope: pod
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add protection group foo as default for array
+ purestorage.flasharray.purefa_default_protection:
+ name: foo
+ scope: array
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Remove protection group foo from array default protection
+ purestorage.flasharray.purefa_default_protection:
+ name: foo
+ scope: array
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Clear default protection for the array
+  purestorage.flasharray.purefa_default_protection:
+ name: ''
+ scope: array
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+
+DEFAULT_API_VERSION = "2.16"
+
+
+def _get_pod(module, array):
+ """Return Pod or None"""
+ try:
+ return array.get_pods(names=[module.params["pod"]])
+ except Exception:
+ return None
+
+
+def _get_pg(array, pod):
+ """Return Protection Group or None"""
+ try:
+ return array.get_protection_groups(names=[pod])
+ except Exception:
+ return None
+
+
+def create_default(module, array):
+ """Create Default Protection"""
+ changed = True
+ pg_list = []
+ if not module.check_mode:
+ for pgroup in range(0, len(module.params["name"])):
+ if module.params["scope"] == "array":
+ pg_list.append(
+ flasharray.DefaultProtectionReference(
+ name=module.params["name"][pgroup], type="protection_group"
+ )
+ )
+ else:
+ pg_list.append(
+ flasharray.DefaultProtectionReference(
+ name=module.params["pod"]
+ + "::"
+ + module.params["name"][pgroup],
+ type="protection_group",
+ )
+ )
+ if module.params["scope"] == "array":
+ protection = flasharray.ContainerDefaultProtection(
+ name="", type="", default_protections=pg_list
+ )
+ res = array.patch_container_default_protections(
+ names=[""], container_default_protection=protection
+ )
+ else:
+ protection = flasharray.ContainerDefaultProtection(
+ name=module.params["pod"], type="pod", default_protections=pg_list
+ )
+ res = array.patch_container_default_protections(
+ names=[module.params["pod"]], container_default_protection=protection
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set default protection. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def update_default(module, array, current_default):
+ """Update Default Protection"""
+ changed = False
+ current = []
+ for default in range(0, len(current_default)):
+ if module.params["scope"] == "array":
+ current.append(current_default[default].name)
+ else:
+ current.append(current_default[default].name.split(":")[-1])
+ pg_list = []
+ if module.params["state"] == "present":
+ if current:
+ new_list = sorted(list(set(module.params["name"] + current)))
+ else:
+ new_list = sorted(list(set(module.params["name"])))
+ elif current:
+ new_list = sorted(list(set(current).difference(module.params["name"])))
+ else:
+ new_list = []
+ if not new_list:
+ delete_default(module, array)
+ elif new_list == current:
+ changed = False
+ else:
+ changed = True
+ if not module.check_mode:
+ for pgroup in range(0, len(new_list)):
+ if module.params["scope"] == "array":
+ pg_list.append(
+ flasharray.DefaultProtectionReference(
+ name=new_list[pgroup], type="protection_group"
+ )
+ )
+ else:
+ pg_list.append(
+ flasharray.DefaultProtectionReference(
+ name=module.params["pod"] + "::" + new_list[pgroup],
+ type="protection_group",
+ )
+ )
+ if module.params["scope"] == "array":
+ protection = flasharray.ContainerDefaultProtection(
+ name="", type="", default_protections=pg_list
+ )
+ res = array.patch_container_default_protections(
+ names=[""], container_default_protection=protection
+ )
+ else:
+ protection = flasharray.ContainerDefaultProtection(
+ name=module.params["pod"],
+ type="pod",
+ default_protections=pg_list,
+ )
+ res = array.patch_container_default_protections(
+ names=[module.params["pod"]],
+ container_default_protection=protection,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update default protection. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_default(module, array):
+ """Delete Default Protection"""
+ changed = True
+ if not module.check_mode:
+ if module.params["scope"] == "array":
+ protection = flasharray.ContainerDefaultProtection(
+ name="", type="", default_protections=[]
+ )
+ res = array.patch_container_default_protections(
+ names=[""], container_default_protection=protection
+ )
+ else:
+ protection = flasharray.ContainerDefaultProtection(
+ name=module.params["pod"], type="pod", default_protections=[]
+ )
+ res = array.patch_container_default_protections(
+ names=[module.params["pod"]], container_default_protection=[]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete default protection. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="list", elements="str", required=True),
+ pod=dict(type="str"),
+ scope=dict(type="str", default="array", choices=["array", "pod"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ required_if = [["scope", "pod", ["pod"]]]
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+ state = module.params["state"]
+ if not HAS_PURESTORAGE:
+        module.fail_json(msg="py-pure-client sdk is required for this module")
+ arrayv5 = get_system(module)
+ module.params["name"] = sorted(module.params["name"])
+ api_version = arrayv5._list_available_rest_versions()
+ if DEFAULT_API_VERSION not in api_version:
+ module.fail_json(
+ msg="Default Protection is not supported. Purity//FA 6.3.4, or higher, is required."
+ )
+ array = get_array(module)
+ if module.params["scope"] == "pod":
+ if not _get_pod(module, array):
+ module.fail_json(
+ msg="Invalid pod {0} specified.".format(module.params["pod"])
+ )
+ current_default = list(
+ array.get_container_default_protections(names=[module.params["pod"]]).items
+ )[0].default_protections
+ else:
+ current_default = list(array.get_container_default_protections().items)[
+ 0
+ ].default_protections
+ for pgroup in range(0, len(module.params["name"])):
+ if module.params["scope"] == "pod":
+            pod_name = module.params["pod"] + "::" + module.params["name"][pgroup]
+ else:
+ pod_name = module.params["name"][pgroup]
+ if not _get_pg(array, pod_name):
+ module.fail_json(msg="Protection Group {0} does not exist".format(pod_name))
+
+ if state == "present" and not current_default:
+ create_default(module, array)
+ elif state == "absent" and not current_default:
+ module.exit_json(changed=False)
+ elif state == "present" and current_default:
+ update_default(module, array, current_default)
+ elif state == "absent" and current_default and module.params["name"] != [""]:
+ update_default(module, array, current_default)
+ elif state == "absent" and current_default and module.params["name"] == [""]:
+ delete_default(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
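
The membership arithmetic in update_default reduces to set operations; a
worked sketch, independent of the SDK, using made-up protection group names:

def new_default_list(state, requested, current):
    """Return the protection groups that should remain default."""
    if state == "present":
        return sorted(set(requested + current))
    # state == "absent": drop the requested names from the current list
    return sorted(set(current).difference(requested))


current = ["pgroup-a", "pgroup-b"]
print(new_default_list("present", ["pgroup-c"], current))
# ['pgroup-a', 'pgroup-b', 'pgroup-c']
print(new_default_list("absent", ["pgroup-a"], current))
# ['pgroup-b']
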
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py
new file mode 100644
index 000000000..125b84172
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_directory
+version_added: '1.5.0'
+short_description: Manage FlashArray File System Directories
+description:
+- Create/Delete FlashArray file system directories
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the directory
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the directory should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ filesystem:
+ description:
+ - Name of the filesystem the directory links to.
+ type: str
+ required: true
+ path:
+ description:
+ - Path of the managed directory in the file system
+    - If not provided, defaults to I(name)
+ type: str
+ rename:
+ description:
+ - Value to rename the specified directory to
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create directory foo in filesystem bar with path zeta
+ purestorage.flasharray.purefa_directory:
+ name: foo
+ filesystem: bar
+ path: zeta
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Rename directory foo to fin in filesystem bar
+ purestorage.flasharray.purefa_directory:
+ name: foo
+ rename: fin
+ filesystem: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete directory foo in filesystem bar
+ purestorage.flasharray.purefa_directory:
+ name: foo
+ filesystem: bar
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+
+
+def delete_dir(module, array):
+ """Delete a file system directory"""
+ changed = True
+ if not module.check_mode:
+ res = array.delete_directories(
+ names=[module.params["filesystem"] + ":" + module.params["name"]]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete file system {0}. {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def rename_dir(module, array):
+ """Rename a file system directory"""
+ changed = False
+ target = array.get_directories(
+ names=[module.params["filesystem"] + ":" + module.params["rename"]]
+ )
+ if target.status_code != 200:
+ if not module.check_mode:
+ changed = True
+ directory = flasharray.DirectoryPatch(
+ name=module.params["filesystem"] + ":" + module.params["rename"]
+ )
+ res = array.patch_directories(
+ names=[module.params["filesystem"] + ":" + module.params["name"]],
+ directory=directory,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete file system {0}".format(module.params["name"])
+ )
+ else:
+ module.fail_json(
+ msg="Target file system {0} already exists".format(module.params["rename"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_dir(module, array):
+ """Create a file system directory"""
+ changed = False
+ if not module.params["path"]:
+ module.params["path"] = module.params["name"]
+ all_fs = list(
+ array.get_directories(file_system_names=[module.params["filesystem"]]).items
+ )
+ for check in range(0, len(all_fs)):
+ if module.params["path"] == all_fs[check].path[1:]:
+ module.fail_json(
+ msg="Path {0} already existis in file system {1}".format(
+ module.params["path"], module.params["filesystem"]
+ )
+ )
+ changed = True
+ if not module.check_mode:
+ directory = flasharray.DirectoryPost(
+ directory_name=module.params["name"], path=module.params["path"]
+ )
+ res = array.post_directories(
+ file_system_names=[module.params["filesystem"]], directory=directory
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create file system {0}. {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ filesystem=dict(type="str", required=True),
+ name=dict(type="str", required=True),
+ rename=dict(type="str"),
+ path=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ array = get_array(module)
+ state = module.params["state"]
+
+ try:
+ filesystem = list(
+ array.get_file_systems(names=[module.params["filesystem"]]).items
+ )[0]
+ except Exception:
+ module.fail_json(
+ msg="Selected file system {0} does not exist".format(
+ module.params["filesystem"]
+ )
+ )
+ res = array.get_directories(
+ names=[module.params["filesystem"] + ":" + module.params["name"]]
+ )
+ exists = bool(res.status_code == 200)
+
+ if state == "present" and not exists:
+ create_dir(module, array)
+ elif (
+ state == "present"
+ and exists
+ and module.params["rename"]
+ and not filesystem.destroyed
+ ):
+ rename_dir(module, array)
+ elif state == "absent" and exists:
+ delete_dir(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
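
The naming convention the directory module relies on, shown in isolation: a
managed directory is addressed as FILESYSTEM:NAME, and the path defaults to
the directory name when not supplied. Names below are illustrative.

def full_directory_name(filesystem, name):
    """Build the fs:dir form used in the names= arguments above."""
    return filesystem + ":" + name


def effective_path(name, path=None):
    """The managed directory path defaults to its name."""
    return path if path else name


print(full_directory_name("bar", "foo"))  # bar:foo
print(effective_path("foo"))  # foo
print(effective_path("foo", "zeta"))  # zeta
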
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py
new file mode 100644
index 000000000..4c090bde8
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py
@@ -0,0 +1,474 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_dirsnap
+version_added: '1.9.0'
+short_description: Manage FlashArray File System Directory Snapshots
+description:
+- Create/Delete FlashArray File System directory snapshots
+- A full snapshot name is constructed in the form of DIR.CLIENT_NAME.SUFFIX
+ where DIR is the managed directory name, CLIENT_NAME is the client name,
+ and SUFFIX is the suffix.
+- The client visible snapshot name is CLIENT_NAME.SUFFIX.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the directory to snapshot
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the directory snapshot should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ filesystem:
+ description:
+ - Name of the filesystem the directory links to.
+ type: str
+ required: true
+ eradicate:
+ description:
+ - Define whether to eradicate the snapshot on delete or leave in trash
+ type: bool
+ default: false
+ client:
+ description:
+ - The client name portion of the client visible snapshot name
+ type: str
+ required: true
+ suffix:
+ description:
+ - Snapshot suffix to use
+ type: str
+ new_client:
+ description:
+ - The new client name when performing a rename
+ type: str
+ version_added: '1.12.0'
+ new_suffix:
+ description:
+ - The new suffix when performing a rename
+ type: str
+ version_added: '1.12.0'
+ rename:
+ description:
+ - Whether to rename a directory snapshot
+ - The snapshot client name and suffix can be changed
+    - Required with I(new_client) and I(new_suffix)
+ type: bool
+ default: false
+ version_added: '1.12.0'
+ keep_for:
+ description:
+ - Retention period, after which snapshots will be eradicated
+ - Specify in seconds. Range 300 - 31536000 (5 minutes to 1 year)
+ - Value of 0 will set no retention period.
+ - If not specified on create will default to 0 (no retention period)
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create a snapshot of directory foo in filesystem bar for client test with suffix test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: test
+ suffix: test
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update retention time for a snapshot foo:bar.client.test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ keep_for: 300 # 5 minutes
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete snapshot foo:bar.client.test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Recover deleted snapshot foo:bar.client.test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete and eradicate snapshot foo:bar.client.test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ state: absent
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Eradicate deleted snapshot foo:bar.client.test
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ eradicate: true
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Rename snapshot
+ purestorage.flasharray.purefa_dirsnap:
+ name: foo
+ filesystem: bar
+ client: client
+ suffix: test
+ rename: true
+ new_client: client2
+ new_suffix: test2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import DirectorySnapshotPost, DirectorySnapshotPatch
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+MIN_RENAME_API_VERSION = "2.10"
+
+
+def eradicate_snap(module, array):
+ """Eradicate a filesystem snapshot"""
+ changed = True
+ if not module.check_mode:
+ snapname = (
+ module.params["filesystem"]
+ + ":"
+ + module.params["name"]
+ + "."
+ + module.params["client"]
+ + "."
+ + module.params["suffix"]
+ )
+ res = array.delete_directory_snapshots(names=[snapname])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to eradicate filesystem snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_snap(module, array):
+ """Delete a filesystem snapshot"""
+ changed = True
+ if not module.check_mode:
+ snapname = (
+ module.params["filesystem"]
+ + ":"
+ + module.params["name"]
+ + "."
+ + module.params["client"]
+ + "."
+ + module.params["suffix"]
+ )
+ directory_snapshot = DirectorySnapshotPatch(destroyed=True)
+ res = array.patch_directory_snapshots(
+ names=[snapname], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete filesystem snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ if module.params["eradicate"]:
+ eradicate_snap(module, array)
+ module.exit_json(changed=changed)
+
+
+def update_snap(module, array, snap_detail):
+ """Update a filesystem snapshot retention time"""
+ changed = True
+ snapname = (
+ module.params["filesystem"]
+ + ":"
+ + module.params["name"]
+ + "."
+ + module.params["client"]
+ + "."
+ + module.params["suffix"]
+ )
+ if module.params["rename"]:
+ if not module.params["new_client"]:
+ new_client = module.params["client"]
+ else:
+ new_client = module.params["new_client"]
+ if not module.params["new_suffix"]:
+ new_suffix = module.params["suffix"]
+ else:
+ new_suffix = module.params["new_suffix"]
+ new_snapname = (
+ module.params["filesystem"]
+ + ":"
+ + module.params["name"]
+ + "."
+ + new_client
+ + "."
+ + new_suffix
+ )
+ directory_snapshot = DirectorySnapshotPatch(
+ client_name=new_client, suffix=new_suffix
+ )
+ if not module.check_mode:
+ res = array.patch_directory_snapshots(
+ names=[snapname], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ else:
+ snapname = new_snapname
+ if not module.params["keep_for"] or module.params["keep_for"] == 0:
+ keep_for = 0
+ elif 300 <= module.params["keep_for"] <= 31536000:
+ keep_for = module.params["keep_for"] * 1000
+ else:
+ module.fail_json(msg="keep_for not in range of 300 - 31536000")
+ if not module.check_mode:
+ if snap_detail.destroyed:
+ directory_snapshot = DirectorySnapshotPatch(destroyed=False)
+ res = array.patch_directory_snapshots(
+ names=[snapname], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to recover snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ directory_snapshot = DirectorySnapshotPatch(keep_for=keep_for)
+ if snap_detail.time_remaining == 0 and keep_for != 0:
+ res = array.patch_directory_snapshots(
+ names=[snapname], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to retention time for snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ elif snap_detail.time_remaining > 0:
+ if module.params["rename"] and module.params["keep_for"]:
+ res = array.patch_directory_snapshots(
+ names=[snapname], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to retention time for renamed snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def create_snap(module, array):
+ """Create a filesystem snapshot"""
+ changed = True
+ if not module.check_mode:
+ if not module.params["keep_for"] or module.params["keep_for"] == 0:
+ keep_for = 0
+ elif 300 <= module.params["keep_for"] <= 31536000:
+ keep_for = module.params["keep_for"] * 1000
+ else:
+ module.fail_json(msg="keep_for not in range of 300 - 31536000")
+ directory = module.params["filesystem"] + ":" + module.params["name"]
+ if module.params["suffix"]:
+ directory_snapshot = DirectorySnapshotPost(
+ client_name=module.params["client"],
+ keep_for=keep_for,
+ suffix=module.params["suffix"],
+ )
+ else:
+ directory_snapshot = DirectorySnapshotPost(
+ client_name=module.params["client"], keep_for=keep_for
+ )
+ res = array.post_directory_snapshots(
+ source_names=[directory], directory_snapshot=directory_snapshot
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create client {0} snapshot for {1}. Error: {2}".format(
+ module.params["client"], directory, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ filesystem=dict(type="str", required=True),
+ name=dict(type="str", required=True),
+ eradicate=dict(type="bool", default=False),
+ client=dict(type="str", required=True),
+ suffix=dict(type="str"),
+ rename=dict(type="bool", default=False),
+ new_client=dict(type="str"),
+ new_suffix=dict(type="str"),
+ keep_for=dict(type="int"),
+ )
+ )
+
+ required_if = [["state", "absent", ["suffix"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if module.params["rename"]:
+ if not module.params["new_client"] and not module.params["new_suffix"]:
+ module.fail_json(msg="Rename requires one of: new_client, new_suffix")
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ client_pattern = re.compile(
+ "^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,56}[a-zA-Z0-9])?$"
+ )
+ suffix_pattern = re.compile(
+ "^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,63}[a-zA-Z0-9])?$"
+ )
+ if module.params["suffix"]:
+ if not suffix_pattern.match(module.params["suffix"]):
+ module.fail_json(
+ msg="Suffix name {0} does not conform to the suffix name rules.".format(
+ module.params["suffix"]
+ )
+ )
+ if module.params["new_suffix"]:
+ if not suffix_pattern.match(module.params["new_suffix"]):
+ module.fail_json(
+ msg="Suffix rename {0} does not conform to the suffix name rules.".format(
+ module.params["new_suffix"]
+ )
+ )
+ if module.params["client"]:
+ if not client_pattern.match(module.params["client"]):
+ module.fail_json(
+ msg="Client name {0} does not conform to the client name rules.".format(
+ module.params["client"]
+ )
+ )
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ if module.params["rename"] and MIN_RENAME_API_VERSION not in api_version:
+ module.fail_json(
+ msg="Directory snapshot rename not supported. "
+ "Minimum Purity//FA version required: 6.2.1"
+ )
+ array = get_array(module)
+ state = module.params["state"]
+ snapshot_root = module.params["filesystem"] + ":" + module.params["name"]
+ if bool(
+ array.get_directories(
+ filter='name="' + snapshot_root + '"', total_item_count=True
+ ).total_item_count
+ == 0
+ ):
+ module.fail_json(msg="Directory {0} does not exist.".format(snapshot_root))
+ snap_exists = False
+ if module.params["suffix"]:
+ snap_detail = array.get_directory_snapshots(
+ filter="name='"
+ + snapshot_root
+ + "."
+ + module.params["client"]
+ + "."
+ + module.params["suffix"]
+ + "'",
+ total_item_count=True,
+ )
+ if bool(snap_detail.status_code == 200):
+ snap_exists = bool(snap_detail.total_item_count != 0)
+ if snap_exists:
+ snap_facts = list(snap_detail.items)[0]
+ if state == "present" and not snap_exists:
+ create_snap(module, array)
+ elif state == "present" and snap_exists and module.params["suffix"]:
+ update_snap(module, array, snap_facts)
+ elif state == "absent" and snap_exists and not snap_facts.destroyed:
+ delete_snap(module, array)
+ elif (
+ state == "absent"
+ and snap_exists
+ and snap_facts.destroyed
+ and module.params["eradicate"]
+ ):
+ eradicate_snap(module, array)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
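
The snapshot naming scheme from the DOCUMENTATION block, sketched as a
helper; sample values match the examples above.

def snapshot_names(filesystem, directory, client, suffix):
    """Return (array-side name, client-visible name) for a directory snapshot."""
    full = "{0}:{1}.{2}.{3}".format(filesystem, directory, client, suffix)
    client_visible = "{0}.{1}".format(client, suffix)
    return full, client_visible


full, visible = snapshot_names("bar", "foo", "client", "test")
print(full)  # bar:foo.client.test
print(visible)  # client.test
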
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py
new file mode 100644
index 000000000..746a4ed52
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_dns
+version_added: '1.0.0'
+short_description: Configure FlashArray DNS settings
+description:
+- Set or erase configuration for the DNS settings.
+- Nameservers provided will overwrite any existing nameservers.
+- From Purity//FA 6.3.3, DNS settings for FA-File can be configured separately
+  from the management DNS settings.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the DNS configuration.
+ - Default value only supported for management service
+ default: management
+ type: str
+ version_added: 1.14.0
+ state:
+ description:
+ - Set or delete directory service configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ domain:
+ description:
+    - Domain suffix to be appended when performing DNS lookups.
+ type: str
+ nameservers:
+ description:
+ - List of up to 3 unique DNS server IP addresses. These can be
+      IPv4 or IPv6. No validation of the addresses is performed.
+ type: list
+ elements: str
+ service:
+ description:
+    - Type of service the DNS configuration will work with
+ type: str
+ version_added: 1.14.0
+ choices: [ management, file ]
+ default: management
+ source:
+ description:
+ - A virtual network interface (vif)
+ type: str
+ version_added: 1.14.0
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing DNS settings
+ purestorage.flasharray.purefa_dns:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set management DNS settings
+ purestorage.flasharray.purefa_dns:
+ domain: purestorage.com
+ nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set file DNS settings
+ purestorage.flasharray.purefa_dns:
+ domain: purestorage.com
+ nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+ name: ad_dns
+ service: file
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
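+# First REST API version with support for multiple DNS configurations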
+MULTIPLE_DNS = "2.15"
+
+
+def remove(duplicate):
+ final_list = []
+ for num in duplicate:
+ if num not in final_list:
+ final_list.append(num)
+ return final_list
+
+
+def _get_source(module, array):
+ res = array.get_network_interfaces(names=[module.params["source"]])
+    return bool(res.status_code == 200)
+
+
+def delete_dns(module, array):
+ """Delete DNS settings"""
+ changed = False
+ current_dns = array.get_dns()
+ if current_dns["domain"] == "" and current_dns["nameservers"] == [""]:
+ module.exit_json(changed=changed)
+ else:
+ try:
+ changed = True
+ if not module.check_mode:
+ array.set_dns(domain="", nameservers=[])
+ except Exception:
+ module.fail_json(msg="Delete DNS settigs failed")
+ module.exit_json(changed=changed)
+
+
+def create_dns(module, array):
+ """Set DNS settings"""
+ changed = False
+ current_dns = array.get_dns()
+ if current_dns["domain"] != module.params["domain"] or sorted(
+ module.params["nameservers"]
+ ) != sorted(current_dns["nameservers"]):
+ try:
+ changed = True
+ if not module.check_mode:
+ array.set_dns(
+ domain=module.params["domain"],
+ nameservers=module.params["nameservers"][0:3],
+ )
+ except Exception:
+ module.fail_json(msg="Set DNS settings failed: Check configuration")
+ module.exit_json(changed=changed)
+
+
+def update_multi_dns(module, array):
+ """Update a DNS configuration"""
+ changed = False
+ current_dns = list(array.get_dns(names=[module.params["name"]]).items)[0]
+ new_dns = current_dns
+ if module.params["domain"] and current_dns.domain != module.params["domain"]:
+ new_dns.domain = module.params["domain"]
+ changed = True
+ if module.params["service"] and current_dns.services != [module.params["service"]]:
+ module.fail_json(msg="Changing service type is not permitted")
+ if module.params["nameservers"] and sorted(current_dns.nameservers) != sorted(
+ module.params["nameservers"]
+ ):
+ new_dns.nameservers = module.params["nameservers"]
+ changed = True
+    if (
+        module.params["source"] is not None
+        and current_dns.source.name != module.params["source"]
+    ):
+ new_dns.source.name = module.params["source"]
+ changed = True
+ if changed and not module.check_mode:
+ res = array.patch_dns(
+ names=[module.params["name"]],
+ dns=flasharray.Dns(
+ domain=new_dns.domain,
+ nameservers=new_dns.nameservers,
+ source=flasharray.ReferenceNoId(module.params["source"]),
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Update to DNS service {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_multi_dns(module, array):
+ """Delete a DNS configuration"""
+ changed = True
+ if module.params["name"] == "management":
+ res = array.update_dns(
+ names=[module.params["name"]],
+ dns=flasharray.DnsPatch(
+ domain=module.params["domain"],
+ nameservers=module.params["nameservers"],
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Management DNS configuration not deleted. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ if not module.check_mode:
+ res = array.delete_dns(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete DNS configuration {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_multi_dns(module, array):
+ """Create a DNS configuration"""
+ changed = True
+ if not module.check_mode:
+ if module.params["service"] == "file":
+ if module.params["source"]:
+ res = array.post_dns(
+ names=[module.params["name"]],
+ dns=flasharray.DnsPost(
+ services=[module.params["service"]],
+ domain=module.params["domain"],
+ nameservers=module.params["nameservers"],
+ source=flasharray.ReferenceNoId(
+ module.params["source"].lower()
+ ),
+ ),
+ )
+ else:
+ res = array.post_dns(
+ names=[module.params["name"]],
+ dns=flasharray.DnsPost(
+ services=[module.params["service"]],
+ domain=module.params["domain"],
+ nameservers=module.params["nameservers"],
+ ),
+ )
+        else:
+            res = array.post_dns(
+                names=[module.params["name"]],
+                dns=flasharray.DnsPost(
+                    services=[module.params["service"]],
+                    domain=module.params["domain"],
+                    nameservers=module.params["nameservers"],
+                ),
+            )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create {0} DNS configuration {1}. Error: {2}".format(
+ module.params["service"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str", default="management"),
+ service=dict(
+ type="str", default="management", choices=["management", "file"]
+ ),
+ domain=dict(type="str"),
+ source=dict(type="str"),
+ nameservers=dict(type="list", elements="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if module.params["nameservers"]:
+ module.params["nameservers"] = remove(module.params["nameservers"])
+ if module.params["service"] == "management":
+ module.params["nameservers"] = module.params["nameservers"][0:3]
+
+ if MULTIPLE_DNS in api_version:
+ array = get_array(module)
+ configs = list(array.get_dns().items)
+ exists = False
+ for config in range(0, len(configs)):
+ if configs[config].name == module.params["name"]:
+ exists = True
+ if (
+ module.params["service"] == "management"
+ and module.params["name"] != "management"
+ and not exists
+ ):
+ module.warn("Overriding configuration name to management")
+ module.params["name"] = "management"
+ if module.params["source"] and not _get_source(module, array):
+ module.fail_json(
+ msg="Specified VIF {0} does not exist.".format(module.params["source"])
+ )
+ if state == "present" and exists:
+ update_multi_dns(module, array)
+ elif state == "present" and not exists:
+ if len(configs) == 2:
+ module.fail_json(
+ msg="Only 2 DNS configurations are currently "
+ "supported. One for management and one for file services"
+ )
+ create_multi_dns(module, array)
+ elif exists and state == "absent":
+ delete_multi_dns(module, array)
+ else:
+ module.exit_json(changed=False)
+ else:
+ if state == "absent":
+ delete_dns(module, array)
+ elif state == "present":
+ if not module.params["domain"] or not module.params["nameservers"]:
+ module.fail_json(
+ msg="`domain` and `nameservers` are required for DNS configuration"
+ )
+ create_dns(module, array)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
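
The nameserver normalisation performed in main() - order-preserving
deduplication via remove() plus the three-server cap for the management
service - as a standalone sketch:

def normalise_nameservers(nameservers, service="management"):
    """Dedupe in order; management DNS keeps at most three servers."""
    deduped = []
    for server in nameservers:
        if server not in deduped:
            deduped.append(server)
    return deduped[0:3] if service == "management" else deduped


print(normalise_nameservers(["8.8.8.8", "8.8.4.4", "8.8.8.8", "1.1.1.1"]))
# ['8.8.8.8', '8.8.4.4', '1.1.1.1']
print(normalise_nameservers(["8.8.8.8", "8.8.8.8"], service="file"))
# ['8.8.8.8']
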
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py
new file mode 100644
index 000000000..195aa2155
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py
@@ -0,0 +1,609 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_ds
+version_added: '1.0.0'
+short_description: Configure FlashArray Directory Service
+description:
+- Set or erase configuration for the directory service. There is no facility
+  to manage SSL certificates at this time. Use the FlashArray GUI for this
+  additional configuration work.
+- To modify an existing directory service configuration you must first delete
+  the existing configuration and then recreate it with new settings.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ type: str
+ description:
+ - Create or delete directory service configuration
+ default: present
+ choices: [ absent, present ]
+ enable:
+ description:
+ - Whether to enable or disable directory service support.
+ default: false
+ type: bool
+ dstype:
+ description:
+ - The type of directory service to work on
+ choices: [ management, data ]
+ type: str
+ default: management
+ uri:
+ type: list
+ elements: str
+ description:
+ - A list of up to 30 URIs of the directory servers. Each URI must include
+ the scheme ldap:// or ldaps:// (for LDAP over SSL), a hostname, and a
+ domain name or IP address. For example, ldap://ad.company.com configures
+ the directory service with the hostname "ad" in the domain "company.com"
+ while specifying the unencrypted LDAP protocol.
+ base_dn:
+ type: str
+ description:
+ - Sets the base of the Distinguished Name (DN) of the directory service
+ groups. The base should consist of only Domain Components (DCs). The
+ base_dn will populate with a default value when a URI is entered by
+ parsing domain components from the URI. The base DN should specify DC=
+ for each domain component and multiple DCs should be separated by commas.
+ bind_password:
+ type: str
+ description:
+    - Sets the password of the I(bind_user) account.
+ force_bind_password:
+ type: bool
+ default: true
+ description:
+ - Will force the bind password to be reset even if the bind user password
+ is unchanged.
+ - If set to I(false) and I(bind_user) is unchanged the password will not
+ be reset.
+ version_added: 1.14.0
+ bind_user:
+ type: str
+ description:
+ - Sets the user name that can be used to bind to and query the directory.
+ - For Active Directory, enter the username - often referred to as
+ sAMAccountName or User Logon Name - of the account that is used to
+ perform directory lookups.
+ - For OpenLDAP, enter the full DN of the user.
+ group_base:
+ type: str
+ description:
+ - Specifies where the configured groups are located in the directory
+ tree. This field consists of Organizational Units (OUs) that combine
+ with the base DN attribute and the configured group CNs to complete
+ the full Distinguished Name of the groups. The group base should
+ specify OU= for each OU and multiple OUs should be separated by commas.
+ The order of OUs is important and should get larger in scope from left
+ to right. Each OU should not exceed 64 characters in length.
+ - Not Supported from Purity 5.2.0 or higher.
+ Use I(purestorage.flasharray.purefa_dsrole) module.
+ ro_group:
+ type: str
+ description:
+ - Sets the common Name (CN) of the configured directory service group
+ containing users with read-only privileges on the FlashArray. This
+ name should be just the Common Name of the group without the CN=
+ specifier. Common Names should not exceed 64 characters in length.
+ - Not Supported from Purity 5.2.0 or higher.
+ Use I(purestorage.flasharray.purefa_dsrole) module.
+ sa_group:
+ type: str
+ description:
+ - Sets the common Name (CN) of the configured directory service group
+ containing administrators with storage-related privileges on the
+ FlashArray. This name should be just the Common Name of the group
+ without the CN= specifier. Common Names should not exceed 64
+ characters in length.
+ - Not Supported from Purity 5.2.0 or higher.
+ Use I(purestorage.flasharray.purefa_dsrole) module.
+ aa_group:
+ type: str
+ description:
+ - Sets the common Name (CN) of the directory service group containing
+ administrators with full privileges when managing the FlashArray.
+ The name should be just the Common Name of the group without the
+ CN= specifier. Common Names should not exceed 64 characters in length.
+ - Not Supported from Purity 5.2.0 or higher.
+ Use I(purestorage.flasharray.purefa_dsrole) module.
+ user_login:
+ type: str
+ description:
+ - User login attribute in the structure of the configured LDAP servers.
+      Typically the attribute field that holds the user's unique login name.
+ Default value is I(sAMAccountName) for Active Directory or I(uid)
+ for all other directory services
+ - Supported from Purity 6.0 or higher.
+ user_object:
+ type: str
+ description:
+ - Value of the object class for a management LDAP user.
+ Defaults to I(User) for Active Directory servers, I(posixAccount) or
+      I(shadowAccount) for OpenLDAP servers depending on the group type
+      of the server, or I(person) for all other directory servers.
+ - Supported from Purity 6.0 or higher.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing directory service
+ purestorage.flasharray.purefa_ds:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (disabled) - Pre-5.2.0
+ purestorage.flasharray.purefa_ds:
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ group_base: "OU=Pure-Admin"
+ ro_group: PureReadOnly
+ sa_group: PureStorage
+ aa_group: PureAdmin
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (disabled) - 5.2.0 or higher
+ purestorage.flasharray.purefa_ds:
+ dstype: management
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Enable existing directory service
+ purestorage.flasharray.purefa_ds:
+ enable: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable existing directory service
+ purestorage.flasharray.purefa_ds:
+ enable: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (enabled) - Pre-5.2.0
+ purestorage.flasharray.purefa_ds:
+ enable: true
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ group_base: "OU=Pure-Admin"
+ ro_group: PureReadOnly
+ sa_group: PureStorage
+ aa_group: PureAdmin
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory service (enabled) - 5.2.0 or higher
+ purestorage.flasharray.purefa_ds:
+ enable: true
+ dstype: management
+ uri: "ldap://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
+DS_ROLE_REQUIRED_API_VERSION = "1.16"
+FAFILES_API_VERSION = "2.2"
+
+
+def disable_ds(module, array):
+ """Disable Directory Service"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.disable_directory_service()
+ except Exception:
+ module.fail_json(msg="Disable Directory Service failed")
+ module.exit_json(changed=changed)
+
+
+def enable_ds(module, array):
+ """Enable Directory Service"""
+ changed = False
+ api_version = array._list_available_rest_versions()
+ if DS_ROLE_REQUIRED_API_VERSION in api_version:
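+ # REST 1.16 (Purity 5.2.0+) manages admin groups through directory
+ # service roles, so at least one role must have a group_base set
+ # before the service can be enabled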
+ try:
+ roles = array.list_directory_service_roles()
+ enough_roles = False
+ for role in roles:
+ if role["group_base"]:
+ enough_roles = True
+ if enough_roles:
+ changed = True
+ if not module.check_mode:
+ array.enable_directory_service()
+ else:
+ module.fail_json(
+ msg="Cannot enable directory service - please create a directory service role"
+ )
+ except Exception:
+ module.fail_json(msg="Enable Directory Service failed: Check Configuration")
+ else:
+ try:
+ changed = True
+ if not module.check_mode:
+ array.enable_directory_service()
+ except Exception:
+ module.fail_json(msg="Enable Directory Service failed: Check Configuration")
+ module.exit_json(changed=changed)
+
+
+def delete_ds(module, array):
+ """Delete Directory Service"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = array._list_available_rest_versions()
+ array.set_directory_service(enabled=False)
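+ # on REST 1.16+ the group settings live in directory service roles,
+ # so only the connection settings are cleared here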
+ if DS_ROLE_REQUIRED_API_VERSION in api_version:
+ array.set_directory_service(
+ uri=[""], base_dn="", bind_user="", bind_password="", certificate=""
+ )
+ else:
+ array.set_directory_service(
+ uri=[""],
+ base_dn="",
+ group_base="",
+ bind_user="",
+ bind_password="",
+ readonly_group="",
+ storage_admin_group="",
+ array_admin_group="",
+ certificate="",
+ )
+ except Exception:
+ module.fail_json(msg="Delete Directory Service failed")
+ module.exit_json(changed=changed)
+
+
+def delete_ds_v6(module, array):
+ """Delete Directory Service"""
+ changed = True
+ if module.params["dstype"] == "management":
+ management = flasharray.DirectoryServiceManagement(
+ user_login_attribute="", user_object_class=""
+ )
+ directory_service = flasharray.DirectoryService(
+ uris=[""],
+ base_dn="",
+ bind_user="",
+ bind_password="",
+ enabled=False,
+ services=module.params["dstype"],
+ management=management,
+ )
+ else:
+ directory_service = flasharray.DirectoryService(
+ uris=[""],
+ base_dn="",
+ bind_user="",
+ bind_password="",
+ enabled=False,
+ services=module.params["dstype"],
+ )
+ if not module.check_mode:
+ res = array.patch_directory_services(
+ names=[module.params["dstype"]], directory_service=directory_service
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Delete {0} Directory Service failed. Error message: {1}".format(
+ module.params["dstype"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_ds(module, array):
+ """Create Directory Service"""
+ changed = False
+ if None in (
+ module.params["bind_password"],
+ module.params["bind_user"],
+ module.params["base_dn"],
+ module.params["uri"],
+ ):
+ module.fail_json(
+ msg="Parameters 'bind_password', 'bind_user', 'base_dn' and 'uri' are all required"
+ )
+ api_version = array._list_available_rest_versions()
+ if DS_ROLE_REQUIRED_API_VERSION in api_version:
+ try:
+ changed = True
+ if not module.check_mode:
+ array.set_directory_service(
+ uri=module.params["uri"],
+ base_dn=module.params["base_dn"],
+ bind_user=module.params["bind_user"],
+ bind_password=module.params["bind_password"],
+ )
+ roles = array.list_directory_service_roles()
+ enough_roles = False
+ for role in roles:
+ if role["group_base"]:
+ enough_roles = True
+ if enough_roles:
+ array.set_directory_service(enabled=module.params["enable"])
+ else:
+ module.fail_json(
+ msg="Cannot enable directory service - please create a directory service role"
+ )
+ except Exception:
+ module.fail_json(msg="Create Directory Service failed: Check configuration")
+ else:
+ groups_rule = [
+ not module.params["ro_group"],
+ not module.params["sa_group"],
+ not module.params["aa_group"],
+ ]
+
+ if all(groups_rule):
+ module.fail_json(msg="At least one group must be configured")
+ try:
+ changed = True
+ if not module.check_mode:
+ array.set_directory_service(
+ uri=module.params["uri"],
+ base_dn=module.params["base_dn"],
+ group_base=module.params["group_base"],
+ bind_user=module.params["bind_user"],
+ bind_password=module.params["bind_password"],
+ readonly_group=module.params["ro_group"],
+ storage_admin_group=module.params["sa_group"],
+ array_admin_group=module.params["aa_group"],
+ )
+ array.set_directory_service(enabled=module.params["enable"])
+ except Exception:
+ module.fail_json(msg="Create Directory Service failed: Check configuration")
+ module.exit_json(changed=changed)
+
+
+def update_ds_v6(module, array):
+ """Update Directory Service"""
+ changed = False
+ ds_change = False
+ password_required = False
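+ # the bind password must be re-supplied whenever the URIs are first set,
+ # the bind user changes, or force_bind_password is requested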
+ dirserv = list(
+ array.get_directory_services(
+ filter="name='" + module.params["dstype"] + "'"
+ ).items
+ )[0]
+ current_ds = dirserv
+ if module.params["uri"] and current_ds.uris is None:
+ password_required = True
+ if current_ds.uris != module.params["uri"]:
+ uris = module.params["uri"]
+ ds_change = True
+ else:
+ uris = current_ds.uris
+ try:
+ base_dn = current_ds.base_dn
+ except AttributeError:
+ base_dn = ""
+ try:
+ bind_user = current_ds.bind_user
+ except AttributeError:
+ bind_user = ""
+ if module.params["base_dn"] != "" and module.params["base_dn"] != base_dn:
+ base_dn = module.params["base_dn"]
+ ds_change = True
+ if module.params["bind_user"] != "":
+ bind_user = module.params["bind_user"]
+ if module.params["bind_user"] != bind_user:
+ password_required = True
+ ds_change = True
+ elif module.params["force_bind_password"]:
+ password_required = True
+ ds_change = True
+ if module.params["bind_password"] is not None and password_required:
+ bind_password = module.params["bind_password"]
+ ds_change = True
+ if module.params["enable"] != current_ds.enabled:
+ ds_change = True
+ if password_required and not module.params["bind_password"]:
+ module.fail_json(msg="'bind_password' must be provided for this task")
+ if module.params["dstype"] == "management":
+ try:
+ user_login = current_ds.management.user_login_attribute
+ except AttributeError:
+ user_login = ""
+ try:
+ user_object = current_ds.management.user_object_class
+ except AttributeError:
+ user_object = ""
+ if (
+ module.params["user_object"] is not None
+ and user_object != module.params["user_object"]
+ ):
+ user_object = module.params["user_object"]
+ ds_change = True
+ if (
+ module.params["user_login"] is not None
+ and user_login != module.params["user_login"]
+ ):
+ user_login = module.params["user_login"]
+ ds_change = True
+ management = flasharray.DirectoryServiceManagement(
+ user_login_attribute=user_login, user_object_class=user_object
+ )
+ if password_required:
+ directory_service = flasharray.DirectoryService(
+ uris=uris,
+ base_dn=base_dn,
+ bind_user=bind_user,
+ bind_password=bind_password,
+ enabled=module.params["enable"],
+ services=module.params["dstype"],
+ management=management,
+ )
+ else:
+ directory_service = flasharray.DirectoryService(
+ uris=uris,
+ base_dn=base_dn,
+ bind_user=bind_user,
+ enabled=module.params["enable"],
+ services=module.params["dstype"],
+ management=management,
+ )
+ else:
+ if password_required:
+ directory_service = flasharray.DirectoryService(
+ uris=uris,
+ base_dn=base_dn,
+ bind_user=bind_user,
+ bind_password=bind_password,
+ enabled=module.params["enable"],
+ services=module.params["dstype"],
+ )
+ else:
+ directory_service = flasharray.DirectoryService(
+ uris=uris,
+ base_dn=base_dn,
+ bind_user=bind_user,
+ enabled=module.params["enable"],
+ services=module.params["dstype"],
+ )
+ if ds_change:
+ changed = True
+ if not module.check_mode:
+ res = array.patch_directory_services(
+ names=[module.params["dstype"]], directory_service=directory_service
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="{0} Directory Service failed. Error message: {1}".format(
+ module.params["dstype"].capitalize(), res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ uri=dict(type="list", elements="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ enable=dict(type="bool", default=False),
+ force_bind_password=dict(type="bool", default=True, no_log=True),
+ bind_password=dict(type="str", no_log=True),
+ bind_user=dict(type="str"),
+ base_dn=dict(type="str"),
+ group_base=dict(type="str"),
+ user_login=dict(type="str"),
+ user_object=dict(type="str"),
+ ro_group=dict(type="str"),
+ sa_group=dict(type="str"),
+ aa_group=dict(type="str"),
+ dstype=dict(
+ type="str", default="management", choices=["management", "data"]
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required to for this module")
+
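+ # REST 2.2 (FAFILES_API_VERSION) indicates a Purity//FA 6.x array,
+ # which is managed through the v6 (py-pure-client) code paths below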
+ if FAFILES_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+
+ if module.params["dstype"] == "data":
+ if FAFILES_API_VERSION in api_version:
+ if len(list(arrayv6.get_directory_services().items)) == 1:
+ module.warn("FA-Files is not enabled - ignoring")
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(
+ msg="'data' directory service requires Purity//FA 6.0.0 or higher"
+ )
+
+ state = module.params["state"]
+ ds_exists = False
+ if FAFILES_API_VERSION in api_version:
+ dirserv = list(
+ arrayv6.get_directory_services(
+ filter="name='" + module.params["dstype"] + "'"
+ ).items
+ )[0]
+ if state == "absent" and dirserv.uris != []:
+ delete_ds_v6(module, arrayv6)
+ else:
+ update_ds_v6(module, arrayv6)
+ else:
+ dirserv = array.get_directory_service()
+ ds_enabled = dirserv["enabled"]
+ if dirserv["base_dn"]:
+ ds_exists = True
+
+ if state == "absent" and ds_exists:
+ delete_ds(module, array)
+ elif ds_exists and module.params["enable"] and ds_enabled:
+ module.warn(
+ "To update an existing directory service configuration in Purity//FA 5.x, please delete and recreate"
+ )
+ module.exit_json(changed=False)
+ elif ds_exists and not module.params["enable"] and ds_enabled:
+ disable_ds(module, array)
+ elif ds_exists and module.params["enable"] and not ds_enabled:
+ enable_ds(module, array)
+ elif not ds_exists and state == "present":
+ create_ds(module, array)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py
new file mode 100644
index 000000000..ce6e8c0a5
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dsrole.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_dsrole
+version_added: '1.0.0'
+short_description: Configure FlashArray Directory Service Roles
+description:
+- Set or erase directory services role configurations.
+- Only available for FlashArrays running Purity 5.2.0 or higher.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete directory service role
+ type: str
+ default: present
+ choices: [ absent, present ]
+ role:
+ description:
+ - The directory service role to work on
+ type: str
+ required: true
+ choices: [ array_admin, ops_admin, readonly, storage_admin ]
+ group_base:
+ type: str
+ description:
+ - Specifies where the configured group is located in the directory
+ tree. This field consists of Organizational Units (OUs) that combine
+ with the base DN attribute and the configured group CNs to complete
+ the full Distinguished Name of the groups. The group base should
+ specify OU= for each OU and multiple OUs should be separated by commas.
+ The order of OUs is important and should get larger in scope from left
+ to right.
+ - Each OU should not exceed 64 characters in length.
+ group:
+ type: str
+ description:
+ - Sets the Common Name (CN) of the configured directory service group
+ containing users for the FlashArray. This name should be just the
+ Common Name of the group without the CN= specifier.
+ - Common Names should not exceed 64 characters in length.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing array_admin directory service role
+ purestorage.flasharray.purefa_dsrole:
+ role: array_admin
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create array_admin directory service role
+ purestorage.flasharray.purefa_dsrole:
+ role: array_admin
+ group_base: "OU=PureGroups,OU=SANManagers"
+ group: pureadmins
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update ops_admin directory service role
+ purestorage.flasharray.purefa_dsrole:
+ role: ops_admin
+ group_base: "OU=PureGroups"
+ group: opsgroup
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def update_role(module, array):
+ """Update Directory Service Role"""
+ changed = False
+ role = array.list_directory_service_roles(names=[module.params["role"]])
+ if (
+ role[0]["group_base"] != module.params["group_base"]
+ or role[0]["group"] != module.params["group"]
+ ):
+ try:
+ changed = True
+ if not module.check_mode:
+ array.set_directory_service_roles(
+ names=[module.params["role"]],
+ group_base=module.params["group_base"],
+ group=module.params["group"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Update Directory Service Role {0} failed".format(
+ module.params["role"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_role(module, array):
+ """Delete Directory Service Role"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_directory_service_roles(
+ names=[module.params["role"]], group_base="", group=""
+ )
+ except Exception:
+ module.fail_json(
+ msg="Delete Directory Service Role {0} failed".format(
+ module.params["role"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_role(module, array):
+ """Create Directory Service Role"""
+ changed = False
+ if not module.params["group"] == "" or not module.params["group_base"] == "":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_directory_service_roles(
+ names=[module.params["role"]],
+ group_base=module.params["group_base"],
+ group=module.params["group"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Create Directory Service Role {0} failed".format(
+ module.params["role"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ role=dict(
+ required=True,
+ type="str",
+ choices=["array_admin", "ops_admin", "readonly", "storage_admin"],
+ ),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ group_base=dict(type="str"),
+ group=dict(type="str"),
+ )
+ )
+
+ required_together = [["group", "group_base"]]
+
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ array = get_system(module)
+ role_configured = False
+ role = array.list_directory_service_roles(names=[module.params["role"]])
+ if role[0]["group"] is not None:
+ role_configured = True
+
+ if state == "absent" and role_configured:
+ delete_role(module, array)
+ elif role_configured and state == "present":
+ update_role(module, array)
+ elif not role_configured and state == "present":
+ create_role(module, array)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py
new file mode 100644
index 000000000..c759be4af
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_endpoint.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_endpoint
+short_description: Manage VMware protocol-endpoints on Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+- Create, delete or eradicate an endpoint on a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the endpoint.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the endpoint should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the endpoint on delete or leave in trash.
+ type: bool
+ default: false
+ rename:
+ description:
+ - Value to rename the specified endpoint to.
+ - Rename only applies to the container the current endpoint is in.
+ type: str
+ host:
+ description:
+ - Name of host to attach endpoint to.
+ type: str
+ hgroup:
+ description:
+ - Name of hostgroup to attach endpoint to.
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new endpoint named foo
+ purestorage.flasharray.purefa_endpoint:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate endpoint named foo
+ purestorage.flasharray.purefa_endpoint:
+ name: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Rename endpoint foo to bar
+ purestorage.flasharray.purefa_endpoint:
+ name: foo
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
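+
+# Illustrative example - host bar must already exist on the array
+- name: Create endpoint foo attached to host bar
+ purestorage.flasharray.purefa_endpoint:
+ name: foo
+ host: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592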
+"""
+
+RETURN = r"""
+volume:
+ description: A dictionary describing the changed volume. Only some
+ attributes below will be returned with various actions.
+ type: dict
+ returned: success
+ contains:
+ source:
+ description: Volume name of source volume used for volume copy
+ type: str
+ serial:
+ description: Volume serial number
+ type: str
+ sample: '361019ECACE43D83000120A4'
+ created:
+ description: Volume creation time
+ type: str
+ sample: '2019-03-13T22:49:24Z'
+ name:
+ description: Volume name
+ type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+VGROUPS_API_VERSION = "1.13"
+
+
+def get_volume(volume, array):
+ """Return Volume or None"""
+ try:
+ return array.get_volume(volume, pending=True)
+ except Exception:
+ return None
+
+
+def get_target(volume, array):
+ """Return Volume or None"""
+ try:
+ return array.get_volume(volume, pending=True)
+ except Exception:
+ return None
+
+
+def get_endpoint(vol, array):
+ """Return Endpoint or None"""
+ try:
+ return array.get_volume(vol, protocol_endpoint=True)
+ except Exception:
+ return None
+
+
+def get_destroyed_endpoint(vol, array):
+ """Return Endpoint Endpoint or None"""
+ try:
+ return bool(
+ array.get_volume(vol, protocol_endpoint=True, pending=True)[
+ "time_remaining"
+ ]
+ != ""
+ )
+ except Exception:
+ return None
+
+
+def check_vgroup(module, array):
+ """Check is the requested VG to create volume in exists"""
+ vg_exists = False
+ vg_name = module.params["name"].split("/")[0]
+ try:
+ vgs = array.list_vgroups()
+ except Exception:
+ module.fail_json(msg="Failed to get volume groups list. Check array.")
+ for vgroup in vgs:
+ if vg_name == vgroup["name"]:
+ vg_exists = True
+ break
+ return vg_exists
+
+
+def create_endpoint(module, array):
+ """Create Endpoint"""
+ changed = False
+ volfact = []
+ if "/" in module.params["name"] and not check_vgroup(module, array):
+ module.fail_json(
+ msg="Failed to create endpoint {0}. Volume Group does not exist.".format(
+ module.params["name"]
+ )
+ )
+ try:
+ changed = True
+ if not module.check_mode:
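+ # a VMware protocol endpoint is created as a conglomerate volume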
+ volfact = array.create_conglomerate_volume(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Endpoint {0} creation failed.".format(module.params["name"])
+ )
+ if module.params["host"]:
+ try:
+ if not module.check_mode:
+ array.connect_host(module.params["host"], module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to attach endpoint {0} to host {1}.".format(
+ module.params["name"], module.params["host"]
+ )
+ )
+ if module.params["hgroup"]:
+ try:
+ if not module.check_mode:
+ array.connect_hgroup(module.params["hgroup"], module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to attach endpoint {0} to hostgroup {1}.".format(
+ module.params["name"], module.params["hgroup"]
+ )
+ )
+
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def rename_endpoint(module, array):
+ """Rename endpoint within a container, ie vgroup or local array"""
+ changed = False
+ volfact = []
+ target_name = module.params["rename"]
+ if "/" in module.params["rename"] or "::" in module.params["rename"]:
+ module.fail_json(msg="Target endpoint cannot include a container name")
+ if "/" in module.params["name"]:
+ vgroup_name = module.params["name"].split("/")[0]
+ target_name = vgroup_name + "/" + module.params["rename"]
+ if get_target(target_name, array) or get_destroyed_endpoint(target_name, array):
+ module.fail_json(msg="Target endpoint {0} already exists.".format(target_name))
+ else:
+ try:
+ changed = True
+ if not module.check_mode:
+ volfact = array.rename_volume(module.params["name"], target_name)
+ except Exception:
+ module.fail_json(
+ msg="Rename endpoint {0} to {1} failed.".format(
+ module.params["name"], module.params["rename"]
+ )
+ )
+
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def delete_endpoint(module, array):
+ """Delete Endpoint"""
+ changed = True
+ volfact = []
+ if not module.check_mode:
+ try:
+ array.destroy_volume(module.params["name"])
+ if module.params["eradicate"]:
+ try:
+ volfact = array.eradicate_volume(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Eradicate endpoint {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Delete endpoint {0} failed.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def recover_endpoint(module, array):
+ """Recover Deleted Endpoint"""
+ changed = True
+ volfact = []
+ if not module.check_mode:
+ try:
+ array.recover_volume(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Recovery of endpoint {0} failed".format(module.params["name"])
+ )
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def eradicate_endpoint(module, array):
+ """Eradicate Deleted Endpoint"""
+ changed = True
+ volfact = []
+ if not module.check_mode:
+ if module.params["eradicate"]:
+ try:
+ array.eradicate_volume(module.params["name"], protocol_endpoint=True)
+ except Exception:
+ module.fail_json(
+ msg="Eradication of endpoint {0} failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ rename=dict(type="str"),
+ host=dict(type="str"),
+ hgroup=dict(type="str"),
+ eradicate=dict(type="bool", default=False),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ mutually_exclusive = [["rename", "eradicate"], ["host", "hgroup"]]
+
+ module = AnsibleModule(
+ argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ destroyed = False
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if VGROUPS_API_VERSION not in api_version:
+ module.fail_json(
+ msg="Purity version does not support endpoints. Please contact support"
+ )
+ volume = get_volume(module.params["name"], array)
+ if volume:
+ module.fail_json(
+ msg="Volume {0} is an true volume. Please use the purefa_volume module".format(
+ module.params["name"]
+ )
+ )
+ endpoint = get_endpoint(module.params["name"], array)
+ if not endpoint:
+ destroyed = get_destroyed_endpoint(module.params["name"], array)
+
+ if state == "present" and not endpoint and not destroyed:
+ create_endpoint(module, array)
+ elif state == "present" and endpoint and module.params["rename"]:
+ rename_endpoint(module, array)
+ elif state == "present" and destroyed:
+ recover_endpoint(module, array)
+ elif state == "absent" and endpoint:
+ delete_endpoint(module, array)
+ elif state == "absent" and destroyed:
+ eradicate_endpoint(module, array)
+ elif state == "absent" and not endpoint and not volume:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py
new file mode 100644
index 000000000..ea7bd48bc
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_eradication
+version_added: '1.9.0'
+short_description: Configure Pure Storage FlashArray Eradication Timer
+description:
+- Configure the eradication timer for destroyed items on a FlashArray.
+- Valid values are integer days from 1 to 30. Default is 1.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ timer:
+ description:
+ - Set the eradication timer for the FlashArray.
+ - Allowed values are integers from 1 to 30. Default is 1.
+ default: 1
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Set eradication timer to 30 days
+ purestorage.flasharray.purefa_eradication:
+ timer: 30
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set eradication timer to 1 day
+ purestorage.flasharray.purefa_eradication:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import Arrays, EradicationConfig
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MSEC_PER_DAY = 86400000  # eradication_delay is expressed in milliseconds
+ERADICATION_API_VERSION = "2.6"
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ timer=dict(type="int", default=1),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ if not 1 <= module.params["timer"] <= 30:
+ module.fail_json(msg="Eradication Timer must be between 1 and 30 days.")
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ changed = False
+ if ERADICATION_API_VERSION in api_version:
+ array = get_array(module)
+ current_timer = (
+ list(array.get_arrays().items)[0].eradication_config.eradication_delay
+ / MSEC_PER_DAY
+ )
+ if module.params["timer"] != current_timer:
+ changed = True
+ if not module.check_mode:
+ new_timer = MSEC_PER_DAY * module.params["timer"]
+ eradication_config = EradicationConfig(eradication_delay=new_timer)
+ res = array.patch_arrays(
+ array=Arrays(eradication_config=eradication_config)
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change Eradication Timer. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Purity version does not support changing Eradication Timer"
+ )
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py
new file mode 100644
index 000000000..8d4d9536c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_eula
+version_added: '1.0.0'
+short_description: Sign Pure Storage FlashArray EULA
+description:
+- Sign the FlashArray EULA for Day 0 config, or change signatory.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ company:
+ description:
+ - Full legal name of the entity.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+ name:
+ description:
+ - Full legal name of the individual at the company who has the authority to accept the terms of the agreement.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+ title:
+ description:
+ - Individual's job title at the company.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Sign EULA for FlashArray
+ purestorage.flasharray.purefa_eula:
+ company: "ACME Storage, Inc."
+ name: "Fred Bloggs"
+ title: "Storage Manager"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+EULA_API_VERSION = "1.17"
+
+
+def set_eula(module, array):
+ """Sign EULA"""
+ changed = False
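+ # only re-sign when the signatory details differ from the current EULA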
+ try:
+ current_eula = array.get_eula()
+ except Exception:
+ module.fail_json(msg="Failed to get current EULA")
+ if (
+ current_eula["acceptance"]["company"] != module.params["company"]
+ or current_eula["acceptance"]["title"] != module.params["title"]
+ or current_eula["acceptance"]["name"] != module.params["name"]
+ ):
+ try:
+ changed = True
+ if not module.check_mode:
+ array.set_eula(
+ company=module.params["company"],
+ title=module.params["title"],
+ name=module.params["name"],
+ )
+ except Exception:
+ module.fail_json(msg="Signing EULA failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ company=dict(type="str", required=True),
+ name=dict(type="str", required=True),
+ title=dict(type="str", required=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if EULA_API_VERSION in api_version:
+ set_eula(module, array)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py
new file mode 100644
index 000000000..5188dbd96
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_export
+version_added: '1.5.0'
+short_description: Manage FlashArray File System Exports
+description:
+- Create/Delete FlashArray File System Exports
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the export
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the export should exist or not.
+ - You must specify an NFS or SMB policy, or both, on creation and deletion.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ filesystem:
+ description:
+ - Name of the filesystem the export applies to
+ type: str
+ required: true
+ directory:
+ description:
+ - Name of the managed directory in the file system the export applies to
+ type: str
+ required: true
+ nfs_policy:
+ description:
+ - Name of NFS Policy to apply to the export
+ type: str
+ smb_policy:
+ description:
+ - Name of SMB Policy to apply to the export
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create NFS and SMB exports for directory foo in filesystem bar
+ purestorage.flasharray.purefa_export:
+ name: export1
+ filesystem: bar
+ directory: foo
+ nfs_policy: nfs-example
+ smb_policy: smb-example
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete NFS export for directory foo in filesystem bar
+ purestorage.flasharray.purefa_export:
+ name: export1
+ filesystem: bar
+ directory: foo
+ nfs_policy: nfs-example
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.3"
+
+
+def delete_export(module, array):
+ """Delete a file system export"""
+ changed = False
+ all_policies = []
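+ # managed directories are addressed as "<file system>:<directory>"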
+ directory = module.params["filesystem"] + ":" + module.params["directory"]
+ if not module.params["nfs_policy"] and not module.params["smb_policy"]:
+ module.fail_json(msg="At least one policy must be provided")
+ if module.params["nfs_policy"]:
+ policy_exists = bool(
+ array.get_directory_exports(
+ export_names=[module.params["name"]],
+ policy_names=[module.params["nfs_policy"]],
+ directory_names=[directory],
+ ).status_code
+ == 200
+ )
+ if policy_exists:
+ all_policies.append(module.params["nfs_policy"])
+ if module.params["smb_policy"]:
+ policy_exists = bool(
+ array.get_directory_exports(
+ export_names=[module.params["name"]],
+ policy_names=[module.params["smb_policy"]],
+ directory_names=[directory],
+ ).status_code
+ == 200
+ )
+ if policy_exists:
+ all_policies.append(module.params["smb_policy"])
+ if all_policies:
+ changed = True
+ if not module.check_mode:
+ res = array.delete_directory_exports(
+ export_names=[module.params["name"]], policy_names=all_policies
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete file system export {0}. {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_export(module, array):
+ """Create a file system export"""
+ changed = False
+ if not module.params["nfs_policy"] and not module.params["smb_policy"]:
+ module.fail_json(msg="At least one policy must be provided")
+ all_policies = []
+ if module.params["nfs_policy"]:
+ if bool(
+ array.get_policies_nfs(names=[module.params["nfs_policy"]]).status_code
+ != 200
+ ):
+ module.fail_json(
+ msg="NFS Policy {0} does not exist.".format(module.params["nfs_policy"])
+ )
+ if bool(
+ array.get_directory_exports(
+ export_names=[module.params["name"]],
+ policy_names=[module.params["nfs_policy"]],
+ ).status_code
+ != 200
+ ):
+ all_policies.append(module.params["nfs_policy"])
+ if module.params["smb_policy"]:
+ if bool(
+ array.get_policies_smb(names=[module.params["smb_policy"]]).status_code
+ != 200
+ ):
+ module.fail_json(
+ msg="SMB Policy {0} does not exist.".format(module.params["smb_policy"])
+ )
+ if bool(
+ array.get_directory_exports(
+ export_names=[module.params["name"]],
+ policy_names=[module.params["smb_policy"]],
+ ).status_code
+ != 200
+ ):
+ all_policies.append(module.params["smb_policy"])
+ if all_policies:
+ export = flasharray.DirectoryExportPost(export_name=module.params["name"])
+ changed = True
+ if not module.check_mode:
+ res = array.post_directory_exports(
+ directory_names=[
+ module.params["filesystem"] + ":" + module.params["directory"]
+ ],
+ exports=export,
+ policy_names=all_policies,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create file system exports for {0}:{1}. Error: {2}".format(
+ module.params["filesystem"],
+ module.params["directory"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ filesystem=dict(type="str", required=True),
+ directory=dict(type="str", required=True),
+ name=dict(type="str", required=True),
+ nfs_policy=dict(type="str"),
+ smb_policy=dict(type="str"),
+ )
+ )
+
+ required_if = [["state", "present", ["filesystem", "directory"]]]
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ array = get_array(module)
+ state = module.params["state"]
+
+ exists = bool(
+ array.get_directory_exports(export_names=[module.params["name"]]).status_code
+ == 200
+ )
+
+ if state == "present":
+ create_export(module, array)
+ elif state == "absent" and exists:
+ delete_export(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py
new file mode 100644
index 000000000..05fbcb29b
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_fs
+version_added: '1.5.0'
+short_description: Manage FlashArray File Systems
+description:
+- Create/Delete FlashArray File Systems
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the file system
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the file system should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the file system on delete or leave in trash.
+ type: bool
+ default: false
+ rename:
+ description:
+ - Value to rename the specified file system to.
+ - Rename only applies to the container the current filesystem is in.
+ - There is no requirement to specify the pod name as this is implied.
+ type: str
+ move:
+ description:
+ - Move a filesystem in and out of a pod.
+ - Provide the name of the pod to move the filesystem to.
+ - Pod names must be unique in the array.
+ - To move to the local array, specify C(local).
+ - This is not idempotent - use C(ignore_errors) in the play.
+ type: str
+ version_added: '1.13.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create file system foo
+ purestorage.flasharray.purefa_fs:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete and eradicate file system foo
+ purestorage.flasharray.purefa_fs:
+ name: foo
+ eradicate: true
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Rename file system foo to bar
+ purestorage.flasharray.purefa_fs:
+ name: foo
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
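+
+# Illustrative example - pod1 is a placeholder pod name; move is not
+# idempotent, so consider ignore_errors in the play
+- name: Move file system foo into pod pod1
+ purestorage.flasharray.purefa_fs:
+ name: foo
+ move: pod1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592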
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+REPL_SUPPORT_API = "2.13"
+
+
+def delete_fs(module, array):
+ """Delete a file system"""
+ changed = True
+ if not module.check_mode:
+ try:
+ file_system = flasharray.FileSystemPatch(destroyed=True)
+ array.patch_file_systems(
+ names=[module.params["name"]], file_system=file_system
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete file system {0}".format(module.params["name"])
+ )
+ if module.params["eradicate"]:
+ try:
+ array.delete_file_systems(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Eradication of file system {0} failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def recover_fs(module, array):
+ """Recover a deleted file system"""
+ changed = True
+ if not module.check_mode:
+ try:
+ file_system = flasharray.FileSystemPatch(destroyed=False)
+ array.patch_file_systems(
+ names=[module.params["name"]], file_system=file_system
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to recover file system {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def eradicate_fs(module, array):
+ """Eradicate a file system"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.delete_file_systems(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to eradicate file system {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def rename_fs(module, array):
+ """Rename a file system"""
+ changed = False
+ target_name = module.params["rename"]
+ if "::" in module.params["name"]:
+ pod_name = module.params["name"].split("::")[0]
+ target_name = pod_name + "::" + module.params["rename"]
+ try:
+ target = list(array.get_file_systems(names=[target_name]).items)[0]
+ except Exception:
+ target = None
+ if not target:
+ changed = True
+ if not module.check_mode:
+ try:
+ file_system = flasharray.FileSystemPatch(name=target_name)
+ array.patch_file_systems(
+ names=[module.params["name"]], file_system=file_system
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to rename file system {0}".format(module.params["name"])
+ )
+ else:
+ module.fail_json(
+ msg="Target file system {0} already exists".format(module.params["rename"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_fs(module, array):
+ """Create a file system"""
+ changed = True
+ if "::" in module.params["name"]:
+ pod_name = module.params["name"].split("::")[0]
+ try:
+ pod = list(array.get_pods(names=[pod_name]).items)[0]
+ except Exception:
+ module.fail_json(
+ msg="Failed to create filesystem. Pod {0} does not exist".format(
+ pod_name
+ )
+ )
+ if pod.promotion_status == "demoted":
+ module.fail_json(msg="Filesystem cannot be created in a demoted pod")
+ if not module.check_mode:
+ try:
+ array.post_file_systems(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to create file system {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def move_fs(module, array):
+ """Move filesystem between pods or local array"""
+ changed = False
+ target_exists = False
+ pod_name = ""
+ fs_name = module.params["name"]
+ if "::" in module.params["name"]:
+ fs_name = module.params["name"].split("::")[1]
+ pod_name = module.params["name"].split("::")[0]
+ if module.params["move"] == "local":
+ target_location = ""
+ if "::" not in module.params["name"]:
+ module.fail_json(msg="Source and destination [local] cannot be the same.")
+ try:
+ target_exists = list(array.get_file_systems(names=[fs_name]).items)[0]
+ except Exception:
+ target_exists = False
+ if target_exists:
+ module.fail_json(msg="Target filesystem {0} already exists".format(fs_name))
+ else:
+ try:
+ pod = list(array.get_pods(names=[module.params["move"]]).items)[0]
+ if len(pod.arrays) > 1:
+ module.fail_json(msg="Filesystem cannot be moved into a stretched pod")
+ if pod.link_target_count != 0:
+ module.fail_json(
+ msg="Filesystem cannot be moved into a linked source pod"
+ )
+ if pod.promotion_status == "demoted":
+ module.fail_json(msg="Volume cannot be moved into a demoted pod")
+ except Exception:
+ module.fail_json(
+ msg="Failed to move filesystem. Pod {0} does not exist".format(pod_name)
+ )
+ if "::" in module.params["name"]:
+ pod = list(array.get_pods(names=[module.params["move"]]).items)[0]
+ if len(pod.arrays) > 1:
+ module.fail_json(
+ msg="Filesystem cannot be moved out of a stretched pod"
+ )
+ if pod.link_target_count != 0:
+ module.fail_json(
+ msg="Filesystem cannot be moved out of a linked source pod"
+ )
+ if pod.promotion_status == "demoted":
+ module.fail_json(msg="Volume cannot be moved out of a demoted pod")
+ target_location = module.params["move"]
+ changed = True
+ if not module.check_mode:
+ file_system = flasharray.FileSystemPatch(
+ pod=flasharray.Reference(name=target_location)
+ )
+ move_res = array.patch_file_systems(
+ names=[module.params["name"]], file_system=file_system
+ )
+ if move_res.status_code != 200:
+ module.fail_json(
+ msg="Move of filesystem {0} failed. Error: {1}".format(
+ module.params["name"], move_res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ eradicate=dict(type="bool", default=False),
+ name=dict(type="str", required=True),
+ move=dict(type="str"),
+ rename=dict(type="str"),
+ )
+ )
+
+ mutually_exclusive = [["move", "rename"]]
+ module = AnsibleModule(
+ argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ if REPL_SUPPORT_API not in api_version and "::" in module.params["name"]:
+ module.fail_json(
+ msg="Filesystem Replication is only supported in Purity//FA 6.3.0 or higher"
+ )
+ array = get_array(module)
+ state = module.params["state"]
+
+ try:
+ filesystem = list(array.get_file_systems(names=[module.params["name"]]).items)[
+ 0
+ ]
+ exists = True
+ except Exception:
+ exists = False
+
+ if state == "present" and not exists and not module.params["move"]:
+ create_fs(module, array)
+ elif (
+ state == "present"
+ and exists
+ and module.params["move"]
+ and not filesystem.destroyed
+ ):
+ move_fs(module, array)
+ elif (
+ state == "present"
+ and exists
+ and module.params["rename"]
+ and not filesystem.destroyed
+ ):
+ rename_fs(module, array)
+ elif (
+ state == "present"
+ and exists
+ and filesystem.destroyed
+ and not module.params["rename"]
+ and not module.params["move"]
+ ):
+ recover_fs(module, array)
+ elif (
+ state == "present" and exists and filesystem.destroyed and module.params["move"]
+ ):
+ module.fail_json(
+ msg="Filesystem {0} exists, but in destroyed state".format(
+ module.params["name"]
+ )
+ )
+ elif state == "absent" and exists and not filesystem.destroyed:
+ delete_fs(module, array)
+ elif (
+ state == "absent"
+ and exists
+ and module.params["eradicate"]
+ and filesystem.destroyed
+ ):
+ eradicate_fs(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py
new file mode 100644
index 000000000..0467501e2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hg.py
@@ -0,0 +1,433 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_hg
+version_added: '1.0.0'
+short_description: Manage hostgroups on Pure Storage FlashArrays
+description:
+- Create, delete or modify hostgroups on Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the hostgroup.
+ type: str
+ required: true
+ aliases: [ hostgroup ]
+ state:
+ description:
+ - Define whether the hostgroup should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ host:
+ type: list
+ elements: str
+ description:
+ - List of existing hosts to add to hostgroup.
+ - Note that hostnames are case-sensitive, however FlashArray hostnames are unique
+ and ignore case - you cannot have both I(hosta) and I(hostA).
+ volume:
+ type: list
+ elements: str
+ description:
+ - List of existing volumes to add to hostgroup.
+ - Note that volume names are case-sensitive, however FlashArray volume names are unique
+ and ignore case - you cannot have both I(volumea) and I(volumeA).
+ lun:
+ description:
+ - LUN ID to assign to volume for hostgroup. Must be unique.
+ - Only applicable when only one volume is specified for connection.
+ - If not provided the ID will be automatically assigned.
+ - Range for LUN ID is 1 to 4095.
+ type: int
+ rename:
+ description:
+ - New name of hostgroup
+ type: str
+ version_added: '1.10.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create empty hostgroup
+ purestorage.flasharray.purefa_hg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add hosts and volumes to existing or new hostgroup
+ purestorage.flasharray.purefa_hg:
+ name: foo
+ host:
+ - host1
+ - host2
+ volume:
+ - vol1
+ - vol2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete hosts and volumes from hostgroup
+ purestorage.flasharray.purefa_hg:
+ name: foo
+ host:
+ - host1
+ - host2
+ volume:
+ - vol1
+ - vol2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+# This will disconnect all hosts and volumes in the hostgroup
+- name: Delete hostgroup
+ purestorage.flasharray.purefa_hg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Rename hostgroup
+ purestorage.flasharray.purefa_hg:
+ name: foo
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create host group with hosts and volumes
+ purestorage.flasharray.purefa_hg:
+ name: bar
+ host:
+ - host1
+ - host2
+ volume:
+ - vol1
+ - vol2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
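+
+# Illustrative example - LUN ID 12 is arbitrary; lun must be unique, in the
+# range 1 to 4095, and may only be used when a single volume is specified
+- name: Connect volume to hostgroup with a specific LUN ID
+ purestorage.flasharray.purefa_hg:
+ name: foo
+ volume:
+ - vol1
+ lun: 12
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592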
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def rename_exists(module, array):
+ """Determine if rename target already exists"""
+ exists = False
+ new_name = module.params["rename"]
+ for hgroup in array.list_hgroups():
+ if hgroup["name"].casefold() == new_name.casefold():
+ exists = True
+ break
+ return exists
+
+
+def get_hostgroup(module, array):
+ hostgroup = None
+
+ for host in array.list_hgroups():
+ if host["name"].casefold() == module.params["name"].casefold():
+ hostgroup = host
+ break
+
+ return hostgroup
+
+
+def make_hostgroup(module, array):
+ if module.params["rename"]:
+ module.fail_json(
+ msg="Hostgroup {0} does not exist - rename failed.".format(
+ module.params["name"]
+ )
+ )
+ changed = True
+ if not module.check_mode:
+ try:
+ array.create_hgroup(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to create hostgroup {0}".format(module.params["name"])
+ )
+ if module.params["host"]:
+ array.set_hgroup(module.params["name"], hostlist=module.params["host"])
+ if module.params["volume"]:
+ if len(module.params["volume"]) == 1 and module.params["lun"]:
+ try:
+ array.connect_hgroup(
+ module.params["name"],
+ module.params["volume"][0],
+ lun=module.params["lun"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to add volume {0} with LUN ID {1}".format(
+ module.params["volume"][0], module.params["lun"]
+ )
+ )
+ else:
+ for vol in module.params["volume"]:
+ try:
+ array.connect_hgroup(module.params["name"], vol)
+ except Exception:
+ module.fail_json(msg="Failed to add volume to hostgroup")
+ module.exit_json(changed=changed)
+
+
+def update_hostgroup(module, array):
+ changed = False
+ renamed = False
+ hgroup = get_hostgroup(module, array)
+ current_hostgroup = module.params["name"]
+ volumes = array.list_hgroup_connections(module.params["name"])
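+ # state=present adds the given hosts/volumes to the group (and handles
+ # rename); state=absent removes only the given hosts/volumes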
+ if module.params["state"] == "present":
+ if module.params["rename"]:
+ if not rename_exists(module, array):
+ try:
+ if not module.check_mode:
+ array.rename_hgroup(
+ module.params["name"], module.params["rename"]
+ )
+ current_hostgroup = module.params["rename"]
+ renamed = True
+ except Exception:
+ module.fail_json(
+ msg="Rename to {0} failed.".format(module.params["rename"])
+ )
+ else:
+ module.warn(
+ "Rename failed. Hostgroup {0} already exists. Continuing with other changes...".format(
+ module.params["rename"]
+ )
+ )
+ if module.params["host"]:
+ cased_hosts = list(module.params["host"])
+ cased_hghosts = list(hgroup["hosts"])
+ new_hosts = list(set(cased_hosts).difference(cased_hghosts))
+ if new_hosts:
+ try:
+ if not module.check_mode:
+ array.set_hgroup(current_hostgroup, addhostlist=new_hosts)
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to add host(s) to hostgroup")
+ if module.params["volume"]:
+ if volumes:
+ current_vols = [vol["vol"] for vol in volumes]
+ cased_vols = list(module.params["volume"])
+ new_volumes = list(set(cased_vols).difference(set(current_vols)))
+ if len(new_volumes) == 1 and module.params["lun"]:
+ try:
+ if not module.check_mode:
+ array.connect_hgroup(
+ current_hostgroup,
+ new_volumes[0],
+ lun=module.params["lun"],
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to add volume {0} with LUN ID {1}".format(
+ new_volumes[0], module.params["lun"]
+ )
+ )
+ else:
+ for cvol in new_volumes:
+ try:
+ if not module.check_mode:
+ array.connect_hgroup(current_hostgroup, cvol)
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to connect volume {0} to hostgroup {1}.".format(
+ cvol, current_hostgroup
+ )
+ )
+ else:
+ if len(module.params["volume"]) == 1 and module.params["lun"]:
+ try:
+ if not module.check_mode:
+ array.connect_hgroup(
+ current_hostgroup,
+ module.params["volume"][0],
+ lun=module.params["lun"],
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to add volume {0} with LUN ID {1}".format(
+ module.params["volume"], module.params["lun"]
+ )
+ )
+ else:
+ for cvol in module.params["volume"]:
+ try:
+ if not module.check_mode:
+ array.connect_hgroup(current_hostgroup, cvol)
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to connect volume {0} to hostgroup {1}.".format(
+ cvol, current_hostgroup
+ )
+ )
+ else:
+ if module.params["host"]:
+ cased_old_hosts = list(module.params["host"])
+ cased_hosts = list(hgroup["hosts"])
+ old_hosts = list(set(cased_old_hosts).intersection(cased_hosts))
+ if old_hosts:
+ try:
+ if not module.check_mode:
+ array.set_hgroup(current_hostgroup, remhostlist=old_hosts)
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to remove hosts {0} from hostgroup {1}".format(
+ old_hosts, current_hostgroup
+ )
+ )
+ if module.params["volume"]:
+ cased_old_vols = list(module.params["volume"])
+ old_volumes = list(
+ set(cased_old_vols).intersection(set([vol["vol"] for vol in volumes]))
+ )
+ if old_volumes:
+ changed = True
+ for cvol in old_volumes:
+ try:
+ if not module.check_mode:
+ array.disconnect_hgroup(current_hostgroup, cvol)
+ except Exception:
+ module.fail_json(
+ msg="Failed to disconnect volume {0} from hostgroup {1}".format(
+ cvol, current_hostgroup
+ )
+ )
+ changed = changed or renamed
+ module.exit_json(changed=changed)
+
+
+def delete_hostgroup(module, array):
+ changed = True
+ try:
+ vols = array.list_hgroup_connections(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to get volume connection for hostgroup {0}".format(
+ module.params["hostgroup"]
+ )
+ )
+ if not module.check_mode:
+ for vol in vols:
+ try:
+ array.disconnect_hgroup(module.params["name"], vol["vol"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to disconnect volume {0} from hostgroup {1}".format(
+ vol["vol"], module.params["name"]
+ )
+ )
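+ # All member hosts must be removed before the hostgroup itself can be deleted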
+ host = array.get_hgroup(module.params["name"])
+ if not module.check_mode:
+ try:
+ array.set_hgroup(module.params["name"], remhostlist=host["hosts"])
+ try:
+ array.delete_hgroup(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete hostgroup {0}".format(
+ module.params["name"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to remove hosts {0} from hostgroup {1}".format(
+ host["hosts"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True, aliases=["hostgroup"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ host=dict(type="list", elements="str"),
+ lun=dict(type="int"),
+ rename=dict(type="str"),
+ volume=dict(type="list", elements="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ array = get_system(module)
+ hostgroup = get_hostgroup(module, array)
+
+ if module.params["host"]:
+ try:
+ for hst in module.params["host"]:
+ array.get_host(hst)
+ except Exception:
+ module.fail_json(msg="Host {0} not found".format(hst))
+ if module.params["lun"] and len(module.params["volume"]) > 1:
+ module.fail_json(msg="LUN ID cannot be specified with multiple volumes.")
+
+ if module.params["lun"] and not 1 <= module.params["lun"] <= 4095:
+ module.fail_json(
+ msg="LUN ID of {0} is out of range (1 to 4095)".format(module.params["lun"])
+ )
+
+ if module.params["volume"]:
+ try:
+ for vol in module.params["volume"]:
+ array.get_volume(vol)
+ except Exception:
+ module.exit_json(changed=False)
+
+ if hostgroup and state == "present":
+ update_hostgroup(module, array)
+ elif hostgroup and module.params["volume"] and state == "absent":
+ update_hostgroup(module, array)
+ elif hostgroup and module.params["host"] and state == "absent":
+ update_hostgroup(module, array)
+ elif hostgroup and state == "absent":
+ delete_hostgroup(module, array)
+ elif hostgroup is None and state == "absent":
+ module.exit_json(changed=False)
+ else:
+ make_hostgroup(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
new file mode 100644
index 000000000..9054d8f30
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
@@ -0,0 +1,1085 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_host
+version_added: '1.0.0'
+short_description: Manage hosts on Pure Storage FlashArrays
+description:
+- Create, delete or modify hosts on Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- If specifying the C(lun) option, ensure the host supports the requested value
+options:
+ name:
+ description:
+ - The name of the host.
+ - Note that hostnames are case-sensitive, however FlashArray hostnames are unique
+ and ignore case - you cannot have both I(hosta) and I(hostA)
+ - Multi-host support available from Purity//FA 6.0.0
+ B(***NOTE***) Manual deletion of individual hosts created
+ using multi-host will cause idempotency to fail
+ - Multi-host support only exists for host creation
+ type: str
+ required: true
+ aliases: [ host ]
+ protocol:
+ description:
+ - Defines the host connection protocol for volumes.
+ - DEPRECATED - No longer a necessary parameter
+ type: str
+ choices: [ fc, iscsi, nvme, mixed ]
+ rename:
+ description:
+ - The name to rename to.
+ - Note that hostnames are case-sensitive, however FlashArray hostnames are unique
+ and ignore case - you cannot have both I(hosta) and I(hostA)
+ type: str
+ state:
+ description:
+ - Define whether the host should exist or not.
+ - When removing host all connected volumes will be disconnected.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ wwns:
+ type: list
+ elements: str
+ description:
+ - List of wwns of the host.
+ iqn:
+ type: list
+ elements: str
+ description:
+ - List of IQNs of the host.
+ nqn:
+ type: list
+ elements: str
+ description:
+ - List of NQNs of the host.
+ volume:
+ type: str
+ description:
+ - Volume name to map to the host.
+ lun:
+ description:
+ - LUN ID to assign to volume for host. Must be unique.
+ - If not provided the ID will be automatically assigned.
+ - Range for LUN ID is 1 to 4095.
+ type: int
+ count:
+ description:
+ - Number of hosts to be created in a multiple host creation
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ start:
+ description:
+ - Number at which to start the multiple host creation index
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ default: 0
+ digits:
+ description:
+ - Number of digits to use for multiple host count. This
+ will pad the index number with zeros where necessary
+ - Only supported from Purity//FA v6.0.0 and higher
+ - Range is between 1 and 10
+ type: int
+ default: 1
+ suffix:
+ description:
+ - Suffix string, if required, for multiple host create
+ - Host names will be formed as I(<name>#<suffix>), where
+ I(#) is a placeholder for the host index.
+ See the associated I(start) and I(digits) descriptions
+ - Suffix string is optional
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: str
+ personality:
+ type: str
+ description:
+ - Define which operating system the host is. Recommended for
+ ActiveCluster integration.
+ default: ''
+ choices: ['hpux', 'vms', 'aix', 'esxi', 'solaris', 'hitachi-vsp', 'oracle-vm-server', 'delete', '']
+ preferred_array:
+ type: list
+ elements: str
+ description:
+ - List of preferred arrays in an ActiveCluster environment.
+ - To remove existing preferred arrays from the host, specify I(delete).
+ target_user:
+ type: str
+ description:
+ - Sets the target user name for CHAP authentication
+ - Required with I(target_password)
+ - To clear the username/password pair use I(clear) as the password
+ target_password:
+ type: str
+ description:
+ - Sets the target password for CHAP authentication
+ - Password length between 12 and 255 characters
+ - To clear the username/password pair use I(clear) as the password
+ - SETTING A PASSWORD IS NON-IDEMPOTENT
+ host_user:
+ type: str
+ description:
+ - Sets the host user name for CHAP authentication
+ - Required with I(host_password)
+ - To clear the username/password pair use I(clear) as the password
+ host_password:
+ type: str
+ description:
+ - Sets the host password for CHAP authentication
+ - Password length between 12 and 255 characters
+ - To clear the username/password pair use I(clear) as the password
+ - SETTING A PASSWORD IS NON-IDEMPOTENT
+ vlan:
+ type: str
+ description:
+ - The VLAN ID that the host is associated with.
+ - If not set or set to I(any), the host can access any VLAN.
+ - If set to I(untagged), the host can only access untagged VLANs.
+ - If set to a number between 1 and 4094, the host can only access the specified VLAN with that number.
+ version_added: '1.16.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new AIX host
+ purestorage.flasharray.purefa_host:
+ name: foo
+ personality: aix
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create 10 hosts with index starting at 10 but padded with 3 digits
+ purestorage.flasharray.purefa_host:
+ name: foo
+ personality: vms
+ suffix: bar
+ count: 10
+ start: 10
+ digits: 3
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Rename host foo to bar
+ purestorage.flasharray.purefa_host:
+ name: foo
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete host
+ purestorage.flasharray.purefa_host:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Make host bar with wwn ports
+ purestorage.flasharray.purefa_host:
+ name: bar
+ wwns:
+ - 00:00:00:00:00:00:00
+ - 11:11:11:11:11:11:11
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Make host bar with iSCSI ports
+ purestorage.flasharray.purefa_host:
+ name: bar
+ iqn:
+ - iqn.1994-05.com.redhat:7d366003913
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Make host bar with NVMe ports
+ purestorage.flasharray.purefa_host:
+ name: bar
+ nqn:
+ - nqn.2014-08.com.vendor:nvme:nvm-subsystem-sn-d78432
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Make mixed protocol host
+ purestorage.flasharray.purefa_host:
+ name: bar
+ nqn:
+ - nqn.2014-08.com.vendor:nvme:nvm-subsystem-sn-d78432
+ iqn:
+ - iqn.1994-05.com.redhat:7d366003914
+ wwns:
+ - 00:00:00:00:00:00:01
+ - 11:11:11:11:11:11:12
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Map host foo to volume bar as LUN ID 12
+ purestorage.flasharray.purefa_host:
+ name: foo
+ volume: bar
+ lun: 12
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disconnect volume bar from host foo
+ purestorage.flasharray.purefa_host:
+ name: foo
+ volume: bar
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add preferred arrays to host foo
+ purestorage.flasharray.purefa_host:
+ name: foo
+ preferred_array:
+ - array1
+ - array2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete preferred arrays from host foo
+ purestorage.flasharray.purefa_host:
+ name: foo
+ preferred_array: delete
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete existing WWNs from host foo (does not delete host object)
+ purestorage.flasharray.purefa_host:
+ name: foo
+ wwns: ""
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set CHAP target and host username/password pairs
+ purestorage.flasharray.purefa_host:
+ name: foo
+ target_user: user1
+ target_password: passwordpassword
+ host_user: user2
+ host_password: passwordpassword
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete CHAP target and host username/password pairs
+ purestorage.flasharray.purefa_host:
+ name: foo
+ target_user: user
+ target_password: clear
+ host_user: user
+ host_password: clear
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
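+
+# Hedged sketch, not taken from the upstream examples: the vlan parameter
+# (Purity//FA 6.3.4 or higher) restricts the host to a single VLAN; the
+# values below are illustrative.
+- name: Restrict host foo to VLAN 2000
+ purestorage.flasharray.purefa_host:
+ name: foo
+ vlan: "2000"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592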
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
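+# Minimum REST API versions that gate the optional features handled below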
+AC_REQUIRED_API_VERSION = "1.14"
+PREFERRED_ARRAY_API_VERSION = "1.15"
+NVME_API_VERSION = "1.16"
+MULTI_HOST_VERSION = "2.2"
+VLAN_VERSION = "2.16"
+
+
+def _is_cbs(array):
+ """Is the selected array a Cloud Block Store"""
+ model = array.get(controllers=True)[0]["model"]
+ return "CBS" in model
+
+
+def _set_host_initiators(module, array):
+ """Set host initiators."""
+ if module.params["nqn"]:
+ try:
+ array.set_host(module.params["name"], nqnlist=module.params["nqn"])
+ except Exception:
+ module.fail_json(msg="Setting of NVMe NQN failed.")
+ if module.params["iqn"]:
+ try:
+ array.set_host(module.params["name"], iqnlist=module.params["iqn"])
+ except Exception:
+ module.fail_json(msg="Setting of iSCSI IQN failed.")
+ if module.params["wwns"]:
+ try:
+ array.set_host(module.params["name"], wwnlist=module.params["wwns"])
+ except Exception:
+ module.fail_json(msg="Setting of FC WWNs failed.")
+
+
+def _update_host_initiators(module, array, answer=False):
+ """Change host initiator if iscsi or nvme or add new FC WWNs"""
+ if module.params["nqn"]:
+ current_nqn = array.get_host(module.params["name"])["nqn"]
+ if module.params["nqn"] != [""]:
+ if current_nqn != module.params["nqn"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"], nqnlist=module.params["nqn"]
+ )
+ except Exception:
+ module.fail_json(msg="Change of NVMe NQN failed.")
+ elif current_nqn:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(module.params["name"], remnqnlist=current_nqn)
+ except Exception:
+ module.fail_json(msg="Removal of NVMe NQN failed.")
+ if module.params["iqn"]:
+ current_iqn = array.get_host(module.params["name"])["iqn"]
+ if module.params["iqn"] != [""]:
+ if current_iqn != module.params["iqn"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"], iqnlist=module.params["iqn"]
+ )
+ except Exception:
+ module.fail_json(msg="Change of iSCSI IQN failed.")
+ elif current_iqn:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(module.params["name"], remiqnlist=current_iqn)
+ except Exception:
+ module.fail_json(msg="Removal of iSCSI IQN failed.")
+ if module.params["wwns"]:
+ module.params["wwns"] = [wwn.replace(":", "") for wwn in module.params["wwns"]]
+ module.params["wwns"] = [wwn.upper() for wwn in module.params["wwns"]]
+ current_wwn = array.get_host(module.params["name"])["wwn"]
+ if module.params["wwns"] != [""]:
+ if current_wwn != module.params["wwns"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"], wwnlist=module.params["wwns"]
+ )
+ except Exception:
+ module.fail_json(msg="FC WWN change failed.")
+ elif current_wwn:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(module.params["name"], remwwnlist=current_wwn)
+ except Exception:
+ module.fail_json(msg="Removal of all FC WWNs failed.")
+ return answer
+
+
+def _connect_new_volume(module, array, answer=False):
+ """Connect volume to host"""
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version and module.params["lun"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.connect_host(
+ module.params["name"],
+ module.params["volume"],
+ lun=module.params["lun"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="LUN ID {0} invalid. Check for duplicate LUN IDs.".format(
+ module.params["lun"]
+ )
+ )
+ else:
+ answer = True
+ if not module.check_mode:
+ array.connect_host(module.params["name"], module.params["volume"])
+ return answer
+
+
+def _disconnect_volume(module, array, answer=False):
+ """Disconnect volume from host"""
+ answer = True
+ if not module.check_mode:
+ try:
+ array.disconnect_host(module.params["name"], module.params["volume"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to disconnect volume {0}".format(module.params["volume"])
+ )
+ return answer
+
+
+def _set_host_personality(module, array):
+ """Set host personality. Only called when supported"""
+ if module.params["personality"] != "delete":
+ array.set_host(module.params["name"], personality=module.params["personality"])
+ else:
+ array.set_host(module.params["name"], personality="")
+
+
+def _set_preferred_array(module, array):
+ """Set preferred array list. Only called when supported"""
+ if module.params["preferred_array"] != ["delete"]:
+ array.set_host(
+ module.params["name"], preferred_array=module.params["preferred_array"]
+ )
+ else:
+ array.set_host(module.params["name"], preferred_array=[])
+
+
+def _set_chap_security(module, array):
+ """Set CHAP usernames and passwords"""
+ pattern = re.compile("[^ ]{12,255}")
+ if module.params["host_user"]:
+ if not pattern.match(module.params["host_password"]):
+ module.fail_json(
+ msg="host_password must contain a minimum of 12 and a maximum of 255 characters"
+ )
+ try:
+ array.set_host(
+ module.params["name"],
+ host_user=module.params["host_user"],
+ host_password=module.params["host_password"],
+ )
+ except Exception:
+ module.params(msg="Failed to set CHAP host username and password")
+ if module.params["target_user"]:
+ if not pattern.match(module.params["target_password"]):
+ module.fail_json(
+ msg="target_password must contain a minimum of 12 and a maximum of 255 characters"
+ )
+ try:
+ array.set_host(
+ module.params["name"],
+ target_user=module.params["target_user"],
+ target_password=module.params["target_password"],
+ )
+ except Exception:
+ module.params(msg="Failed to set CHAP target username and password")
+
+
+def _update_chap_security(module, array, answer=False):
+ """Change CHAP usernames and passwords"""
+ pattern = re.compile("[^ ]{12,255}")
+ chap = array.get_host(module.params["name"], chap=True)
+ if module.params["host_user"]:
+ if module.params["host_password"] == "clear":
+ if chap["host_user"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(module.params["name"], host_user="")
+ except Exception:
+ module.fail_json(
+ msg="Failed to clear CHAP host username and password"
+ )
+ else:
+ if not pattern.match(module.params["host_password"]):
+ module.fail_json(
+ msg="host_password must contain a minimum of 12 and a maximum of 255 characters"
+ )
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"],
+ host_user=module.params["host_user"],
+ host_password=module.params["host_password"],
+ )
+ except Exception:
+ module.params(msg="Failed to set CHAP host username and password")
+ if module.params["target_user"]:
+ if module.params["target_password"] == "clear":
+ if chap["target_user"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(module.params["name"], target_user="")
+ except Exception:
+ module.fail_json(
+ msg="Failed to clear CHAP target username and password"
+ )
+ else:
+ if not pattern.match(module.params["target_password"]):
+ module.fail_json(
+ msg="target_password must contain a minimum of 12 and a maximum of 255 characters"
+ )
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"],
+ target_user=module.params["target_user"],
+ target_password=module.params["target_password"],
+ )
+ except Exception:
+ module.params(msg="Failed to set CHAP target username and password")
+ return answer
+
+
+def _update_host_personality(module, array, answer=False):
+ """Change host personality. Only called when supported"""
+ personality = array.get_host(module.params["name"], personality=True)["personality"]
+ if personality is None and module.params["personality"] != "delete":
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"], personality=module.params["personality"]
+ )
+ except Exception:
+ module.fail_json(msg="Personality setting failed.")
+ if personality is not None:
+ if module.params["personality"] == "delete":
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(module.params["name"], personality="")
+ except Exception:
+ module.fail_json(msg="Personality deletion failed.")
+ elif personality != module.params["personality"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"], personality=module.params["personality"]
+ )
+ except Exception:
+ module.fail_json(msg="Personality change failed.")
+ return answer
+
+
+def _update_preferred_array(module, array, answer=False):
+ """Update existing preferred array list. Only called when supported"""
+ preferred_array = array.get_host(module.params["name"], preferred_array=True)[
+ "preferred_array"
+ ]
+ if preferred_array == [] and module.params["preferred_array"] != ["delete"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"],
+ preferred_array=module.params["preferred_array"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Preferred array list creation failed for {0}.".format(
+ module.params["name"]
+ )
+ )
+ elif preferred_array != []:
+ if module.params["preferred_array"] == ["delete"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(module.params["name"], preferred_array=[])
+ except Exception:
+ module.fail_json(
+ msg="Preferred array list deletion failed for {0}.".format(
+ module.params["name"]
+ )
+ )
+ elif preferred_array != module.params["preferred_array"]:
+ answer = True
+ if not module.check_mode:
+ try:
+ array.set_host(
+ module.params["name"],
+ preferred_array=module.params["preferred_array"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Preferred array list change failed for {0}.".format(
+ module.params["name"]
+ )
+ )
+ return answer
+
+
+def _set_vlan(module):
+ array = get_array(module)
+ res = array.patch_hosts(
+ names=[module.params["name"]],
+ host=flasharray.HostPatch(vlan=module.params["vlan"]),
+ )
+ if res.status_code != 200:
+ module.warn(
+ "Failed to set host VLAN ID. Error: {0}".format(res.errors[0].message)
+ )
+
+
+def _update_vlan(module):
+ changed = False
+ array = get_array(module)
+ host_vlan = getattr(
+ list(array.get_hosts(names=[module.params["name"]]).items)[0], "vlan", None
+ )
+ if module.params["vlan"] != host_vlan:
+ changed = True
+ if not module.check_mode:
+ res = array.patch_hosts(
+ names=[module.params["name"]],
+ host=flasharray.HostPatch(vlan=module.params["vlan"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update host VLAN ID. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ return changed
+
+
+def get_multi_hosts(module):
+ """Return True is all hosts exist"""
+ hosts = []
+ array = get_array(module)
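+ # Recreate the expected host names: <name><index zero-padded to 'digits'><suffix>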
+ for host_num in range(
+ module.params["start"], module.params["count"] + module.params["start"]
+ ):
+ if module.params["suffix"]:
+ hosts.append(
+ module.params["name"]
+ + str(host_num).zfill(module.params["digits"])
+ + module.params["suffix"]
+ )
+ else:
+ hosts.append(
+ module.params["name"] + str(host_num).zfill(module.params["digits"])
+ )
+ return array.get_hosts(names=hosts).status_code == 200
+
+
+def get_host(module, array):
+ """Return host or None"""
+ host = None
+ for hst in array.list_hosts():
+ if hst["name"].casefold() == module.params["name"].casefold():
+ module.params["name"] = hst["name"]
+ host = hst
+ break
+ return host
+
+
+def rename_exists(module, array):
+ """Determine if rename target already exists"""
+ exists = False
+ for hst in array.list_hosts():
+ if hst["name"].casefold() == module.params["rename"].casefold():
+ exists = True
+ break
+ return exists
+
+
+def make_multi_hosts(module):
+ """Create multiple hosts"""
+ changed = True
+ if not module.check_mode:
+ hosts = []
+ array = get_array(module)
+ for host_num in range(
+ module.params["start"], module.params["count"] + module.params["start"]
+ ):
+ if module.params["suffix"]:
+ hosts.append(
+ module.params["name"]
+ + str(host_num).zfill(module.params["digits"])
+ + module.params["suffix"]
+ )
+ else:
+ hosts.append(
+ module.params["name"] + str(host_num).zfill(module.params["digits"])
+ )
+ if module.params["personality"]:
+ host = flasharray.HostPost(personality=module.params["personality"])
+ else:
+ host = flasharray.HostPost()
+ res = array.post_hosts(names=hosts, host=host)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Multi-Host {0}#{1} creation failed: {2}".format(
+ module.params["name"],
+ module.params["suffix"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def make_host(module, array):
+ """Create a new host"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.create_host(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Host {0} creation failed.".format(module.params["name"])
+ )
+ try:
+ if module.params["vlan"]:
+ _set_vlan(module)
+ _set_host_initiators(module, array)
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version and module.params["personality"]:
+ _set_host_personality(module, array)
+ if (
+ PREFERRED_ARRAY_API_VERSION in api_version
+ and module.params["preferred_array"]
+ ):
+ _set_preferred_array(module, array)
+ if module.params["host_user"] or module.params["target_user"]:
+ _set_chap_security(module, array)
+ if module.params["volume"]:
+ if module.params["lun"]:
+ array.connect_host(
+ module.params["name"],
+ module.params["volume"],
+ lun=module.params["lun"],
+ )
+ else:
+ array.connect_host(module.params["name"], module.params["volume"])
+ except Exception:
+ module.fail_json(
+ msg="Host {0} configuration failed.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_host(module, array):
+ """Modify a host"""
+ changed = False
+ renamed = False
+ vlan_changed = False
+ if module.params["state"] == "present":
+ if module.params["vlan"]:
+ vlan_changed = _update_vlan(module)
+ if module.params["rename"]:
+ if not rename_exists(module, array):
+ if not module.check_mode:
+ try:
+ array.rename_host(
+ module.params["name"], module.params["rename"]
+ )
+ module.params["name"] = module.params["rename"]
+ renamed = True
+ except Exception:
+ module.fail_json(
+ msg="Rename to {0} failed.".format(module.params["rename"])
+ )
+ else:
+ module.warn(
+ "Rename failed. Target hostname {0} already exists. "
+ "Continuing with any other changes...".format(
+ module.params["rename"]
+ )
+ )
+ init_changed = vol_changed = pers_changed = pref_changed = chap_changed = False
+ volumes = array.list_host_connections(module.params["name"])
+ if module.params["iqn"] or module.params["wwns"] or module.params["nqn"]:
+ init_changed = _update_host_initiators(module, array)
+ if module.params["volume"]:
+ current_vols = [vol["vol"] for vol in volumes]
+ if not module.params["volume"] in current_vols:
+ vol_changed = _connect_new_volume(module, array)
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ if module.params["personality"]:
+ pers_changed = _update_host_personality(module, array)
+ if PREFERRED_ARRAY_API_VERSION in api_version:
+ if module.params["preferred_array"]:
+ pref_changed = _update_preferred_array(module, array)
+ if module.params["target_user"] or module.params["host_user"]:
+ chap_changed = _update_chap_security(module, array)
+ changed = (
+ init_changed
+ or vol_changed
+ or pers_changed
+ or pref_changed
+ or chap_changed
+ or vlan_changed
+ or renamed
+ )
+ else:
+ if module.params["volume"]:
+ volumes = array.list_host_connections(module.params["name"])
+ current_vols = [vol["vol"] for vol in volumes]
+ if module.params["volume"] in current_vols:
+ vol_changed = _disconnect_volume(module, array)
+ changed = vol_changed
+ module.exit_json(changed=changed)
+
+
+def delete_host(module, array):
+ """Delete a host"""
+ changed = True
+ if not module.check_mode:
+ try:
+ hgroup = array.get_host(module.params["name"])["hgroup"]
+ if hgroup is not None:
+ array.set_hgroup(hgroup, remhostlist=[module.params["name"]])
+ for vol in array.list_host_connections(module.params["name"]):
+ array.disconnect_host(module.params["name"], vol["vol"])
+ array.delete_host(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Host {0} deletion failed".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True, aliases=["host"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ protocol=dict(
+ type="str",
+ choices=["fc", "iscsi", "nvme", "mixed"],
+ removed_from_collection="1.13",
+ removed_in_version="2.0.0",
+ ),
+ nqn=dict(type="list", elements="str"),
+ iqn=dict(type="list", elements="str"),
+ wwns=dict(type="list", elements="str"),
+ host_password=dict(type="str", no_log=True),
+ host_user=dict(type="str"),
+ target_password=dict(type="str", no_log=True),
+ target_user=dict(type="str"),
+ volume=dict(type="str"),
+ rename=dict(type="str"),
+ lun=dict(type="int"),
+ count=dict(type="int"),
+ start=dict(type="int", default=0),
+ digits=dict(type="int", default=1),
+ suffix=dict(type="str"),
+ personality=dict(
+ type="str",
+ default="",
+ choices=[
+ "hpux",
+ "vms",
+ "aix",
+ "esxi",
+ "solaris",
+ "hitachi-vsp",
+ "oracle-vm-server",
+ "delete",
+ "",
+ ],
+ ),
+ preferred_array=dict(type="list", elements="str"),
+ vlan=dict(type="str"),
+ )
+ )
+
+ required_together = [
+ ["host_password", "host_user"],
+ ["target_password", "target_user"],
+ ]
+
+ module = AnsibleModule(
+ argument_spec, supports_check_mode=True, required_together=required_together
+ )
+
+ array = get_system(module)
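+ # Purity object names: 1 to 63 alphanumeric or '-' characters, starting and ending alphanumeric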
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$")
+ if module.params["rename"]:
+ if not pattern.match(module.params["rename"]):
+ module.fail_json(
+ msg="Rename value {0} does not conform to naming convention".format(
+ module.params["rename"]
+ )
+ )
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="Host name {0} does not conform to naming convention".format(
+ module.params["name"]
+ )
+ )
+ if _is_cbs(array):
+ if module.params["wwns"] or module.params["nqn"]:
+ module.fail_json(msg="Cloud Block Store only supports iSCSI as a protocol")
+ api_version = array._list_available_rest_versions()
+ if module.params["nqn"] is not None and NVME_API_VERSION not in api_version:
+ module.fail_json(msg="NVMe protocol not supported. Please upgrade your array.")
+ state = module.params["state"]
+ if module.params["suffix"]:
+ suffix_len = len(module.params["suffix"])
+ else:
+ suffix_len = 0
+ if module.params["vlan"]:
+ if not HAS_PURESTORAGE:
+ module.fail_json(
+ msg="py-pure-client sdk is required to support 'vlan' parameter"
+ )
+ if VLAN_VERSION not in api_version:
+ module.fail_json(
+ msg="'vlan' parameter is not supported until Purity//FA 6.3.4 or higher"
+ )
+ if not module.params["vlan"] in ["any", "untagged"]:
+ try:
+ vlan = int(module.params["vlan"])
+ if vlan not in range(1, 4094):
+ module.fail_json(
+ msg="VLAN must be set to a number between 1 and 4094"
+ )
+ except Exception:
+ module.fail_json(
+ msg="Invalid string for VLAN. Must be 'any', 'untagged' or a number between 1 and 4094"
+ )
+ if module.params["count"]:
+ if not HAS_PURESTORAGE:
+ module.fail_json(
+ msg="py-pure-client sdk is required to support 'count' parameter"
+ )
+ if MULTI_HOST_VERSION not in api_version:
+ module.fail_json(
+ msg="'count' parameter is not supported until Purity//FA 6.0.0 or higher"
+ )
+ if module.params["digits"] and module.params["digits"] not in range(1, 10):
+ module.fail_json(msg="'digits' must be in the range of 1 to 10")
+ if module.params["start"] < 0:
+ module.fail_json(msg="'start' must be a positive number")
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="Host name pattern {0} does not conform to naming convention".format(
+ module.params["name"]
+ )
+ )
+ elif module.params["suffix"] and not pattern.match(module.params["suffix"]):
+ module.fail_json(
+ msg="Suffix pattern {0} does not conform to naming convention".format(
+ module.params["suffix"]
+ )
+ )
+ elif (
+ len(module.params["name"])
+ + max(
+ len(str(module.params["count"] + module.params["start"])),
+ module.params["digits"],
+ )
+ + suffix_len
+ > 63
+ ):
+ module.fail_json(msg="Host name length exceeds maximum allowed")
+ host = get_multi_hosts(module)
+ if not host and state == "present":
+ make_multi_hosts(module)
+ else:
+ host = get_host(module, array)
+ if module.params["lun"] and not 1 <= module.params["lun"] <= 4095:
+ module.fail_json(
+ msg="LUN ID of {0} is out of range (1 to 4095)".format(
+ module.params["lun"]
+ )
+ )
+ if module.params["volume"]:
+ try:
+ array.get_volume(module.params["volume"])
+ except Exception:
+ module.exit_json(changed=False)
+ if module.params["preferred_array"]:
+ try:
+ if module.params["preferred_array"] != ["delete"]:
+ all_connected_arrays = array.list_array_connections()
+ if not all_connected_arrays:
+ module.fail_json(
+ msg="No target arrays connected to source array - preferred arrays not possible."
+ )
+ else:
+ current_arrays = [array.get()["array_name"]]
+ api_version = array._list_available_rest_versions()
+ if NVME_API_VERSION in api_version:
+ for current_array in range(0, len(all_connected_arrays)):
+ if (
+ all_connected_arrays[current_array]["type"]
+ == "sync-replication"
+ ):
+ current_arrays.append(
+ all_connected_arrays[current_array][
+ "array_name"
+ ]
+ )
+ else:
+ for current_array in range(0, len(all_connected_arrays)):
+ if all_connected_arrays[current_array]["type"] == [
+ "sync-replication"
+ ]:
+ current_arrays.append(
+ all_connected_arrays[current_array][
+ "array_name"
+ ]
+ )
+ for array_to_connect in range(
+ 0, len(module.params["preferred_array"])
+ ):
+ if (
+ module.params["preferred_array"][array_to_connect]
+ not in current_arrays
+ ):
+ module.fail_json(
+ msg="Array {0} is not a synchronously connected array.".format(
+ module.params["preferred_array"][array_to_connect]
+ )
+ )
+ except Exception:
+ module.fail_json(msg="Failed to get existing array connections.")
+
+ if host is None and state == "present" and not module.params["rename"]:
+ make_host(module, array)
+ elif host is None and state == "present" and module.params["rename"]:
+ module.exit_json(changed=False)
+ elif host and state == "present":
+ update_host(module, array)
+ elif host and state == "absent" and module.params["volume"]:
+ update_host(module, array)
+ elif host and state == "absent":
+ delete_host(module, array)
+ elif host is None and state == "absent":
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
new file mode 100644
index 000000000..de7f05002
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
@@ -0,0 +1,2286 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_info
+version_added: '1.0.0'
+short_description: Collect information from Pure Storage FlashArray
+description:
+ - Collect information from a Pure Storage FlashArray running the
+ Purity//FA operating system. By default, the module will collect basic
+ information including hosts, host groups, protection
+ groups and volume counts. Additional information can be collected
+ based on the configured set of arguments.
+author:
+ - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the information to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
+ admins, volumes, snapshots, pods, replication, vgroups, offload, apps,
+ arrays, certs, kmip, clients, policies, dir_snaps, filesystems,
+ alerts and virtual_machines.
+ type: list
+ elements: str
+ required: false
+ default: minimum
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: collect default set of information
+ purestorage.flasharray.purefa_info:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: array_info
+- name: show default information
+ debug:
+ msg: "{{ array_info['purefa_info']['default'] }}"
+
+- name: collect configuration and capacity information
+ purestorage.flasharray.purefa_info:
+ gather_subset:
+ - config
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: array_info
+- name: show configuration information
+ debug:
+ msg: "{{ array_info['purefa_info']['config'] }}"
+
+- name: collect all information
+ purestorage.flasharray.purefa_info:
+ gather_subset:
+ - all
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: array_info
+- name: show all information
+ debug:
+ msg: "{{ array_info['purefa_info'] }}"
+"""
+
+RETURN = r"""
+purefa_info:
+ description: Returns the information collected from the FlashArray
+ returned: always
+ type: dict
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+HAS_PACKAGING = True
+try:
+ from packaging import version
+except ImportError:
+ HAS_PACKAGING = False
+try:
+ from purestorage import purestorage
+except ImportError:
+ purestorage = None
+import time
+
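+# 86400000 is the number of milliseconds in a day; used to convert the
+# eradication_delay timer (reported in milliseconds) to days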
+SEC_TO_DAY = 86400000
+ADMIN_API_VERSION = "1.14"
+S3_REQUIRED_API_VERSION = "1.16"
+LATENCY_REQUIRED_API_VERSION = "1.16"
+AC_REQUIRED_API_VERSION = "1.14"
+CAP_REQUIRED_API_VERSION = "1.6"
+SAN_REQUIRED_API_VERSION = "1.10"
+NVME_API_VERSION = "1.16"
+PREFERRED_API_VERSION = "1.15"
+P53_API_VERSION = "1.17"
+ACTIVE_DR_API = "1.19"
+V6_MINIMUM_API_VERSION = "2.2"
+FILES_API_VERSION = "2.3"
+FC_REPL_API_VERSION = "2.4"
+ENCRYPTION_STATUS_API_VERSION = "2.6"
+DIR_QUOTA_API_VERSION = "2.7"
+SHARED_CAP_API_VERSION = "2.9"
+PURE_OUI = "naa.624a9370"
+SAFE_MODE_VERSION = "2.10"
+PER_PG_VERSION = "2.13"
+SAML2_VERSION = "2.11"
+NFS_USER_MAP_VERSION = "2.15"
+DEFAULT_PROT_API_VERSION = "2.16"
+VM_VERSION = "2.14"
+VLAN_VERSION = "2.17"
+NEIGHBOR_API_VERSION = "2.22"
+POD_QUOTA_VERSION = "2.23"
+
+
+def generate_default_dict(module, array):
+ default_info = {}
+ defaults = array.get()
+ api_version = array._list_available_rest_versions()
+ default_info["api_versions"] = api_version
+ if FILES_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ if VM_VERSION in api_version:
+ default_info["virtual_machines"] = len(
+ arrayv6.get_virtual_machines(vm_type="vvol").items
+ )
+ default_info["virtual_machine_snaps"] = len(
+ arrayv6.get_virtual_machine_snapshots(vm_type="vvol").items
+ )
+ default_info["snapshot_policies"] = len(arrayv6.get_policies_snapshot().items)
+ default_info["nfs_policies"] = len(arrayv6.get_policies_nfs().items)
+ default_info["smb_policies"] = len(arrayv6.get_policies_smb().items)
+ default_info["filesystems"] = len(arrayv6.get_file_systems().items)
+ default_info["directories"] = len(arrayv6.get_directories().items)
+ default_info["exports"] = len(arrayv6.get_directory_exports().items)
+ default_info["directory_snapshots"] = len(
+ arrayv6.get_directory_snapshots().items
+ )
+ if DIR_QUOTA_API_VERSION in api_version:
+ default_info["quota_policies"] = len(arrayv6.get_policies_quota().items)
+ if ENCRYPTION_STATUS_API_VERSION in api_version:
+ array_data = list(arrayv6.get_arrays().items)[0]
+ encryption = array_data.encryption
+ default_info["encryption_enabled"] = encryption.data_at_rest.enabled
+ if default_info["encryption_enabled"]:
+ default_info["encryption_algorithm"] = encryption.data_at_rest.algorithm
+ default_info["encryption_module_version"] = encryption.module_version
+ eradication = array_data.eradication_config
+ default_info["eradication_days_timer"] = int(
+ eradication.eradication_delay / SEC_TO_DAY
+ )
+ if SAFE_MODE_VERSION in api_version:
+ if eradication.manual_eradication == "all-enabled":
+ default_info["safe_mode"] = "Disabled"
+ else:
+ default_info["safe_mode"] = "Enabled"
+ if AC_REQUIRED_API_VERSION in api_version:
+ default_info["volume_groups"] = len(array.list_vgroups())
+ default_info["connected_arrays"] = len(array.list_array_connections())
+ default_info["pods"] = len(array.list_pods())
+ default_info["connection_key"] = array.get(connection_key=True)[
+ "connection_key"
+ ]
+ hosts = array.list_hosts()
+ admins = array.list_admins()
+ snaps = array.list_volumes(snap=True, pending=True)
+ volumes = array.list_volumes(pending=True)
+ pgroups = array.list_pgroups(pending=True)
+ hgroups = array.list_hgroups()
+ default_info["array_model"] = array.get(controllers=True)[0]["model"]
+ default_info["array_name"] = defaults["array_name"]
+ default_info["purity_version"] = defaults["version"]
+ default_info["hosts"] = len(hosts)
+ default_info["snapshots"] = len(snaps)
+ default_info["volumes"] = len(volumes)
+ default_info["protection_groups"] = len(pgroups)
+ default_info["hostgroups"] = len(hgroups)
+ default_info["admins"] = len(admins)
+ default_info["remote_assist"] = array.get_remote_assist_status()["status"]
+ if P53_API_VERSION in api_version:
+ default_info["maintenance_window"] = array.list_maintenance_windows()
+ return default_info
+
+
+def generate_perf_dict(array):
+ perf_info = {}
+ api_version = array._list_available_rest_versions()
+ if LATENCY_REQUIRED_API_VERSION in api_version:
+ latency_info = array.get(action="monitor", latency=True)[0]
+ perf_info = array.get(action="monitor")[0]
+ perf_info["writes_per_sec"] = perf_info["writes_per_sec"]
+ perf_info["reads_per_sec"] = perf_info["reads_per_sec"]
+
+ perf_info["input_per_sec"] = perf_info["input_per_sec"]
+ perf_info["output_per_sec"] = perf_info["output_per_sec"]
+
+ if LATENCY_REQUIRED_API_VERSION in api_version:
+ perf_info["san_usec_per_read_op"] = latency_info["san_usec_per_read_op"]
+ perf_info["san_usec_per_write_op"] = latency_info["san_usec_per_write_op"]
+ perf_info["queue_usec_per_read_op"] = latency_info["queue_usec_per_read_op"]
+ perf_info["queue_usec_per_write_op"] = latency_info["queue_usec_per_write_op"]
+ perf_info["qos_rate_limit_usec_per_read_op"] = latency_info[
+ "qos_rate_limit_usec_per_read_op"
+ ]
+ perf_info["qos_rate_limit_usec_per_write_op"] = latency_info[
+ "qos_rate_limit_usec_per_write_op"
+ ]
+ perf_info["local_queue_usec_per_op"] = perf_info["local_queue_usec_per_op"]
+ perf_info["usec_per_read_op"] = perf_info["usec_per_read_op"]
+ perf_info["usec_per_write_op"] = perf_info["usec_per_write_op"]
+ perf_info["queue_depth"] = perf_info["queue_depth"]
+ return perf_info
+
+
+def generate_config_dict(module, array):
+ config_info = {}
+ api_version = array._list_available_rest_versions()
+ config_info["console_lock"] = array.get_console_lock_status()["console_lock"]
+ if NFS_USER_MAP_VERSION not in api_version:
+ config_info["dns"] = array.get_dns()
+ config_info["smtp"] = array.list_alert_recipients()
+ config_info["snmp"] = array.list_snmp_managers()
+ config_info["snmp_v3_engine_id"] = array.get_snmp_engine_id()["engine_id"]
+ if V6_MINIMUM_API_VERSION in api_version:
+ config_info["directory_service"] = {}
+ arrayv6 = get_array(module)
+ services = list(arrayv6.get_directory_services().items)
+ for service in range(0, len(services)):
+ service_type = services[service].name
+ config_info["directory_service"][service_type] = {
+ "base_dn": getattr(services[service], "base_dn", "None"),
+ "bind_user": getattr(services[service], "bind_user", "None"),
+ "enabled": services[service].enabled,
+ "services": services[service].services,
+ "uris": services[service].uris,
+ }
+ config_info["directory_service_roles"] = {}
+ roles = list(arrayv6.get_directory_services_roles().items)
+ for role in range(0, len(roles)):
+ role_name = roles[role].role.name
+ try:
+ config_info["directory_service_roles"][role_name] = {
+ "group": roles[role].group,
+ "group_base": roles[role].group_base,
+ }
+ except Exception:
+ pass
+ smi_s = list(arrayv6.get_smi_s().items)[0]
+ config_info["smi-s"] = {
+ "slp_enabled": smi_s.slp_enabled,
+ "wbem_https_enabled": smi_s.wbem_https_enabled,
+ }
+ # Add additional SMI-S section to help with formatting
+ # issues caused by `-` in the dict name.
+ config_info["smi_s"] = {
+ "slp_enabled": smi_s.slp_enabled,
+ "wbem_https_enabled": smi_s.wbem_https_enabled,
+ }
+ if NFS_USER_MAP_VERSION in api_version:
+ config_info["dns"] = {}
+ dns_configs = list(arrayv6.get_dns().items)
+ for config in range(0, len(dns_configs)):
+ config_info["dns"][dns_configs[config].services[0]] = {
+ "nameservers": dns_configs[config].nameservers,
+ "domain": dns_configs[config].domain,
+ }
+ try:
+ config_info["dns"][dns_configs[config].services[0]][
+ "source"
+ ] = dns_configs[config].source["name"]
+ except Exception:
+ pass
+ if SAML2_VERSION in api_version:
+ config_info["saml2sso"] = {}
+ saml2 = list(arrayv6.get_sso_saml2_idps().items)
+ if saml2:
+ config_info["saml2sso"] = {
+ "enabled": saml2[0].enabled,
+ "array_url": saml2[0].array_url,
+ "name": saml2[0].name,
+ "idp": {
+ "url": getattr(saml2[0].idp, "url", None),
+ "encrypt_enabled": saml2[0].idp.encrypt_assertion_enabled,
+ "sign_enabled": saml2[0].idp.sign_request_enabled,
+ "metadata_url": saml2[0].idp.metadata_url,
+ },
+ "sp": {
+ "decrypt_cred": getattr(
+ saml2[0].sp.decryption_credential, "name", None
+ ),
+ "sign_cred": getattr(
+ saml2[0].sp.signing_credential, "name", None
+ ),
+ },
+ }
+ if FILES_API_VERSION in api_version:
+ config_info["active_directory"] = {}
+ try:
+ ad_accounts = list(arrayv6.get_active_directory().items)
+ for ad_account in range(0, len(ad_accounts)):
+ ad_name = ad_accounts[ad_account].name
+ config_info["active_directory"][ad_name] = {
+ "computer_name": ad_accounts[ad_account].computer_name,
+ "domain": ad_accounts[ad_account].domain,
+ "directory_servers": getattr(
+ ad_accounts[ad_account], "directory_servers", None
+ ),
+ "kerberos_servers": getattr(
+ ad_accounts[ad_account], "kerberos_servers", None
+ ),
+ "service_principal_names": getattr(
+ ad_accounts[ad_account], "service_principal_names", None
+ ),
+ "tls": getattr(ad_accounts[ad_account], "tls", None),
+ }
+ except Exception:
+ module.warn("FA-Files is not enabled on this array")
+ if DEFAULT_PROT_API_VERSION in api_version:
+ config_info["default_protections"] = {}
+ default_prots = list(arrayv6.get_container_default_protections().items)
+ for prot in range(0, len(default_prots)):
+ container = getattr(default_prots[prot], "name", "-")
+ config_info["default_protections"][container] = {
+ "protections": [],
+ "type": getattr(default_prots[prot], "type", "array"),
+ }
+ for container_prot in range(
+ 0, len(default_prots[prot].default_protections)
+ ):
+ config_info["default_protections"][container]["protections"].append(
+ {
+ "type": default_prots[prot]
+ .default_protections[container_prot]
+ .type,
+ "name": default_prots[prot]
+ .default_protections[container_prot]
+ .name,
+ }
+ )
+
+ else:
+ config_info["directory_service"] = {}
+ config_info["directory_service"]["management"] = array.get_directory_service()
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_info["directory_service_roles"] = {}
+ roles = array.list_directory_service_roles()
+ for role in range(0, len(roles)):
+ role_name = roles[role]["name"]
+ config_info["directory_service_roles"][role_name] = {
+ "group": roles[role]["group"],
+ "group_base": roles[role]["group_base"],
+ }
+ else:
+ config_info["directory_service"].update(
+ array.get_directory_service(groups=True)
+ )
+ config_info["ntp"] = array.get(ntpserver=True)["ntpserver"]
+ config_info["syslog"] = array.get(syslogserver=True)["syslogserver"]
+ config_info["phonehome"] = array.get(phonehome=True)["phonehome"]
+ config_info["proxy"] = array.get(proxy=True)["proxy"]
+ config_info["relayhost"] = array.get(relayhost=True)["relayhost"]
+ config_info["senderdomain"] = array.get(senderdomain=True)["senderdomain"]
+ config_info["syslog"] = array.get(syslogserver=True)["syslogserver"]
+ config_info["idle_timeout"] = array.get(idle_timeout=True)["idle_timeout"]
+ config_info["scsi_timeout"] = array.get(scsi_timeout=True)["scsi_timeout"]
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_info["global_admin"] = array.get_global_admin_attributes()
+ if (
+ config_info["global_admin"]["lockout_duration"]
+ and config_info["global_admin"]["lockout_duration"] > 0
+ ):
+ config_info["global_admin"]["lockout_duration"] = int(
+ config_info["global_admin"]["lockout_duration"] / 1000
+ )
+ return config_info
+
+
+def generate_filesystems_dict(array):
+ files_info = {}
+ filesystems = list(array.get_file_systems().items)
+ for filesystem in range(0, len(filesystems)):
+ fs_name = filesystems[filesystem].name
+ files_info[fs_name] = {
+ "destroyed": filesystems[filesystem].destroyed,
+ "directories": {},
+ }
+ directories = list(array.get_directories(file_system_names=[fs_name]).items)
+ for directory in range(0, len(directories)):
+ d_name = directories[directory].directory_name
+ files_info[fs_name]["directories"][d_name] = {
+ "path": directories[directory].path,
+ "data_reduction": directories[directory].space.data_reduction,
+ "snapshots_space": directories[directory].space.snapshots,
+ "total_physical_space": directories[directory].space.total_physical,
+ "unique_space": directories[directory].space.unique,
+ "virtual_space": directories[directory].space.virtual,
+ "destroyed": directories[directory].destroyed,
+ "full_name": directories[directory].name,
+ "used_provisioned": getattr(
+ directories[directory].space, "used_provisioned", None
+ ),
+ "exports": {},
+ }
+ exports = list(
+ array.get_directory_exports(
+ directory_names=[
+ files_info[fs_name]["directories"][d_name]["full_name"]
+ ]
+ ).items
+ )
+ for export in range(0, len(exports)):
+ e_name = exports[export].export_name
+ files_info[fs_name]["directories"][d_name]["exports"][e_name] = {
+ "enabled": exports[export].enabled,
+ "policy": {
+ "name": exports[export].policy.name,
+ "type": exports[export].policy.resource_type,
+ },
+ }
+ return files_info
+
+
+def generate_pgsnaps_dict(array):
+ pgsnaps_info = {}
+ snapshots = list(array.get_protection_group_snapshots().items)
+ for snapshot in range(0, len(snapshots)):
+ s_name = snapshots[snapshot].name
+ pgsnaps_info[s_name] = {
+ "destroyed": snapshots[snapshot].destroyed,
+ "source": snapshots[snapshot].source.name,
+ "suffix": snapshots[snapshot].suffix,
+ "snapshot_space": snapshots[snapshot].space.snapshots,
+ "used_provisioned": getattr(
+ snapshots[snapshot].space, "used_provisioned", None
+ ),
+ }
+ try:
+ if pgsnaps_info[s_name]["destroyed"]:
+ pgsnaps_info[s_name]["time_remaining"] = snapshots[
+ snapshot
+ ].time_remaining
+ except AttributeError:
+ pass
+ try:
+ pgsnaps_info[s_name]["manual_eradication"] = snapshots[
+ snapshot
+ ].eradication_config.manual_eradication
+ except AttributeError:
+ pass
+ return pgsnaps_info
+
+
+def generate_dir_snaps_dict(array):
+ dir_snaps_info = {}
+ snapshots = list(array.get_directory_snapshots().items)
+ for snapshot in range(0, len(snapshots)):
+ s_name = snapshots[snapshot].name
+ dir_snaps_info[s_name] = {
+ "destroyed": snapshots[snapshot].destroyed,
+ "source": snapshots[snapshot].source.name,
+ "suffix": snapshots[snapshot].suffix,
+ "client_name": snapshots[snapshot].client_name,
+ "snapshot_space": snapshots[snapshot].space.snapshots,
+ "total_physical_space": snapshots[snapshot].space.total_physical,
+ "unique_space": snapshots[snapshot].space.unique,
+ "used_provisioned": getattr(
+ snapshots[snapshot].space, "used_provisioned", None
+ ),
+ }
+ try:
+ dir_snaps_info[s_name]["policy"] = snapshots[snapshot].policy.name
+ except Exception:
+ dir_snaps_info[s_name]["policy"] = ""
+ if dir_snaps_info[s_name]["destroyed"]:
+ dir_snaps_info[s_name]["time_remaining"] = snapshots[
+ snapshot
+ ].time_remaining
+ return dir_snaps_info
+
+
+def generate_policies_dict(array, quota_available, nfs_user_mapping):
+ policy_info = {}
+ policies = list(array.get_policies().items)
+ for policy in range(0, len(policies)):
+ p_name = policies[policy].name
+ policy_info[p_name] = {
+ "type": policies[policy].policy_type,
+ "enabled": policies[policy].enabled,
+ "members": [],
+ "rules": [],
+ }
+ members = list(array.get_directories_policies(policy_names=[p_name]).items)
+ for member in range(0, len(members)):
+ m_name = members[member].member.name
+ policy_info[p_name]["members"].append(m_name)
+ if policies[policy].policy_type == "smb":
+ rules = list(
+ array.get_policies_smb_client_rules(policy_names=[p_name]).items
+ )
+ for rule in range(0, len(rules)):
+ smb_rules_dict = {
+ "client": rules[rule].client,
+ "smb_encryption_required": rules[rule].smb_encryption_required,
+ "anonymous_access_allowed": rules[rule].anonymous_access_allowed,
+ }
+ policy_info[p_name]["rules"].append(smb_rules_dict)
+ if policies[policy].policy_type == "nfs":
+ if nfs_user_mapping:
+ nfs_policy = list(array.get_policies_nfs(names=[p_name]).items)[0]
+ policy_info[p_name][
+ "user_mapping_enabled"
+ ] = nfs_policy.user_mapping_enabled
+ rules = list(
+ array.get_policies_nfs_client_rules(policy_names=[p_name]).items
+ )
+ for rule in range(0, len(rules)):
+ nfs_rules_dict = {
+ "access": rules[rule].access,
+ "permission": rules[rule].permission,
+ "client": rules[rule].client,
+ }
+ policy_info[p_name]["rules"].append(nfs_rules_dict)
+ if policies[policy].policy_type == "snapshot":
+ if HAS_PACKAGING:
+ suffix_enabled = version.parse(
+ array.get_rest_version()
+ ) >= version.parse(SHARED_CAP_API_VERSION)
+ else:
+ suffix_enabled = False
+ rules = list(array.get_policies_snapshot_rules(policy_names=[p_name]).items)
+ for rule in range(0, len(rules)):
+ try:
+ snap_rules_dict = {
+ "at": str(int(rules[rule].at / 3600000)).zfill(2) + ":00",
+ "client_name": rules[rule].client_name,
+ "every": str(int(rules[rule].every / 60000)) + " mins",
+ "keep_for": str(int(rules[rule].keep_for / 60000)) + " mins",
+ }
+ except AttributeError:
+ snap_rules_dict = {
+ "at": None,
+ "client_name": rules[rule].client_name,
+ "every": str(int(rules[rule].every / 60000)) + " mins",
+ "keep_for": str(int(rules[rule].keep_for / 60000)) + " mins",
+ }
+ if suffix_enabled:
+ try:
+ snap_rules_dict["suffix"] = rules[rule].suffix
+ except AttributeError:
+ snap_rules_dict["suffix"] = ""
+ policy_info[p_name]["rules"].append(snap_rules_dict)
+ if policies[policy].policy_type == "quota" and quota_available:
+ rules = list(array.get_policies_quota_rules(policy_names=[p_name]).items)
+ for rule in range(0, len(rules)):
+ quota_rules_dict = {
+ "enforced": rules[rule].enforced,
+ "quota_limit": rules[rule].quota_limit,
+ "notifications": rules[rule].notifications,
+ }
+ policy_info[p_name]["rules"].append(quota_rules_dict)
+ return policy_info
+
+
+def generate_clients_dict(array):
+ clients_info = {}
+ clients = list(array.get_api_clients().items)
+ for client in range(0, len(clients)):
+ c_name = clients[client].name
+ clients_info[c_name] = {
+ "enabled": clients[client].enabled,
+ "TTL(seconds)": clients[client].access_token_ttl_in_ms / 1000,
+ "key_id": clients[client].key_id,
+ "client_id": clients[client].id,
+ "max_role": clients[client].max_role,
+ "public_key": clients[client].public_key,
+ }
+ return clients_info
+
+
+def generate_admin_dict(array):
+ admin_info = {}
+ api_version = array._list_available_rest_versions()
+ if ADMIN_API_VERSION in api_version:
+ admins = array.list_admins()
+ for admin in range(0, len(admins)):
+ admin_name = admins[admin]["name"]
+ admin_info[admin_name] = {
+ "type": admins[admin]["type"],
+ "role": admins[admin]["role"],
+ }
+ return admin_info
+
+
+def generate_subnet_dict(array):
+ sub_info = {}
+ subnets = array.list_subnets()
+ for sub in range(0, len(subnets)):
+ sub_name = subnets[sub]["name"]
+ if subnets[sub]["enabled"]:
+ sub_info[sub_name] = {
+ "gateway": subnets[sub]["gateway"],
+ "mtu": subnets[sub]["mtu"],
+ "vlan": subnets[sub]["vlan"],
+ "prefix": subnets[sub]["prefix"],
+ "interfaces": subnets[sub]["interfaces"],
+ "services": subnets[sub]["services"],
+ }
+ return sub_info
+
+
+def generate_network_dict(module, array):
+ net_info = {}
+ api_version = array._list_available_rest_versions()
+ ports = array.list_network_interfaces()
+ for port in range(0, len(ports)):
+ int_name = ports[port]["name"]
+ net_info[int_name] = {
+ "hwaddr": ports[port]["hwaddr"],
+ "mtu": ports[port]["mtu"],
+ "enabled": ports[port]["enabled"],
+ "speed": ports[port]["speed"],
+ "address": ports[port]["address"],
+ "slaves": ports[port]["slaves"],
+ "services": ports[port]["services"],
+ "gateway": ports[port]["gateway"],
+ "netmask": ports[port]["netmask"],
+ }
+ if ports[port]["subnet"]:
+ subnets = array.get_subnet(ports[port]["subnet"])
+ if subnets["enabled"]:
+ net_info[int_name]["subnet"] = {
+ "name": subnets["name"],
+ "prefix": subnets["prefix"],
+ "vlan": subnets["vlan"],
+ }
+ if NEIGHBOR_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ neighbors = list(arrayv6.get_network_interfaces_neighbors().items)
+ for neighbor in range(0, len(neighbors)):
+ neighbor_info = neighbors[neighbor]
+ int_name = neighbor_info.local_port.name
+ net_info[int_name].update(
+ {
+ "neighbor": {
+ "initial_ttl_in_sec": neighbor_info.initial_ttl_in_sec,
+ "neighbor_port": {
+ "description": getattr(
+ neighbor_info.neighbor_port, "description", None
+ ),
+ "name": getattr(
+                                neighbor_info.neighbor_port, "name", None
+ ),
+ "id": getattr(
+ neighbor_info.neighbor_port.id, "value", None
+ ),
+ },
+ "neighbor_chassis": {
+ "addresses": getattr(
+ neighbor_info.neighbor_chassis, "addresses", None
+ ),
+ "description": getattr(
+ neighbor_info.neighbor_chassis, "description", None
+ ),
+ "name": getattr(
+ neighbor_info.neighbor_chassis, "name", None
+ ),
+ "bridge": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.bridge,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.bridge,
+ "supported",
+ False,
+ ),
+ },
+ "repeater": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.repeater,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.repeater,
+ "supported",
+ False,
+ ),
+ },
+ "router": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.router,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.router,
+ "supported",
+ False,
+ ),
+ },
+ "station_only": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.station_only,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.station_only,
+ "supported",
+ False,
+ ),
+ },
+ "telephone": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.telephone,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.telephone,
+ "supported",
+ False,
+ ),
+ },
+ "wlan_access_point": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.wlan_access_point,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.wlan_access_point,
+ "supported",
+ False,
+ ),
+ },
+ "docsis_cable_device": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.docsis_cable_device,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.docsis_cable_device,
+ "supported",
+ False,
+ ),
+ },
+ "id": {
+ "type": getattr(
+ neighbor_info.neighbor_chassis.id,
+ "type",
+ None,
+ ),
+ "value": getattr(
+ neighbor_info.neighbor_chassis.id,
+ "value",
+ None,
+ ),
+ },
+ },
+ }
+ }
+ )
+ return net_info
+
+
+def generate_capacity_dict(module, array):
+ capacity_info = {}
+ api_version = array._list_available_rest_versions()
+ if V6_MINIMUM_API_VERSION in api_version:
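+        # SHARED_CAP_API_VERSION and later expose space metrics as object
+        # attributes; earlier 2.x versions return them as a plain dict.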
+ new_version = bool(SHARED_CAP_API_VERSION in api_version)
+ arrayv6 = get_array(module)
+ total_capacity = list(arrayv6.get_arrays().items)[0].capacity
+ capacity = list(arrayv6.get_arrays_space().items)[0]
+ capacity_info["total_capacity"] = total_capacity
+ if new_version:
+ capacity_info["provisioned_space"] = getattr(
+ capacity.space, "total_provisioned", 0
+ )
+ capacity_info["free_space"] = total_capacity - getattr(
+ capacity.space, "total_physical", 0
+ )
+ capacity_info["data_reduction"] = getattr(
+ capacity.space, "data_reduction", 0
+ )
+ capacity_info["system_space"] = getattr(capacity.space, "system", 0)
+ capacity_info["volume_space"] = getattr(capacity.space, "unique", 0)
+ capacity_info["shared_space"] = getattr(capacity.space, "shared", 0)
+ capacity_info["snapshot_space"] = getattr(capacity.space, "snapshots", 0)
+ capacity_info["thin_provisioning"] = getattr(
+ capacity.space, "thin_provisioning", 0
+ )
+ capacity_info["total_reduction"] = getattr(
+ capacity.space, "total_reduction", 0
+ )
+ capacity_info["replication"] = getattr(capacity.space, "replication", 0)
+ capacity_info["shared_effective"] = getattr(
+ capacity.space, "shared_effective", 0
+ )
+ capacity_info["snapshots_effective"] = getattr(
+ capacity.space, "snapshots_effective", 0
+ )
+ capacity_info["unique_effective"] = getattr(
+                capacity.space, "unique_effective", 0
+ )
+ capacity_info["total_effective"] = getattr(
+ capacity.space, "total_effective", 0
+ )
+ capacity_info["used_provisioned"] = getattr(
+ capacity.space, "used_provisioned", 0
+ )
+ else:
+ capacity_info["provisioned_space"] = capacity.space["total_provisioned"]
+ capacity_info["free_space"] = (
+ total_capacity - capacity.space["total_physical"]
+ )
+ capacity_info["data_reduction"] = capacity.space["data_reduction"]
+ capacity_info["system_space"] = capacity.space["system"]
+ capacity_info["volume_space"] = capacity.space["unique"]
+ capacity_info["shared_space"] = capacity.space["shared"]
+ capacity_info["snapshot_space"] = capacity.space["snapshots"]
+ capacity_info["thin_provisioning"] = capacity.space["thin_provisioning"]
+ capacity_info["total_reduction"] = capacity.space["total_reduction"]
+ capacity_info["replication"] = capacity.space["replication"]
+ elif CAP_REQUIRED_API_VERSION in api_version:
+ volumes = array.list_volumes(pending=True)
+ capacity_info["provisioned_space"] = sum(item["size"] for item in volumes)
+ capacity = array.get(space=True)
+ total_capacity = capacity[0]["capacity"]
+ used_space = capacity[0]["total"]
+ capacity_info["free_space"] = total_capacity - used_space
+ capacity_info["total_capacity"] = total_capacity
+ capacity_info["data_reduction"] = capacity[0]["data_reduction"]
+ capacity_info["system_space"] = capacity[0]["system"]
+ capacity_info["volume_space"] = capacity[0]["volumes"]
+ capacity_info["shared_space"] = capacity[0]["shared_space"]
+ capacity_info["snapshot_space"] = capacity[0]["snapshots"]
+ capacity_info["thin_provisioning"] = capacity[0]["thin_provisioning"]
+ capacity_info["total_reduction"] = capacity[0]["total_reduction"]
+ return capacity_info
+
+
+def generate_snap_dict(module, array):
+ snap_info = {}
+ api_version = array._list_available_rest_versions()
+ if FC_REPL_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ snapsv6 = list(arrayv6.get_volume_snapshots(destroyed=False).items)
+ snaps = array.list_volumes(snap=True)
+ for snap in range(0, len(snaps)):
+ snapshot = snaps[snap]["name"]
+ snap_info[snapshot] = {
+ "size": snaps[snap]["size"],
+ "source": snaps[snap]["source"],
+ "created": snaps[snap]["created"],
+ "tags": [],
+ "remote": [],
+ }
+ if FC_REPL_API_VERSION in api_version:
+ for snap in range(0, len(snapsv6)):
+ snapshot = snapsv6[snap].name
+ snap_info[snapshot]["snapshot_space"] = snapsv6[snap].space.snapshots
+            snap_info[snapshot]["used_provisioned"] = getattr(
+                snapsv6[snap].space, "used_provisioned", None
+            )
+ snap_info[snapshot]["total_physical"] = snapsv6[snap].space.total_physical
+ snap_info[snapshot]["total_provisioned"] = snapsv6[
+ snap
+ ].space.total_provisioned
+ snap_info[snapshot]["unique_space"] = snapsv6[snap].space.unique
+ if SHARED_CAP_API_VERSION in api_version:
+ snap_info[snapshot]["snapshots_effective"] = snapsv6[
+ snap
+ ].space.snapshots_effective
+ offloads = list(arrayv6.get_offloads().items)
+ for offload in range(0, len(offloads)):
+ offload_name = offloads[offload].name
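+            # Only gather remote snapshot details from offload targets that respond (HTTP 200)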
+ check_offload = arrayv6.get_remote_volume_snapshots(on=offload_name)
+ if check_offload.status_code == 200:
+ remote_snaps = list(
+ arrayv6.get_remote_volume_snapshots(
+ on=offload_name, destroyed=False
+ ).items
+ )
+ for remote_snap in range(0, len(remote_snaps)):
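+                    # Remote snapshot names are prefixed "<array>:<snapshot>"; keep the snapshot part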
+ remote_snap_name = remote_snaps[remote_snap].name.split(":")[1]
+ remote_transfer = list(
+ arrayv6.get_remote_volume_snapshots_transfer(
+ on=offload_name, names=[remote_snaps[remote_snap].name]
+ ).items
+ )[0]
+ remote_dict = {
+ "source": remote_snaps[remote_snap].source.name,
+ "suffix": remote_snaps[remote_snap].suffix,
+ "size": remote_snaps[remote_snap].provisioned,
+ "data_transferred": remote_transfer.data_transferred,
+ "completed": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(remote_transfer.completed / 1000),
+ )
+ + " UTC",
+ "physical_bytes_written": remote_transfer.physical_bytes_written,
+ "progress": remote_transfer.progress,
+ "created": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(remote_snaps[remote_snap].created / 1000),
+ )
+ + " UTC",
+ }
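+                    # Snapshots that exist only on the offload target get a stub local entry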
+ try:
+ snap_info[remote_snap_name]["remote"].append(remote_dict)
+ except KeyError:
+ snap_info[remote_snap_name] = {"remote": []}
+ snap_info[remote_snap_name]["remote"].append(remote_dict)
+ if ACTIVE_DR_API in api_version:
+ snaptags = array.list_volumes(snap=True, tags=True, namespace="*")
+ for snaptag in range(0, len(snaptags)):
+ if snaptags[snaptag]["namespace"] != "vasa-integration.purestorage.com":
+ snapname = snaptags[snaptag]["name"]
+ tagdict = {
+ "key": snaptags[snaptag]["key"],
+ "value": snaptags[snaptag]["value"],
+ "namespace": snaptags[snaptag]["namespace"],
+ }
+ snap_info[snapname]["tags"].append(tagdict)
+ return snap_info
+
+
+def generate_del_snap_dict(module, array):
+ snap_info = {}
+ api_version = array._list_available_rest_versions()
+ if FC_REPL_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ snapsv6 = list(arrayv6.get_volume_snapshots(destroyed=True).items)
+ snaps = array.list_volumes(snap=True, pending_only=True)
+ for snap in range(0, len(snaps)):
+ snapshot = snaps[snap]["name"]
+ snap_info[snapshot] = {
+ "size": snaps[snap]["size"],
+ "source": snaps[snap]["source"],
+ "created": snaps[snap]["created"],
+ "time_remaining": snaps[snap]["time_remaining"],
+ "tags": [],
+ "remote": [],
+ }
+ if FC_REPL_API_VERSION in api_version:
+ for snap in range(0, len(snapsv6)):
+ snapshot = snapsv6[snap].name
+ snap_info[snapshot]["snapshot_space"] = snapsv6[snap].space.snapshots
+            snap_info[snapshot]["used_provisioned"] = getattr(
+                snapsv6[snap].space, "used_provisioned", None
+            )
+ snap_info[snapshot]["total_physical"] = snapsv6[snap].space.total_physical
+ snap_info[snapshot]["total_provisioned"] = snapsv6[
+ snap
+ ].space.total_provisioned
+ snap_info[snapshot]["unique_space"] = snapsv6[snap].space.unique
+ offloads = list(arrayv6.get_offloads().items)
+ for offload in range(0, len(offloads)):
+ offload_name = offloads[offload].name
+ check_offload = arrayv6.get_remote_volume_snapshots(on=offload_name)
+ if check_offload.status_code == 200:
+ remote_snaps = list(
+ arrayv6.get_remote_volume_snapshots(
+ on=offload_name, destroyed=True
+ ).items
+ )
+ for remote_snap in range(0, len(remote_snaps)):
+ remote_snap_name = remote_snaps[remote_snap].name.split(":")[1]
+ remote_transfer = list(
+ arrayv6.get_remote_volume_snapshots_transfer(
+ on=offload_name, names=[remote_snaps[remote_snap].name]
+ ).items
+ )[0]
+ remote_dict = {
+ "source": remote_snaps[remote_snap].source.name,
+ "suffix": remote_snaps[remote_snap].suffix,
+ "size": remote_snaps[remote_snap].provisioned,
+ "data_transferred": remote_transfer.data_transferred,
+ "completed": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(remote_transfer.completed / 1000),
+ )
+ + " UTC",
+ "physical_bytes_written": remote_transfer.physical_bytes_written,
+ "progress": remote_transfer.progress,
+ "created": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(remote_snaps[remote_snap].created / 1000),
+ )
+ + " UTC",
+ }
+ try:
+ snap_info[remote_snap_name]["remote"].append(remote_dict)
+ except KeyError:
+ snap_info[remote_snap_name] = {"remote": []}
+ snap_info[remote_snap_name]["remote"].append(remote_dict)
+ if ACTIVE_DR_API in api_version:
+ snaptags = array.list_volumes(
+ snap=True, tags=True, pending_only=True, namespace="*"
+ )
+ for snaptag in range(0, len(snaptags)):
+ if snaptags[snaptag]["namespace"] != "vasa-integration.purestorage.com":
+ snapname = snaptags[snaptag]["name"]
+ tagdict = {
+ "key": snaptags[snaptag]["key"],
+ "value": snaptags[snaptag]["value"],
+ "namespace": snaptags[snaptag]["namespace"],
+ }
+ snap_info[snapname]["tags"].append(tagdict)
+ return snap_info
+
+
+def generate_del_vol_dict(module, array):
+ volume_info = {}
+ api_version = array._list_available_rest_versions()
+ vols = array.list_volumes(pending_only=True)
+ for vol in range(0, len(vols)):
+ volume = vols[vol]["name"]
+ volume_info[volume] = {
+ "size": vols[vol]["size"],
+ "source": vols[vol]["source"],
+ "created": vols[vol]["created"],
+ "serial": vols[vol]["serial"],
+ "page83_naa": PURE_OUI + vols[vol]["serial"],
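+            # NVMe NGUID is derived from the volume serial: "eui.00", the
+            # first 14 serial characters, the Pure OUI fragment (24a937),
+            # then the last 10 serial characters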
+ "nvme_nguid": "eui.00"
+ + vols[vol]["serial"][0:14].lower()
+ + "24a937"
+ + vols[vol]["serial"][-10:].lower(),
+ "time_remaining": vols[vol]["time_remaining"],
+ "tags": [],
+ }
+ if V6_MINIMUM_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ vols_space = list(arrayv6.get_volumes_space(destroyed=True).items)
+ for vol in range(0, len(vols_space)):
+ name = vols_space[vol].name
+ volume_info[name]["snapshots_space"] = vols_space[vol].space.snapshots
+ # Provide system as this matches the old naming convention
+ volume_info[name]["system"] = vols_space[vol].space.unique
+ volume_info[name]["unique_space"] = vols_space[vol].space.unique
+ volume_info[name]["virtual_space"] = vols_space[vol].space.virtual
+ volume_info[name]["total_physical_space"] = vols_space[
+ vol
+ ].space.total_physical
+ volume_info[name]["data_reduction"] = vols_space[vol].space.data_reduction
+ volume_info[name]["total_reduction"] = vols_space[vol].space.total_reduction
+ volume_info[name]["total_provisioned"] = vols_space[
+ vol
+ ].space.total_provisioned
+ volume_info[name]["thin_provisioning"] = vols_space[
+ vol
+ ].space.thin_provisioning
+ if SHARED_CAP_API_VERSION in api_version:
+ volume_info[name]["snapshots_effective"] = vols_space[
+ vol
+ ].space.snapshots_effective
+ volume_info[name]["unique_effective"] = vols_space[
+ vol
+ ].space.unique_effective
+            volume_info[name]["used_provisioned"] = getattr(
+                vols_space[vol].space, "used_provisioned", None
+            )
+ if ACTIVE_DR_API in api_version:
+ voltags = array.list_volumes(tags=True, pending_only=True)
+ for voltag in range(0, len(voltags)):
+ if voltags[voltag]["namespace"] != "vasa-integration.purestorage.com":
+ volume = voltags[voltag]["name"]
+ tagdict = {
+ "key": voltags[voltag]["key"],
+ "value": voltags[voltag]["value"],
+ "copyable": voltags[voltag]["copyable"],
+ "namespace": voltags[voltag]["namespace"],
+ }
+ volume_info[volume]["tags"].append(tagdict)
+ if SAFE_MODE_VERSION in api_version:
+ volumes = list(arrayv6.get_volumes(destroyed=True).items)
+ for vol in range(0, len(volumes)):
+ name = volumes[vol].name
+ volume_info[name]["priority"] = volumes[vol].priority
+ volume_info[name]["priority_adjustment"] = volumes[
+ vol
+ ].priority_adjustment.priority_adjustment_operator + str(
+ volumes[vol].priority_adjustment.priority_adjustment_value
+ )
+ return volume_info
+
+
+def generate_vol_dict(module, array):
+ volume_info = {}
+ vols_space = array.list_volumes(space=True)
+ vols = array.list_volumes()
+ for vol in range(0, len(vols)):
+ volume = vols[vol]["name"]
+ volume_info[volume] = {
+ "protocol_endpoint": False,
+ "source": vols[vol]["source"],
+ "size": vols[vol]["size"],
+ "serial": vols[vol]["serial"],
+ "page83_naa": PURE_OUI + vols[vol]["serial"],
+ "nvme_nguid": "eui.00"
+ + vols[vol]["serial"][0:14].lower()
+ + "24a937"
+ + vols[vol]["serial"][-10:].lower(),
+ "tags": [],
+ "hosts": [],
+ "bandwidth": "",
+ "iops_limit": "",
+ "data_reduction": vols_space[vol]["data_reduction"],
+ "thin_provisioning": vols_space[vol]["thin_provisioning"],
+ "total_reduction": vols_space[vol]["total_reduction"],
+ }
+ api_version = array._list_available_rest_versions()
+ if V6_MINIMUM_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ vols_space = list(arrayv6.get_volumes_space(destroyed=False).items)
+ for vol in range(0, len(vols_space)):
+ name = vols_space[vol].name
+ volume_info[name]["snapshots_space"] = vols_space[vol].space.snapshots
+ # Provide system as this matches the old naming convention
+ volume_info[name]["system"] = vols_space[vol].space.unique
+ volume_info[name]["unique_space"] = vols_space[vol].space.unique
+ volume_info[name]["virtual_space"] = vols_space[vol].space.virtual
+ volume_info[name]["total_physical_space"] = vols_space[
+ vol
+ ].space.total_physical
+ if SHARED_CAP_API_VERSION in api_version:
+ volume_info[name]["snapshots_effective"] = vols_space[
+ vol
+ ].space.snapshots_effective
+ volume_info[name]["unique_effective"] = vols_space[
+ vol
+ ].space.unique_effective
+ volume_info[name]["total_effective"] = vols_space[
+ vol
+ ].space.total_effective
+            volume_info[name]["used_provisioned"] = getattr(
+                vols_space[vol].space, "used_provisioned", None
+            )
+ if AC_REQUIRED_API_VERSION in api_version:
+ qvols = array.list_volumes(qos=True)
+ for qvol in range(0, len(qvols)):
+ volume = qvols[qvol]["name"]
+ qos = qvols[qvol]["bandwidth_limit"]
+ volume_info[volume]["bandwidth"] = qos
+ if P53_API_VERSION in api_version:
+ iops = qvols[qvol]["iops_limit"]
+ volume_info[volume]["iops_limit"] = iops
+ vvols = array.list_volumes(protocol_endpoint=True)
+ for vvol in range(0, len(vvols)):
+ volume = vvols[vvol]["name"]
+ volume_info[volume] = {
+ "protocol_endpoint": True,
+ "host_encryption_key_status": None,
+ "source": vvols[vvol]["source"],
+ "serial": vvols[vvol]["serial"],
+                "nvme_nguid": "eui.00"
+                + vvols[vvol]["serial"][0:14].lower()
+                + "24a937"
+                + vvols[vvol]["serial"][-10:].lower(),
+ "page83_naa": PURE_OUI + vvols[vvol]["serial"],
+ "tags": [],
+ "hosts": [],
+ }
+ if P53_API_VERSION in array._list_available_rest_versions():
+ e2ees = array.list_volumes(host_encryption_key=True)
+ for e2ee in range(0, len(e2ees)):
+ volume = e2ees[e2ee]["name"]
+ volume_info[volume]["host_encryption_key_status"] = e2ees[e2ee][
+ "host_encryption_key_status"
+ ]
+ if SAFE_MODE_VERSION in api_version:
+ volumes = list(arrayv6.get_volumes(destroyed=False).items)
+ for vol in range(0, len(volumes)):
+ name = volumes[vol].name
+ volume_info[name]["priority"] = volumes[vol].priority
+ volume_info[name]["priority_adjustment"] = volumes[
+ vol
+ ].priority_adjustment.priority_adjustment_operator + str(
+ volumes[vol].priority_adjustment.priority_adjustment_value
+ )
+ cvols = array.list_volumes(connect=True)
+ for cvol in range(0, len(cvols)):
+ volume = cvols[cvol]["name"]
+ voldict = {"host": cvols[cvol]["host"], "lun": cvols[cvol]["lun"]}
+ volume_info[volume]["hosts"].append(voldict)
+ if ACTIVE_DR_API in api_version:
+ voltags = array.list_volumes(tags=True)
+ for voltag in range(0, len(voltags)):
+ if voltags[voltag]["namespace"] != "vasa-integration.purestorage.com":
+ volume = voltags[voltag]["name"]
+ tagdict = {
+ "key": voltags[voltag]["key"],
+ "value": voltags[voltag]["value"],
+ "copyable": voltags[voltag]["copyable"],
+ "namespace": voltags[voltag]["namespace"],
+ }
+ volume_info[volume]["tags"].append(tagdict)
+ return volume_info
+
+
+def generate_host_dict(module, array):
+ api_version = array._list_available_rest_versions()
+ host_info = {}
+ hosts = array.list_hosts()
+ for host in range(0, len(hosts)):
+ hostname = hosts[host]["name"]
+ tports = []
+ all_tports = []
+ host_all_info = None
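+        # Skip hosts the array can no longer report on (HTTP 400)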
+ try:
+ host_all_info = array.get_host(hostname, all=True)
+ except purestorage.PureHTTPError as err:
+ if err.code == 400:
+ continue
+ if host_all_info:
+ for tport in range(0, len(host_all_info)):
+ for itport in range(0, len(host_all_info[tport]["target_port"])):
+ tports.append(host_all_info[tport]["target_port"][itport])
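+            # dict.fromkeys() de-duplicates the target ports while preserving order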
+ all_tports = list(dict.fromkeys(tports))
+ host_info[hostname] = {
+ "hgroup": hosts[host]["hgroup"],
+ "iqn": hosts[host]["iqn"],
+ "wwn": hosts[host]["wwn"],
+ "personality": array.get_host(hostname, personality=True)["personality"],
+ "target_port": all_tports,
+ "volumes": [],
+ }
+ host_connections = array.list_host_connections(hostname)
+ for connection in range(0, len(host_connections)):
+ connection_dict = {
+ "hostgroup": host_connections[connection]["hgroup"],
+ "volume": host_connections[connection]["vol"],
+ "lun": host_connections[connection]["lun"],
+ }
+ host_info[hostname]["volumes"].append(connection_dict)
+ if host_info[hostname]["iqn"]:
+ chap_data = array.get_host(hostname, chap=True)
+ host_info[hostname]["target_user"] = chap_data["target_user"]
+ host_info[hostname]["host_user"] = chap_data["host_user"]
+ if NVME_API_VERSION in api_version:
+ host_info[hostname]["nqn"] = hosts[host]["nqn"]
+ if PREFERRED_API_VERSION in api_version:
+ hosts = array.list_hosts(preferred_array=True)
+ for host in range(0, len(hosts)):
+ hostname = hosts[host]["name"]
+ host_info[hostname]["preferred_array"] = hosts[host]["preferred_array"]
+ if VLAN_VERSION in api_version:
+ arrayv6 = get_array(module)
+ hosts = list(arrayv6.get_hosts().items)
+ for host in range(0, len(hosts)):
+ if hosts[host].is_local:
+ hostname = hosts[host].name
+ host_info[hostname]["vlan"] = getattr(hosts[host], "vlan", None)
+ return host_info
+
+
+def generate_pgroups_dict(module, array):
+ pgroups_info = {}
+ api_version = array._list_available_rest_versions()
+ pgroups = array.list_pgroups()
+    if V6_MINIMUM_API_VERSION in api_version:
+        array_v6 = get_array(module)
+    deleted_enabled = bool(SHARED_CAP_API_VERSION in api_version)
+ for pgroup in range(0, len(pgroups)):
+ protgroup = pgroups[pgroup]["name"]
+ pgroups_info[protgroup] = {
+ "hgroups": pgroups[pgroup]["hgroups"],
+ "hosts": pgroups[pgroup]["hosts"],
+ "source": pgroups[pgroup]["source"],
+ "targets": pgroups[pgroup]["targets"],
+ "volumes": pgroups[pgroup]["volumes"],
+ }
+ try:
+ prot_sched = array.get_pgroup(protgroup, schedule=True)
+ prot_reten = array.get_pgroup(protgroup, retention=True)
+ snap_transfers = array.get_pgroup(
+ protgroup, snap=True, transfer=True, pending=True
+ )
+ except purestorage.PureHTTPError as err:
+ if err.code == 400:
+ continue
+ if prot_sched["snap_enabled"] or prot_sched["replicate_enabled"]:
+ pgroups_info[protgroup]["snap_frequency"] = prot_sched["snap_frequency"]
+ pgroups_info[protgroup]["replicate_frequency"] = prot_sched[
+ "replicate_frequency"
+ ]
+ pgroups_info[protgroup]["snap_enabled"] = prot_sched["snap_enabled"]
+ pgroups_info[protgroup]["replicate_enabled"] = prot_sched[
+ "replicate_enabled"
+ ]
+ pgroups_info[protgroup]["snap_at"] = prot_sched["snap_at"]
+ pgroups_info[protgroup]["replicate_at"] = prot_sched["replicate_at"]
+ pgroups_info[protgroup]["replicate_blackout"] = prot_sched[
+ "replicate_blackout"
+ ]
+ pgroups_info[protgroup]["per_day"] = prot_reten["per_day"]
+ pgroups_info[protgroup]["target_per_day"] = prot_reten["target_per_day"]
+ pgroups_info[protgroup]["target_days"] = prot_reten["target_days"]
+ pgroups_info[protgroup]["days"] = prot_reten["days"]
+ pgroups_info[protgroup]["all_for"] = prot_reten["all_for"]
+ pgroups_info[protgroup]["target_all_for"] = prot_reten["target_all_for"]
+ pgroups_info[protgroup]["snaps"] = {}
+ for snap_transfer in range(0, len(snap_transfers)):
+ snap = snap_transfers[snap_transfer]["name"]
+ pgroups_info[protgroup]["snaps"][snap] = {
+ "time_remaining": snap_transfers[snap_transfer]["time_remaining"],
+ "created": snap_transfers[snap_transfer]["created"],
+ "started": snap_transfers[snap_transfer]["started"],
+ "completed": snap_transfers[snap_transfer]["completed"],
+ "physical_bytes_written": snap_transfers[snap_transfer][
+ "physical_bytes_written"
+ ],
+ "data_transferred": snap_transfers[snap_transfer]["data_transferred"],
+ "progress": snap_transfers[snap_transfer]["progress"],
+ }
+ if deleted_enabled:
+ pgroups_info[protgroup]["deleted_volumes"] = []
+ volumes = list(
+ array_v6.get_protection_groups_volumes(group_names=[protgroup]).items
+ )
+ if volumes:
+ for volume in range(0, len(volumes)):
+ if volumes[volume].member["destroyed"]:
+ pgroups_info[protgroup]["deleted_volumes"].append(
+ volumes[volume].member["name"]
+ )
+ else:
+ pgroups_info[protgroup]["deleted_volumes"] = None
+ if PER_PG_VERSION in api_version:
+ try:
+ pgroups_info[protgroup]["retention_lock"] = list(
+ array_v6.get_protection_groups(names=[protgroup]).items
+ )[0].retention_lock
+ pgroups_info[protgroup]["manual_eradication"] = list(
+ array_v6.get_protection_groups(names=[protgroup]).items
+ )[0].eradication_config.manual_eradication
+ except Exception:
+ pass
+ if V6_MINIMUM_API_VERSION in api_version:
+ pgroups = list(array_v6.get_protection_groups().items)
+ for pgroup in range(0, len(pgroups)):
+ name = pgroups[pgroup].name
+ pgroups_info[name]["snapshots"] = getattr(
+ pgroups[pgroup].space, "snapshots", None
+ )
+ pgroups_info[name]["shared"] = getattr(
+ pgroups[pgroup].space, "shared", None
+ )
+ pgroups_info[name]["data_reduction"] = getattr(
+ pgroups[pgroup].space, "data_reduction", None
+ )
+ pgroups_info[name]["thin_provisioning"] = getattr(
+ pgroups[pgroup].space, "thin_provisioning", None
+ )
+ pgroups_info[name]["total_physical"] = getattr(
+ pgroups[pgroup].space, "total_physical", None
+ )
+ pgroups_info[name]["total_provisioned"] = getattr(
+ pgroups[pgroup].space, "total_provisioned", None
+ )
+ pgroups_info[name]["total_reduction"] = getattr(
+ pgroups[pgroup].space, "total_reduction", None
+ )
+ pgroups_info[name]["unique"] = getattr(
+ pgroups[pgroup].space, "unique", None
+ )
+ pgroups_info[name]["virtual"] = getattr(
+ pgroups[pgroup].space, "virtual", None
+ )
+ pgroups_info[name]["replication"] = getattr(
+ pgroups[pgroup].space, "replication", None
+ )
+ pgroups_info[name]["used_provisioned"] = getattr(
+ pgroups[pgroup].space, "used_provisioned", None
+ )
+ return pgroups_info
+
+
+def generate_rl_dict(module, array):
+ rl_info = {}
+ api_version = array._list_available_rest_versions()
+ if ACTIVE_DR_API in api_version:
+ try:
+ rlinks = array.list_pod_replica_links()
+ for rlink in range(0, len(rlinks)):
+ link_name = rlinks[rlink]["local_pod_name"]
+ since_epoch = rlinks[rlink]["recovery_point"] / 1000
+                recovery_datetime = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(since_epoch)
+ )
+ rl_info[link_name] = {
+ "status": rlinks[rlink]["status"],
+ "direction": rlinks[rlink]["direction"],
+ "lag": str(rlinks[rlink]["lag"] / 1000) + "s",
+ "remote_pod_name": rlinks[rlink]["remote_pod_name"],
+ "remote_names": rlinks[rlink]["remote_names"],
+                    "recovery_point": recovery_datetime,
+ }
+ except Exception:
+            module.warn("Replica Links info requires purestorage SDK 1.19 or higher")
+ return rl_info
+
+
+def generate_del_pods_dict(module, array):
+ pods_info = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ pods = array.list_pods(mediator=True, pending_only=True)
+ for pod in range(0, len(pods)):
+ acpod = pods[pod]["name"]
+ pods_info[acpod] = {
+ "source": pods[pod]["source"],
+ "arrays": pods[pod]["arrays"],
+ "mediator": pods[pod]["mediator"],
+ "mediator_version": pods[pod]["mediator_version"],
+ "time_remaining": pods[pod]["time_remaining"],
+ }
+ if ACTIVE_DR_API in api_version:
+ if pods_info[acpod]["arrays"][0]["frozen_at"]:
+ frozen_time = pods_info[acpod]["arrays"][0]["frozen_at"] / 1000
+ frozen_datetime = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(frozen_time)
+ )
+ pods_info[acpod]["arrays"][0]["frozen_at"] = frozen_datetime
+ pods_info[acpod]["link_source_count"] = pods[pod]["link_source_count"]
+ pods_info[acpod]["link_target_count"] = pods[pod]["link_target_count"]
+ pods_info[acpod]["promotion_status"] = pods[pod]["promotion_status"]
+ pods_info[acpod]["requested_promotion_state"] = pods[pod][
+ "requested_promotion_state"
+ ]
+ if PREFERRED_API_VERSION in api_version:
+ pods_fp = array.list_pods(failover_preference=True, pending_only=True)
+ for pod in range(0, len(pods_fp)):
+ acpod = pods_fp[pod]["name"]
+ pods_info[acpod]["failover_preference"] = pods_fp[pod][
+ "failover_preference"
+ ]
+ if V6_MINIMUM_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ pods = list(arrayv6.get_pods(destroyed=True).items)
+ for pod in range(0, len(pods)):
+ name = pods[pod].name
+ pods_info[name]["snapshots"] = pods[pod].space.snapshots
+ pods_info[name]["shared"] = pods[pod].space.shared
+ pods_info[name]["data_reduction"] = pods[pod].space.data_reduction
+ pods_info[name]["thin_provisioning"] = pods[pod].space.thin_provisioning
+ pods_info[name]["total_physical"] = pods[pod].space.total_physical
+ pods_info[name]["total_provisioned"] = pods[pod].space.total_provisioned
+ pods_info[name]["total_reduction"] = pods[pod].space.total_reduction
+ pods_info[name]["unique"] = pods[pod].space.unique
+ pods_info[name]["virtual"] = pods[pod].space.virtual
+ pods_info[name]["replication"] = pods[pod].space.replication
+ pods_info[name]["used_provisioned"] = getattr(
+ pods[pod].space, "used_provisioned", None
+ )
+ if POD_QUOTA_VERSION in api_version:
+ pods_info[name]["quota_limit"] = pods[pod].quota_limit
+ return pods_info
+
+
+def generate_pods_dict(module, array):
+ pods_info = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ pods = array.list_pods(mediator=True)
+ for pod in range(0, len(pods)):
+ acpod = pods[pod]["name"]
+ pods_info[acpod] = {
+ "source": pods[pod]["source"],
+ "arrays": pods[pod]["arrays"],
+ "mediator": pods[pod]["mediator"],
+ "mediator_version": pods[pod]["mediator_version"],
+ }
+ if ACTIVE_DR_API in api_version:
+ if pods_info[acpod]["arrays"][0]["frozen_at"]:
+ frozen_time = pods_info[acpod]["arrays"][0]["frozen_at"] / 1000
+ frozen_datetime = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(frozen_time)
+ )
+ pods_info[acpod]["arrays"][0]["frozen_at"] = frozen_datetime
+ pods_info[acpod]["link_source_count"] = pods[pod]["link_source_count"]
+ pods_info[acpod]["link_target_count"] = pods[pod]["link_target_count"]
+ pods_info[acpod]["promotion_status"] = pods[pod]["promotion_status"]
+ pods_info[acpod]["requested_promotion_state"] = pods[pod][
+ "requested_promotion_state"
+ ]
+ if PREFERRED_API_VERSION in api_version:
+ pods_fp = array.list_pods(failover_preference=True)
+ for pod in range(0, len(pods_fp)):
+ acpod = pods_fp[pod]["name"]
+ pods_info[acpod]["failover_preference"] = pods_fp[pod][
+ "failover_preference"
+ ]
+ if V6_MINIMUM_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ pods = list(arrayv6.get_pods(destroyed=False).items)
+ for pod in range(0, len(pods)):
+ name = pods[pod].name
+ pods_info[name]["snapshots"] = getattr(
+ pods[pod].space, "snapshots", None
+ )
+ pods_info[name]["shared"] = getattr(pods[pod].space, "shared", None)
+ pods_info[name]["data_reduction"] = getattr(
+ pods[pod].space, "data_reduction", None
+ )
+ pods_info[name]["thin_provisioning"] = getattr(
+ pods[pod].space, "thin_provisioning", None
+ )
+ pods_info[name]["total_physical"] = getattr(
+ pods[pod].space, "total_physical", None
+ )
+ pods_info[name]["total_provisioned"] = getattr(
+ pods[pod].space, "total_provisioned", None
+ )
+ pods_info[name]["total_reduction"] = getattr(
+ pods[pod].space, "total_reduction", None
+ )
+ pods_info[name]["unique"] = getattr(pods[pod].space, "unique", None)
+ pods_info[name]["virtual"] = getattr(pods[pod].space, "virtual", None)
+ pods_info[name]["replication"] = getattr(
+ pods[pod].space, "replication", None
+ )
+ pods_info[name]["used_provisioned"] = getattr(
+ pods[pod].space, "used_provisioned", None
+ )
+ return pods_info
+
+
+def generate_conn_array_dict(module, array):
+ conn_array_info = {}
+ api_version = array._list_available_rest_versions()
+ if FC_REPL_API_VERSION not in api_version:
+ carrays = array.list_array_connections()
+ for carray in range(0, len(carrays)):
+ arrayname = carrays[carray]["array_name"]
+ conn_array_info[arrayname] = {
+ "array_id": carrays[carray]["id"],
+ "throttled": carrays[carray]["throttled"],
+ "version": carrays[carray]["version"],
+ "type": carrays[carray]["type"],
+ "mgmt_ip": carrays[carray]["management_address"],
+ "repl_ip": carrays[carray]["replication_address"],
+ }
+ if P53_API_VERSION in api_version:
+ conn_array_info[arrayname]["status"] = carrays[carray]["status"]
+ else:
+ conn_array_info[arrayname]["connected"] = carrays[carray]["connected"]
+ throttles = array.list_array_connections(throttle=True)
+ for throttle in range(0, len(throttles)):
+ arrayname = throttles[throttle]["array_name"]
+ if conn_array_info[arrayname]["throttled"]:
+ conn_array_info[arrayname]["throttling"] = {
+ "default_limit": throttles[throttle]["default_limit"],
+ "window_limit": throttles[throttle]["window_limit"],
+ "window": throttles[throttle]["window"],
+ }
+ else:
+ arrayv6 = get_array(module)
+ carrays = list(arrayv6.get_array_connections().items)
+ for carray in range(0, len(carrays)):
+ arrayname = carrays[carray].name
+ conn_array_info[arrayname] = {
+ "array_id": carrays[carray].id,
+ "version": getattr(carrays[carray], "version", None),
+ "status": carrays[carray].status,
+ "type": carrays[carray].type,
+ "mgmt_ip": getattr(carrays[carray], "management_address", "-"),
+ "repl_ip": getattr(carrays[carray], "replication_addresses", "-"),
+ "transport": carrays[carray].replication_transport,
+ }
+
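+            # An empty throttle object means no replication throttling is configured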
+ if bool(carrays[carray].throttle.to_dict()):
+ conn_array_info[arrayname]["throttled"] = True
+ conn_array_info[arrayname]["throttling"] = {}
+ try:
+ if bool(carrays[carray].throttle.window):
+ conn_array_info[arrayname]["throttling"]["window"] = carrays[
+ carray
+ ].throttle.window.to_dict()
+ except AttributeError:
+ pass
+ try:
+ if bool(carrays[carray].throttle.default_limit):
+ conn_array_info[arrayname]["throttling"][
+ "default_limit"
+ ] = carrays[carray].throttle.default_limit
+ except AttributeError:
+ pass
+ try:
+ if bool(carrays[carray].throttle.window_limit):
+ conn_array_info[arrayname]["throttling"][
+ "window_limit"
+ ] = carrays[carray].throttle.window_limit
+ except AttributeError:
+ pass
+ else:
+ conn_array_info[arrayname]["throttled"] = False
+ return conn_array_info
+
+
+def generate_apps_dict(array):
+ apps_info = {}
+ api_version = array._list_available_rest_versions()
+ if SAN_REQUIRED_API_VERSION in api_version:
+ apps = array.list_apps()
+ for app in range(0, len(apps)):
+ appname = apps[app]["name"]
+ apps_info[appname] = {
+ "version": apps[app]["version"],
+ "status": apps[app]["status"],
+ "description": apps[app]["description"],
+ }
+ if P53_API_VERSION in api_version:
+ app_nodes = array.list_app_nodes()
+ for app in range(0, len(app_nodes)):
+ appname = app_nodes[app]["name"]
+ apps_info[appname]["index"] = app_nodes[app]["index"]
+ apps_info[appname]["vnc"] = app_nodes[app]["vnc"]
+ return apps_info
+
+
+def generate_vgroups_dict(module, array):
+ vgroups_info = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ vgroups = array.list_vgroups(pending=False)
+ for vgroup in range(0, len(vgroups)):
+ virtgroup = vgroups[vgroup]["name"]
+ vgroups_info[virtgroup] = {
+ "volumes": vgroups[vgroup]["volumes"],
+ }
+ if V6_MINIMUM_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ vgroups = list(arrayv6.get_volume_groups(destroyed=False).items)
+ for vgroup in range(0, len(vgroups)):
+ name = vgroups[vgroup].name
+ vgroups_info[name]["snapshots_space"] = vgroups[vgroup].space.snapshots
+ # Provide system as this matches the old naming convention
+ vgroups_info[name]["system"] = vgroups[vgroup].space.unique
+ vgroups_info[name]["unique_space"] = vgroups[vgroup].space.unique
+ vgroups_info[name]["virtual_space"] = vgroups[vgroup].space.virtual
+ vgroups_info[name]["data_reduction"] = vgroups[vgroup].space.data_reduction
+ vgroups_info[name]["total_reduction"] = vgroups[
+ vgroup
+ ].space.total_reduction
+ vgroups_info[name]["total_provisioned"] = vgroups[
+ vgroup
+ ].space.total_provisioned
+ vgroups_info[name]["thin_provisioning"] = vgroups[
+ vgroup
+ ].space.thin_provisioning
+            vgroups_info[name]["used_provisioned"] = getattr(
+                vgroups[vgroup].space, "used_provisioned", None
+            )
+ vgroups_info[name]["bandwidth_limit"] = getattr(
+ vgroups[vgroup].qos, "bandwidth_limit", ""
+ )
+ vgroups_info[name]["iops_limit"] = getattr(
+ vgroups[vgroup].qos, "iops_limit", ""
+ )
+ if SAFE_MODE_VERSION in api_version:
+ for vgroup in range(0, len(vgroups)):
+ name = vgroups[vgroup].name
+ vgroups_info[name]["priority_adjustment"] = vgroups[
+ vgroup
+ ].priority_adjustment.priority_adjustment_operator + str(
+ vgroups[vgroup].priority_adjustment.priority_adjustment_value
+ )
+ return vgroups_info
+
+
+def generate_del_vgroups_dict(module, array):
+ vgroups_info = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ vgroups = array.list_vgroups(pending_only=True)
+ for vgroup in range(0, len(vgroups)):
+ virtgroup = vgroups[vgroup]["name"]
+ vgroups_info[virtgroup] = {
+ "volumes": vgroups[vgroup]["volumes"],
+ }
+ if V6_MINIMUM_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ vgroups = list(arrayv6.get_volume_groups(destroyed=True).items)
+ for vgroup in range(0, len(vgroups)):
+ name = vgroups[vgroup].name
+ vgroups_info[name]["snapshots_space"] = vgroups[vgroup].space.snapshots
+ # Provide system as this matches the old naming convention
+ vgroups_info[name]["system"] = vgroups[vgroup].space.unique
+ vgroups_info[name]["unique_space"] = vgroups[vgroup].space.unique
+ vgroups_info[name]["virtual_space"] = vgroups[vgroup].space.virtual
+ vgroups_info[name]["data_reduction"] = vgroups[vgroup].space.data_reduction
+ vgroups_info[name]["total_reduction"] = vgroups[
+ vgroup
+ ].space.total_reduction
+ vgroups_info[name]["total_provisioned"] = vgroups[
+ vgroup
+ ].space.total_provisioned
+ vgroups_info[name]["thin_provisioning"] = vgroups[
+ vgroup
+ ].space.thin_provisioning
+            vgroups_info[name]["used_provisioned"] = getattr(
+                vgroups[vgroup].space, "used_provisioned", None
+            )
+            vgroups_info[name]["time_remaining"] = vgroups[vgroup].time_remaining
+ vgroups_info[name]["bandwidth_limit"] = getattr(
+ vgroups[vgroup].qos, "bandwidth_limit", ""
+ )
+ vgroups_info[name]["iops_limit"] = getattr(
+ vgroups[vgroup].qos, "iops_limit", ""
+ )
+ if SAFE_MODE_VERSION in api_version:
+ for vgroup in range(0, len(vgroups)):
+ name = vgroups[vgroup].name
+ vgroups_info[name]["priority_adjustment"] = vgroups[
+ vgroup
+ ].priority_adjustment.priority_adjustment_operator + str(
+ vgroups[vgroup].priority_adjustment.priority_adjustment_value
+ )
+ return vgroups_info
+
+
+def generate_certs_dict(array):
+ certs_info = {}
+ api_version = array._list_available_rest_versions()
+ if P53_API_VERSION in api_version:
+ certs = array.list_certificates()
+ for cert in range(0, len(certs)):
+ certificate = certs[cert]["name"]
+ valid_from = time.strftime(
+ "%a, %d %b %Y %H:%M:%S %Z",
+ time.localtime(certs[cert]["valid_from"] / 1000),
+ )
+ valid_to = time.strftime(
+ "%a, %d %b %Y %H:%M:%S %Z",
+ time.localtime(certs[cert]["valid_to"] / 1000),
+ )
+ certs_info[certificate] = {
+ "status": certs[cert]["status"],
+ "issued_to": certs[cert]["issued_to"],
+ "valid_from": valid_from,
+ "locality": certs[cert]["locality"],
+ "country": certs[cert]["country"],
+ "issued_by": certs[cert]["issued_by"],
+ "valid_to": valid_to,
+ "state": certs[cert]["state"],
+ "key_size": certs[cert]["key_size"],
+ "org_unit": certs[cert]["organizational_unit"],
+ "common_name": certs[cert]["common_name"],
+ "organization": certs[cert]["organization"],
+ "email": certs[cert]["email"],
+ }
+ return certs_info
+
+
+def generate_kmip_dict(array):
+ kmip_info = {}
+ api_version = array._list_available_rest_versions()
+ if P53_API_VERSION in api_version:
+ kmips = array.list_kmip()
+ for kmip in range(0, len(kmips)):
+ key = kmips[kmip]["name"]
+ kmip_info[key] = {
+ "certificate": kmips[kmip]["certificate"],
+ "ca_cert_configured": kmips[kmip]["ca_certificate_configured"],
+ "uri": kmips[kmip]["uri"],
+ }
+ return kmip_info
+
+
+def generate_nfs_offload_dict(module, array):
+ offload_info = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ offload = array.list_nfs_offload()
+ for target in range(0, len(offload)):
+ offloadt = offload[target]["name"]
+ offload_info[offloadt] = {
+ "status": offload[target]["status"],
+ "mount_point": offload[target]["mount_point"],
+ "protocol": offload[target]["protocol"],
+ "mount_options": offload[target]["mount_options"],
+ "address": offload[target]["address"],
+ }
+ if V6_MINIMUM_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ offloads = list(arrayv6.get_offloads(protocol="nfs").items)
+ for offload in range(0, len(offloads)):
+ name = offloads[offload].name
+ offload_info[name]["snapshots"] = getattr(
+ offloads[offload].space, "snapshots", None
+ )
+ offload_info[name]["shared"] = getattr(
+ offloads[offload].space, "shared", None
+ )
+ offload_info[name]["data_reduction"] = getattr(
+ offloads[offload].space, "data_reduction", None
+ )
+ offload_info[name]["thin_provisioning"] = getattr(
+ offloads[offload].space, "thin_provisioning", None
+ )
+ offload_info[name]["total_physical"] = getattr(
+ offloads[offload].space, "total_physical", None
+ )
+ offload_info[name]["total_provisioned"] = getattr(
+ offloads[offload].space, "total_provisioned", None
+ )
+ offload_info[name]["total_reduction"] = getattr(
+ offloads[offload].space, "total_reduction", None
+ )
+ offload_info[name]["unique"] = getattr(
+ offloads[offload].space, "unique", None
+ )
+ offload_info[name]["virtual"] = getattr(
+ offloads[offload].space, "virtual", None
+ )
+ offload_info[name]["replication"] = getattr(
+ offloads[offload].space, "replication", None
+ )
+ offload_info[name]["used_provisioned"] = getattr(
+ offloads[offload].space, "used_provisioned", None
+ )
+ return offload_info
+
+
+def generate_s3_offload_dict(module, array):
+ offload_info = {}
+ api_version = array._list_available_rest_versions()
+ if S3_REQUIRED_API_VERSION in api_version:
+ offload = array.list_s3_offload()
+ for target in range(0, len(offload)):
+ offloadt = offload[target]["name"]
+ offload_info[offloadt] = {
+ "status": offload[target]["status"],
+ "bucket": offload[target]["bucket"],
+ "protocol": offload[target]["protocol"],
+ "access_key_id": offload[target]["access_key_id"],
+ }
+ if P53_API_VERSION in api_version:
+ offload_info[offloadt]["placement_strategy"] = offload[target][
+ "placement_strategy"
+ ]
+ if V6_MINIMUM_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ offloads = list(arrayv6.get_offloads(protocol="s3").items)
+ for offload in range(0, len(offloads)):
+ name = offloads[offload].name
+ offload_info[name]["snapshots"] = getattr(
+ offloads[offload].space, "snapshots", None
+ )
+ offload_info[name]["shared"] = getattr(
+ offloads[offload].space, "shared", None
+ )
+ offload_info[name]["data_reduction"] = getattr(
+ offloads[offload].space, "data_reduction", None
+ )
+ offload_info[name]["thin_provisioning"] = getattr(
+ offloads[offload].space, "thin_provisioning", None
+ )
+ offload_info[name]["total_physical"] = getattr(
+ offloads[offload].space, "total_physical", None
+ )
+ offload_info[name]["total_provisioned"] = getattr(
+ offloads[offload].space, "total_provisioned", None
+ )
+ offload_info[name]["total_reduction"] = getattr(
+ offloads[offload].space, "total_reduction", None
+ )
+ offload_info[name]["unique"] = getattr(
+ offloads[offload].space, "unique", None
+ )
+ offload_info[name]["virtual"] = getattr(
+ offloads[offload].space, "virtual", None
+ )
+ offload_info[name]["replication"] = getattr(
+ offloads[offload].space, "replication", None
+ )
+ offload_info[name]["used_provisioned"] = getattr(
+ offloads[offload].space, "used_provisioned", None
+ )
+ return offload_info
+
+
+def generate_azure_offload_dict(module, array):
+ offload_info = {}
+ api_version = array._list_available_rest_versions()
+ if P53_API_VERSION in api_version:
+ offload = array.list_azure_offload()
+ for target in range(0, len(offload)):
+ offloadt = offload[target]["name"]
+ offload_info[offloadt] = {
+ "status": offload[target]["status"],
+ "account_name": offload[target]["account_name"],
+ "protocol": offload[target]["protocol"],
+ "secret_access_key": offload[target]["secret_access_key"],
+ "container_name": offload[target]["container_name"],
+ }
+ if V6_MINIMUM_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ offloads = list(arrayv6.get_offloads(protocol="azure").items)
+ for offload in range(0, len(offloads)):
+ name = offloads[offload].name
+ offload_info[name]["snapshots"] = getattr(
+ offloads[offload].space, "snapshots", None
+ )
+ offload_info[name]["shared"] = getattr(
+ offloads[offload].space, "shared", None
+ )
+ offload_info[name]["data_reduction"] = getattr(
+ offloads[offload].space, "data_reduction", None
+ )
+ offload_info[name]["thin_provisioning"] = getattr(
+ offloads[offload].space, "thin_provisioning", None
+ )
+ offload_info[name]["total_physical"] = getattr(
+ offloads[offload].space, "total_physical", None
+ )
+ offload_info[name]["total_provisioned"] = getattr(
+ offloads[offload].space, "total_provisioned", None
+ )
+ offload_info[name]["total_reduction"] = getattr(
+ offloads[offload].space, "total_reduction", None
+ )
+ offload_info[name]["unique"] = getattr(
+ offloads[offload].space, "unique", None
+ )
+ offload_info[name]["virtual"] = getattr(
+ offloads[offload].space, "virtual", None
+ )
+ offload_info[name]["replication"] = getattr(
+ offloads[offload].space, "replication", None
+ )
+ offload_info[name]["used_provisioned"] = getattr(
+ offloads[offload].space, "used_provisioned", None
+ )
+ return offload_info
+
+
+def generate_google_offload_dict(array):
+ offload_info = {}
+ offloads = list(array.get_offloads(protocol="google-cloud").items)
+ for offload in range(0, len(offloads)):
+ name = offloads[offload].name
+ offload_info[name] = {
+ # "access_key_id": offloads[offload].google-cloud.access_key_id,
+ # "bucket": offloads[offload].google-cloud.bucket,
+ # "auth_region": offloads[offload].google-cloud.auth_region,
+ "snapshots": getattr(offloads[offload].space, "snapshots", None),
+ "shared": getattr(offloads[offload].space, "shared", None),
+ "data_reduction": getattr(offloads[offload].space, "data_reduction", None),
+ "thin_provisioning": getattr(
+ offloads[offload].space, "thin_provisioning", None
+ ),
+ "total_physical": getattr(offloads[offload].space, "total_physical", None),
+ "total_provisioned": getattr(
+ offloads[offload].space, "total_provisioned", None
+ ),
+ "total_reduction": getattr(
+ offloads[offload].space, "total_reduction", None
+ ),
+ "unique": getattr(offloads[offload].space, "unique", None),
+ "virtual": getattr(offloads[offload].space, "virtual", None),
+ "replication": getattr(offloads[offload].space, "replication", None),
+ "used_provisioned": getattr(
+ offloads[offload].space, "used_provisioned", None
+ ),
+ }
+ return offload_info
+
+
+def generate_hgroups_dict(module, array):
+ hgroups_info = {}
+ api_version = array._list_available_rest_versions()
+ hgroups = array.list_hgroups()
+ for hgroup in range(0, len(hgroups)):
+ hostgroup = hgroups[hgroup]["name"]
+ hgroups_info[hostgroup] = {
+ "hosts": hgroups[hgroup]["hosts"],
+ "pgs": [],
+ "vols": [],
+ }
+ pghgroups = array.list_hgroups(protect=True)
+ for pghg in range(0, len(pghgroups)):
+ pgname = pghgroups[pghg]["name"]
+ hgroups_info[pgname]["pgs"].append(pghgroups[pghg]["protection_group"])
+ volhgroups = array.list_hgroups(connect=True)
+ for pgvol in range(0, len(volhgroups)):
+ pgname = volhgroups[pgvol]["name"]
+ volpgdict = [volhgroups[pgvol]["vol"], volhgroups[pgvol]["lun"]]
+ hgroups_info[pgname]["vols"].append(volpgdict)
+ if V6_MINIMUM_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ hgroups = list(arrayv6.get_host_groups().items)
+ for hgroup in range(0, len(hgroups)):
+ name = hgroups[hgroup].name
+ hgroups_info[name]["snapshots"] = hgroups[hgroup].space.snapshots
+ hgroups_info[name]["data_reduction"] = hgroups[hgroup].space.data_reduction
+ hgroups_info[name]["thin_provisioning"] = hgroups[
+ hgroup
+ ].space.thin_provisioning
+ hgroups_info[name]["total_physical"] = hgroups[hgroup].space.total_physical
+ hgroups_info[name]["total_provisioned"] = hgroups[
+ hgroup
+ ].space.total_provisioned
+ hgroups_info[name]["total_reduction"] = hgroups[
+ hgroup
+ ].space.total_reduction
+ hgroups_info[name]["unique"] = hgroups[hgroup].space.unique
+ hgroups_info[name]["virtual"] = hgroups[hgroup].space.virtual
+ hgroups_info[name]["used_provisioned"] = getattr(
+ hgroups[hgroup].space, "used_provisioned", None
+ )
+ return hgroups_info
+
+
+def generate_interfaces_dict(array):
+ api_version = array._list_available_rest_versions()
+ int_info = {}
+ ports = array.list_ports()
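+    # Map each port to whichever SAN identifier it exposes: FC WWN, iSCSI IQN or NVMe NQN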
+ for port in range(0, len(ports)):
+ int_name = ports[port]["name"]
+ if ports[port]["wwn"]:
+ int_info[int_name] = ports[port]["wwn"]
+ if ports[port]["iqn"]:
+ int_info[int_name] = ports[port]["iqn"]
+ if NVME_API_VERSION in api_version:
+ if ports[port]["nqn"]:
+ int_info[int_name] = ports[port]["nqn"]
+ return int_info
+
+
+def generate_vm_dict(array):
+ vm_info = {}
+ virt_machines = list(array.get_virtual_machines(vm_type="vvol").items)
+ for machine in range(0, len(virt_machines)):
+ name = virt_machines[machine].name
+ vm_info[name] = {
+ "vm_type": virt_machines[machine].vm_type,
+ "vm_id": virt_machines[machine].vm_id,
+ "destroyed": virt_machines[machine].destroyed,
+ "created": virt_machines[machine].created,
+ "time_remaining": getattr(virt_machines[machine], "time_remaining", None),
+ "latest_snapshot_name": getattr(
+ virt_machines[machine].recover_context, "name", None
+ ),
+ "latest_snapshot_id": getattr(
+ virt_machines[machine].recover_context, "id", None
+ ),
+ }
+ return vm_info
+
+
+def generate_alerts_dict(array):
+ alerts_info = {}
+ alerts = list(array.get_alerts().items)
+ for alert in range(0, len(alerts)):
+ name = alerts[alert].name
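+        # Alert timestamps are epoch milliseconds; render those present as local time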
+ try:
+ notified_time = alerts[alert].notified / 1000
+ notified_datetime = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(notified_time)
+ )
+ except AttributeError:
+ notified_datetime = ""
+ try:
+ closed_time = alerts[alert].closed / 1000
+ closed_datetime = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(closed_time)
+ )
+ except AttributeError:
+ closed_datetime = ""
+ try:
+ updated_time = alerts[alert].updated / 1000
+ updated_datetime = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(updated_time)
+ )
+ except AttributeError:
+ updated_datetime = ""
+ try:
+ created_time = alerts[alert].created / 1000
+ created_datetime = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(created_time)
+ )
+ except AttributeError:
+            created_datetime = ""
+ alerts_info[name] = {
+ "flagged": alerts[alert].flagged,
+ "category": alerts[alert].category,
+ "code": alerts[alert].code,
+ "issue": alerts[alert].issue,
+ "kb_url": alerts[alert].knowledge_base_url,
+ "summary": alerts[alert].summary,
+ "id": alerts[alert].id,
+ "state": alerts[alert].state,
+ "severity": alerts[alert].severity,
+ "component_name": alerts[alert].component_name,
+ "component_type": alerts[alert].component_type,
+ "created": created_datetime,
+ "closed": closed_datetime,
+ "notified": notified_datetime,
+ "updated": updated_datetime,
+ "actual": getattr(alerts[alert], "actual", ""),
+ "expected": getattr(alerts[alert], "expected", ""),
+ }
+ return alerts_info
+
+
+def generate_vmsnap_dict(array):
+ vmsnap_info = {}
+ virt_snaps = list(array.get_virtual_machine_snapshots(vm_type="vvol").items)
+ for snap in range(0, len(virt_snaps)):
+ name = virt_snaps[snap].name
+ vmsnap_info[name] = {
+ "vm_type": virt_snaps[snap].vm_type,
+ "vm_id": virt_snaps[snap].vm_id,
+ "destroyed": virt_snaps[snap].destroyed,
+ "created": virt_snaps[snap].created,
+ "time_remaining": getattr(virt_snaps[snap], "time_remaining", None),
+ "latest_pgsnapshot_name": getattr(
+ virt_snaps[snap].recover_context, "name", None
+ ),
+ "latest_pgsnapshot_id": getattr(
+ virt_snaps[snap].recover_context, "id", None
+ ),
+ }
+ return vmsnap_info
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(gather_subset=dict(default="minimum", type="list", elements="str"))
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ subset = [test.lower() for test in module.params["gather_subset"]]
+ valid_subsets = (
+ "all",
+ "minimum",
+ "config",
+ "performance",
+ "capacity",
+ "network",
+ "subnet",
+ "interfaces",
+ "hgroups",
+ "pgroups",
+ "hosts",
+ "admins",
+ "volumes",
+ "snapshots",
+ "pods",
+ "replication",
+ "vgroups",
+ "offload",
+ "apps",
+ "arrays",
+ "certs",
+ "kmip",
+ "clients",
+ "policies",
+ "dir_snaps",
+        "filesystems",
+        "alerts",
+ "virtual_machines",
+ )
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+ module.fail_json(
+            msg="value of gather_subset must be one or more of: %s, got: %s"
+ % (",".join(valid_subsets), ",".join(subset))
+ )
+
+ info = {}
+
+ if "minimum" in subset or "all" in subset or "apps" in subset:
+ info["default"] = generate_default_dict(module, array)
+ if "performance" in subset or "all" in subset:
+ info["performance"] = generate_perf_dict(array)
+ if "config" in subset or "all" in subset:
+ info["config"] = generate_config_dict(module, array)
+ if "capacity" in subset or "all" in subset:
+ info["capacity"] = generate_capacity_dict(module, array)
+ if "network" in subset or "all" in subset:
+ info["network"] = generate_network_dict(module, array)
+ if "subnet" in subset or "all" in subset:
+ info["subnet"] = generate_subnet_dict(array)
+ if "interfaces" in subset or "all" in subset:
+ info["interfaces"] = generate_interfaces_dict(array)
+ if "hosts" in subset or "all" in subset:
+ info["hosts"] = generate_host_dict(module, array)
+ if "volumes" in subset or "all" in subset:
+ info["volumes"] = generate_vol_dict(module, array)
+ info["deleted_volumes"] = generate_del_vol_dict(module, array)
+ if "snapshots" in subset or "all" in subset:
+ info["snapshots"] = generate_snap_dict(module, array)
+ info["deleted_snapshots"] = generate_del_snap_dict(module, array)
+ if "hgroups" in subset or "all" in subset:
+ info["hgroups"] = generate_hgroups_dict(module, array)
+ if "pgroups" in subset or "all" in subset:
+ info["pgroups"] = generate_pgroups_dict(module, array)
+ if "pods" in subset or "all" in subset or "replication" in subset:
+ info["replica_links"] = generate_rl_dict(module, array)
+ info["pods"] = generate_pods_dict(module, array)
+ info["deleted_pods"] = generate_del_pods_dict(module, array)
+ if "admins" in subset or "all" in subset:
+ info["admins"] = generate_admin_dict(array)
+ if "vgroups" in subset or "all" in subset:
+ info["vgroups"] = generate_vgroups_dict(module, array)
+ info["deleted_vgroups"] = generate_del_vgroups_dict(module, array)
+ if "offload" in subset or "all" in subset:
+ info["azure_offload"] = generate_azure_offload_dict(module, array)
+ info["nfs_offload"] = generate_nfs_offload_dict(module, array)
+ info["s3_offload"] = generate_s3_offload_dict(module, array)
+ if "apps" in subset or "all" in subset:
+ if "CBS" not in info["default"]["array_model"]:
+ info["apps"] = generate_apps_dict(array)
+ else:
+ info["apps"] = {}
+ if "arrays" in subset or "all" in subset:
+ info["arrays"] = generate_conn_array_dict(module, array)
+ if "certs" in subset or "all" in subset:
+ info["certs"] = generate_certs_dict(array)
+ if "kmip" in subset or "all" in subset:
+ info["kmip"] = generate_kmip_dict(array)
+ if FILES_API_VERSION in api_version:
+ array_v6 = get_array(module)
+ if "offload" in subset or "all" in subset:
+ info["google_offload"] = generate_google_offload_dict(array_v6)
+ if "filesystems" in subset or "all" in subset:
+ info["filesystems"] = generate_filesystems_dict(array_v6)
+ if "policies" in subset or "all" in subset:
+ if NFS_USER_MAP_VERSION in api_version:
+ user_map = True
+ else:
+ user_map = False
+ if DIR_QUOTA_API_VERSION in api_version:
+ quota = True
+ else:
+ quota = False
+ info["policies"] = generate_policies_dict(array_v6, quota, user_map)
+ if "clients" in subset or "all" in subset:
+ info["clients"] = generate_clients_dict(array_v6)
+ if "dir_snaps" in subset or "all" in subset:
+ info["dir_snaps"] = generate_dir_snaps_dict(array_v6)
+ if "snapshots" in subset or "all" in subset:
+ info["pg_snapshots"] = generate_pgsnaps_dict(array_v6)
+ if "alerts" in subset or "all" in subset:
+ info["alerts"] = generate_alerts_dict(array_v6)
+ if VM_VERSION in api_version and (
+ "virtual_machines" in subset or "all" in subset
+ ):
+ info["virtual_machines"] = generate_vm_dict(array_v6)
+ info["virtual_machines_snaps"] = generate_vmsnap_dict(array_v6)
+
+ module.exit_json(changed=False, purefa_info=info)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py
new file mode 100644
index 000000000..8e65ee07e
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_inventory
+short_description: Collect information from Pure Storage FlashArray
+version_added: '1.0.0'
+description:
+  - Collect hardware inventory information from a Pure Storage FlashArray
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: collect FlashArray inventory
+  purestorage.flasharray.purefa_inventory:
+    fa_url: 10.10.10.2
+    api_token: e31060a7-21fc-e277-6240-25983c6c4592
+  register: array_info
+- name: show inventory information
+  debug:
+    msg: "{{ array_info['purefa_inv'] }}"
+
+"""
+
+RETURN = r"""
+purefa_inventory:
+ description: Returns the inventory information for the FlashArray
+ returned: always
+ type: dict
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+
+NEW_API_VERSION = "2.2"
+SFP_API_VERSION = "2.16"
+
+
+def generate_new_hardware_dict(array, versions):
+ hw_info = {
+ "fans": {},
+ "controllers": {},
+ "temps": {},
+ "drives": {},
+ "interfaces": {},
+ "power": {},
+ "chassis": {},
+ "tempatures": {},
+ }
+ components = list(array.get_hardware().items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ if components[component].type == "chassis":
+ hw_info["chassis"][component_name] = {
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "identify_enabled": components[component].identify_enabled,
+ }
+ if components[component].type == "controller":
+ hw_info["controllers"][component_name] = {
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "identify_enabled": components[component].identify_enabled,
+ }
+ if components[component].type == "cooling":
+ hw_info["fans"][component_name] = {
+ "status": components[component].status,
+ }
+ if components[component].type == "temp_sensor":
+ hw_info["controllers"][component_name] = {
+ "status": components[component].status,
+ "temperature": components[component].temperature,
+ }
+ if components[component].type == "drive_bay":
+ hw_info["drives"][component_name] = {
+ "status": components[component].status,
+ "identify_enabled": components[component].identify_enabled,
+ "serial": getattr(components[component], "serial", None),
+ }
+ if components[component].type in [
+ "sas_port",
+ "fc_port",
+ "eth_port",
+ "ib_port",
+ ]:
+ hw_info["interfaces"][component_name] = {
+ "type": components[component].type,
+ "status": components[component].status,
+ "speed": components[component].speed,
+ "connector_type": None,
+ "rx_los": None,
+ "rx_power": None,
+ "static": {},
+ "temperature": None,
+ "tx_bias": None,
+ "tx_fault": None,
+ "tx_power": None,
+ "voltage": None,
+ }
+ if components[component].type == "power_supply":
+ hw_info["power"][component_name] = {
+ "status": components[component].status,
+ "voltage": components[component].voltage,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ }
+ drives = list(array.get_drives().items)
+ for drive in range(0, len(drives)):
+ drive_name = drives[drive].name
+ hw_info["drives"][drive_name] = {
+ "capacity": drives[drive].capacity,
+ "status": drives[drive].status,
+ "protocol": getattr(drives[drive], "protocol", None),
+ "type": drives[drive].type,
+ }
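+    # REST 2.16 and later also expose per-port transceiver (SFP) details;
+    # fold them into the interface entries created above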
+ if SFP_API_VERSION in versions:
+ port_details = list(array.get_network_interfaces_port_details().items)
+ for port_detail in range(0, len(port_details)):
+ port_name = port_details[port_detail].name
+ hw_info["interfaces"][port_name]["interface_type"] = port_details[
+ port_detail
+ ].interface_type
+ hw_info["interfaces"][port_name]["rx_los"] = (
+ port_details[port_detail].rx_los[0].flag
+ )
+ hw_info["interfaces"][port_name]["rx_power"] = (
+ port_details[port_detail].rx_power[0].measurement
+ )
+ hw_info["interfaces"][port_name]["static"] = {
+ "connector_type": port_details[port_detail].static.connector_type,
+ "vendor_name": port_details[port_detail].static.vendor_name,
+ "vendor_oui": port_details[port_detail].static.vendor_oui,
+ "vendor_serial_number": port_details[
+ port_detail
+ ].static.vendor_serial_number,
+ "vendor_part_number": port_details[
+ port_detail
+ ].static.vendor_part_number,
+ "vendor_date_code": port_details[port_detail].static.vendor_date_code,
+ "signaling_rate": port_details[port_detail].static.signaling_rate,
+ "wavelength": port_details[port_detail].static.wavelength,
+ "rate_identifier": port_details[port_detail].static.rate_identifier,
+ "identifier": port_details[port_detail].static.identifier,
+ "link_length": port_details[port_detail].static.link_length,
+ "voltage_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.voltage_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.voltage_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.voltage_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.voltage_thresholds.warn_low,
+ },
+ "tx_power_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.tx_power_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.tx_power_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.tx_power_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.tx_power_thresholds.warn_low,
+ },
+ "rx_power_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.rx_power_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.rx_power_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.rx_power_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.rx_power_thresholds.warn_low,
+ },
+ "tx_bias_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.warn_low,
+ },
+ "temperature_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.temperature_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.temperature_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.temperature_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.temperature_thresholds.warn_low,
+ },
+ "fc_speeds": port_details[port_detail].static.fc_speeds,
+ "fc_technology": port_details[port_detail].static.fc_technology,
+ "encoding": port_details[port_detail].static.encoding,
+ "fc_link_lengths": port_details[port_detail].static.fc_link_lengths,
+ "fc_transmission_media": port_details[
+ port_detail
+ ].static.fc_transmission_media,
+ "extended_identifier": port_details[
+ port_detail
+ ].static.extended_identifier,
+ }
+ hw_info["interfaces"][port_name]["temperature"] = (
+ port_details[port_detail].temperature[0].measurement
+ )
+ hw_info["interfaces"][port_name]["tx_bias"] = (
+ port_details[port_detail].tx_bias[0].measurement
+ )
+ hw_info["interfaces"][port_name]["tx_fault"] = (
+ port_details[port_detail].tx_fault[0].flag
+ )
+ hw_info["interfaces"][port_name]["tx_power"] = (
+ port_details[port_detail].tx_power[0].measurement
+ )
+ hw_info["interfaces"][port_name]["voltage"] = (
+ port_details[port_detail].voltage[0].measurement
+ )
+ return hw_info
+
+
+def generate_hardware_dict(array):
+ hw_info = {
+ "fans": {},
+ "controllers": {},
+ "temps": {},
+ "drives": {},
+ "interfaces": {},
+ "power": {},
+ "chassis": {},
+ }
+ components = array.list_hardware()
+ for component in range(0, len(components)):
+ component_name = components[component]["name"]
+ if "FAN" in component_name:
+ fan_name = component_name
+ hw_info["fans"][fan_name] = {"status": components[component]["status"]}
+ if "PWR" in component_name:
+ pwr_name = component_name
+ hw_info["power"][pwr_name] = {
+ "status": components[component]["status"],
+ "voltage": components[component]["voltage"],
+ "serial": components[component]["serial"],
+ "model": components[component]["model"],
+ }
+ if "IB" in component_name:
+ ib_name = component_name
+ hw_info["interfaces"][ib_name] = {
+ "status": components[component]["status"],
+ "speed": components[component]["speed"],
+ }
+ if "SAS" in component_name:
+ sas_name = component_name
+ hw_info["interfaces"][sas_name] = {
+ "status": components[component]["status"],
+ "speed": components[component]["speed"],
+ }
+ if "ETH" in component_name:
+ eth_name = component_name
+ hw_info["interfaces"][eth_name] = {
+ "status": components[component]["status"],
+ "speed": components[component]["speed"],
+ }
+ if "FC" in component_name:
+            fc_name = component_name
+            hw_info["interfaces"][fc_name] = {
+ "status": components[component]["status"],
+ "speed": components[component]["speed"],
+ }
+ if "TMP" in component_name:
+ tmp_name = component_name
+ hw_info["temps"][tmp_name] = {
+ "status": components[component]["status"],
+ "temperature": components[component]["temperature"],
+ }
+ if component_name in ["CT0", "CT1"]:
+ cont_name = component_name
+ hw_info["controllers"][cont_name] = {
+ "status": components[component]["status"],
+ "serial": components[component]["serial"],
+ "model": components[component]["model"],
+ }
+ if component_name in ["CH0"]:
+            chassis_name = component_name
+            hw_info["chassis"][chassis_name] = {
+ "status": components[component]["status"],
+ "serial": components[component]["serial"],
+ "model": components[component]["model"],
+ }
+
+ drives = array.list_drives()
+ for drive in range(0, len(drives)):
+ drive_name = drives[drive]["name"]
+ hw_info["drives"][drive_name] = {
+ "capacity": drives[drive]["capacity"],
+ "status": drives[drive]["status"],
+ "protocol": drives[drive]["protocol"],
+ "type": drives[drive]["type"],
+ }
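+        # copy the serial number from the matching hardware component entry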
+ for disk in range(0, len(components)):
+ if components[disk]["name"] == drive_name:
+ hw_info["drives"][drive_name]["serial"] = components[disk]["serial"]
+
+ return hw_info
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ inv_info = {}
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if NEW_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ inv_info = generate_new_hardware_dict(arrayv6, api_version)
+ else:
+ inv_info = generate_hardware_dict(array)
+ module.exit_json(changed=False, purefa_inv=inv_info)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py
new file mode 100644
index 000000000..8774abe87
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_kmip
+version_added: '1.10.0'
+short_description: Manage FlashArray KMIP server objects
+description:
+- Manage FlashArray KMIP Server objects
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the KMIP server object
+ type: str
+ required: true
+ certificate:
+ description:
+    - Name of existing certificate used to verify FlashArray
+ authenticity to the KMIP server.
+ - Use the I(purestorage.flasharray.purefa_certs) module to create certificates.
+ type: str
+ state:
+ description:
+ - Action for the module to perform
+ default: present
+ choices: [ absent, present ]
+ type: str
+ ca_certificate:
+ type: str
+ description:
+ - The text of the CA certificate for the KMIP server.
+ - Includes the "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----" lines
+ - Does not exceed 3000 characters in length
+ uris:
+ type: list
+ elements: str
+ description:
+ - A list of URIs for the configured KMIP servers.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create KMIP object
+ purestorage.flasharray.purefa_kmip:
+ name: foo
+ certificate: bar
+ ca_certificate: "{{lookup('file', 'example.crt') }}"
+ uris:
+ - 1.1.1.1:8888
+ - 2.3.3.3:9999
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete KMIP object
+ purestorage.flasharray.purefa_kmip:
+ name: foo
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update KMIP object
+ purestorage.flasharray.purefa_kmip:
+ name: foo
+ ca_certificate: "{{lookup('file', 'example2.crt') }}"
+ uris:
+ - 3.3.3.3:8888
+ - 4.4.4.4:9999
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+
+
+def update_kmip(module, array):
+ """Update existing KMIP object"""
+ changed = False
+ current_kmip = list(array.get_kmip(names=[module.params["name"]]).items)[0]
+ if (
+ module.params["certificate"]
+ and current_kmip.certificate.name != module.params["certificate"]
+ ):
+ if (
+ array.get_certificates(names=[module.params["certificate"]]).status_code
+ != 200
+ ):
+ module.fail_json(
+ msg="Array certificate {0} does not exist.".format(
+ module.params["certificate"]
+ )
+ )
+ changed = True
+ certificate = module.params["certificate"]
+ else:
+ certificate = current_kmip.certificate.name
+ if module.params["uris"] and sorted(current_kmip.uris) != sorted(
+ module.params["uris"]
+ ):
+ changed = True
+ uris = sorted(module.params["uris"])
+ else:
+ uris = sorted(current_kmip.uris)
+ if (
+ module.params["ca_certificate"]
+ and module.params["ca_certificate"] != current_kmip.ca_certificate
+ ):
+ changed = True
+ ca_cert = module.params["ca_certificate"]
+ else:
+ ca_cert = current_kmip.ca_certificate
+ if not module.check_mode:
+ if changed:
+ kmip = flasharray.KmipPost(
+ uris=uris,
+ ca_certificate=ca_cert,
+ certificate=flasharray.ReferenceNoId(name=certificate),
+ )
+ res = array.patch_kmip(names=[module.params["name"]], kmip=kmip)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Updating existing KMIP object {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def create_kmip(module, array):
+ """Create KMIP object"""
+ if array.get_certificates(names=[module.params["certificate"]]).status_code != 200:
+ module.fail_json(
+ msg="Array certificate {0} does not exist.".format(
+ module.params["certificate"]
+ )
+ )
+ changed = True
+ kmip = flasharray.KmipPost(
+ uris=sorted(module.params["uris"]),
+ ca_certificate=module.params["ca_certificate"],
+ certificate=flasharray.ReferenceNoId(name=module.params["certificate"]),
+ )
+ if not module.check_mode:
+ res = array.post_kmip(names=[module.params["name"]], kmip=kmip)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Creating KMIP object {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_kmip(module, array):
+ """Delete existing KMIP object"""
+ changed = True
+ if not module.check_mode:
+ res = array.delete_kmip(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete {0} KMIP object. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type="str",
+ default="present",
+ choices=["absent", "present"],
+ ),
+ name=dict(type="str", required=True),
+ certificate=dict(type="str"),
+ ca_certificate=dict(type="str", no_log=True),
+ uris=dict(type="list", elements="str"),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ array = get_array(module)
+ state = module.params["state"]
+ exists = bool(array.get_kmip(names=[module.params["name"]]).status_code == 200)
+ if module.params["certificate"] and len(module.params["certificate"]) > 3000:
+ module.fail_json(msg="Certificate exceeds 3000 characters")
+
+ if not exists and state == "present":
+ create_kmip(module, array)
+ elif exists and state == "present":
+ update_kmip(module, array)
+ elif exists and state == "absent":
+ delete_kmip(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py
new file mode 100644
index 000000000..a2f8e136d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["deprecated"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_logging
+version_added: '1.19.0'
+short_description: Manage Pure Storage FlashArray Audit and Session logs
+description:
+- View the FlashArray audit trail or session logs, newest to oldest, based on (start) time
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ log_type:
+ description:
+ - The type of logs to be viewed
+ type: str
+ default: audit
+ choices: [audit, session]
+ limit:
+ description:
+ - The maximum number of audit events returned
+ default: 1000
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: List last 100 audit events
+  purestorage.flasharray.purefa_logging:
+ limit: 100
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: List last 24 session events
+  purestorage.flasharray.purefa_logging:
+ limit: 24
+ log_type: session
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+AUDIT_API_VERSION = "2.2"
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ limit=dict(type="int", default=1000),
+ log_type=dict(type="str", default="audit", choices=["audit", "session"]),
+ )
+ )
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ audits = []
+ changed = False
+ if AUDIT_API_VERSION in api_version:
+ changed = True
+ array = get_array(module)
+ if not module.check_mode:
+ if module.params["log_type"] == "audit":
+ all_audits = list(
+ array.get_audits(
+ limit=module.params["limit"],
+ sort=flasharray.Property("time-"),
+ ).items
+ )
+ else:
+ all_audits = list(
+ array.get_sessions(
+ limit=module.params["limit"],
+ sort=flasharray.Property("start_time-"),
+ ).items
+ )
+ for audit in range(0, len(all_audits)):
+ if module.params["log_type"] == "session":
+ start_time = getattr(all_audits[audit], "start_time", None)
+ end_time = getattr(all_audits[audit], "end_time", None)
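+                    # timestamps are epoch milliseconds; convert to seconds
+                    # before handing them to time.localtime()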
+ if start_time:
+ human_start_time = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(start_time / 1000)
+ )
+ else:
+ human_start_time = None
+ if end_time:
+ human_end_time = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(end_time / 1000)
+ )
+ else:
+ human_end_time = None
+
+ data = {
+ "start_time": human_start_time,
+ "end_time": human_end_time,
+ "location": getattr(all_audits[audit], "location", None),
+ "user": getattr(all_audits[audit], "user", None),
+ "event": all_audits[audit].event,
+ "event_count": all_audits[audit].event_count,
+ "user_interface": getattr(
+ all_audits[audit], "user_interface", None
+ ),
+ }
+ else:
+ event_time = getattr(all_audits[audit], "time", None)
+ if event_time:
+ human_event_time = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(event_time / 1000)
+ )
+ else:
+ human_event_time = None
+ data = {
+ "time": human_event_time,
+ "arguments": all_audits[audit].arguments,
+ "command": all_audits[audit].command,
+ "subcommand": all_audits[audit].subcommand,
+ "user": all_audits[audit].user,
+ "origin": all_audits[audit].origin.name,
+ }
+ audits.append(data)
+ else:
+ module.fail_json(msg="Purity version does not support audit log return")
+ if module.params["log_type"] == "audit":
+ module.exit_json(changed=changed, audits=audits)
+ else:
+ module.exit_json(changed=changed, sessions=audits)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_maintenance.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_maintenance.py
new file mode 100644
index 000000000..8aa5c76f9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_maintenance.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_maintenance
+version_added: '1.7.0'
+short_description: Configure Pure Storage FlashArray Maintenance Windows
+description:
+- Configuration for Pure Storage FlashArray Maintenance Windows.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+    - Create or delete maintenance window
+ type: str
+ default: present
+ choices: [ absent, present ]
+  timeout:
+ type: int
+ default: 3600
+ description:
+ - Maintenance window period, specified in seconds.
+ - Range allowed is 1 minute (60 seconds) to 24 hours (86400 seconds)
+ - Default setting is 1 hour (3600 seconds)
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing maintenance window
+ purestorage.flasharray.purefa_maintenance:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set maintenance window to default of 1 hour
+ purestorage.flasharray.purefa_maintenance:
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update existing maintenance window
+ purestorage.flasharray.purefa_maintenance:
+ state: present
+ timeout: 86400
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ purefa_argument_spec,
+)
+
+
+def delete_window(module, array):
+ """Delete Maintenance Window"""
+ changed = False
+ if list(array.get_maintenance_windows().items):
+ changed = True
+ if not module.check_mode:
+ state = array.delete_maintenance_windows(names=["environment"])
+ if state.status_code != 200:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def set_window(module, array):
+ """Set Maintenace Window"""
+ changed = True
+ if not 60 <= module.params["timeout"] <= 86400:
+ module.fail_json(msg="Maintenance Window Timeout is out of range (60 to 86400)")
+ window = flasharray.MaintenanceWindowPost(timeout=module.params["timeout"] * 1000)
+ if not module.check_mode:
+ state = array.post_maintenance_windows(
+ names=["environment"], maintenance_window=window
+ )
+ if state.status_code != 200:
+ module.fail_json(msg="Setting maintenance window failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ timeout=dict(type="int", default=3600),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_array(module)
+
+ if module.params["state"] == "absent":
+ delete_window(module, array)
+ else:
+ set_window(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py
new file mode 100644
index 000000000..a28bd56b2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_messages
+version_added: '1.14.0'
+short_description: List FlashArray Alert Messages
+description:
+- List Alert messages based on filters provided
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ severity:
+ description:
+    - Severity of the alerts to show
+ type: list
+ elements: str
+ choices: [ all, critical, warning, info ]
+ default: [ all ]
+ state:
+ description:
+ - State of alerts to show
+ default: open
+ choices: [ all, open, closed ]
+ type: str
+ flagged:
+ description:
+ - Show alerts that have been acknowledged or not
+ default: false
+ type: bool
+ history:
+ description:
+ - Historical time period to show alerts for, from present time
+ - Allowed time period are hour(h), day(d), week(w) and year(y)
+ type: str
+ default: 1w
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Show critical alerts from past 4 weeks that haven't been acknowledged
+  purestorage.flasharray.purefa_messages:
+    history: 4w
+    flagged: false
+ severity:
+ - critical
+ fa_url: 10.10.10.2
+ api_token: 89a9356f-c203-d263-8a89-c229486a13ba
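+
+# Illustrative variation (not from the module docs): list all alerts,
+# any severity and state, from the past day
+- name: Show all alerts from the past 24 hours
+  purestorage.flasharray.purefa_messages:
+    history: 1d
+    state: all
+    fa_url: 10.10.10.2
+    api_token: 89a9356f-c203-d263-8a89-c229486a13ba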
+"""
+
+RETURN = r"""
+"""
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+ALLOWED_PERIODS = ["h", "d", "w", "y"]
+# Time periods in milliseconds
+HOUR = 3600000
+DAY = HOUR * 24
+WEEK = DAY * 7
+YEAR = WEEK * 52
+
+
+def _create_time_window(window):
+ period = window[-1].lower()
+ multiple = int(window[0:-1])
+ if period == "h":
+ return HOUR * multiple
+ if period == "d":
+ return DAY * multiple
+ if period == "w":
+ return WEEK * multiple
+ if period == "y":
+ return YEAR * multiple
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="open", choices=["all", "open", "closed"]),
+ history=dict(type="str", default="1w"),
+ flagged=dict(type="bool", default=False),
+ severity=dict(
+ type="list",
+ elements="str",
+ default=["all"],
+ choices=["all", "critical", "warning", "info"],
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ time_now = int(time.time() * 1000)
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ array_v6 = get_array(module)
+ if module.params["history"][-1].lower() not in ALLOWED_PERIODS:
+ module.fail_json(msg="historical window value is not an allowsd time period")
+ since_time = str(time_now - _create_time_window(module.params["history"].lower()))
+ if module.params["flagged"]:
+ flagged = " and flagged='True'"
+ else:
+ flagged = " and flagged='False'"
+
+ multi_sev = False
+ if len(module.params["severity"]) > 1:
+ if "all" in module.params["severity"]:
+ module.params["severity"] = ["*"]
+ else:
+ multi_sev = True
+ if multi_sev:
+ severity = " and ("
+ for level in range(0, len(module.params["severity"])):
+ severity += "severity='" + str(module.params["severity"][level]) + "' or "
+ severity = severity[0:-4] + ")"
+ else:
+ if module.params["severity"] == ["all"]:
+ severity = " and severity='*'"
+ else:
+ severity = " and severity='" + str(module.params["severity"][0]) + "'"
+ messages = {}
+ if module.params["state"] == "all":
+ state = " and state='*'"
+ else:
+ state = " and state='" + module.params["state"] + "'"
+ filter_string = "notified>" + since_time + state + flagged + severity
+    res = array_v6.get_alerts(filter=filter_string)
+    if res.status_code != 200:
+        module.fail_json(
+            msg="Failed to get alert messages. Error: {0}".format(
+                res.errors[0].message
+            )
+        )
+    alerts = list(res.items)
+ for message in range(0, len(alerts)):
+ name = alerts[message].name
+ messages[name] = {
+ "summary": alerts[message].summary,
+ "component_type": alerts[message].component_type,
+ "component_name": alerts[message].component_name,
+ "code": alerts[message].code,
+ "severity": alerts[message].severity,
+ "actual": alerts[message].actual,
+ "issue": alerts[message].issue,
+ "state": alerts[message].state,
+ "flagged": alerts[message].flagged,
+ "closed": None,
+ "created": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(alerts[message].created / 1000),
+ )
+ + " UTC",
+ "updated": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(alerts[message].updated / 1000),
+ )
+ + " UTC",
+ }
+ if alerts[message].state == "closed":
+ messages[name]["closed"] = (
+ time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.gmtime(alerts[message].closed / 1000)
+ )
+ + " UTC"
+ )
+ module.exit_json(changed=False, purefa_messages=messages)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py
new file mode 100644
index 000000000..e5004568a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefa_network
+short_description: Manage network interfaces in a Pure Storage FlashArray
+version_added: '1.0.0'
+description:
+ - This module manages the physical and virtual network interfaces on a Pure Storage FlashArray.
+ - To manage VLAN interfaces use the I(purestorage.flasharray.purefa_vlan) module.
+ - To manage network subnets use the I(purestorage.flasharray.purefa_subnet) module.
+ - To remove an IP address from a non-management port use 0.0.0.0/0
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Interface name (physical or virtual).
+ required: true
+ type: str
+ state:
+ description:
+ - State of existing interface (on/off).
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ address:
+ description:
+ - IPv4 or IPv6 address of interface in CIDR notation.
+ - To remove an IP address from a non-management port use 0.0.0.0/0
+ required: false
+ type: str
+ gateway:
+ description:
+ - IPv4 or IPv6 address of interface gateway.
+ required: false
+ type: str
+ mtu:
+ description:
+ - MTU size of the interface. Range is 1280 to 9216.
+ required: false
+ default: 1500
+ type: int
+ servicelist:
+ description:
+ - Assigns the specified (comma-separated) service list to one or more specified interfaces.
+ - Replaces the previous service list.
+ - Supported service lists depend on whether the network interface is Ethernet or Fibre Channel.
+ - Note that I(system) is only valid for Cloud Block Store.
+ elements: str
+ type: list
+ choices: [ "replication", "management", "ds", "file", "iscsi", "scsi-fc", "nvme-fc", "nvme-tcp", "nvme-roce", "system"]
+ version_added: '1.15.0'
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = """
+- name: Configure and enable network interface ct0.eth8
+ purestorage.flasharray.purefa_network:
+ name: ct0.eth8
+ gateway: 10.21.200.1
+ address: "10.21.200.18/24"
+ mtu: 9000
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Disable physical interface ct1.eth2
+ purestorage.flasharray.purefa_network:
+ name: ct1.eth2
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Enable virtual network interface vir0
+ purestorage.flasharray.purefa_network:
+ name: vir0
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Remove an IP address from iSCSI interface ct0.eth4
+ purestorage.flasharray.purefa_network:
+ name: ct0.eth4
+ address: 0.0.0.0/0
+ gateway: 0.0.0.0
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Change service list for FC interface ct0.fc1
+ purestorage.flasharray.purefa_network:
+ name: ct0.fc1
+ servicelist:
+ - replication
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+"""
+
+RETURN = """
+"""
+
+try:
+ from netaddr import IPAddress, IPNetwork
+
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+try:
+ from pypureclient.flasharray import NetworkInterfacePatch
+
+ HAS_PYPURECLIENT = True
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+FC_ENABLE_API = "2.4"
+
+
+def _is_cbs(array, is_cbs=False):
+ """Is the selected array a Cloud Block Store"""
+ model = array.get(controllers=True)[0]["model"]
+ is_cbs = bool("CBS" in model)
+ return is_cbs
+
+
+def _get_fc_interface(module, array):
+ """Return FC Interface or None"""
+ interface = {}
+ interface_list = array.get_network_interfaces(names=[module.params["name"]])
+ if interface_list.status_code == 200:
+ interface = list(interface_list.items)[0]
+ return interface
+ else:
+ return None
+
+
+def _get_interface(module, array):
+ """Return Network Interface or None"""
+ interface = {}
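+    # virtual interfaces (e.g. vir0) can be queried directly;
+    # physical interfaces are found by scanning the full list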
+ if module.params["name"][0] == "v":
+ try:
+ interface = array.get_network_interface(module.params["name"])
+ except Exception:
+ return None
+ else:
+ try:
+ interfaces = array.list_network_interfaces()
+ except Exception:
+ return None
+ for ints in range(0, len(interfaces)):
+ if interfaces[ints]["name"] == module.params["name"]:
+ interface = interfaces[ints]
+ break
+ return interface
+
+
+def update_fc_interface(module, array, interface, api_version):
+ """Modify FC Interface settings"""
+ changed = False
+ if FC_ENABLE_API in api_version:
+ if not interface.enabled and module.params["state"] == "present":
+ changed = True
+ if not module.check_mode:
+ network = NetworkInterfacePatch(enabled=True, override_npiv_check=True)
+ res = array.patch_network_interfaces(
+ names=[module.params["name"]], network=network
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to enable interface {0}.".format(
+ module.params["name"]
+ )
+ )
+ if interface.enabled and module.params["state"] == "absent":
+ changed = True
+ if not module.check_mode:
+ network = NetworkInterfacePatch(enabled=False, override_npiv_check=True)
+ res = array.patch_network_interfaces(
+ names=[module.params["name"]], network=network
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to disable interface {0}.".format(
+ module.params["name"]
+ )
+ )
+ if module.params["servicelist"] and sorted(module.params["servicelist"]) != sorted(
+ interface.services
+ ):
+ changed = True
+ if not module.check_mode:
+ network = NetworkInterfacePatch(services=module.params["servicelist"])
+ res = array.patch_network_interfaces(
+ names=[module.params["name"]], network=network
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update interface service list {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def update_interface(module, array, interface):
+ """Modify Interface settings"""
+ changed = False
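+    # build a dict of the current settings and a dict of the desired state;
+    # any difference drives a single set_network_interface() call below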
+ current_state = {
+ "mtu": interface["mtu"],
+ "gateway": interface["gateway"],
+ "address": interface["address"],
+ "netmask": interface["netmask"],
+ "services": sorted(interface["services"]),
+ }
+ if not module.params["servicelist"]:
+ services = sorted(interface["services"])
+ else:
+ services = sorted(module.params["servicelist"])
+ if not module.params["address"]:
+ address = interface["address"]
+ else:
+ if module.params["gateway"]:
+ if module.params["gateway"] and module.params["gateway"] not in IPNetwork(
+ module.params["address"]
+ ):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ elif not module.params["gateway"] and interface["gateway"] not in [
+ None,
+ IPNetwork(module.params["address"]),
+ ]:
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ address = str(module.params["address"].split("/", 1)[0])
+ ip_version = str(IPAddress(address).version)
+ if not module.params["mtu"]:
+ mtu = interface["mtu"]
+ else:
+ if not 1280 <= module.params["mtu"] <= 9216:
+ module.fail_json(
+ msg="MTU {0} is out of range (1280 to 9216)".format(
+ module.params["mtu"]
+ )
+ )
+ else:
+ mtu = module.params["mtu"]
+ if module.params["address"]:
+ netmask = str(IPNetwork(module.params["address"]).netmask)
+ else:
+ netmask = interface["netmask"]
+ if not module.params["gateway"]:
+ gateway = interface["gateway"]
+ else:
+ cidr = str(IPAddress(netmask).netmask_bits())
+ full_addr = address + "/" + cidr
+ if module.params["gateway"] not in IPNetwork(full_addr):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ gateway = module.params["gateway"]
+ if ip_version == "6":
+ netmask = str(IPAddress(netmask).netmask_bits())
+ new_state = {
+ "address": address,
+ "mtu": mtu,
+ "gateway": gateway,
+ "netmask": netmask,
+ "services": services,
+ }
+ if new_state != current_state:
+ changed = True
+ if (
+ "management" in interface["services"] or "app" in interface["services"]
+ ) and address == "0.0.0.0/0":
+ module.fail_json(
+ msg="Removing IP address from a management or app port is not supported"
+ )
+ if not module.check_mode:
+ try:
+ if new_state["gateway"] is not None:
+ array.set_network_interface(
+ interface["name"],
+ address=new_state["address"],
+ mtu=new_state["mtu"],
+ netmask=new_state["netmask"],
+ gateway=new_state["gateway"],
+ )
+ else:
+ array.set_network_interface(
+ interface["name"],
+ address=new_state["address"],
+ mtu=new_state["mtu"],
+ netmask=new_state["netmask"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change settings for interface {0}.".format(
+ interface["name"]
+ )
+ )
+ if not interface["enabled"] and module.params["state"] == "present":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.enable_network_interface(interface["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to enable interface {0}.".format(interface["name"])
+ )
+ if interface["enabled"] and module.params["state"] == "absent":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.disable_network_interface(interface["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to disable interface {0}.".format(interface["name"])
+ )
+ if (
+ module.params["servicelist"]
+ and sorted(module.params["servicelist"]) != interface["services"]
+ ):
+ api_version = array._list_available_rest_versions()
+ if FC_ENABLE_API in api_version:
+ if HAS_PYPURECLIENT:
+ array = get_array(module)
+ changed = True
+ if not module.check_mode:
+ network = NetworkInterfacePatch(
+ services=module.params["servicelist"]
+ )
+ res = array.patch_network_interfaces(
+ names=[module.params["name"]], network=network
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update interface service list {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+            else:
+                module.warn(
+                    "Servicelist not updated as pypureclient module is required"
+                )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ address=dict(type="str"),
+ gateway=dict(type="str"),
+ mtu=dict(type="int", default=1500),
+ servicelist=dict(
+ type="list",
+ elements="str",
+ choices=[
+ "replication",
+ "management",
+ "ds",
+ "file",
+ "iscsi",
+ "scsi-fc",
+ "nvme-fc",
+ "nvme-tcp",
+ "nvme-roce",
+ "system",
+ ],
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_NETADDR:
+ module.fail_json(msg="netaddr module is required")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if not _is_cbs(array):
+ if module.params["servicelist"] and "system" in module.params["servicelist"]:
+ module.fail_json(
+ msg="Only Cloud Block Store supports the 'system' service type"
+ )
+ if "." in module.params["name"]:
+ if module.params["name"].split(".")[1][0].lower() == "f":
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="pypureclient module is required")
+ array = get_array(module)
+ interface = _get_fc_interface(module, array)
+ if not interface:
+ module.fail_json(msg="Invalid network interface specified.")
+ else:
+ update_fc_interface(module, array, interface, api_version)
+ else:
+ interface = _get_interface(module, array)
+ if not interface:
+ module.fail_json(msg="Invalid network interface specified.")
+ else:
+ update_interface(module, array, interface)
+ else:
+ interface = _get_interface(module, array)
+ if not interface:
+ module.fail_json(msg="Invalid network interface specified.")
+ else:
+ update_interface(module, array, interface)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py
new file mode 100644
index 000000000..e2a5c8f18
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_ntp
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray NTP settings
+description:
+- Set or erase NTP configuration for Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete NTP servers configuration
+ type: str
+ default: present
+ choices: [ absent, present ]
+ ntp_servers:
+ type: list
+ elements: str
+ description:
+ - A list of up to 4 alternate NTP servers. These may include IPv4,
+ IPv6 or FQDNs. Invalid IP addresses will cause the module to fail.
+ No validation is performed for FQDNs.
+ - If more than 4 servers are provided, only the first 4 unique
+ nameservers will be used.
+    - If no servers are given, a default of I(0.pool.ntp.org) will be used.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing NTP server entries
+ purestorage.flasharray.purefa_ntp:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set array NTP servers
+ purestorage.flasharray.purefa_ntp:
+ state: present
+ ntp_servers:
+ - "0.pool.ntp.org"
+ - "1.pool.ntp.org"
+ - "2.pool.ntp.org"
+ - "3.pool.ntp.org"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def _is_cbs(array, is_cbs=False):
+ """Is the selected array a Cloud Block Store"""
+ model = array.get(controllers=True)[0]["model"]
+ is_cbs = bool("CBS" in model)
+ return is_cbs
+
+
+def remove(duplicate):
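+    """Deduplicate the NTP server list while preserving order"""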
+ final_list = []
+ for num in duplicate:
+ if num not in final_list:
+ final_list.append(num)
+ return final_list
+
+
+def delete_ntp(module, array):
+ """Delete NTP Servers"""
+ if array.get(ntpserver=True)["ntpserver"] != []:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set(ntpserver=[])
+ except Exception:
+ module.fail_json(msg="Deletion of NTP servers failed")
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def create_ntp(module, array):
+ """Set NTP Servers"""
+ changed = True
+ if not module.check_mode:
+ if not module.params["ntp_servers"]:
+ module.params["ntp_servers"] = ["0.pool.ntp.org"]
+ try:
+ array.set(ntpserver=module.params["ntp_servers"][0:4])
+ except Exception:
+ module.fail_json(msg="Update of NTP servers failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ ntp_servers=dict(type="list", elements="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ required_if = [["state", "present", ["ntp_servers"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ array = get_system(module)
+ if _is_cbs(array):
+ module.warn("NTP settings are not necessary for a CBS array - ignoring...")
+ module.exit_json(changed=False)
+
+ if module.params["state"] == "absent":
+ delete_ntp(module, array)
+ else:
+ module.params["ntp_servers"] = remove(module.params["ntp_servers"])
+ if sorted(array.get(ntpserver=True)["ntpserver"]) != sorted(
+ module.params["ntp_servers"][0:4]
+ ):
+ create_ntp(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py
new file mode 100644
index 000000000..1265911fe
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_offload
+version_added: '1.0.0'
+short_description: Create, modify and delete NFS, S3 or Azure offload targets
+description:
+- Create, modify and delete NFS, S3 or Azure offload targets.
+- Only supported on Purity v5.2.0 or higher.
+- You must have a correctly configured offload network for offload to work.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of offload
+ default: present
+ choices: [ absent, present ]
+ type: str
+ name:
+ description:
+ - The name of the offload target
+ required: true
+ type: str
+ protocol:
+ description:
+ - Define which protocol the offload engine uses
+ default: nfs
+ choices: [ nfs, s3, azure, gcp ]
+ type: str
+ address:
+ description:
+ - The IP or FQDN address of the NFS server
+ type: str
+ share:
+ description:
+ - NFS export on the NFS server
+ type: str
+ options:
+ description:
+    - Additional mount options for the NFS share
+ - Supported mount options include I(port), I(rsize),
+ I(wsize), I(nfsvers), and I(tcp) or I(udp)
+ required: false
+ default: ""
+ type: str
+ access_key:
+ description:
+ - Access Key ID of the offload target
+ type: str
+ container:
+ description:
+ - Name of the blob container of the Azure target
+ default: offload
+ type: str
+ bucket:
+ description:
+ - Name of the bucket for the S3 or GCP target
+ type: str
+ account:
+ description:
+ - Name of the Azure blob storage account
+ type: str
+ secret:
+ description:
+ - Secret Access Key for the offload target
+ type: str
+ initialize:
+ description:
+ - Define whether to initialize the offload bucket
+ type: bool
+ default: true
+ placement:
+ description:
+ - AWS S3 placement strategy
+ type: str
+ choices: ['retention-based', 'aws-standard-class']
+ default: retention-based
+
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create NFS offload target
+ purestorage.flasharray.purefa_offload:
+ name: nfs-offload
+ protocol: nfs
+ address: 10.21.200.4
+ share: "/offload_target"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create S3 offload target
+ purestorage.flasharray.purefa_offload:
+ name: s3-offload
+ protocol: s3
+ access_key: "3794fb12c6204e19195f"
+ bucket: offload-bucket
+ secret: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+ placement: aws-standard-class
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create Azure offload target
+ purestorage.flasharray.purefa_offload:
+ name: azure-offload
+ protocol: azure
+ secret: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+ container: offload-container
+ account: user1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete offload target
+ purestorage.flasharray.purefa_offload:
+ name: nfs-offload
+ protocol: nfs
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PACKAGING = True
+try:
+ from packaging import version
+except ImportError:
+ HAS_PACKAGING = False
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "1.16"
+REGEX_TARGET_NAME = re.compile(r"^[a-zA-Z0-9\-]*$")
+P53_API_VERSION = "1.17"
+GCP_API_VERSION = "2.3"
+MULTIOFFLOAD_API_VERSION = "2.11"
+MULTIOFFLOAD_LIMIT = 5
+
+
+def get_target(module, array):
+ """Return target or None"""
+ try:
+ return array.get_offload(module.params["name"])
+ except Exception:
+ return None
+
+
+def create_offload(module, array):
+ """Create offload target"""
+ changed = True
+ api_version = array._list_available_rest_versions()
+    # First check if the offload network interface is there and enabled
+ try:
+ if not array.get_network_interface("@offload.data")["enabled"]:
+ module.fail_json(
+ msg="Offload Network interface not enabled. Please resolve."
+ )
+ except Exception:
+ module.fail_json(
+ msg="Offload Network interface not correctly configured. Please resolve."
+ )
+ if not module.check_mode:
+ if module.params["protocol"] == "nfs":
+ try:
+ array.connect_nfs_offload(
+ module.params["name"],
+ mount_point=module.params["share"],
+ address=module.params["address"],
+ mount_options=module.params["options"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create NFS offload {0}. "
+ "Please perform diagnostic checks.".format(module.params["name"])
+ )
+ if module.params["protocol"] == "s3":
+ if P53_API_VERSION in api_version:
+ try:
+ array.connect_s3_offload(
+ module.params["name"],
+ access_key_id=module.params["access_key"],
+ secret_access_key=module.params["secret"],
+ bucket=module.params["bucket"],
+ placement_strategy=module.params["placement"],
+ initialize=module.params["initialize"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create S3 offload {0}. "
+ "Please perform diagnostic checks.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ try:
+ array.connect_s3_offload(
+ module.params["name"],
+ access_key_id=module.params["access_key"],
+ secret_access_key=module.params["secret"],
+ bucket=module.params["bucket"],
+ initialize=module.params["initialize"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create S3 offload {0}. "
+ "Please perform diagnostic checks.".format(
+ module.params["name"]
+ )
+ )
+ if module.params["protocol"] == "azure" and P53_API_VERSION in api_version:
+ try:
+ array.connect_azure_offload(
+ module.params["name"],
+ container_name=module.params["container"],
+ secret_access_key=module.params["secret"],
+                    account_name=module.params["account"],
+ initialize=module.params["initialize"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create Azure offload {0}. "
+ "Please perform diagnostic checks.".format(module.params["name"])
+ )
+ if module.params["protocol"] == "gcp" and GCP_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ bucket = flasharray.OffloadGoogleCloud(
+ access_key_id=module.params["access_key"],
+ bucket=module.params["bucket"],
+ secret_access_key=module.params["secret"],
+ )
+ offload = flasharray.OffloadPost(google_cloud=bucket)
+ res = arrayv6.post_offloads(
+ offload=offload,
+ initialize=module.params["initialize"],
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create GCP offload {0}. Error: {1}"
+ "Please perform diagnostic checks.".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_offload(module, array):
+ """Update offload target"""
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def delete_offload(module, array):
+ """Delete offload target"""
+ changed = True
+ api_version = array._list_available_rest_versions()
+ if not module.check_mode:
+ if module.params["protocol"] == "nfs":
+ try:
+ array.disconnect_nfs_offload(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete NFS offload {0}.".format(
+ module.params["name"]
+ )
+ )
+ if module.params["protocol"] == "s3":
+ try:
+ array.disconnect_s3_offload(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete S3 offload {0}.".format(module.params["name"])
+ )
+ if module.params["protocol"] == "azure" and P53_API_VERSION in api_version:
+ try:
+ array.disconnect_azure_offload(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete Azure offload {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ protocol=dict(
+ type="str", default="nfs", choices=["nfs", "s3", "azure", "gcp"]
+ ),
+ placement=dict(
+ type="str",
+ default="retention-based",
+ choices=["retention-based", "aws-standard-class"],
+ ),
+ name=dict(type="str", required=True),
+ initialize=dict(default=True, type="bool"),
+ access_key=dict(type="str", no_log=False),
+ secret=dict(type="str", no_log=True),
+ bucket=dict(type="str"),
+ container=dict(type="str", default="offload"),
+ account=dict(type="str"),
+ share=dict(type="str"),
+ address=dict(type="str"),
+ options=dict(type="str", default=""),
+ )
+ )
+
+    # required_if cannot express compound (state AND protocol) conditions, so
+    # the protocol-specific required parameters are validated manually once
+    # the module arguments have been parsed.
+    module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    if module.params["state"] == "present":
+        required_params = {
+            "nfs": ["address", "share"],
+            "s3": ["access_key", "secret", "bucket"],
+            "gcp": ["access_key", "secret", "bucket"],
+            "azure": ["account", "secret"],
+        }
+        missing = [
+            param
+            for param in required_params[module.params["protocol"]]
+            if not module.params[param]
+        ]
+        if missing:
+            module.fail_json(
+                msg="protocol is {0} but the following are missing: {1}".format(
+                    module.params["protocol"], ", ".join(missing)
+                )
+            )
+
+ if not HAS_PACKAGING:
+ module.fail_json(msg="packagingsdk is required for this module")
+ if not HAS_PURESTORAGE and module.params["protocol"] == "gcp":
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ if (
+ not re.match(r"^[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9]$", module.params["name"])
+ or len(module.params["name"]) > 56
+ ):
+ module.fail_json(
+ msg="Target name invalid. "
+ "Target name must be between 1 and 56 characters (alphanumeric and -) in length "
+ "and begin and end with a letter or number. The name must include at least one letter."
+ )
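+    # For illustration: "gcp-offload1" satisfies the name pattern above, while
+    # "-offload" (leading dash) or "1target" (leading digit) does not.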
+ if module.params["protocol"] in ["s3", "gcp"]:
+ if (
+ not re.match(r"^[a-z0-9][a-z0-9.\-]*[a-z0-9]$", module.params["bucket"])
+ or len(module.params["bucket"]) > 63
+ ):
+ module.fail_json(
+ msg="Bucket name invalid. "
+ "Bucket name must be between 3 and 63 characters "
+ "(lowercase, alphanumeric, dash or period) in length "
+ "and begin and end with a letter or number."
+ )
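+        # For illustration: "offload-bucket.2024" satisfies the bucket pattern
+        # above, while "Offload_Bucket" (uppercase, underscore) does not.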
+
+ apps = array.list_apps()
+ app_version = 0
+ all_good = False
+ for app in range(0, len(apps)):
+ if apps[app]["name"] == "offload":
+ if (
+ apps[app]["enabled"]
+ and apps[app]["status"] == "healthy"
+ and version.parse(apps[app]["version"]) >= version.parse("5.2.0")
+ ):
+ all_good = True
+ app_version = apps[app]["version"]
+ break
+
+ if not all_good:
+ module.fail_json(
+ msg="Correct Offload app not installed or incorrectly configured"
+ )
+ else:
+ if version.parse(array.get()["version"]) != version.parse(app_version):
+ module.fail_json(
+ msg="Offload app version must match Purity version. Please upgrade."
+ )
+
+ target = get_target(module, array)
+ if module.params["state"] == "present" and not target:
+ offloads = array.list_offload()
+ target_count = len(offloads)
+        # Arrays without the multi-offload API support only a single target.
+        offload_limit = (
+            MULTIOFFLOAD_LIMIT if MULTIOFFLOAD_API_VERSION in api_version else 1
+        )
+        if target_count >= offload_limit:
+            module.fail_json(
+                msg="Cannot add offload target {0}. Offload Target Limit of {1} would be exceeded.".format(
+                    module.params["name"], offload_limit
+ )
+ )
+ # TODO: (SD) Remove this check when multi-protocol offloads are supported
+        if target_count and offloads[0]["protocol"] != module.params["protocol"]:
+ module.fail_json(msg="Currently all offloads must be of the same type.")
+ create_offload(module, array)
+ elif module.params["state"] == "present" and target:
+ update_offload(module, array)
+ elif module.params["state"] == "absent" and target:
+ delete_offload(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
new file mode 100644
index 000000000..3fa51ebbb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
@@ -0,0 +1,909 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_pg
+version_added: '1.0.0'
+short_description: Manage protection groups on Pure Storage FlashArrays
+description:
+- Create, delete or modify protection groups on Pure Storage FlashArrays.
+- If a protection group exists and you try to add invalid types, e.g. a host
+  to a volume protection group, the module will ignore the invalid types.
+- Protection Groups on Offload targets are supported.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the protection group.
+ type: str
+ aliases: [ pgroup ]
+ required: true
+ state:
+ description:
+ - Define whether the protection group should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ volume:
+ description:
+ - List of existing volumes to add to protection group.
+    - Note that volume names are case-sensitive, however FlashArray volume names are unique
+ and ignore case - you cannot have I(volumea) and I(volumeA)
+ type: list
+ elements: str
+ host:
+ description:
+ - List of existing hosts to add to protection group.
+    - Note that hostnames are case-sensitive, however FlashArray hostnames are unique
+ and ignore case - you cannot have I(hosta) and I(hostA)
+ type: list
+ elements: str
+ hostgroup:
+ description:
+ - List of existing hostgroups to add to protection group.
+    - Note that hostgroups are case-sensitive, however FlashArray hostgroup names are unique
+ and ignore case - you cannot have I(groupa) and I(groupA)
+ type: list
+ elements: str
+ eradicate:
+ description:
+    - Define whether to eradicate the protection group on delete or leave it in the trash.
+    type: bool
+ default: false
+ enabled:
+ description:
+    - Define whether to enable snapshots for the protection group.
+    type: bool
+ default: true
+ target:
+ description:
+ - List of remote arrays or offload target for replication protection group
+ to connect to.
+ - Note that all replicated protection groups are asynchronous.
+ - Target arrays or offload targets must already be connected to the source array.
+    - Maximum number of targets per Protection Group is 4, assuming your
+      configuration supports this.
+ type: list
+ elements: str
+ rename:
+ description:
+    - Rename a protection group.
+    - If the source protection group is in a Pod or Volume Group 'container',
+      you only need to provide the new protection group name in the same 'container'.
+ type: str
+ safe_mode:
+ description:
+    - Enables SafeMode restrictions on the protection group.
+    - B(Once set, disabling this can only be performed by Pure Technical Support)
+ type: bool
+ default: false
+ version_added: '1.13.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new local protection group
+ purestorage.flasharray.purefa_pg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new protection group called bar in pod called foo
+ purestorage.flasharray.purefa_pg:
+ name: "foo::bar"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new replicated protection group
+ purestorage.flasharray.purefa_pg:
+ name: foo
+ target:
+ - arrayb
+ - arrayc
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new replicated protection group to offload target and remote array
+ purestorage.flasharray.purefa_pg:
+ name: foo
+ target:
+ - offload
+ - arrayc
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create new protection group with snapshots disabled
+ purestorage.flasharray.purefa_pg:
+ name: foo
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete protection group
+ purestorage.flasharray.purefa_pg:
+ name: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Eradicate protection group foo on offload target where source array is arrayA
+ purestorage.flasharray.purefa_pg:
+ name: "arrayA:foo"
+ target: offload
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Rename protection group foo in pod arrayA to bar
+ purestorage.flasharray.purefa_pg:
+ name: "arrayA::foo"
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create protection group for hostgroups
+ purestorage.flasharray.purefa_pg:
+ name: bar
+ hostgroup:
+ - hg1
+ - hg2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create protection group for hosts
+ purestorage.flasharray.purefa_pg:
+ name: bar
+ host:
+ - host1
+ - host2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create replicated protection group for volumes
+ purestorage.flasharray.purefa_pg:
+ name: bar
+ volume:
+ - vol1
+ - vol2
+ target: arrayb
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
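+
+# Illustrative example (not part of the original examples): enabling SafeMode
+# at creation time; once set, it can only be disabled by Pure Technical Support.
+- name: Create protection group with SafeMode enabled
+  purestorage.flasharray.purefa_pg:
+    name: foo
+    safe_mode: true
+    fa_url: 10.10.10.2
+    api_token: e31060a7-21fc-e277-6240-25983c6c4592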
+"""
+
+RETURN = r"""
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+OFFLOAD_API_VERSION = "1.16"
+P53_API_VERSION = "1.17"
+AC_PG_API_VERSION = "1.13"
+RETENTION_LOCK_VERSION = "2.13"
+
+
+def get_pod(module, array):
+ """Get ActiveCluster Pod"""
+ pod_name = module.params["name"].split("::")[0]
+ try:
+ return array.get_pod(pod=pod_name)
+ except Exception:
+ return None
+
+
+def get_targets(array):
+ """Get Offload Targets"""
+ targets = []
+ try:
+ target_details = array.list_offload()
+ except Exception:
+ return None
+
+ for targetcnt in range(0, len(target_details)):
+ if target_details[targetcnt]["status"] in ["connected", "partially_connected"]:
+ targets.append(target_details[targetcnt]["name"])
+ return targets
+
+
+def get_arrays(array):
+ """Get Connected Arrays"""
+ arrays = []
+ array_details = array.list_array_connections()
+ api_version = array._list_available_rest_versions()
+ for arraycnt in range(0, len(array_details)):
+ if P53_API_VERSION in api_version:
+ if array_details[arraycnt]["status"] in [
+ "connected",
+ "partially_connected",
+ ]:
+ arrays.append(array_details[arraycnt]["array_name"])
+ else:
+ if array_details[arraycnt]["connected"]:
+ arrays.append(array_details[arraycnt]["array_name"])
+ return arrays
+
+
+def get_pending_pgroup(module, array):
+ """Get Protection Group"""
+ pgroup = None
+ if ":" in module.params["name"]:
+ if "::" not in module.params["name"]:
+ for pgrp in array.list_pgroups(pending=True, on="*"):
+ if pgrp["name"].casefold() == module.params["name"].casefold():
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups(pending=True):
+ if (
+ pgrp["name"].casefold() == module.params["name"].casefold()
+ and pgrp["time_remaining"]
+ ):
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups(pending=True):
+ if (
+ pgrp["name"].casefold() == module.params["name"].casefold()
+ and pgrp["time_remaining"]
+ ):
+ pgroup = pgrp
+ break
+
+ return pgroup
+
+
+def get_pgroup(module, array):
+ """Get Protection Group"""
+ pgroup = None
+ if ":" in module.params["name"]:
+ if "::" not in module.params["name"]:
+ for pgrp in array.list_pgroups(on="*"):
+ if pgrp["name"].casefold() == module.params["name"].casefold():
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups():
+ if pgrp["name"].casefold() == module.params["name"].casefold():
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups():
+ if pgrp["name"].casefold() == module.params["name"].casefold():
+ pgroup = pgrp
+ break
+
+ return pgroup
+
+
+def get_pgroup_sched(module, array):
+ """Get Protection Group Schedule"""
+ pgroup = None
+
+ for pgrp in array.list_pgroups(schedule=True):
+ if pgrp["name"].casefold() == module.params["name"].casefold():
+ pgroup = pgrp
+ break
+
+ return pgroup
+
+
+def check_pg_on_offload(module, array):
+ """Check if PG already exists on offload target"""
+ array_name = array.get()["array_name"]
+ remote_pg = array_name + ":" + module.params["name"]
+ targets = get_targets(array)
+ for target in targets:
+ remote_pgs = array.list_pgroups(pending=True, on=target)
+ for rpg in range(0, len(remote_pgs)):
+ if remote_pg == remote_pgs[rpg]["name"]:
+ return target
+ return None
+
+
+def make_pgroup(module, array):
+ """Create Protection Group"""
+ changed = True
+ if module.params["target"]:
+ api_version = array._list_available_rest_versions()
+ connected_targets = []
+ connected_arrays = get_arrays(array)
+ if OFFLOAD_API_VERSION in api_version:
+ connected_targets = get_targets(array)
+ offload_name = check_pg_on_offload(module, array)
+ if offload_name and offload_name in module.params["target"][0:4]:
+ module.fail_json(
+ msg="Protection Group {0} already exists on offload target {1}.".format(
+ module.params["name"], offload_name
+ )
+ )
+
+ connected_arrays = connected_arrays + connected_targets
+ if connected_arrays == []:
+ module.fail_json(msg="No connected targets on source array.")
+ if set(module.params["target"][0:4]).issubset(connected_arrays):
+ if not module.check_mode:
+ try:
+ array.create_pgroup(
+ module.params["name"], targetlist=module.params["target"][0:4]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Creation of replicated pgroup {0} failed. {1}".format(
+ module.params["name"], module.params["target"][0:4]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Check all selected targets are connected to the source array."
+ )
+ else:
+ if not module.check_mode:
+ try:
+ array.create_pgroup(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Creation of pgroup {0} failed.".format(module.params["name"])
+ )
+ try:
+ if module.params["target"]:
+ array.set_pgroup(
+ module.params["name"],
+ replicate_enabled=module.params["enabled"],
+ )
+ else:
+ array.set_pgroup(
+ module.params["name"], snap_enabled=module.params["enabled"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Enabling pgroup {0} failed.".format(module.params["name"])
+ )
+ if module.params["volume"]:
+ try:
+ array.set_pgroup(
+ module.params["name"], vollist=module.params["volume"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Adding volumes to pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ if module.params["host"]:
+ try:
+ array.set_pgroup(
+ module.params["name"], hostlist=module.params["host"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Adding hosts to pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ if module.params["hostgroup"]:
+ try:
+ array.set_pgroup(
+ module.params["name"], hgrouplist=module.params["hostgroup"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Adding hostgroups to pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ if module.params["safe_mode"]:
+ arrayv6 = get_array(module)
+ try:
+ arrayv6.patch_protection_groups(
+ names=[module.params["name"]],
+ protection_group=flasharray.ProtectionGroup(
+ retention_lock="ratcheted"
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to set SafeMode on pgroup {0}".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def rename_exists(module, array):
+ """Determine if rename target already exists"""
+ exists = False
+ new_name = module.params["rename"]
+ if ":" in module.params["name"]:
+ container = module.params["name"].split(":")[0]
+ new_name = container + ":" + module.params["rename"]
+ if "::" in module.params["name"]:
+ new_name = container + "::" + module.params["rename"]
+ for pgroup in array.list_pgroups(pending=True):
+ if pgroup["name"].casefold() == new_name.casefold():
+ exists = True
+ break
+ return exists
+
+
+def update_pgroup(module, array):
+ """Update Protection Group"""
+ changed = renamed = False
+ api_version = array._list_available_rest_versions()
+ if module.params["target"]:
+ connected_targets = []
+ connected_arrays = get_arrays(array)
+
+ if OFFLOAD_API_VERSION in api_version:
+ connected_targets = get_targets(array)
+ connected_arrays = connected_arrays + connected_targets
+ if connected_arrays == []:
+ module.fail_json(msg="No targets connected to source array.")
+ current_connects = array.get_pgroup(module.params["name"])["targets"]
+ current_targets = []
+
+ if current_connects:
+ for targetcnt in range(0, len(current_connects)):
+ current_targets.append(current_connects[targetcnt]["name"])
+
+ if set(module.params["target"][0:4]) != set(current_targets):
+ if not set(module.params["target"][0:4]).issubset(connected_arrays):
+ module.fail_json(
+ msg="Check all selected targets are connected to the source array."
+ )
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_pgroup(
+ module.params["name"],
+ targetlist=module.params["target"][0:4],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Changing targets for pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+
+ if (
+ module.params["target"]
+ and module.params["enabled"]
+ != get_pgroup_sched(module, array)["replicate_enabled"]
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_pgroup(
+ module.params["name"], replicate_enabled=module.params["enabled"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Changing enabled status of pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ elif (
+ not module.params["target"]
+ and module.params["enabled"] != get_pgroup_sched(module, array)["snap_enabled"]
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_pgroup(
+ module.params["name"], snap_enabled=module.params["enabled"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Changing enabled status of pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+
+ if (
+ module.params["volume"]
+ and get_pgroup(module, array)["hosts"] is None
+ and get_pgroup(module, array)["hgroups"] is None
+ ):
+ if get_pgroup(module, array)["volumes"] is None:
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"], vollist=module.params["volume"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Adding volumes to pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ cased_vols = list(module.params["volume"])
+ cased_pgvols = list(get_pgroup(module, array)["volumes"])
+ if not all(x in cased_pgvols for x in cased_vols):
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"], addvollist=module.params["volume"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Changing volumes in pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+
+ if (
+ module.params["host"]
+ and get_pgroup(module, array)["volumes"] is None
+ and get_pgroup(module, array)["hgroups"] is None
+ ):
+ if get_pgroup(module, array)["hosts"] is None:
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"], hostlist=module.params["host"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Adding hosts to pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ cased_hosts = list(module.params["host"])
+ cased_pghosts = list(get_pgroup(module, array)["hosts"])
+ if not all(x in cased_pghosts for x in cased_hosts):
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"], addhostlist=module.params["host"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Changing hosts in pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+
+ if (
+ module.params["hostgroup"]
+ and get_pgroup(module, array)["hosts"] is None
+ and get_pgroup(module, array)["volumes"] is None
+ ):
+ if get_pgroup(module, array)["hgroups"] is None:
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"], hgrouplist=module.params["hostgroup"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Adding hostgroups to pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ cased_hostg = list(module.params["hostgroup"])
+ cased_pghostg = list(get_pgroup(module, array)["hgroups"])
+ if not all(x in cased_pghostg for x in cased_hostg):
+ if not module.check_mode:
+ changed = True
+ try:
+ array.set_pgroup(
+ module.params["name"],
+ addhgrouplist=module.params["hostgroup"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Changing hostgroups in pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ if module.params["rename"]:
+ if not rename_exists(module, array):
+ if ":" in module.params["name"]:
+ container = module.params["name"].split(":")[0]
+ if "::" in module.params["name"]:
+ rename = container + "::" + module.params["rename"]
+ else:
+ rename = container + ":" + module.params["rename"]
+ else:
+ rename = module.params["rename"]
+ renamed = True
+ if not module.check_mode:
+ try:
+ array.rename_pgroup(module.params["name"], rename)
+ module.params["name"] = rename
+ except Exception:
+ module.fail_json(msg="Rename to {0} failed.".format(rename))
+ else:
+ module.warn(
+ "Rename failed. Protection group {0} already exists in container. Continuing with other changes...".format(
+ module.params["rename"]
+ )
+ )
+ if RETENTION_LOCK_VERSION in api_version:
+ arrayv6 = get_array(module)
+ current_pg = list(
+ arrayv6.get_protection_groups(names=[module.params["name"]]).items
+ )[0]
+ if current_pg.retention_lock == "unlocked" and module.params["safe_mode"]:
+ changed = True
+ if not module.check_mode:
+ res = arrayv6.patch_protection_groups(
+ names=[module.params["name"]],
+ protection_group=flasharray.ProtectionGroup(
+ retention_lock="ratcheted"
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set SafeMode on protection group {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ if current_pg.retention_lock == "ratcheted" and not module.params["safe_mode"]:
+ module.warn(
+ "Disabling SafeMode on protection group {0} can only be performed by Pure Technical Support".format(
+ module.params["name"]
+ )
+ )
+ changed = changed or renamed
+ module.exit_json(changed=changed)
+
+
+def eradicate_pgroup(module, array):
+ """Eradicate Protection Group"""
+ changed = True
+ if not module.check_mode:
+ if ":" in module.params["name"]:
+ if "::" not in module.params["name"]:
+ try:
+ target = "".join(module.params["target"])
+ array.destroy_pgroup(
+ module.params["name"], on=target, eradicate=True
+ )
+ except Exception:
+ module.fail_json(
+ msg="Eradicating pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ try:
+ array.destroy_pgroup(module.params["name"], eradicate=True)
+ except Exception:
+ module.fail_json(
+ msg="Eradicating pgroup {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ try:
+ array.destroy_pgroup(module.params["name"], eradicate=True)
+ except Exception:
+ module.fail_json(
+ msg="Eradicating pgroup {0} failed.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_pgroup(module, array):
+ """Delete Protection Group"""
+ changed = True
+ if not module.check_mode:
+ if ":" in module.params["name"]:
+ if "::" not in module.params["name"]:
+ try:
+ target = "".join(module.params["target"])
+ array.destroy_pgroup(module.params["name"], on=target)
+ except Exception:
+ module.fail_json(
+ msg="Deleting pgroup {0} failed.".format(module.params["name"])
+ )
+ else:
+ try:
+ array.destroy_pgroup(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Deleting pgroup {0} failed.".format(module.params["name"])
+ )
+ else:
+ try:
+ array.destroy_pgroup(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Deleting pgroup {0} failed.".format(module.params["name"])
+ )
+ if module.params["eradicate"]:
+ eradicate_pgroup(module, array)
+
+ module.exit_json(changed=changed)
+
+
+def recover_pgroup(module, array):
+ """Recover deleted protection group"""
+ changed = True
+ if not module.check_mode:
+ if ":" in module.params["name"]:
+ if "::" not in module.params["name"]:
+ try:
+ target = "".join(module.params["target"])
+ array.recover_pgroup(module.params["name"], on=target)
+ except Exception:
+ module.fail_json(
+ msg="Recover pgroup {0} failed.".format(module.params["name"])
+ )
+ else:
+ try:
+ array.recover_pgroup(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Recover pgroup {0} failed.".format(module.params["name"])
+ )
+ else:
+ try:
+ array.recover_pgroup(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="ecover pgroup {0} failed.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True, aliases=["pgroup"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ volume=dict(type="list", elements="str"),
+ host=dict(type="list", elements="str"),
+ hostgroup=dict(type="list", elements="str"),
+ target=dict(type="list", elements="str"),
+ safe_mode=dict(type="bool", default=False),
+ eradicate=dict(type="bool", default=False),
+ enabled=dict(type="bool", default=True),
+ rename=dict(type="str"),
+ )
+ )
+
+ mutually_exclusive = [["volume", "host", "hostgroup"]]
+ module = AnsibleModule(
+ argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True
+ )
+ if not HAS_PURESTORAGE and module.params["safe_mode"]:
+ module.fail_json(
+ msg="py-pure-client sdk is required to support 'safe_mode' parameter"
+ )
+
+ state = module.params["state"]
+ array = get_system(module)
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$")
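+    # For illustration: "pg-1" and "a" conform to this pattern; names ending
+    # in "-" or longer than 63 characters do not.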
+ if module.params["rename"]:
+ if not pattern.match(module.params["rename"]):
+ module.fail_json(
+ msg="Rename value {0} does not conform to naming convention".format(
+ module.params["rename"]
+ )
+ )
+ if not pattern.match(module.params["name"].split(":")[-1]):
+ module.fail_json(
+ msg="Protection Group name {0} does not conform to naming convention".format(
+ module.params["name"]
+ )
+ )
+ api_version = array._list_available_rest_versions()
+ if module.params["safe_mode"] and RETENTION_LOCK_VERSION not in api_version:
+ module.fail_json(
+ msg="API version does not support setting SafeMode on a protection group."
+ )
+ if ":" in module.params["name"] and OFFLOAD_API_VERSION not in api_version:
+ module.fail_json(msg="API version does not support offload protection groups.")
+ if "::" in module.params["name"] and AC_PG_API_VERSION not in api_version:
+ module.fail_json(
+ msg="API version does not support ActiveCluster protection groups."
+ )
+ if ":" in module.params["name"]:
+ if "::" in module.params["name"]:
+ pgname = module.params["name"].split("::")[1]
+ else:
+ pgname = module.params["name"].split(":")[1]
+ if not pattern.match(pgname):
+ module.fail_json(
+ msg="Protection Group name {0} does not conform to naming convention".format(
+ pgname
+ )
+ )
+ else:
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="Protection Group name {0} does not conform to naming convention".format(
+ module.params["name"]
+ )
+ )
+
+ pgroup = get_pgroup(module, array)
+ xpgroup = get_pending_pgroup(module, array)
+ if "::" in module.params["name"]:
+ if not get_pod(module, array):
+ module.fail_json(
+ msg="Pod {0} does not exist.".format(
+ module.params["name"].split("::")[0]
+ )
+ )
+
+ if module.params["host"]:
+ try:
+ for hst in module.params["host"]:
+ array.get_host(hst)
+ except Exception:
+ module.fail_json(msg="Host {0} not found".format(hst))
+
+ if module.params["hostgroup"]:
+ try:
+ for hstg in module.params["hostgroup"]:
+ array.get_hgroup(hstg)
+ except Exception:
+ module.fail_json(msg="Hostgroup {0} not found".format(hstg))
+
+ if pgroup and state == "present":
+ update_pgroup(module, array)
+ elif pgroup and state == "absent":
+ delete_pgroup(module, array)
+ elif xpgroup and state == "absent" and module.params["eradicate"]:
+ eradicate_pgroup(module, array)
+ elif (
+ not pgroup
+ and not xpgroup
+ and state == "present"
+ and not module.params["rename"]
+ ):
+ make_pgroup(module, array)
+ elif not pgroup and state == "present" and module.params["rename"]:
+ module.exit_json(changed=False)
+ elif xpgroup and state == "present":
+ recover_pgroup(module, array)
+ elif pgroup is None and state == "absent":
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py
new file mode 100644
index 000000000..dc0a488d4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py
@@ -0,0 +1,527 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_pgsched
+short_description: Manage protection groups replication schedules on Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+- Modify or delete protection groups replication schedules on Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the protection group.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether to set or delete the protection group schedule.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ schedule:
+ description:
+ - Which schedule to change.
+ type: str
+ choices: ['replication', 'snapshot']
+ required: true
+ enabled:
+ description:
+ - Enable the schedule being configured.
+ type: bool
+ default: true
+ replicate_at:
+ description:
+    - Specifies the preferred time as HH:MM:SS, using 24-hour clock, at which to replicate to targets.
+ type: int
+ blackout_start:
+ description:
+ - Specifies the time at which to suspend replication.
+    - Provide a time in 12-hour AM/PM format, e.g. 11AM
+ type: str
+ blackout_end:
+ description:
+ - Specifies the time at which to restart replication.
+    - Provide a time in 12-hour AM/PM format, e.g. 5PM
+ type: str
+ replicate_frequency:
+ description:
+ - Specifies the replication frequency in seconds.
+ - Range 900 - 34560000 (FA-405, //M10, //X10i and Cloud Block Store).
+ - Range 300 - 34560000 (all other arrays).
+ type: int
+ snap_at:
+ description:
+ - Specifies the preferred time as HH:MM:SS, using 24-hour clock, at which to generate snapshots.
+    - Only valid if I(snap_frequency) is an exact multiple of 86400, i.e. 1 day.
+ type: int
+ snap_frequency:
+ description:
+ - Specifies the snapshot frequency in seconds.
+ - Range available 300 - 34560000.
+ type: int
+ days:
+ description:
+ - Specifies the number of days to keep the I(per_day) snapshots beyond the
+      I(all_for) period before they are eradicated.
+    - Max retention period is 4000 days.
+ type: int
+ all_for:
+ description:
+ - Specifies the length of time, in seconds, to keep the snapshots on the
+ source array before they are eradicated.
+ - Range available 1 - 34560000.
+ type: int
+ per_day:
+ description:
+ - Specifies the number of I(per_day) snapshots to keep beyond the I(all_for) period.
+    - Maximum number is 1440.
+ type: int
+ target_all_for:
+ description:
+ - Specifies the length of time, in seconds, to keep the replicated snapshots on the targets.
+ - Range is 1 - 34560000 seconds.
+ type: int
+ target_per_day:
+ description:
+ - Specifies the number of I(per_day) replicated snapshots to keep beyond the I(target_all_for) period.
+    - Maximum number is 1440.
+ type: int
+ target_days:
+ description:
+ - Specifies the number of days to keep the I(target_per_day) replicated snapshots
+ beyond the I(target_all_for) period before they are eradicated.
+    - Max retention period is 4000 days.
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Update protection group snapshot schedule
+ purestorage.flasharray.purefa_pgsched:
+ name: foo
+ schedule: snapshot
+ enabled: true
+ snap_frequency: 86400
+ snap_at: 15:30:00
+ per_day: 5
+ all_for: 5
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update protection group replication schedule
+ purestorage.flasharray.purefa_pgsched:
+ name: foo
+ schedule: replication
+ enabled: true
+ replicate_frequency: 86400
+ replicate_at: 15:30:00
+ target_per_day: 5
+ target_all_for: 5
+ blackout_start: 2AM
+ blackout_end: 5AM
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete protection group snapshot schedule
+ purestorage.flasharray.purefa_pgsched:
+ name: foo
+ schedule: snapshot
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete protection group replication schedule
+ purestorage.flasharray.purefa_pgsched:
+ name: foo
+ schedule: replication
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
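+
+# Illustrative example (not part of the original examples): disable an existing
+# snapshot schedule without resetting its retention values.
+- name: Disable protection group snapshot schedule
+  purestorage.flasharray.purefa_pgsched:
+    name: foo
+    schedule: snapshot
+    enabled: false
+    fa_url: 10.10.10.2
+    api_token: e31060a7-21fc-e277-6240-25983c6c4592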
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def get_pending_pgroup(module, array):
+ """Get Protection Group"""
+ pgroup = None
+ if ":" in module.params["name"]:
+ for pgrp in array.list_pgroups(pending=True, on="*"):
+ if pgrp["name"] == module.params["name"] and pgrp["time_remaining"]:
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups(pending=True):
+ if pgrp["name"] == module.params["name"] and pgrp["time_remaining"]:
+ pgroup = pgrp
+ break
+
+ return pgroup
+
+
+def get_pgroup(module, array):
+ """Get Protection Group"""
+ pgroup = None
+ if ":" in module.params["name"]:
+ if "::" not in module.params["name"]:
+ for pgrp in array.list_pgroups(on="*"):
+ if pgrp["name"] == module.params["name"]:
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups():
+ if pgrp["name"] == module.params["name"]:
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups():
+ if pgrp["name"] == module.params["name"]:
+ pgroup = pgrp
+ break
+
+ return pgroup
+
+
+def _convert_to_minutes(hour):
+    """Convert a 12-hour AM/PM time, e.g. 11AM, to seconds from midnight"""
+    if hour[-2:] == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:] == "AM":
+ return int(hour[:-2]) * 3600
+ elif hour[-2:] == "PM" and hour[:2] == "12":
+ return 43200
+ return (int(hour[:-2]) + 12) * 3600
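+# Illustrative conversions (whole-hour 12-hour inputs, as the blackout_start
+# and blackout_end docs describe): "12AM" -> 0, "11AM" -> 39600,
+# "12PM" -> 43200, "5PM" -> 61200 seconds.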
+
+
+def update_schedule(module, array):
+ """Update Protection Group Schedule"""
+ changed = False
+ try:
+ schedule = array.get_pgroup(module.params["name"], schedule=True)
+ retention = array.get_pgroup(module.params["name"], retention=True)
+ if not schedule["replicate_blackout"]:
+ schedule["replicate_blackout"] = [{"start": 0, "end": 0}]
+ except Exception:
+ module.fail_json(
+ msg="Failed to get current schedule for pgroup {0}.".format(
+ module.params["name"]
+ )
+ )
+ current_repl = {
+ "replicate_frequency": schedule["replicate_frequency"],
+ "replicate_enabled": schedule["replicate_enabled"],
+ "target_days": retention["target_days"],
+ "replicate_at": schedule["replicate_at"],
+ "target_per_day": retention["target_per_day"],
+ "target_all_for": retention["target_all_for"],
+ "blackout_start": schedule["replicate_blackout"][0]["start"],
+ "blackout_end": schedule["replicate_blackout"][0]["end"],
+ }
+ current_snap = {
+ "days": retention["days"],
+ "snap_frequency": schedule["snap_frequency"],
+ "snap_enabled": schedule["snap_enabled"],
+ "snap_at": schedule["snap_at"],
+ "per_day": retention["per_day"],
+ "all_for": retention["all_for"],
+ }
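+    # The current schedule and retention settings are collected into dicts so
+    # that a single dict comparison against the requested values decides
+    # whether an update call is needed.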
+ if module.params["schedule"] == "snapshot":
+ if not module.params["snap_frequency"]:
+ snap_frequency = current_snap["snap_frequency"]
+ else:
+ if not 300 <= module.params["snap_frequency"] <= 34560000:
+ module.fail_json(
+ msg="Snap Frequency support is out of range (300 to 34560000)"
+ )
+ else:
+ snap_frequency = module.params["snap_frequency"]
+
+ if not module.params["snap_at"]:
+ snap_at = current_snap["snap_at"]
+ else:
+ snap_at = module.params["snap_at"]
+
+ if not module.params["days"]:
+ if isinstance(module.params["days"], int):
+ days = module.params["days"]
+ else:
+ days = current_snap["days"]
+ else:
+ if module.params["days"] > 4000:
+ module.fail_json(msg="Maximum value for days is 4000")
+ else:
+ days = module.params["days"]
+
+ if module.params["per_day"] is None:
+ per_day = current_snap["per_day"]
+ else:
+ if module.params["per_day"] > 1440:
+ module.fail_json(msg="Maximum value for per_day is 1440")
+ else:
+ per_day = module.params["per_day"]
+
+ if not module.params["all_for"]:
+ all_for = current_snap["all_for"]
+ else:
+ if module.params["all_for"] > 34560000:
+ module.fail_json(msg="Maximum all_for value is 34560000")
+ else:
+ all_for = module.params["all_for"]
+ new_snap = {
+ "days": days,
+ "snap_frequency": snap_frequency,
+ "snap_enabled": module.params["enabled"],
+ "snap_at": snap_at,
+ "per_day": per_day,
+ "all_for": all_for,
+ }
+ if current_snap != new_snap:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_pgroup(
+ module.params["name"], snap_enabled=module.params["enabled"]
+ )
+ array.set_pgroup(
+ module.params["name"],
+ snap_frequency=snap_frequency,
+ snap_at=snap_at,
+ )
+ array.set_pgroup(
+ module.params["name"],
+ days=days,
+ per_day=per_day,
+ all_for=all_for,
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change snapshot schedule for pgroup {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ if not module.params["replicate_frequency"]:
+ replicate_frequency = current_repl["replicate_frequency"]
+ else:
+ model = array.get(controllers=True)[0]["model"]
+ if "405" in model or "10" in model or "CBS" in model:
+ if not 900 <= module.params["replicate_frequency"] <= 34560000:
+ module.fail_json(
+ msg="Replication Frequency support is out of range (900 to 34560000)"
+ )
+ else:
+ replicate_frequency = module.params["replicate_frequency"]
+ else:
+ if not 300 <= module.params["replicate_frequency"] <= 34560000:
+ module.fail_json(
+ msg="Replication Frequency support is out of range (300 to 34560000)"
+ )
+ else:
+ replicate_frequency = module.params["replicate_frequency"]
+
+ if not module.params["replicate_at"]:
+ replicate_at = current_repl["replicate_at"]
+ else:
+ replicate_at = module.params["replicate_at"]
+
+ if not module.params["target_days"]:
+ if isinstance(module.params["target_days"], int):
+ target_days = module.params["target_days"]
+ else:
+ target_days = current_repl["target_days"]
+ else:
+ if module.params["target_days"] > 4000:
+ module.fail_json(msg="Maximum value for target_days is 4000")
+ else:
+ target_days = module.params["target_days"]
+
+ if not module.params["target_per_day"]:
+ if isinstance(module.params["target_per_day"], int):
+ target_per_day = module.params["target_per_day"]
+ else:
+ target_per_day = current_repl["target_per_day"]
+ else:
+ if module.params["target_per_day"] > 1440:
+ module.fail_json(msg="Maximum value for target_per_day is 1440")
+ else:
+ target_per_day = module.params["target_per_day"]
+
+ if not module.params["target_all_for"]:
+ target_all_for = current_repl["target_all_for"]
+ else:
+ if module.params["target_all_for"] > 34560000:
+ module.fail_json(msg="Maximum target_all_for value is 34560000")
+ else:
+ target_all_for = module.params["target_all_for"]
+ if not module.params["blackout_end"]:
+ blackout_end = current_repl["blackout_start"]
+ else:
+ blackout_end = _convert_to_minutes(module.params["blackout_end"])
+ if not module.params["blackout_start"]:
+ blackout_start = current_repl["blackout_start"]
+ else:
+ blackout_start = _convert_to_minutes(module.params["blackout_start"])
+
+ new_repl = {
+ "replicate_frequency": replicate_frequency,
+ "replicate_enabled": module.params["enabled"],
+ "target_days": target_days,
+ "replicate_at": replicate_at,
+ "target_per_day": target_per_day,
+ "target_all_for": target_all_for,
+ "blackout_start": blackout_start,
+ "blackout_end": blackout_end,
+ }
+ if current_repl != new_repl:
+ changed = True
+ if not module.check_mode:
+ blackout = {"start": blackout_start, "end": blackout_end}
+ try:
+ array.set_pgroup(
+ module.params["name"],
+ replicate_enabled=module.params["enabled"],
+ )
+ array.set_pgroup(
+ module.params["name"],
+ replicate_frequency=replicate_frequency,
+ replicate_at=replicate_at,
+ )
+ if blackout_start == 0:
+ array.set_pgroup(module.params["name"], replicate_blackout=None)
+ else:
+ array.set_pgroup(
+ module.params["name"], replicate_blackout=blackout
+ )
+ array.set_pgroup(
+ module.params["name"],
+ target_days=target_days,
+ target_per_day=target_per_day,
+ target_all_for=target_all_for,
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change replication schedule for pgroup {0}.".format(
+ module.params["name"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def delete_schedule(module, array):
+ """Delete, ie. disable, Protection Group Schedules"""
+ changed = False
+ try:
+ current_state = array.get_pgroup(module.params["name"], schedule=True)
+ if module.params["schedule"] == "replication":
+ if current_state["replicate_enabled"]:
+ changed = True
+ if not module.check_mode:
+ array.set_pgroup(module.params["name"], replicate_enabled=False)
+ array.set_pgroup(
+ module.params["name"],
+ target_days=0,
+ target_per_day=0,
+ target_all_for=1,
+ )
+ array.set_pgroup(
+ module.params["name"],
+ replicate_frequency=14400,
+ replicate_blackout=None,
+ )
+ else:
+ if current_state["snap_enabled"]:
+ changed = True
+ if not module.check_mode:
+ array.set_pgroup(module.params["name"], snap_enabled=False)
+ array.set_pgroup(
+ module.params["name"], days=0, per_day=0, all_for=1
+ )
+ array.set_pgroup(module.params["name"], snap_frequency=300)
+ except Exception:
+ module.fail_json(
+ msg="Deleting pgroup {0} {1} schedule failed.".format(
+ module.params["name"], module.params["schedule"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ schedule=dict(
+ type="str", required=True, choices=["replication", "snapshot"]
+ ),
+ blackout_start=dict(type="str"),
+ blackout_end=dict(type="str"),
+ snap_at=dict(type="int"),
+ replicate_at=dict(type="int"),
+ replicate_frequency=dict(type="int"),
+ snap_frequency=dict(type="int"),
+ all_for=dict(type="int"),
+ days=dict(type="int"),
+ per_day=dict(type="int"),
+ target_all_for=dict(type="int"),
+ target_per_day=dict(type="int"),
+ target_days=dict(type="int"),
+ enabled=dict(type="bool", default=True),
+ )
+ )
+
+ required_together = [["blackout_start", "blackout_end"]]
+
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ array = get_system(module)
+
+ pgroup = get_pgroup(module, array)
+ if module.params["snap_at"] and module.params["snap_frequency"]:
+ if not module.params["snap_frequency"] % 86400 == 0:
+ module.fail_json(
+ msg="snap_at not valid unless snapshot frequency is measured in days, ie. a multiple of 86400"
+ )
+ if pgroup and state == "present":
+ update_schedule(module, array)
+ elif pgroup and state == "absent":
+ delete_schedule(module, array)
+ elif pgroup is None:
+ module.fail_json(
+ msg="Specified protection group {0} does not exist.".format(
+ module.params["name"]
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py
new file mode 100644
index 000000000..822b0491f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py
@@ -0,0 +1,481 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_pgsnap
+version_added: '1.0.0'
+short_description: Manage protection group snapshots on Pure Storage FlashArrays
+description:
+- Create or delete protection group snapshots on Pure Storage FlashArray.
+- Recovery of replicated snapshots on the replica target array is enabled.
+- ActiveCluster and Volume Group protection groups are supported.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the source protection group.
+ type: str
+ required: true
+ suffix:
+ description:
+ - Suffix of snapshot name.
+    - Special case. If I(latest), the module will select the latest snapshot created in the group
+ type: str
+ state:
+ description:
+ - Define whether the protection group snapshot should exist or not.
+ Copy (added in 2.7) will create a full read/write clone of the
+ snapshot.
+ type: str
+ choices: [ absent, present, copy ]
+ default: present
+ eradicate:
+ description:
+ - Define whether to eradicate the snapshot on delete or leave in trash.
+ type: bool
+ default: false
+ restore:
+ description:
+ - Restore a specific volume from a protection group snapshot.
+ - The protection group name is not required. Only provide the name of the
+ volume to be restored.
+ type: str
+ overwrite:
+ description:
+ - Define whether to overwrite the target volume if it already exists.
+ type: bool
+ default: false
+ target:
+ description:
+ - Volume to restore a specified volume to.
+ - If not supplied this will default to the volume defined in I(restore)
+ type: str
+ offload:
+ description:
+ - Name of offload target on which the snapshot exists.
+ - This is only applicable for deletion and erasure of snapshots
+ type: str
+ now:
+ description:
+    - Whether to initiate a snapshot of the protection group immediately
+ type: bool
+ default: false
+ apply_retention:
+ description:
+ - Apply retention schedule settings to the snapshot
+ type: bool
+ default: false
+ remote:
+ description:
+    - Force immediate snapshot to remote targets
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create protection group snapshot foo.ansible
+ purestorage.flasharray.purefa_pgsnap:
+ name: foo
+ suffix: ansible
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate protection group snapshot named foo.snap
+ purestorage.flasharray.purefa_pgsnap:
+ name: foo
+ suffix: snap
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Restore volume data from local protection group snapshot named foo.snap to volume data2
+ purestorage.flasharray.purefa_pgsnap:
+ name: foo
+ suffix: snap
+ restore: data
+ target: data2
+ overwrite: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Restore remote protection group snapshot arrayA:pgname.snap.data to local copy
+ purestorage.flasharray.purefa_pgsnap:
+ name: arrayA:pgname
+ suffix: snap
+ restore: data
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Restore AC pod protection group snapshot pod1::pgname.snap.data to pod1::data2
+ purestorage.flasharray.purefa_pgsnap:
+ name: pod1::pgname
+ suffix: snap
+ restore: data
+ target: pod1::data2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Create snapshot of existing pgroup pgname with suffix and force immediate copy to remote targets
+ purestorage.flasharray.purefa_pgsnap:
+ name: pgname
+ suffix: force
+ now: true
+ apply_retention: true
+ remote: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete and eradicate snapshot named foo.snap on offload target bar from arrayA
+ purestorage.flasharray.purefa_pgsnap:
+ name: "arrayA:foo"
+ suffix: snap
+ offload: bar
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
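+
+# Illustrative example (not part of the original examples): the special suffix
+# value "latest" selects the most recent completed snapshot in the group.
+- name: Restore volume data from the latest completed snapshot of pgroup foo
+  purestorage.flasharray.purefa_pgsnap:
+    name: foo
+    suffix: latest
+    restore: data
+    fa_url: 10.10.10.2
+    api_token: e31060a7-21fc-e277-6240-25983c6c4592
+    state: copy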
+"""
+
+RETURN = r"""
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+from datetime import datetime
+
+OFFLOAD_API = "1.16"
+POD_SNAPSHOT = "2.4"
+
+
+def _check_offload(module, array):
+ try:
+ offload = array.get_offload(module.params["offload"])
+ if offload["status"] == "connected":
+ return True
+ return False
+ except Exception:
+ return None
+
+
+def get_pgroup(module, array):
+ """Return Protection Group or None"""
+ try:
+ return array.get_pgroup(module.params["name"])
+ except Exception:
+ return None
+
+
+def get_pgroupvolume(module, array):
+ """Return Protection Group Volume or None"""
+ try:
+ pgroup = array.get_pgroup(module.params["name"])
+ if "::" in module.params["name"]:
+ restore_volume = (
+ module.params["name"].split("::")[0] + "::" + module.params["restore"]
+ )
+ else:
+ restore_volume = module.params["restore"]
+ for volume in pgroup["volumes"]:
+ if volume == restore_volume:
+ return volume
+ except Exception:
+ return None
+
+
+def get_rpgsnapshot(module, array):
+ """Return iReplicated Snapshot or None"""
+ try:
+ snapname = (
+ module.params["name"]
+ + "."
+ + module.params["suffix"]
+ + "."
+ + module.params["restore"]
+ )
+ for snap in array.list_volumes(snap=True):
+ if snap["name"] == snapname:
+ return snapname
+ except Exception:
+ return None
+
+
+def get_offload_snapshot(module, array):
+ """Return Snapshot (active or deleted) or None"""
+ try:
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ for snap in array.get_pgroup(
+ module.params["name"], snap=True, on=module.params["offload"]
+ ):
+ if snap["name"] == snapname:
+ return snapname
+ except Exception:
+ return None
+
+
+def get_pgsnapshot(module, array):
+ """Return Snapshot (active or deleted) or None"""
+ try:
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ for snap in array.get_pgroup(module.params["name"], pending=True, snap=True):
+ if snap["name"] == snapname:
+ return snapname
+ except Exception:
+ return None
+
+
+def create_pgsnapshot(module, array):
+ """Create Protection Group Snapshot"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if (
+ module.params["now"]
+ and array.get_pgroup(module.params["name"])["targets"] is not None
+ ):
+ array.create_pgroup_snapshot(
+ source=module.params["name"],
+ suffix=module.params["suffix"],
+ snap=True,
+ apply_retention=module.params["apply_retention"],
+ replicate_now=module.params["remote"],
+ )
+ else:
+ array.create_pgroup_snapshot(
+ source=module.params["name"],
+ suffix=module.params["suffix"],
+ snap=True,
+ apply_retention=module.params["apply_retention"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Snapshot of pgroup {0} failed.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def restore_pgsnapvolume(module, array):
+ """Restore a Protection Group Snapshot Volume"""
+ api_version = array._list_available_rest_versions()
+ changed = True
+ if module.params["suffix"] == "latest":
+ all_snaps = array.get_pgroup(
+ module.params["name"], snap=True, transfer=True
+ ).reverse()
+ for snap in all_snaps:
+ if not snap["completed"]:
+ latest_snap = snap["name"]
+ break
+ try:
+ module.params["suffix"] = latest_snap.split(".")[1]
+ except NameError:
+ module.fail_json(msg="There is no completed snapshot available.")
+ if ":" in module.params["name"] and "::" not in module.params["name"]:
+ if get_rpgsnapshot(module, array) is None:
+ module.fail_json(
+ msg="Selected restore snapshot {0} does not exist in the Protection Group".format(
+ module.params["restore"]
+ )
+ )
+ else:
+ if get_pgroupvolume(module, array) is None:
+ module.fail_json(
+ msg="Selected restore volume {0} does not exist in the Protection Group".format(
+ module.params["restore"]
+ )
+ )
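+    # The restore source is the replicated snapshot volume named
+    # <pgroup>.<suffix>.<restore>, assembled below from the module parameters.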
+ volume = (
+ module.params["name"]
+ + "."
+ + module.params["suffix"]
+ + "."
+ + module.params["restore"]
+ )
+ if "::" in module.params["target"]:
+ target_pod_name = module.params["target"].split(":")[0]
+ if "::" in module.params["name"]:
+ source_pod_name = module.params["name"].split(":")[0]
+ else:
+ source_pod_name = ""
+ if source_pod_name != target_pod_name:
+ if (
+ len(array.get_pod(target_pod_name, mediator=True)["arrays"]) > 1
+ and POD_SNAPSHOT not in api_version
+ ):
+ module.fail_json(msg="Volume cannot be restored to a stretched pod")
+ if not module.check_mode:
+ try:
+ array.copy_volume(
+ volume, module.params["target"], overwrite=module.params["overwrite"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to restore {0} from pgroup {1}".format(
+ volume, module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_offload_snapshot(module, array):
+ """Delete Offloaded Protection Group Snapshot"""
+ changed = False
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ if ":" in module.params["name"] and module.params["offload"]:
+ if _check_offload(module, array):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.destroy_pgroup(snapname, on=module.params["offload"])
+ if module.params["eradicate"]:
+ try:
+ array.eradicate_pgroup(
+ snapname, on=module.params["offload"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to eradicate offloaded snapshot {0} on target {1}".format(
+ snapname, module.params["offload"]
+ )
+ )
+ except Exception:
+ pass
+ else:
+ module.fail_json(
+                msg="Offload target {0} does not exist or is not connected".format(
+ module.params["offload"]
+ )
+ )
+ else:
+ module.fail_json(msg="Protection Group name not in the correct format")
+
+ module.exit_json(changed=changed)
+
+
+def delete_pgsnapshot(module, array):
+ """Delete Protection Group Snapshot"""
+ changed = True
+ if not module.check_mode:
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ try:
+ array.destroy_pgroup(snapname)
+ if module.params["eradicate"]:
+ try:
+ array.eradicate_pgroup(snapname)
+ except Exception:
+ module.fail_json(
+ msg="Failed to eradicate pgroup {0}".format(snapname)
+ )
+ except Exception:
+ module.fail_json(msg="Failed to delete pgroup {0}".format(snapname))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ suffix=dict(type="str"),
+ restore=dict(type="str"),
+ offload=dict(type="str"),
+ overwrite=dict(type="bool", default=False),
+ target=dict(type="str"),
+ eradicate=dict(type="bool", default=False),
+ now=dict(type="bool", default=False),
+ apply_retention=dict(type="bool", default=False),
+ remote=dict(type="bool", default=False),
+ state=dict(
+ type="str", default="present", choices=["absent", "present", "copy"]
+ ),
+ )
+ )
+
+ required_if = [("state", "copy", ["suffix", "restore"])]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
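+    # Snapshot suffix rule: start and end with an alphanumeric character,
+    # hyphens allowed inside, at most 65 characters in total, and at least
+    # one letter or hyphen overall (a purely numeric suffix is rejected).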
+ pattern = re.compile("^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,63}[a-zA-Z0-9])?$")
+ state = module.params["state"]
+ if state == "present":
+ if module.params["suffix"] is None:
+ suffix = "snap-" + str(
+ (datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds()
+ )
+ module.params["suffix"] = suffix.replace(".", "")
+ else:
+ if not pattern.match(module.params["suffix"]):
+ module.fail_json(
+ msg="Suffix name {0} does not conform to suffix name rules".format(
+ module.params["suffix"]
+ )
+ )
+
+ if not module.params["target"] and module.params["restore"]:
+ module.params["target"] = module.params["restore"]
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if OFFLOAD_API not in api_version and module.params["offload"]:
+ module.fail_json(
+ msg="Minimum version {0} required for offload support".format(OFFLOAD_API)
+ )
+ pgroup = get_pgroup(module, array)
+ if pgroup is None:
+ module.fail_json(
+ msg="Protection Group {0} does not exist.".format(module.params["name"])
+ )
+ pgsnap = get_pgsnapshot(module, array)
+ if state != "absent" and module.params["offload"]:
+ module.fail_json(
+ msg="offload parameter not supported for state {0}".format(state)
+ )
+ elif state == "copy":
+ restore_pgsnapvolume(module, array)
+ elif state == "present" and not pgsnap:
+ create_pgsnapshot(module, array)
+ elif state == "present" and pgsnap:
+ module.exit_json(changed=False)
+ elif (
+ state == "absent"
+ and module.params["offload"]
+ and get_offload_snapshot(module, array)
+ ):
+ delete_offload_snapshot(module, array)
+ elif state == "absent" and pgsnap:
+ delete_pgsnapshot(module, array)
+ elif state == "absent" and not pgsnap:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py
new file mode 100644
index 000000000..b428b3e33
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_phonehome.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_phonehome
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashArray Phonehome
+description:
+- Enable or Disable Phonehome for a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of phonehome
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable Phonehome
+ purestorage.flasharray.purefa_phonehome:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable Phonehome
+ purestorage.flasharray.purefa_phonehome:
+    state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def enable_ph(module, array):
+    """Enable Phonehome"""
+ changed = False
+ if array.get_phonehome()["phonehome"] != "enabled":
+ try:
+ if not module.check_mode:
+ array.enable_phonehome()
+ changed = True
+ except Exception:
+ module.fail_json(msg="Enabling Phonehome failed")
+ module.exit_json(changed=changed)
+
+
+def disable_ph(module, array):
+    """Disable Phonehome"""
+ changed = False
+ if array.get_phonehome()["phonehome"] == "enabled":
+ try:
+ if not module.check_mode:
+ array.disable_phonehome()
+ changed = True
+ except Exception:
+        module.fail_json(msg="Disabling Phonehome failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ array = get_system(module)
+
+ if module.params["state"] == "present":
+ enable_ph(module, array)
+ else:
+ disable_ph(module, array)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py
new file mode 100644
index 000000000..75c4eb6c9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py
@@ -0,0 +1,664 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_pod
+short_description: Manage AC pods in Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+- Manage AC pods in a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the pod.
+ type: str
+ required: true
+ stretch:
+ description:
+    - The name of the array to stretch to/unstretch from. Must be synchronously replicated.
+    - To unstretch a pod from an array use state I(absent)
+    - You can only specify a remote array, i.e. you cannot unstretch a pod from the
+      current array and then restretch back to the current array.
+ - To restretch a pod you must perform this from the remaining array the pod
+ resides on.
+ type: str
+ failover:
+ description:
+    - The name of the array given priority to stay online if arrays lose
+      contact with each other.
+    - Options are any array in the cluster, or I(auto)
+ type: list
+ elements: str
+ state:
+ description:
+ - Define whether the pod should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the pod on delete or leave in trash.
+ type: bool
+ default: false
+ target:
+ description:
+ - Name of clone target pod.
+ type: str
+ mediator:
+ description:
+ - Name of the mediator to use for a pod
+ type: str
+ default: purestorage
+ promote:
+ description:
+    - Promote/demote any pod not in a stretched relationship.
+ - Demoting a pod will render it read-only.
+ required: false
+ type: bool
+ quiesce:
+ description:
+ - Quiesce/Skip quiesce when I(promote) is false and demoting an ActiveDR pod.
+ - Quiesce will ensure all local data has been replicated before demotion.
+    - Skipping quiesce loses all pending data yet to be replicated to the remote pod.
+    - Can only demote the pod if it is in an ActiveDR replica link relationship.
+ - This will default to True
+ required: false
+ type: bool
+ undo:
+ description:
+ - Use the I(undo-remote) pod when I(promote) is true and promoting an ActiveDR pod.
+ - This will default to True
+ required: false
+ type: bool
+ quota:
+ description:
+ - Logical quota limit of the pod in K, M, G, T or P units, or bytes.
+ type: str
+ version_added: '1.18.0'
+ ignore_usage:
+ description:
+ - Flag used to override checks for quota management
+ operations.
+ - If set to true, pod usage is not checked against the
+ quota_limits that are set.
+ - If set to false, the actual logical bytes in use are prevented
+ from exceeding the limits set on the pod.
+ - Client operations might be impacted.
+ - If the limit exceeds the quota, the operation is not allowed.
+ default: false
+ type: bool
+ version_added: '1.18.0'
+
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new pod named foo
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate pod named foo
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Set failover array for pod named foo
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ failover:
+ - array1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set mediator for pod named foo
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ mediator: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Stretch a pod named foo to array2
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ stretch: array2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Unstretch a pod named foo from array2
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ stretch: array2
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create clone of pod foo named bar
+ purestorage.flasharray.purefa_pod:
+ name: foo
+ target: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+
+POD_API_VERSION = "1.13"
+POD_QUOTA_VERSION = "2.23"
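+# POD_API_VERSION is the minimum REST version for pod support;
+# POD_QUOTA_VERSION gates pod quota management through the v2 API client.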
+
+
+def human_to_bytes(size):
+ """Given a human-readable byte string (e.g. 2G, 30M),
+ return the number of bytes. Will return 0 if the argument has
+ unexpected form.
+ """
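+    # e.g. human_to_bytes("2G") -> 2147483648, human_to_bytes("30M") -> 31457280;
+    # strings without a recognised unit suffix return 0.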
+ bytes = size[:-1]
+ unit = size[-1].upper()
+ if bytes.isdigit():
+ bytes = int(bytes)
+ if unit == "P":
+ bytes *= 1125899906842624
+ elif unit == "T":
+ bytes *= 1099511627776
+ elif unit == "G":
+ bytes *= 1073741824
+ elif unit == "M":
+ bytes *= 1048576
+ elif unit == "K":
+ bytes *= 1024
+ else:
+ bytes = 0
+ else:
+ bytes = 0
+ return bytes
+
+
+def get_pod(module, array):
+ """Return Pod or None"""
+ try:
+ return array.get_pod(module.params["name"])
+ except Exception:
+ return None
+
+
+def get_undo_pod(module, array):
+ """Return Undo Pod or None"""
+ try:
+ return array.get_pod(module.params["name"] + ".undo-demote", pending_only=True)
+ except Exception:
+ return None
+
+
+def get_target(module, array):
+ """Return Pod or None"""
+ try:
+ return array.get_pod(module.params["target"])
+ except Exception:
+ return None
+
+
+def get_destroyed_pod(module, array):
+ """Return Destroyed Volume or None"""
+ try:
+ return bool(
+ array.get_pod(module.params["name"], pending=True)["time_remaining"] != ""
+ )
+ except Exception:
+ return False
+
+
+def get_destroyed_target(module, array):
+ """Return Destroyed Volume or None"""
+ try:
+ return bool(
+ array.get_pod(module.params["target"], pending=True)["time_remaining"] != ""
+ )
+ except Exception:
+ return False
+
+
+def check_arrays(module, array):
+ """Check if array name provided are sync-replicated"""
+ good_arrays = []
+ good_arrays.append(array.get()["array_name"])
+ connected_arrays = array.list_array_connections()
+ for arr in range(0, len(connected_arrays)):
+ if connected_arrays[arr]["type"] == "sync-replication":
+ good_arrays.append(connected_arrays[arr]["array_name"])
+ if module.params["failover"] is not None:
+ if module.params["failover"] == ["auto"]:
+ failover_array = []
+ else:
+ failover_array = module.params["failover"]
+ if failover_array != []:
+ for arr in range(0, len(failover_array)):
+ if failover_array[arr] not in good_arrays:
+ module.fail_json(
+ msg="Failover array {0} is not valid.".format(
+ failover_array[arr]
+ )
+ )
+ if module.params["stretch"] is not None:
+ if module.params["stretch"] not in good_arrays:
+ module.fail_json(
+ msg="Stretch: Array {0} is not connected.".format(
+ module.params["stretch"]
+ )
+ )
+ return None
+
+
+def create_pod(module, array):
+ """Create Pod"""
+ changed = True
+ if module.params["target"]:
+        module.fail_json(msg="Cannot clone non-existent pod.")
+ if not module.check_mode:
+ try:
+ if module.params["failover"]:
+ array.create_pod(
+ module.params["name"], failover_list=module.params["failover"]
+ )
+ else:
+ array.create_pod(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Pod {0} creation failed.".format(module.params["name"])
+ )
+ if module.params["mediator"] != "purestorage":
+ try:
+ array.set_pod(module.params["name"], mediator=module.params["mediator"])
+ except Exception:
+ module.warn(
+ "Failed to communicate with mediator {0}, using default value".format(
+ module.params["mediator"]
+ )
+ )
+ if module.params["stretch"]:
+ current_array = array.get()["array_name"]
+ if module.params["stretch"] != current_array:
+ try:
+                array.add_pod(module.params["name"], module.params["stretch"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to stretch pod {0} to array {1}.".format(
+ module.params["name"], module.params["stretch"]
+ )
+ )
+ if module.params["quota"]:
+ arrayv6 = get_array(module)
+ res = arrayv6.patch_pods(
+ names=[module.params["name"]],
+ pod=flasharray.PodPatch(
+ quota_limit=human_to_bytes(module.params["quota"])
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to apply quota to pod {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def clone_pod(module, array):
+ """Create Pod Clone"""
+ changed = False
+ if get_target(module, array) is None:
+ if not get_destroyed_target(module, array):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.clone_pod(module.params["name"], module.params["target"])
+ except Exception:
+ module.fail_json(
+ msg="Clone pod {0} to pod {1} failed.".format(
+ module.params["name"], module.params["target"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Target pod {0} already exists but deleted.".format(
+ module.params["target"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def update_pod(module, array):
+ """Update Pod configuration"""
+ changed = False
+ current_config = array.get_pod(module.params["name"], failover_preference=True)
+ if module.params["failover"]:
+ current_failover = current_config["failover_preference"]
+ if current_failover == [] or sorted(module.params["failover"]) != sorted(
+ current_failover
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params["failover"] == ["auto"]:
+ if current_failover != []:
+ array.set_pod(module.params["name"], failover_preference=[])
+ else:
+ array.set_pod(
+ module.params["name"],
+ failover_preference=module.params["failover"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to set failover preference for pod {0}.".format(
+ module.params["name"]
+ )
+ )
+ current_config = array.get_pod(module.params["name"], mediator=True)
+ if current_config["mediator"] != module.params["mediator"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_pod(module.params["name"], mediator=module.params["mediator"])
+ except Exception:
+ module.warn(
+ "Failed to communicate with mediator {0}. Setting unchanged.".format(
+ module.params["mediator"]
+ )
+ )
+ if module.params["promote"] is not None:
+ if len(current_config["arrays"]) > 1:
+ module.fail_json(
+ msg="Promotion/Demotion not permitted. Pod {0} is stretched".format(
+ module.params["name"]
+ )
+ )
+ else:
+ if (
+ current_config["promotion_status"] == "demoted"
+ and module.params["promote"]
+ ):
+ try:
+ if module.params["undo"] is None:
+ module.params["undo"] = True
+ if current_config["promotion_status"] == "quiescing":
+ module.fail_json(
+                            msg="Cannot promote pod {0} as it is still quiescing".format(
+ module.params["name"]
+ )
+ )
+ elif module.params["undo"]:
+ changed = True
+ if not module.check_mode:
+ if get_undo_pod(module, array):
+ array.promote_pod(
+ module.params["name"],
+ promote_from=module.params["name"] + ".undo-demote",
+ )
+ else:
+ array.promote_pod(module.params["name"])
+ module.warn(
+ "undo-demote pod remaining for {0}. Consider eradicating this.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ array.promote_pod(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to promote pod {0}.".format(module.params["name"])
+ )
+ elif (
+ current_config["promotion_status"] != "demoted"
+ and not module.params["promote"]
+ ):
+ try:
+ if get_undo_pod(module, array):
+ module.fail_json(
+ msg="Cannot demote pod {0} due to associated undo-demote pod not being eradicated".format(
+ module.params["name"]
+ )
+ )
+ if module.params["quiesce"] is None:
+ module.params["quiesce"] = True
+ if current_config["link_target_count"] == 0:
+ changed = True
+ if not module.check_mode:
+ array.demote_pod(module.params["name"])
+ elif not module.params["quiesce"]:
+ changed = True
+ if not module.check_mode:
+ array.demote_pod(module.params["name"], skip_quiesce=True)
+ else:
+ changed = True
+ if not module.check_mode:
+ array.demote_pod(module.params["name"], quiesce=True)
+ except Exception:
+ module.fail_json(
+ msg="Failed to demote pod {0}.".format(module.params["name"])
+ )
+ if module.params["quota"]:
+ arrayv6 = get_array(module)
+ current_pod = list(arrayv6.get_pods(names=[module.params["name"]]).items)[0]
+ quota = human_to_bytes(module.params["quota"])
+ if current_pod.quota_limit != quota:
+ changed = True
+ if not module.check_mode:
+ res = arrayv6.patch_pods(
+ names=[module.params["name"]],
+ pod=flasharray.PodPatch(
+ quota_limit=quota, ignore_usage=module.params["ignore_usage"]
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update quota on pod {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def stretch_pod(module, array):
+ """Stretch/unstretch Pod configuration"""
+ changed = False
+ current_config = array.get_pod(module.params["name"], failover_preference=True)
+ if module.params["stretch"]:
+ current_arrays = []
+ for arr in range(0, len(current_config["arrays"])):
+ current_arrays.append(current_config["arrays"][arr]["name"])
+ if (
+ module.params["stretch"] not in current_arrays
+ and module.params["state"] == "present"
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.add_pod(module.params["name"], module.params["stretch"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to stretch pod {0} to array {1}.".format(
+ module.params["name"], module.params["stretch"]
+ )
+ )
+
+ if (
+ module.params["stretch"] in current_arrays
+ and module.params["state"] == "absent"
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.remove_pod(module.params["name"], module.params["stretch"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to unstretch pod {0} from array {1}.".format(
+ module.params["name"], module.params["stretch"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def delete_pod(module, array):
+ """Delete Pod"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.destroy_pod(module.params["name"])
+ if module.params["eradicate"]:
+ try:
+ array.eradicate_pod(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Eradicate pod {0} failed.".format(module.params["name"])
+ )
+ except Exception:
+ module.fail_json(msg="Delete pod {0} failed.".format(module.params["name"]))
+ module.exit_json(changed=changed)
+
+
+def eradicate_pod(module, array):
+ """Eradicate Deleted Pod"""
+ changed = True
+ if not module.check_mode:
+ if module.params["eradicate"]:
+ try:
+ array.eradicate_pod(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Eradication of pod {0} failed".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def recover_pod(module, array):
+ """Recover Deleted Pod"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.recover_pod(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Recovery of pod {0} failed".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ stretch=dict(type="str"),
+ target=dict(type="str"),
+ mediator=dict(type="str", default="purestorage"),
+ failover=dict(type="list", elements="str"),
+ promote=dict(type="bool"),
+ undo=dict(type="bool"),
+ quiesce=dict(type="bool"),
+ eradicate=dict(type="bool", default=False),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ quota=dict(type="str"),
+ ignore_usage=dict(type="bool", default=False),
+ )
+ )
+
+ mutually_exclusive = [
+ ["stretch", "failover"],
+ ["stretch", "eradicate"],
+ ["stretch", "mediator"],
+ ["target", "mediator"],
+ ["target", "stretch"],
+ ["target", "failover"],
+ ["target", "eradicate"],
+ ]
+
+ module = AnsibleModule(
+ argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ array = get_system(module)
+
+ api_version = array._list_available_rest_versions()
+ if POD_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(POD_API_VERSION)
+ )
+
+ if module.params["quota"] and POD_QUOTA_VERSION in api_version:
+ if not HAS_PURESTORAGE:
+ module.fail_json(
+                msg="py-pure-client sdk is required to support 'quota' parameter"
+ )
+ pod = get_pod(module, array)
+ destroyed = ""
+ if not pod:
+ destroyed = get_destroyed_pod(module, array)
+    # check_arrays() validates the failover and stretch parameters itself and
+    # is a no-op when neither is supplied, so call it unconditionally rather
+    # than comparing the failover list against the string "auto".
+    check_arrays(module, array)
+
+ if state == "present" and not pod:
+ create_pod(module, array)
+ elif pod and module.params["stretch"]:
+ stretch_pod(module, array)
+    elif state == "present" and pod and module.params["target"]:
+        clone_pod(module, array)
+ elif state == "present" and pod:
+ update_pod(module, array)
+ elif state == "absent" and pod and not module.params["stretch"]:
+ delete_pod(module, array)
+ elif state == "present" and destroyed:
+ recover_pod(module, array)
+ elif state == "absent" and destroyed:
+ eradicate_pod(module, array)
+ elif state == "absent" and not pod:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py
new file mode 100644
index 000000000..87ace4eb3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod_replica.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefa_pod_replica
+short_description: Manage ActiveDR pod replica links between Pure Storage FlashArrays
+version_added: '1.0.0'
+description:
+ - This module manages ActiveDR pod replica links between Pure Storage FlashArrays.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - ActiveDR source pod name
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or modifies a pod replica link
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ target_array:
+ description:
+ - Remote array name to create replica on.
+ required: false
+ type: str
+ target_pod:
+ description:
+ - Name of target pod
+ - Must not be the same as the local pod.
+ type: str
+ required: false
+ pause:
+ description:
+ - Pause/unpause a pod replica link
+ required: false
+ type: bool
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = """
+- name: Create new pod replica link from foo to bar on arrayB
+ purestorage.flasharray.purefa_pod_replica:
+ name: foo
+ target_array: arrayB
+ target_pod: bar
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Pause a pod replica link
+ purestorage.flasharray.purefa_pod_replica:
+ name: foo
+ pause: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete pod replica link
+  purestorage.flasharray.purefa_pod_replica:
+    name: foo
+    state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = """
+"""
+
+MIN_REQUIRED_API_VERSION = "1.19"
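+# REST 1.19 corresponds to Purity//FA 6.0, the release that introduced
+# ActiveDR pod replica links (see the version check in main()).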
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def get_local_pod(module, array):
+ """Return Pod or None"""
+ try:
+ return array.get_pod(module.params["name"])
+ except Exception:
+ return None
+
+
+def get_local_rl(module, array):
+ """Return Pod Replica Link or None"""
+ try:
+ rlinks = array.list_pod_replica_links()
+ for link in range(0, len(rlinks)):
+ if rlinks[link]["local_pod_name"] == module.params["name"]:
+ return rlinks[link]
+ return None
+ except Exception:
+ return None
+
+
+def _get_arrays(array):
+ """Get Connected Arrays"""
+ arrays = []
+ array_details = array.list_array_connections()
+ for arraycnt in range(0, len(array_details)):
+ arrays.append(array_details[arraycnt]["array_name"])
+ return arrays
+
+
+def update_rl(module, array, local_rl):
+    """Update Pod Replica Link"""
+ changed = False
+ if module.params["pause"] is not None:
+ if local_rl["status"] != "paused" and module.params["pause"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.pause_pod_replica_link(
+ local_pod_name=module.params["name"],
+ remote_pod_name=local_rl["remote_pod_name"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to pause replica link {0}.".format(
+ module.params["name"]
+ )
+ )
+ elif local_rl["status"] == "paused" and not module.params["pause"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.resume_pod_replica_link(
+ local_pod_name=module.params["name"],
+ remote_pod_name=local_rl["remote_pod_name"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to resume replica link {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_rl(module, array):
+ """Create Pod Replica Link"""
+ changed = True
+ if not module.params["target_pod"]:
+ module.fail_json(msg="target_pod required to create a new replica link.")
+ if not module.params["target_array"]:
+ module.fail_json(msg="target_array required to create a new replica link.")
+ try:
+ connected_arrays = array.list_array_connections()
+ if connected_arrays == []:
+ module.fail_json(msg="No connected arrays.")
+ else:
+ good_array = False
+ for conn_array in range(0, len(connected_arrays)):
+ if connected_arrays[conn_array]["array_name"] == module.params[
+ "target_array"
+ ] and connected_arrays[conn_array]["status"] in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ good_array = True
+ break
+ if not good_array:
+ module.fail_json(
+ msg="Target array {0} is not connected to the source array.".format(
+ module.params["target_array"]
+ )
+ )
+ else:
+ if not module.check_mode:
+ try:
+ array.create_pod_replica_link(
+ local_pod_name=module.params["name"],
+ remote_name=module.params["target_array"],
+ remote_pod_name=module.params["target_pod"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create replica link {0} to target array {1}".format(
+ module.params["name"], module.params["target_array"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create replica link for pod {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_rl(module, array, local_rl):
+ """Delete Pod Replica Link"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.delete_pod_replica_link(
+ module.params["name"], remote_pod_name=local_rl["remote_pod_name"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete replica link for pod {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ target_pod=dict(type="str"),
+ target_array=dict(type="str"),
+ pause=dict(type="bool"),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity v6.0.0 or higher required.")
+
+ local_pod = get_local_pod(module, array)
+ local_replica_link = get_local_rl(module, array)
+
+ if not local_pod:
+ module.fail_json(
+ msg="Selected local pod {0} does not exist.".format(module.params["name"])
+ )
+
+ if len(local_pod["arrays"]) > 1:
+ module.fail_json(
+ msg="Local Pod {0} is already stretched.".format(module.params["name"])
+ )
+
+ if local_replica_link:
+ if local_replica_link["status"] == "unhealthy":
+        module.fail_json(msg="Replica Link unhealthy - please check remote array")
+ if state == "present" and not local_replica_link:
+ create_rl(module, array)
+ elif state == "present" and local_replica_link:
+ update_rl(module, array, local_replica_link)
+ elif state == "absent" and local_replica_link:
+ delete_rl(module, array, local_replica_link)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py
new file mode 100644
index 000000000..37017e4df
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py
@@ -0,0 +1,1606 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_policy
+version_added: '1.5.0'
+short_description: Manage FlashArray File System Policies
+description:
+- Manage FlashArray file system policies for NFS, SMB and snapshot
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the policy
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the policy should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ policy:
+ description:
+ - The type of policy to use
+ choices: [ nfs, smb, snapshot, quota ]
+ required: true
+ type: str
+ enabled:
+ description:
+ - Define if policy is enabled or not
+ type: bool
+ default: true
+ smb_anon_allowed:
+ description:
+ - Specifies whether access to information is allowed for anonymous users
+ type: bool
+ default: false
+ client:
+ description:
+ - Specifies which SMB or NFS clients are given access
+    - Accepted notation: IP, IP mask, or hostname
+ type: str
+ smb_encrypt:
+ description:
+ - Specifies whether the remote client is required to use SMB encryption
+ type: bool
+ default: false
+ nfs_access:
+ description:
+ - Specifies access control for the export
+ choices: [ root-squash, no-root-squash, all-squash ]
+ type: str
+ default: no-root-squash
+ nfs_permission:
+ description:
+ - Specifies which read-write client access permissions are allowed for the export
+ choices: [ ro, rw ]
+ default: rw
+ type: str
+ user_mapping:
+ description:
+ - Defines if user mapping is enabled
+ type: bool
+ version_added: 1.14.0
+ snap_at:
+ description:
+ - Specifies the number of hours since midnight at which to take a snapshot
+ or the hour including AM/PM
+ - Can only be set on the rule with the smallest I(snap_every) value.
+ - Cannot be set if the I(snap_every) value is not measured in days.
+ - Can only be set for at most one rule in the same policy.
+ type: str
+ snap_every:
+ description:
+ - Specifies the interval between snapshots, in minutes.
+ - The value for all rules must be multiples of one another.
+ - Must be unique for each rule in the same policy.
+ - Value must be between 5 and 525600.
+ type: int
+ snap_keep_for:
+ description:
+ - Specifies the period that snapshots are retained before they are eradicated, in minutes.
+ - Cannot be less than the I(snap_every) value of the rule.
+ - Value must be unique for each rule in the same policy.
+ - Value must be between 5 and 525600.
+ type: int
+ snap_client_name:
+ description:
+ - The customizable portion of the client visible snapshot name.
+ type: str
+ snap_suffix:
+ description:
+ - The snapshot suffix name
+ - The suffix value can only be set for one rule in the same policy
+ - The suffix value can only be set on a rule with the same ``keep_for`` value and ``every`` value
+ - The suffix value can only be set on the rule with the largest ``keep_for`` value
+ - If not specified, defaults to a monotonically increasing number generated by the system.
+ type: str
+ version_added: 1.10.0
+ rename:
+ description:
+ - New name of policy
+ type: str
+ directory:
+ description:
+ - Directories to have the quota rule applied to.
+ type: list
+ elements: str
+ version_added: 1.9.0
+ quota_limit:
+ description:
+ - Logical space limit of the share in M, G, T or P units. See examples.
+ - If size is not set at filesystem creation time the filesystem size becomes unlimited.
+ - This value cannot be set to 0.
+ type: str
+ version_added: 1.9.0
+ quota_notifications:
+ description:
+ - Targets to notify when usage approaches the quota limit.
+ - The list of notification targets is a comma-separated string
+ - If not specified, notification targets are not assigned.
+ type: list
+ elements: str
+ choices: [ user, group ]
+ version_added: 1.9.0
+ quota_enforced:
+ description:
+ - Defines if the directory quota is enforced.
+ default: true
+ type: bool
+ ignore_usage:
+ description:
+ - Flag used to override checks for quota management
+ operations.
+ - If set to true, directory usage is not checked against the
+ quota_limits that are set.
+ - If set to false, the actual logical bytes in use are prevented
+ from exceeding the limits set on the directory.
+ - Client operations might be impacted.
+ - If the limit exceeds the quota, the client operation is not allowed.
+ default: false
+ type: bool
+ version_added: 1.9.0
+ anonuid:
+ description:
+ - The ID to which any users whose UID is affected by I(access) of
+ I(root-squash) or I(all-squash) will be mapped to.
+ - Clear using "".
+ type: str
+ default: "65534"
+ version_added: 1.14.0
+ anongid:
+ description:
+ - The ID to which any users whose GID is affected by I(access) of
+ I(root-squash) or I(all-squash) will be mapped to.
+ - This is ignored when I(user_mapping) is enabled.
+ - Clear using "".
+ type: str
+ default: "65534"
+ version_added: 1.14.0
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create an NFS policy with initial rule
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ nfs_access: root-squash
+ nfs_permission: ro
+ client: client1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create an empty NFS policy with no rules
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create an empty snapshot policy with no rules
+ purestorage.flasharray.purefa_policy:
+ name: snap1
+ policy: snapshot
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create an empty snapshot policy with single directory member
+ purestorage.flasharray.purefa_policy:
+ name: snap1
+ policy: snapshot
+ directory: "foo:bar"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable a policy
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add rule to existing NFS export policy
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ nfs_access: root-squash
+ nfs_permission: ro
+ client: client2
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add rule to existing SMB export policy
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: smb
+ smb_encrypt: true
+ smb_anon_allowed: false
+ client: client1
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add non-suffix rule to existing snapshot export policy
+ purestorage.flasharray.purefa_policy:
+ name: snap1
+ policy: snapshot
+ snap_client_name: foo
+ snap_every: 15
+ snap_keep_for: 1440
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Add suffix rule to existing snapshot export policy
+ purestorage.flasharray.purefa_policy:
+ name: snap1
+ policy: snapshot
+ snap_client_name: foo
+ snap_suffix: bar
+ snap_every: 1440
+ snap_keep_for: 1440
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete policy rule for a client
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ client: client2
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete policy
+ purestorage.flasharray.purefa_policy:
+ name: export1
+ policy: nfs
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create directory quota policy for directory bar
+ purestorage.flasharray.purefa_policy:
+ name: foo
+ directory:
+ - "foo:root"
+ - "bar:bin"
+ policy: quota
+ quota_limit: 10G
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete directory quota policy foo
+ purestorage.flasharray.purefa_policy:
+ name: foo
+ policy: quota
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create empty directory quota policy foo
+ purestorage.flasharray.purefa_policy:
+ name: foo
+ policy: quota
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Detach directory "foo:bar" from quota policy quota1
+ purestorage.flasharray.purefa_policy:
+ name: quota1
+ directory:
+ - "foo:bar"
+ state: absent
+ policy: quota
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Remove quota rule from quota policy foo
+ purestorage.flasharray.purefa_policy:
+ name: foo
+ policy: quota
+ quota_limit: 10G
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PACKAGING = True
+try:
+ from packaging import version
+except ImportError:
+ HAS_PACKAGING = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.3"
+MIN_QUOTA_API_VERSION = "2.7"
+MIN_SUFFIX_API_VERSION = "2.9"
+USER_MAP_VERSION = "2.15"
+ALL_SQUASH_VERSION = "2.16"
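+# Feature gates by REST API version: 2.3 for file system policies, 2.7 for
+# quota policies, 2.9 for snapshot suffixes, 2.15 for NFS user mapping and
+# 2.16 for all-squash and anonuid/anongid rule options.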
+
+
+def _human_to_bytes(size):
+ """Given a human-readable byte string (e.g. 2G, 30M),
+ return the number of bytes. Will return 0 if the argument has
+ unexpected form.
+ """
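+    # e.g. _human_to_bytes("10G") -> 10737418240 (the 10G quota_limit used in
+    # EXAMPLES above); malformed input returns 0.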
+ bytes = size[:-1]
+ unit = size[-1].upper()
+ if bytes.isdigit():
+ bytes = int(bytes)
+ if unit == "P":
+ bytes *= 1125899906842624
+ elif unit == "T":
+ bytes *= 1099511627776
+ elif unit == "G":
+ bytes *= 1073741824
+ elif unit == "M":
+ bytes *= 1048576
+ elif unit == "K":
+ bytes *= 1024
+ else:
+ bytes = 0
+ else:
+ bytes = 0
+ return bytes
+
+
+def _convert_to_millisecs(hour):
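+    # Convert a 12-hour clock string into milliseconds since midnight:
+    # "12AM" -> 0, "7AM" -> 25200000, "12PM" -> 43200000, "3PM" -> 54000000.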
+ if hour[-2:].upper() == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:].upper() == "AM":
+ return int(hour[:-2]) * 3600000
+ elif hour[-2:].upper() == "PM" and hour[:2] == "12":
+ return 43200000
+ return (int(hour[:-2]) + 12) * 3600000
+
+
+def rename_policy(module, array):
+ """Rename a file system policy"""
+ changed = False
+ target_exists = bool(
+ array.get_policies(names=[module.params["rename"]]).status_code == 200
+ )
+ if target_exists:
+ module.fail_json(
+ msg="Rename failed - Target policy {0} already exists".format(
+ module.params["rename"]
+ )
+ )
+ if not module.check_mode:
+ changed = True
+ if module.params["policy"] == "nfs":
+ res = array.patch_policies_nfs(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(name=module.params["rename"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename NFS policy {0} to {1}".format(
+ module.params["name"], module.params["rename"]
+ )
+ )
+ elif module.params["policy"] == "smb":
+ res = array.patch_policies_smb(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(name=module.params["rename"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename SMB policy {0} to {1}".format(
+ module.params["name"], module.params["rename"]
+ )
+ )
+ elif module.params["policy"] == "snapshot":
+ res = array.patch_policies_snapshot(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(name=module.params["rename"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename snapshot policy {0} to {1}".format(
+ module.params["name"], module.params["rename"]
+ )
+ )
+ else:
+ res = array.patch_policies_quota(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(name=module.params["rename"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename quota policy {0} to {1}".format(
+ module.params["name"], module.params["rename"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_policy(module, array):
+ """Delete a file system policy or rule within a policy"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ if module.params["policy"] == "nfs":
+ if not module.params["client"]:
+ res = array.delete_policies_nfs(names=[module.params["name"]])
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Deletion of NFS policy {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ rules = list(
+ array.get_policies_nfs_client_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client == module.params["client"]:
+ rule_name = rules[rule].name
+ break
+                    if rule_name:
+                        # Keep the full API response so its error message is
+                        # available on failure (a plain bool has no .errors).
+                        res = array.delete_policies_nfs_client_rules(
+                            policy_names=[module.params["name"]], names=[rule_name]
+                        )
+                        if res.status_code == 200:
+                            changed = True
+                        else:
+                            module.fail_json(
+                                msg="Failed to delete client {0} from NFS policy {1}. Error: {2}".format(
+                                    module.params["client"],
+                                    module.params["name"],
+                                    res.errors[0].message,
+                                )
+                            )
+ elif module.params["policy"] == "smb":
+ if not module.params["client"]:
+ res = array.delete_policies_smb(names=[module.params["name"]])
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Deletion of SMB policy {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ rules = list(
+ array.get_policies_smb_client_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client == module.params["client"]:
+ rule_name = rules[rule].name
+ break
+                    if rule_name:
+                        # As above, keep the response object so error details
+                        # are available if the deletion fails.
+                        res = array.delete_policies_smb_client_rules(
+                            policy_names=[module.params["name"]], names=[rule_name]
+                        )
+                        if res.status_code == 200:
+                            changed = True
+                        else:
+                            module.fail_json(
+                                msg="Failed to delete client {0} from SMB policy {1}. Error: {2}".format(
+                                    module.params["client"],
+                                    module.params["name"],
+                                    res.errors[0].message,
+                                )
+                            )
+ elif module.params["policy"] == "snapshot":
+ if not module.params["snap_client_name"] and not module.params["directory"]:
+ res = array.delete_policies_snapshot(names=[module.params["name"]])
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Deletion of Snapshot policy {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if module.params["directory"]:
+ dirs = []
+ old_dirs = []
+ current_dirs = list(
+ array.get_directories_policies_snapshot(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if current_dirs:
+ for current_dir in range(0, len(current_dirs)):
+ dirs.append(current_dirs[current_dir].member.name)
+ for old_dir in range(0, len(module.params["directory"])):
+ if module.params["directory"][old_dir] in dirs:
+ old_dirs.append(module.params["directory"][old_dir])
+ else:
+ old_dirs = module.params["directory"]
+ if old_dirs:
+ changed = True
+ for rem_dir in range(0, len(old_dirs)):
+ if not module.check_mode:
+ directory_removed = (
+ array.delete_directories_policies_snapshot(
+ member_names=[old_dirs[rem_dir]],
+ policy_names=module.params["name"],
+ )
+ )
+ if directory_removed.status_code != 200:
+ module.fail_json(
+ msg="Failed to remove directory from Snapshot policy {0}. Error: {1}".format(
+ module.params["name"],
+ directory_removed.errors[0].message,
+ )
+ )
+ if module.params["snap_client_name"]:
+ rules = list(
+ array.get_policies_snapshot_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client_name == module.params["snap_client_name"]:
+ rule_name = rules[rule].name
+ break
+                    if rule_name:
+                        # As above, keep the response object so error details
+                        # are available if the deletion fails.
+                        res = array.delete_policies_snapshot_rules(
+                            policy_names=[module.params["name"]], names=[rule_name]
+                        )
+                        if res.status_code == 200:
+                            changed = True
+                        else:
+                            module.fail_json(
+                                msg="Failed to delete client {0} from Snapshot policy {1}. Error: {2}".format(
+                                    module.params["snap_client_name"],
+                                    module.params["name"],
+                                    res.errors[0].message,
+                                )
+                            )
+ else:
+ if module.params["quota_limit"]:
+ quota_limit = _human_to_bytes(module.params["quota_limit"])
+ rules = list(
+ array.get_policies_quota_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ for rule in range(0, len(rules)):
+ if rules[rule].quota_limit == quota_limit:
+ if (
+ module.params["quota_enforced"] == rules[rule].enforced
+ and ",".join(module.params["quota_notifications"])
+ == rules[rule].notifications
+ ):
+ res = array.delete_policies_quota_rules(
+ policy_names=[module.params["name"]],
+ names=[rules[rule].name],
+ )
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Deletion of Quota rule failed. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ if module.params["directory"]:
+ members = list(
+ array.get_policies_quota_members(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if members:
+ for member in range(0, len(members)):
+ if members[member].member.name in module.params["directory"]:
+ res = array.delete_policies_quota_members(
+ policy_names=[module.params["name"]],
+ member_names=[members[member].member.name],
+ member_types="directories",
+ )
+ if res.status_code != 200:
+ module.fail_json(
+                                    msg="Deletion of Quota member {0} from policy {1} failed. Error: {2}".format(
+ members[member].member.name,
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ changed = True
+ if not module.params["quota_limit"] and not module.params["directory"]:
+ members = list(
+ array.get_policies_quota_members(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if members:
+ member_names = []
+ for member in range(0, len(members)):
+ member_names.append(members[member].member.name)
+ res = array.delete_policies_quota_members(
+ policy_names=[module.params["name"]],
+ member_names=member_names,
+ member_types="directories",
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Deletion of Quota members {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ res = array.delete_policies_quota(names=[module.params["name"]])
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Deletion of Quota policy {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_policy(module, array, all_squash):
+ """Create a file system export"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ if module.params["policy"] == "nfs":
+ created = array.post_policies_nfs(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
+ )
+
+ if created.status_code == 200:
+ policy = flasharray.PolicyNfsPost(
+ user_mapping_enabled=module.params["user_mapping"],
+ )
+ res = array.patch_policies_nfs(
+ names=[module.params["name"]], policy=policy
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set NFS policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if module.params["client"]:
+ if all_squash:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ access=module.params["nfs_access"],
+ anongid=module.params["anongid"],
+ anonuid=module.params["anonuid"],
+ client=module.params["client"],
+ permission=module.params["nfs_permission"],
+ )
+ else:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ access=module.params["nfs_access"],
+ client=module.params["client"],
+ permission=module.params["nfs_permission"],
+ )
+ rule = flasharray.PolicyRuleNfsClientPost(rules=[rules])
+ rule_created = array.post_policies_nfs_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for NFS policy {0}. Error: {1}".format(
+ module.params["name"], rule_created.errors[0].message
+ )
+ )
+ changed = True
+ else:
+ module.fail_json(
+ msg="Failed to create NFS policy {0}. Error: {1}".format(
+ module.params["name"], created.errors[0].message
+ )
+ )
+ elif module.params["policy"] == "smb":
+ created = array.post_policies_smb(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
+ )
+ if created.status_code == 200:
+ changed = True
+ if module.params["client"]:
+ rules = flasharray.PolicyrulesmbclientpostRules(
+ anonymous_access_allowed=module.params["smb_anon_allowed"],
+ client=module.params["client"],
+ smb_encryption_required=module.params["smb_encrypt"],
+ )
+ rule = flasharray.PolicyRuleSmbClientPost(rules=[rules])
+ rule_created = array.post_policies_smb_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for SMB policy {0}. Error: {1}".format(
+ module.params["name"], rule_created.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to create SMB policy {0}. Error: {1}".format(
+ module.params["name"], created.errors[0].message
+ )
+ )
+ elif module.params["policy"] == "snapshot":
+ if HAS_PACKAGING:
+ suffix_enabled = version.parse(
+ array.get_rest_version()
+ ) >= version.parse(MIN_SUFFIX_API_VERSION)
+ else:
+ suffix_enabled = False
+ created = array.post_policies_snapshot(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
+ )
+ if created.status_code == 200:
+ changed = True
+ if module.params["snap_client_name"]:
+ if module.params["snap_keep_for"] < module.params["snap_every"]:
+ module.fail_json(
+ msg="Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every)."
+ )
+ if module.params["snap_at"]:
+ if not module.params["snap_every"] % 1440 == 0:
+ module.fail_json(
+ msg="snap_at time can only be set if snap_every is multiple of 1440"
+ )
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
+ else:
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
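+                # snap_every and snap_keep_for are given in minutes, while the
+                # REST API expects milliseconds, e.g. (illustrative values)
+                # snap_every=1440 (daily) becomes every=86400000.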
+ rule = flasharray.PolicyRuleSnapshotPost(rules=[rules])
+ rule_created = array.post_policies_snapshot_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for Snapshot policy {0}. Error: {1}".format(
+ module.params["name"], rule_created.errors[0].message
+ )
+ )
+ if module.params["directory"]:
+ policies = flasharray.DirectoryPolicyPost(
+ policies=[
+ flasharray.DirectorypolicypostPolicies(
+ policy=flasharray.Reference(name=module.params["name"])
+ )
+ ]
+ )
+ directory_added = array.post_directories_policies_snapshot(
+ member_names=module.params["directory"], policies=policies
+ )
+ if directory_added.status_code != 200:
+ module.fail_json(
+ msg="Failed to add directory for Snapshot policy {0}. Error: {1}".format(
+ module.params["name"],
+ directory_added.errors[0].message,
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to create Snapshot policy {0}. Error: {1}".format(
+ module.params["name"], created.errors[0].message
+ )
+ )
+ else:
+ created = array.post_policies_quota(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
+ )
+ if created.status_code == 200:
+ changed = True
+ if module.params["quota_limit"]:
+ quota = _human_to_bytes(module.params["quota_limit"])
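+            # _human_to_bytes (defined earlier in this module) is assumed to
+            # turn a human-readable size such as "10G" into a byte count.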
+ rules = flasharray.PolicyrulequotapostRules(
+ enforced=module.params["quota_enforced"],
+ quota_limit=quota,
+ notifications=",".join(module.params["quota_notifications"]),
+ )
+ rule = flasharray.PolicyRuleQuotaPost(rules=[rules])
+ quota_created = array.post_policies_quota_rules(
+ policy_names=[module.params["name"]],
+ rules=rule,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if quota_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for Quota policy {0}. Error: {1}".format(
+ module.params["name"], quota_created.errors[0].message
+ )
+ )
+ if module.params["directory"]:
+ members = []
+ for mem in range(0, len(module.params["directory"])):
+ members.append(
+ flasharray.PolicymemberpostMembers(
+ member=flasharray.ReferenceWithType(
+ name=module.params["directory"][mem],
+ resource_type="directories",
+ )
+ )
+ )
+ member = flasharray.PolicyMemberPost(members=members)
+ members_created = array.post_policies_quota_members(
+ policy_names=[module.params["name"]],
+ members=member,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if members_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to add members to Quota policy {0}. Error: {1}".format(
+ module.params["name"],
+ members_created.errors[0].message,
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to create Quota policy {0}. Error: {1}".format(
+ module.params["name"], created.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_policy(module, array, api_version, all_squash):
+ """Update an existing policy including add/remove rules"""
+    # Track each aspect of the policy with its own flag; the flags are OR-ed
+    # together at the end of this function to produce the final 'changed'.
+    changed = changed_dir = changed_rule = changed_enable = False
+    changed_quota = changed_member = changed_user_map = False
+ if module.params["policy"] == "nfs":
+ try:
+ current_enabled = list(
+ array.get_policies_nfs(names=[module.params["name"]]).items
+ )[0].enabled
+ if USER_MAP_VERSION in api_version:
+ current_user_map = list(
+ array.get_policies_nfs(names=[module.params["name"]]).items
+ )[0].user_mapping_enabled
+ except Exception:
+ module.fail_json(
+ msg="Incorrect policy type specified for existing policy {0}".format(
+ module.params["name"]
+ )
+ )
+        # Only compare user mapping when the connected array reports it
+        # (USER_MAP_VERSION or later); avoids referencing an unset value.
+        if (
+            module.params["user_mapping"]
+            and USER_MAP_VERSION in api_version
+            and current_user_map != module.params["user_mapping"]
+        ):
+ changed_user_map = True
+ if not module.check_mode:
+ res = array.patch_policies_nfs(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyNfsPatch(
+ user_mapping_enabled=module.params["user_mapping"]
+ ),
+ )
+ if res.status_code != 200:
+                    module.fail_json(
+ msg="Failed to enable/disable User Mapping for NFS policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if current_enabled != module.params["enabled"]:
+ changed_enable = True
+ if not module.check_mode:
+ res = array.patch_policies_nfs(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(enabled=module.params["enabled"]),
+ )
+ if res.status_code != 200:
+                    module.fail_json(
+ msg="Failed to enable/disable NFS policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if module.params["client"]:
+ rules = list(
+ array.get_policies_nfs_client_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client == module.params["client"]:
+ rule_name = rules[rule].name
+ break
+ if not rule_name:
+ if all_squash:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ anongid=module.params["anongid"],
+ anonuid=module.params["anonuid"],
+ access=module.params["nfs_access"],
+ )
+ else:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ access=module.params["nfs_access"],
+ )
+ rule = flasharray.PolicyRuleNfsClientPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_nfs_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create new rule for NFS policy {0}. Error: {1}".format(
+ module.params["name"],
+ rule_created.errors[0].message,
+ )
+ )
+ else:
+ if all_squash:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ anongid=module.params["anongid"],
+ anonuid=module.params["anonuid"],
+ client=module.params["client"],
+ access=module.params["nfs_access"],
+ )
+ else:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ access=module.params["nfs_access"],
+ )
+ rule = flasharray.PolicyRuleNfsClientPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_nfs_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create new rule for SMB policy {0}. Error: {1}".format(
+ module.params["name"], rule_created.errors[0].message
+ )
+ )
+ elif module.params["policy"] == "smb":
+ try:
+ current_enabled = list(
+ array.get_policies_smb(names=[module.params["name"]]).items
+ )[0].enabled
+ except Exception:
+ module.fail_json(
+ msg="Incorrect policy type specified for existing policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if current_enabled != module.params["enabled"]:
+ changed_enable = True
+ if not module.check_mode:
+ res = array.patch_policies_smb(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(enabled=module.params["enabled"]),
+ )
+ if res.status_code != 200:
+                    module.fail_json(
+ msg="Failed to enable/disable SMB policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if module.params["client"]:
+ rules = list(
+ array.get_policies_smb_client_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client == module.params["client"]:
+ rule_name = rules[rule].name
+ break
+ if not rule_name:
+ rules = flasharray.PolicyrulesmbclientpostRules(
+ anonymous_access_allowed=module.params["smb_anon_allowed"],
+ client=module.params["client"],
+ smb_encryption_required=module.params["smb_encrypt"],
+ )
+ rule = flasharray.PolicyRuleSmbClientPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_smb_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create new rule for SMB policy {0}. Error: {1}".format(
+ module.params["name"],
+ rule_created.errors[0].message,
+ )
+ )
+ else:
+ rules = flasharray.PolicyrulesmbclientpostRules(
+ anonymous_access_allowed=module.params["smb_anon_allowed"],
+ client=module.params["client"],
+ smb_encryption_required=module.params["smb_encrypt"],
+ )
+ rule = flasharray.PolicyRuleSmbClientPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_smb_client_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to create new rule for SMB policy {0}. Error: {1}".format(
+ module.params["name"], rule_created.errors[0].message
+ )
+ )
+ elif module.params["policy"] == "snapshot":
+ if HAS_PACKAGING:
+ suffix_enabled = version.parse(array.get_rest_version()) >= version.parse(
+ MIN_SUFFIX_API_VERSION
+ )
+ else:
+ suffix_enabled = False
+ try:
+ current_enabled = list(
+ array.get_policies_snapshot(names=[module.params["name"]]).items
+ )[0].enabled
+ except Exception:
+ module.fail_json(
+ msg="Incorrect policy type specified for existing policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if current_enabled != module.params["enabled"]:
+ changed_enable = True
+ if not module.check_mode:
+ res = array.patch_policies_snapshot(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(enabled=module.params["enabled"]),
+ )
+ if res.status_code != 200:
+                    module.fail_json(
+ msg="Failed to enable/disable snapshot policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if module.params["directory"]:
+ dirs = []
+ new_dirs = []
+ current_dirs = list(
+ array.get_directories_policies_snapshot(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if current_dirs:
+ for current_dir in range(0, len(current_dirs)):
+ dirs.append(current_dirs[current_dir].member.name)
+ for new_dir in range(0, len(module.params["directory"])):
+ if module.params["directory"][new_dir] not in dirs:
+ changed_dir = True
+ new_dirs.append(module.params["directory"][new_dir])
+ else:
+ new_dirs = module.params["directory"]
+ if new_dirs:
+ policies = flasharray.DirectoryPolicyPost(
+ policies=[
+ flasharray.DirectorypolicypostPolicies(
+ policy=flasharray.Reference(name=module.params["name"])
+ )
+ ]
+ )
+ changed_dir = True
+ for add_dir in range(0, len(new_dirs)):
+ if not module.check_mode:
+ directory_added = array.post_directories_policies_snapshot(
+ member_names=[new_dirs[add_dir]], policies=policies
+ )
+ if directory_added.status_code != 200:
+ module.fail_json(
+ msg="Failed to add new directory to Snapshot policy {0}. Error: {1}".format(
+ module.params["name"],
+ directory_added.errors[0].message,
+ )
+ )
+ if module.params["snap_client_name"]:
+ if module.params["snap_at"]:
+                if module.params["snap_every"] % 1440 != 0:
+                    module.fail_json(
+                        msg="snap_at time can only be set if snap_every is a multiple of 1440"
+                    )
+ if module.params["snap_keep_for"] < module.params["snap_every"]:
+ module.fail_json(
+ msg="Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every)."
+ )
+ if (
+ module.params["snap_keep_for"] != module.params["snap_every"]
+ and module.params["snap_suffix"]
+ ):
+ module.fail_json(
+ msg="Suffix (snap_suufix) can only be applied when `snap_keep_for` and `snap_every` are equal."
+ )
+ rules = list(
+ array.get_policies_snapshot_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if rules:
+ rule_name = ""
+ for rule in range(0, len(rules)):
+ if rules[rule].client_name == module.params["snap_client_name"]:
+ rule_name = rules[rule].name
+ break
+ if not rule_name:
+ if module.params["snap_keep_for"] < module.params["snap_every"]:
+ module.fail_json(
+ msg="Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every)."
+ )
+ if module.params["snap_at"]:
+                    if module.params["snap_every"] % 1440 != 0:
+                        module.fail_json(
+                            msg="snap_at time can only be set if snap_every is a multiple of 1440"
+                        )
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
+ else:
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
+ rule = flasharray.PolicyRuleSnapshotPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_snapshot_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ err_no = len(rule_created.errors) - 1
+ module.fail_json(
+ msg="Failed to create new rule for Snapshot policy {0}. Error: {1}".format(
+ module.params["name"],
+ rule_created.errors[err_no].message,
+ )
+ )
+ else:
+ if module.params["snap_keep_for"] < module.params["snap_every"]:
+ module.fail_json(
+ msg="Retention period (snap_keep_for) cannot be less than snapshot interval (snap_every)."
+ )
+ if module.params["snap_at"]:
+                if module.params["snap_every"] % 1440 != 0:
+                    module.fail_json(
+                        msg="snap_at time can only be set if snap_every is a multiple of 1440"
+                    )
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ at=_convert_to_millisecs(module.params["snap_at"]),
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
+ else:
+ if suffix_enabled:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ suffix=module.params["snap_suffix"],
+ )
+ else:
+ rules = flasharray.PolicyrulesnapshotpostRules(
+ client_name=module.params["snap_client_name"],
+ every=module.params["snap_every"] * 60000,
+ keep_for=module.params["snap_keep_for"] * 60000,
+ )
+ rule = flasharray.PolicyRuleSnapshotPost(rules=[rules])
+ changed_rule = True
+ if not module.check_mode:
+ rule_created = array.post_policies_snapshot_rules(
+ policy_names=[module.params["name"]], rules=rule
+ )
+ if rule_created.status_code != 200:
+ err_no = len(rule_created.errors) - 1
+ module.fail_json(
+ msg="Failed to create new rule for Snapshot policy {0}. Error: {1}".format(
+ module.params["name"],
+ rule_created.errors[err_no].message,
+ )
+ )
+ else:
+ current_enabled = list(
+ array.get_policies_quota(names=[module.params["name"]]).items
+ )[0].enabled
+ if current_enabled != module.params["enabled"]:
+ changed_quota = True
+ if not module.check_mode:
+ res = array.patch_policies_quota(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(enabled=module.params["enabled"]),
+ )
+ if res.status_code != 200:
+                    module.fail_json(
+                        msg="Failed to enable/disable quota policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if module.params["directory"]:
+ current_members = list(
+ array.get_policies_quota_members(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if current_members:
+ if module.params["state"] == "absent":
+ for member in range(0, len(current_members)):
+ if (
+ current_members[member].member.name
+ in module.params["directory"]
+ ):
+ changed_member = True
+ if not module.check_mode:
+ res = array.delete_policies_quota_members(
+ policy_names=[module.params["name"]],
+ member_names=[current_members[member].member.name],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule {0} from quota policy {1}. Error: {2}".format(
+ current_members[member].member.name,
+ module.params["name"],
+                                        res.errors[0].message,
+ )
+ )
+ else:
+ members = []
+ cmembers = []
+ for cmem in range(0, len(current_members)):
+ cmembers.append(current_members[cmem].member.name)
+ mem_diff = list(set(module.params["directory"]) - set(cmembers))
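+                    # Only directories not already attached to the policy are
+                    # added; the set difference does not preserve the order of
+                    # the "directory" parameter.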
+ if mem_diff:
+ for mem in range(0, len(mem_diff)):
+ members.append(
+ flasharray.PolicymemberpostMembers(
+ member=flasharray.ReferenceWithType(
+ name=mem_diff[mem],
+ resource_type="directories",
+ )
+ )
+ )
+ member = flasharray.PolicyMemberPost(members=members)
+ changed_member = True
+ if not module.check_mode:
+ members_created = array.post_policies_quota_members(
+ policy_names=[module.params["name"]],
+ members=member,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if members_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to update members for Quota policy {0}. Error: {1}".format(
+ module.params["name"],
+ members_created.errors[0].message,
+ )
+ )
+ else:
+ members = []
+ for mem in range(0, len(module.params["directory"])):
+ members.append(
+ flasharray.PolicymemberpostMembers(
+ member=flasharray.ReferenceWithType(
+ name=module.params["directory"][mem],
+ resource_type="directories",
+ )
+ )
+ )
+ member = flasharray.PolicyMemberPost(members=members)
+ changed_member = True
+ if not module.check_mode:
+ members_created = array.post_policies_quota_members(
+ policy_names=[module.params["name"]],
+ members=member,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if members_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to update members for Quota policy {0}. Error: {1}".format(
+ module.params["name"],
+ members_created.errors[0].message,
+ )
+ )
+ if module.params["quota_limit"]:
+ quota = _human_to_bytes(module.params["quota_limit"])
+ current_rules = list(
+ array.get_policies_quota_rules(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if current_rules:
+ one_enforced = False
+ for check_rule in range(0, len(current_rules)):
+ if current_rules[check_rule].enforced:
+ one_enforced = True
+ for rule in range(0, len(current_rules)):
+ rule_exists = False
+ if not module.params["quota_notifications"]:
+ current_notifications = "none"
+ else:
+ current_notifications = ",".join(
+ module.params["quota_notifications"]
+ )
+ if bool(
+ (current_rules[rule].quota_limit == quota)
+ and (
+ current_rules[rule].enforced
+ == module.params["quota_enforced"]
+ )
+ and (current_rules[rule].notifications == current_notifications)
+ ):
+ rule_exists = True
+ break
+
+ if not rule_exists:
+ if module.params["quota_enforced"] and one_enforced:
+ module.fail_json(
+ msg="Only one enforced rule can be defined per policy"
+ )
+ rules = flasharray.PolicyrulequotapostRules(
+ enforced=module.params["quota_enforced"],
+ quota_limit=quota,
+ notifications=",".join(module.params["quota_notifications"]),
+ )
+ rule = flasharray.PolicyRuleQuotaPost(rules=[rules])
+ changed_quota = True
+ if not module.check_mode:
+ quota_created = array.post_policies_quota_rules(
+ policy_names=[module.params["name"]],
+ rules=rule,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if quota_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to add new rule to Quota policy {0}. Error: {1}".format(
+ module.params["name"],
+ quota_created.errors[0].message,
+ )
+ )
+ else:
+ rules = flasharray.PolicyrulequotapostRules(
+ enforced=module.params["quota_enforced"],
+ quota_limit=quota,
+ notifications=",".join(module.params["quota_notifications"]),
+ )
+ rule = flasharray.PolicyRuleQuotaPost(rules=[rules])
+ changed_quota = True
+ if not module.check_mode:
+ quota_created = array.post_policies_quota_rules(
+ policy_names=[module.params["name"]],
+ rules=rule,
+ ignore_usage=module.params["ignore_usage"],
+ )
+ if quota_created.status_code != 200:
+ module.fail_json(
+ msg="Failed to add rule to Quota policy {0}. Error: {1}".format(
+ module.params["name"], quota_created.errors[0].message
+ )
+ )
+
+ if (
+ changed_rule
+ or changed_enable
+ or changed_quota
+ or changed_member
+ or changed_dir
+ or changed_user_map
+ ):
+ changed = True
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ nfs_access=dict(
+ type="str",
+ default="no-root-squash",
+ choices=["root-squash", "no-root-squash", "all-squash"],
+ ),
+ nfs_permission=dict(type="str", default="rw", choices=["rw", "ro"]),
+ policy=dict(
+ type="str", required=True, choices=["nfs", "smb", "snapshot", "quota"]
+ ),
+ name=dict(type="str", required=True),
+ rename=dict(type="str"),
+ client=dict(type="str"),
+ enabled=dict(type="bool", default=True),
+ snap_at=dict(type="str"),
+ snap_every=dict(type="int"),
+ snap_keep_for=dict(type="int"),
+ snap_client_name=dict(type="str"),
+ snap_suffix=dict(type="str"),
+ smb_anon_allowed=dict(type="bool", default=False),
+ smb_encrypt=dict(type="bool", default=False),
+ ignore_usage=dict(type="bool", default=False),
+ quota_enforced=dict(type="bool", default=True),
+ quota_limit=dict(type="str"),
+ anongid=dict(type="str", default="65534"),
+ anonuid=dict(type="str", default="65534"),
+ quota_notifications=dict(
+ type="list", elements="str", choices=["user", "group"]
+ ),
+ user_mapping=dict(type="bool"),
+ directory=dict(type="list", elements="str"),
+ )
+ )
+
+ required_together = [["snap_keep_for", "snap_every"]]
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ if module.params["policy"] == "quota" and MIN_QUOTA_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supportedi for directory quotas. "
+ "Minimum version required: {0}".format(MIN_QUOTA_API_VERSION)
+ )
+ array = get_array(module)
+ state = module.params["state"]
+ if module.params["quota_notifications"]:
+ module.params["quota_notifications"].sort(reverse=True)
+        quota_notifications = []
+        for notification in module.params["quota_notifications"]:
+            if notification not in quota_notifications:
+                quota_notifications.append(notification)
+ module.params["quota_notifications"] = quota_notifications
+ else:
+ module.params["quota_notifications"] = []
+
+ if (
+ module.params["nfs_access"] == "all-squash"
+ and ALL_SQUASH_VERSION not in api_version
+ ):
+ module.fail_json(
+ msg="all-squash is not supported in this version of Purity//FA"
+ )
+
+ all_squash = ALL_SQUASH_VERSION in api_version
+ exists = bool(array.get_policies(names=[module.params["name"]]).status_code == 200)
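+    # A 200 from get_policies only proves a policy with this name exists; the
+    # per-type update functions re-validate that the type matches.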
+
+ if state == "present" and not exists:
+ create_policy(module, array, all_squash)
+ elif state == "present" and exists and module.params["rename"]:
+ rename_policy(module, array)
+ elif state == "present" and exists:
+ update_policy(module, array, api_version, all_squash)
+ elif state == "absent" and exists:
+ delete_policy(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py
new file mode 100644
index 000000000..37dd7ac6a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_proxy
+version_added: '1.0.0'
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+short_description: Configure FlashArray phonehome HTTPS proxy settings
+description:
+- Set or erase configuration for the HTTPS phonehome proxy settings.
+options:
+ state:
+ description:
+ - Set or delete proxy configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ host:
+ description:
+ - The proxy host name.
+ type: str
+ port:
+ description:
+ - The proxy TCP/IP port number.
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing proxy settings
+ purestorage.flasharray.purefa_proxy:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set proxy settings
+ purestorage.flasharray.purefa_proxy:
+ host: purestorage.com
+ port: 8080
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def delete_proxy(module, array):
+ """Delete proxy settings"""
+ changed = False
+ current_proxy = array.get(proxy=True)["proxy"]
+ if current_proxy != "":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set(proxy="")
+ except Exception:
+ module.fail_json(msg="Delete proxy settigs failed")
+ module.exit_json(changed=changed)
+
+
+def create_proxy(module, array):
+ """Set proxy settings"""
+ changed = False
+ current_proxy = array.get(proxy=True)
+ if current_proxy is not None:
+ new_proxy = (
+ "https://" + module.params["host"] + ":" + str(module.params["port"])
+ )
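+        # e.g. (illustrative values) host "proxy.example.com" and port 8080
+        # yield "https://proxy.example.com:8080"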
+ if new_proxy != current_proxy["proxy"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set(proxy=new_proxy)
+ except Exception:
+ module.fail_json(msg="Set phone home proxy failed.")
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ host=dict(type="str"),
+ port=dict(type="int"),
+ )
+ )
+
+ required_together = [["host", "port"]]
+
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ array = get_system(module)
+
+ if state == "absent":
+ delete_proxy(module, array)
+ elif state == "present":
+ create_proxy(module, array)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py
new file mode 100644
index 000000000..4899b0797
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_ra
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashArray Remote Assist
+description:
+- Enable or disable Remote Assist for a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of remote assist
+ - When set to I(enable) the RA port can be exposed using the
+ I(debug) module.
+ type: str
+ default: enable
+ choices: [ enable, disable ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable Remote Assist port
+  purestorage.flasharray.purefa_ra:
+    fa_url: 10.10.10.2
+    api_token: e31060a7-21fc-e277-6240-25983c6c4592
+  register: result
+
+- name: Show Remote Assist connection details
+  ansible.builtin.debug:
+    msg: "Remote Assist: {{ result['ra_info'] }}"
+
+- name: Disable Remote Assist port
+ purestorage.flasharray.purefa_ra:
+ state: disable
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def enable_ra(module, array):
+ """Enable Remote Assist"""
+ changed = False
+ ra_facts = {}
+    if array.get_remote_assist_status()["status"] not in ["connected", "enabled"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ ra_data = array.enable_remote_assist()
+ ra_facts["fa_ra"] = {"name": ra_data["name"], "port": ra_data["port"]}
+ except Exception:
+ module.fail_json(msg="Enabling Remote Assist failed")
+ else:
+ if not module.check_mode:
+ try:
+ ra_data = array.get_remote_assist_status()
+ ra_facts["fa_ra"] = {"name": ra_data["name"], "port": ra_data["port"]}
+ except Exception:
+ module.fail_json(msg="Getting Remote Assist failed")
+ module.exit_json(changed=changed, ra_info=ra_facts)
+
+
+def disable_ra(module, array):
+ """Disable Remote Assist"""
+ changed = False
+ if array.get_remote_assist_status()["status"] in ["connected", "enabled"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.disable_remote_assist()
+ except Exception:
+ module.fail_json(msg="Disabling Remote Assist failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="enable", choices=["enable", "disable"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ array = get_system(module)
+
+ if module.params["state"] == "enable":
+ enable_ra(module, array)
+ else:
+ disable_ra(module, array)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py
new file mode 100644
index 000000000..9d5fc7443
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_saml
+version_added: '1.12.0'
+short_description: Manage FlashArray SAML2 service and identity providers
+description:
+- Enable or disable FlashArray SAML2 providers
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the SAML2 identity provider (IdP)
+ type: str
+ required: true
+ state:
+ description:
+    - Define whether the identity provider should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ url:
+ description:
+ - The URL of the identity provider
+ type: str
+ array_url:
+ description:
+ - The URL of the FlashArray
+ type: str
+ metadata_url:
+ description:
+ - The URL of the identity provider metadata
+ type: str
+ enabled:
+ description:
+ - Defines the enabled state of the identity provider
+ default: false
+ type: bool
+ encrypt_asserts:
+ description:
+ - If set to true, SAML assertions will be encrypted by the identity provider
+ default: false
+ type: bool
+ sign_request:
+ description:
+ - If set to true, SAML requests will be signed by the service provider.
+ default: false
+ type: bool
+ x509_cert:
+ description:
+ - The X509 certificate that the service provider uses to verify the SAML
+ response signature from the identity provider
+ type: str
+ decryption_credential:
+ description:
+ - The credential used by the service provider to decrypt encrypted SAML assertions from the identity provider
+ type: str
+ signing_credential:
+ description:
+ - The credential used by the service provider to sign SAML requests
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create (disabled) SAML2 SSO with only metadata URL
+ purestorage.flasharray.purefa_saml:
+ name: myIDP
+ array_url: "https://10.10.10.2"
+ metadata_url: "https://myidp.acme.com/adfs/ls"
+ x509_cert: "{{lookup('file', 'x509_cert_file') }}"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Enable SAML2 SSO
+ purestorage.flasharray.purefa_saml:
+    name: myIDP
+ enabled: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete SAML2 SSO
+ purestorage.flasharray.purefa_saml:
+ state: absent
+ name: myIDP
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import (
+ Saml2Sso,
+ Saml2SsoPost,
+ Saml2SsoSp,
+ Saml2SsoIdp,
+ ReferenceNoId,
+ )
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.11"
+
+
+def delete_saml(module, array):
+ """Delete SSO SAML2 IdP"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.delete_sso_saml2_idps(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete SAML2 IdP {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_saml(module, array):
+ """Update SSO SAML2 IdP"""
+ changed = False
+ current_idp = list(array.get_sso_saml2_idps(names=[module.params["name"]]).items)[0]
+ old_idp = {
+ "array_url": current_idp.array_url,
+ "enabled": current_idp.enabled,
+ "sp_sign_cred": getattr(current_idp.sp.signing_credential, "name", None),
+ "sp_decrypt_cred": getattr(current_idp.sp.decryption_credential, "name", None),
+ "id_metadata": current_idp.idp.metadata_url,
+ "id_url": getattr(current_idp.idp, "url", None),
+ "id_sign_enabled": current_idp.idp.sign_request_enabled,
+ "id_encrypt_enabled": current_idp.idp.encrypt_assertion_enabled,
+ "id_cert": current_idp.idp.verification_certificate,
+ }
+ if module.params["url"]:
+ new_url = module.params["url"]
+ else:
+ new_url = old_idp["id_url"]
+ if module.params["array_url"]:
+ new_array_url = module.params["array_url"]
+ else:
+ new_array_url = old_idp["array_url"]
+ if module.params["enabled"] != old_idp["enabled"]:
+ new_enabled = module.params["enabled"]
+ else:
+ new_enabled = old_idp["enabled"]
+ if module.params["sign_request"] != old_idp["id_sign_enabled"]:
+ new_sign = module.params["sign_request"]
+ else:
+ new_sign = old_idp["id_sign_enabled"]
+ if module.params["encrypt_asserts"] != old_idp["id_encrypt_enabled"]:
+ new_encrypt = module.params["encrypt_asserts"]
+ else:
+ new_encrypt = old_idp["id_encrypt_enabled"]
+ if module.params["signing_credential"]:
+ new_sign_cred = module.params["signing_credential"]
+ else:
+ new_sign_cred = old_idp["sp_sign_cred"]
+ if module.params["decryption_credential"]:
+ new_decrypt_cred = module.params["decryption_credential"]
+ else:
+ new_decrypt_cred = old_idp["sp_decrypt_cred"]
+ if module.params["metadata_url"]:
+ new_meta_url = module.params["metadata_url"]
+ else:
+ new_meta_url = old_idp["id_metadata"]
+ if module.params["x509_cert"]:
+ new_cert = module.params["x509_cert"]
+ else:
+ new_cert = old_idp["id_cert"]
+ new_idp = {
+ "array_url": new_array_url,
+ "enabled": new_enabled,
+ "sp_sign_cred": new_sign_cred,
+ "sp_decrypt_cred": new_decrypt_cred,
+ "id_metadata": new_meta_url,
+ "id_sign_enabled": new_sign,
+ "id_encrypt_enabled": new_encrypt,
+ "id_url": new_url,
+ "id_cert": new_cert,
+ }
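+    # Idempotency: a PATCH is only issued when the merged settings differ from
+    # what the array currently reports.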
+ if old_idp != new_idp:
+ changed = True
+ if not module.check_mode:
+ sp = Saml2SsoSp(
+ decryption_credential=ReferenceNoId(name=new_idp["sp_decrypt_cred"]),
+ signing_credential=ReferenceNoId(name=new_idp["sp_sign_cred"]),
+ )
+ idp = Saml2SsoIdp(
+ url=new_idp["id_url"],
+ metadata_url=new_idp["id_metadata"],
+ sign_request_enabled=new_idp["id_sign_enabled"],
+ encrypt_assertion_enabled=new_idp["id_encrypt_enabled"],
+ verification_certificate=new_idp["id_cert"],
+ )
+ res = array.patch_sso_saml2_idps(
+ idp=Saml2Sso(
+ array_url=new_idp["array_url"],
+ idp=idp,
+ sp=sp,
+ enabled=new_idp["enabled"],
+ ),
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update SAML2 IdP {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_saml(module, array):
+ """Create SAML2 IdP"""
+ changed = True
+ if not module.check_mode:
+ sp = Saml2SsoSp(
+ decryption_credential=ReferenceNoId(
+ name=module.params["decryption_credential"]
+ ),
+ signing_credential=ReferenceNoId(name=module.params["signing_credential"]),
+ )
+ idp = Saml2SsoIdp(
+ url=module.params["url"],
+ metadata_url=module.params["metadata_url"],
+ sign_request_enabled=module.params["sign_request"],
+ encrypt_assertion_enabled=module.params["encrypt_asserts"],
+ verification_certificate=module.params["x509_cert"],
+ )
+        res = array.post_sso_saml2_idps(
+            idp=Saml2SsoPost(array_url=module.params["array_url"], idp=idp, sp=sp),
+            names=[module.params["name"]],
+        )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create SAML2 Identity Provider {0}. Error message: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if module.params["enabled"]:
+ res = array.patch_sso_saml2_idps(
+ idp=Saml2Sso(enabled=module.params["enabled"]),
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ array.delete_sso_saml2_idps(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to create SAML2 Identity Provider {0}. Error message: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str", required=True),
+ url=dict(type="str"),
+ array_url=dict(type="str"),
+ metadata_url=dict(type="str"),
+ x509_cert=dict(type="str", no_log=True),
+ signing_credential=dict(type="str"),
+ decryption_credential=dict(type="str"),
+ enabled=dict(type="bool", default=False),
+ encrypt_asserts=dict(type="bool", default=False),
+ sign_request=dict(type="bool", default=False),
+ )
+ )
+
+ required_if = [
+ ["encrypt_asserts", True, ["decryption_credential"]],
+ ["sign_request", True, ["signing_credential"]],
+ ]
+
+ module = AnsibleModule(
+ argument_spec, supports_check_mode=True, required_if=required_if
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ array = get_array(module)
+ state = module.params["state"]
+
+ try:
+ list(array.get_sso_saml2_idps(names=[module.params["name"]]).items)[0]
+ exists = True
+ except AttributeError:
+ exists = False
+ if not exists and state == "present":
+ create_saml(module, array)
+ elif exists and state == "present":
+ update_saml(module, array)
+ elif exists and state == "absent":
+ delete_saml(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py
new file mode 100644
index 000000000..f752cb950
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_smis
+version_added: '1.0.0'
+short_description: Enable or disable FlashArray SMI-S features
+description:
+- Enable or disable FlashArray SMI-S Provider and/or SLP
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ slp:
+ description:
+ - Enable/Disable Service Locator Protocol
+ - Ports used are TCP 427 and UDP 427
+ type: bool
+ default: true
+ smis:
+ description:
+ - Enable/Disable SMI-S Provider
+ - Port used is TCP 5989
+ type: bool
+ default: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable SMI-S and SLP
+ purestorage.flasharray.purefa_smis:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable SMI-S and SLP
+ purestorage.flasharray.purefa_smis:
+ smis: false
+ slp: false
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+
+
+def update_smis(module, array):
+ """Update SMI-S features"""
+ changed = smis_changed = False
+ try:
+ current = list(array.get_smi_s().items)[0]
+ except Exception:
+ module.fail_json(msg="Failed to get current SMI-S settings.")
+ slp_enabled = current.slp_enabled
+ wbem_enabled = current.wbem_https_enabled
+ if slp_enabled != module.params["slp"]:
+ slp_enabled = module.params["slp"]
+ smis_changed = True
+ if wbem_enabled != module.params["smis"]:
+ wbem_enabled = module.params["smis"]
+ smis_changed = True
+ if smis_changed:
+ smi_s = flasharray.Smis(
+ slp_enabled=slp_enabled, wbem_https_enabled=wbem_enabled
+ )
+ changed = True
+ if not module.check_mode:
+ try:
+ array.patch_smi_s(smi_s=smi_s)
+ except Exception:
+ module.fail_json(msg="Failed to change SMI-S settings.")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ smis=dict(type="bool", default=True),
+ slp=dict(type="bool", default=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ array = get_array(module)
+
+ update_smis(module, array)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py
new file mode 100644
index 000000000..d2c1a5e2b
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smtp.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_smtp
+version_added: '1.0.0'
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+short_description: Configure FlashArray SMTP settings
+description:
+- Set or erase configuration for the SMTP settings.
+- If username/password are set this will always force a change as there is
+  no way to see if the password is different from the current SMTP configuration.
+options:
+ state:
+ description:
+ - Set or delete SMTP configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ password:
+ description:
+ - The SMTP password.
+ type: str
+ user:
+ description:
+ - The SMTP username.
+ type: str
+ relay_host:
+ description:
+ - IPv4 or IPv6 address or FQDN. A port number may be appended.
+ type: str
+ sender_domain:
+ description:
+ - Domain name.
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing SMTP settings
+ purestorage.flasharray.purefa_smtp:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Set SMTP settings
+ purestorage.flasharray.purefa_smtp:
+ sender_domain: purestorage.com
+ password: account_password
+ user: smtp_account
+ relay_host: 10.2.56.78:2345
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def delete_smtp(module, array):
+ """Delete SMTP settings"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_smtp(sender_domain="", user_name="", password="", relay_host="")
+ except Exception:
+ module.fail_json(msg="Delete SMTP settigs failed")
+ module.exit_json(changed=changed)
+
+
+def create_smtp(module, array):
+ """Set SMTP settings"""
+ changed = changed_sender = changed_relay = changed_creds = False
+ current_smtp = array.get_smtp()
+ if (
+ module.params["sender_domain"]
+ and current_smtp["sender_domain"] != module.params["sender_domain"]
+ ):
+ changed_sender = True
+ if not module.check_mode:
+ try:
+ array.set_smtp(sender_domain=module.params["sender_domain"])
+ except Exception:
+ module.fail_json(msg="Set SMTP sender domain failed.")
+ if (
+ module.params["relay_host"]
+ and current_smtp["relay_host"] != module.params["relay_host"]
+ ):
+ changed_relay = True
+ if not module.check_mode:
+ try:
+ array.set_smtp(relay_host=module.params["relay_host"])
+ except Exception:
+ module.fail_json(msg="Set SMTP relay host failed.")
+ if module.params["user"]:
+ changed_creds = True
+ if not module.check_mode:
+ try:
+ array.set_smtp(
+ user_name=module.params["user"], password=module.params["password"]
+ )
+ except Exception:
+ module.fail_json(msg="Set SMTP username/password failed.")
+ changed = bool(changed_sender or changed_relay or changed_creds)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ sender_domain=dict(type="str"),
+ password=dict(type="str", no_log=True),
+ user=dict(type="str"),
+ relay_host=dict(type="str"),
+ )
+ )
+
+ required_together = [["user", "password"]]
+
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ array = get_system(module)
+
+ if state == "absent":
+ delete_smtp(module, array)
+ elif state == "present":
+ create_smtp(module, array)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py
new file mode 100644
index 000000000..db567a398
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py
@@ -0,0 +1,640 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_snap
+version_added: '1.0.0'
+short_description: Manage volume snapshots on Pure Storage FlashArrays
+description:
+- Create or delete volumes and volume snapshots on Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the source volume.
+ type: str
+ required: true
+ suffix:
+ description:
+ - Suffix of snapshot name.
+ - Not used during creation if I(offload) is provided.
+ type: str
+ target:
+ description:
+ - Name of target volume if creating from snapshot.
+ - Name of new snapshot suffix if renaming a snapshot
+ type: str
+ overwrite:
+ description:
+ - Define whether to overwrite existing volume when creating from snapshot.
+ type: bool
+ default: false
+ offload:
+ description:
+ - Only valid for Purity//FA 6.1 or higher
+ - Name of offload target for the snapshot.
+ - Target can be either another FlashArray or an Offload Target
+ - This is only applicable for creation, deletion and eradication of snapshots
+ - I(state) of I(copy) is not supported.
+ - I(suffix) is not supported for offload snapshots.
+ type: str
+ state:
+ description:
+ - Define whether the volume snapshot should exist or not.
+ choices: [ absent, copy, present, rename ]
+ type: str
+ default: present
+ eradicate:
+ description:
+ - Define whether to eradicate the snapshot on delete or leave in trash.
+ type: bool
+ default: false
+ ignore_repl:
+ description:
+ - Only valid for Purity//FA 6.1 or higher
+ - If set to true, allow destruction/eradication of snapshots in use by replication.
+ - If set to false, allow destruction/eradication of snapshots not in use by replication
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create snapshot foo.ansible
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: ansible
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create R/W clone foo_clone from snapshot foo.snap
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: snap
+ target: foo_clone
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Create R/W clone foo_clone from remote snapshot arrayB:foo.snap
+ purestorage.flasharray.purefa_snap:
+ name: arrayB:foo
+ suffix: snap
+ target: foo_clone
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Overwrite existing volume foo_clone with snapshot foo.snap
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: snap
+ target: foo_clone
+ overwrite: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: copy
+
+- name: Delete and eradicate snapshot named foo.snap
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: snap
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Rename snapshot foo.fred to foo.dave
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: fred
+ target: dave
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: rename
+
+- name: Create a remote volume snapshot on offload device arrayB
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ offload: arrayB
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete and eradicate a volume snapshot foo.1 on offload device arrayB
+ purestorage.flasharray.purefa_snap:
+ name: foo
+ suffix: 1
+ offload: arrayB
+ eradicate: true
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PUREERROR = True
+try:
+ from purestorage import PureHTTPError
+except ImportError:
+ HAS_PUREERROR = False
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+from datetime import datetime
+
+GET_SEND_API = "2.4"
+
+
+def _check_offload(module, array):
+ try:
+ offload = list(array.get_offloads(names=[module.params["offload"]]).items)[0]
+ if offload.status == "connected":
+ return True
+ return False
+ except Exception:
+ return False
+
+
+def _check_target(module, array):
+ try:
+ target = list(
+ array.get_array_connections(names=[module.params["offload"]]).items
+ )[0]
+ if target.status == "connected":
+ return True
+ return False
+ except Exception:
+ return False
+
+
+def _check_offload_snapshot(module, array):
+ """Return Remote Snapshot (active or deleted) or None"""
+ source_array = list(array.get_arrays().items)[0].name
+ snapname = (
+ source_array + ":" + module.params["name"] + "." + module.params["suffix"]
+ )
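+    # Remote snapshots are namespaced by the source array, e.g. (illustrative
+    # values) array "arrayA", volume "foo", suffix "snap" -> "arrayA:foo.snap".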
+ if _check_offload(module, array):
+ res = array.get_remote_volume_snapshots(
+ on=module.params["offload"], names=[snapname], destroyed=False
+ )
+ else:
+ res = array.get_volume_snapshots(names=[snapname], destroyed=False)
+ if res.status_code != 200:
+ return None
+ return list(res.items)[0]
+
+
+def get_volume(module, array):
+ """Return Volume or None"""
+ try:
+ return array.get_volume(module.params["name"])
+ except Exception:
+ return None
+
+
+def get_target(module, array):
+ """Return Volume or None"""
+ try:
+ return array.get_volume(module.params["target"])
+ except Exception:
+ return None
+
+
+def get_deleted_snapshot(module, array, arrayv6):
+ """Return Deleted Snapshot"""
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ if module.params["offload"]:
+ source_array = list(arrayv6.get_arrays().items)[0].name
+ full_snapname = source_array + ":" + snapname
+ if _check_offload(module, arrayv6):
+ res = arrayv6.get_remote_volume_snapshots(
+ on=module.params["offload"], names=[full_snapname], destroyed=True
+ )
+ else:
+ res = arrayv6.get_volume_snapshots(names=[snapname], destroyed=True)
+ if res.status_code == 200:
+ return list(res.items)[0].destroyed
+ else:
+ return False
+ else:
+ try:
+ return bool(
+ array.get_volume(snapname, snap=True, pending=True)[0]["time_remaining"]
+ != ""
+ )
+ except Exception:
+ return False
+
+
+def get_snapshot(module, array):
+ """Return Snapshot or None"""
+ try:
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ for snaps in array.get_volume(module.params["name"], snap=True, pending=False):
+ if snaps["name"] == snapname:
+ return True
+ except Exception:
+ return False
+
+
+def create_snapshot(module, array, arrayv6):
+ """Create Snapshot"""
+ changed = False
+ if module.params["offload"]:
+ module.params["suffix"] = None
+ changed = True
+ if not module.check_mode:
+ res = arrayv6.post_remote_volume_snapshots(
+ source_names=[module.params["name"]], on=module.params["offload"]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create remote snapshot for volume {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ remote_snap = list(res.items)[0].name
+ module.params["suffix"] = remote_snap.split(".")[1]
+ else:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.create_snapshot(
+ module.params["name"], suffix=module.params["suffix"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create snapshot for volume {0}".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed, suffix=module.params["suffix"])
+
+
+def create_from_snapshot(module, array):
+ """Create Volume from Snapshot"""
+ source = module.params["name"] + "." + module.params["suffix"]
+ tgt = get_target(module, array)
+ if tgt is None:
+ changed = True
+ if not module.check_mode:
+ array.copy_volume(source, module.params["target"])
+ elif tgt is not None and module.params["overwrite"]:
+ changed = True
+ if not module.check_mode:
+ array.copy_volume(
+ source, module.params["target"], overwrite=module.params["overwrite"]
+ )
+ elif tgt is not None and not module.params["overwrite"]:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def recover_snapshot(module, array, arrayv6):
+ """Recover Snapshot"""
+ changed = False
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ if module.params["offload"] and _check_offload(module, arrayv6):
+ source_array = list(arrayv6.get_arrays().items)[0].name
+ snapname = source_array + ":" + module.params["name"] + "." + module.params["suffix"]
+ changed = True
+ if not module.check_mode:
+ res = arrayv6.patch_remote_volume_snapshots(
+ names=[snapname],
+ on=module.params["offload"],
+ remote_volume_snapshot=flasharray.DestroyedPatchPost(destroyed=False),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to recover remote snapshot {0}".format(snapname)
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.recover_volume(snapname)
+ except Exception:
+ module.fail_json(msg="Recovery of snapshot {0} failed".format(snapname))
+ module.exit_json(changed=changed)
+
+
+def update_snapshot(module, array):
+ """Update Snapshot - basically just rename..."""
+ changed = True
+ if not module.check_mode:
+ current_name = module.params["name"] + "." + module.params["suffix"]
+ new_name = module.params["name"] + "." + module.params["target"]
+ res = array.patch_volume_snapshots(
+ names=[current_name],
+ volume_snapshot=flasharray.VolumeSnapshotPatch(name=new_name),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename {0} to {1}. Error: {2}".format(
+ current_name, new_name, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_snapshot(module, array, arrayv6):
+ """Delete Snapshot"""
+ changed = False
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ if module.params["offload"] and _check_offload(module, arrayv6):
+ source_array = list(arrayv6.get_arrays().items)[0].name
+ full_snapname = source_array + ":" + snapname
+ changed = True
+ if not module.check_mode:
+ res = arrayv6.patch_remote_volume_snapshots(
+ names=[full_snapname],
+ on=module.params["offload"],
+ volume_snapshot=flasharray.VolumeSnapshotPatch(destroyed=True),
+ replication_snapshot=module.params["ignore_repl"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete remote snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ if module.params["eradicate"]:
+ res = arrayv6.delete_remote_volume_snapshots(
+ names=[full_snapname],
+ on=module.params["offload"],
+ replication_snapshot=module.params["ignore_repl"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to eradicate remote snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ elif module.params["offload"] and _check_target(module, arrayv6):
+ changed = True
+ if not module.check_mode:
+ res = arrayv6.patch_volume_snapshots(
+ names=[snapname],
+ volume_snapshot=flasharray.DestroyedPatchPost(destroyed=True),
+ replication_snapshot=module.params["ignore_repl"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete remote snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ if module.params["eradicate"]:
+ res = arrayv6.delete_volume_snapshots(
+ names=[snapname], replication_snapshot=module.params["ignore_repl"]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to eradicate remote snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ api_version = array._list_available_rest_versions()
+ if GET_SEND_API in api_version:
+ module.warn("here")
+ res = arrayv6.patch_volume_snapshots(
+ names=[snapname],
+ volume_snapshot=flasharray.DestroyedPatchPost(destroyed=True),
+ replication_snapshot=module.params["ignore_repl"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete remote snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ if module.params["eradicate"]:
+ res = arrayv6.delete_volume_snapshots(
+ names=[snapname],
+ replication_snapshot=module.params["ignore_repl"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to eradicate remote snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ else:
+ try:
+ array.destroy_volume(snapname)
+ if module.params["eradicate"]:
+ try:
+ array.eradicate_volume(snapname)
+ except PureHTTPError as err:
+ module.fail_json(
+ msg="Error eradicating snapshot. Error: {0}".format(
+ err.text
+ )
+ )
+ except PureHTTPError as err:
+ module.fail_json(
+ msg="Error deleting snapshot. Error: {0}".format(err.text)
+ )
+ module.exit_json(changed=changed)
+
+
+def eradicate_snapshot(module, array, arrayv6):
+ """Eradicate snapshot"""
+ changed = True
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ if not module.check_mode:
+ if module.params["offload"] and _check_offload(module, arrayv6):
+ source_array = list(arrayv6.get_arrays().items)[0].name
+ full_snapname = source_array + ":" + snapname
+ res = arrayv6.delete_remote_volume_snapshots(
+ names=[full_snapname],
+ on=module.params["offload"],
+ replication_snapshot=module.params["ignore_repl"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to eradicate remote snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ elif module.params["offload"] and _check_target(module, arrayv6):
+ res = arrayv6.delete_volume_snapshots(
+ names=[snapname], replication_snapshot=module.params["ignore_repl"]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to eradicate remote snapshot {0}. Error: {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ else:
+ try:
+ array.eradicate_volume(snapname)
+ except Exception:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ suffix=dict(type="str"),
+ target=dict(type="str"),
+ offload=dict(type="str"),
+ ignore_repl=dict(type="bool", default=False),
+ overwrite=dict(type="bool", default=False),
+ eradicate=dict(type="bool", default=False),
+ state=dict(
+ type="str",
+ default="present",
+ choices=["absent", "copy", "present", "rename"],
+ ),
+ )
+ )
+
+ required_if = [("state", "copy", ["target", "suffix"])]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+ if not HAS_PUREERROR:
+ module.fail_json(msg="purestorage sdk is required for this module")
+ pattern1 = re.compile(
+ "^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,63}[a-zA-Z0-9])?$"
+ )
+ pattern2 = re.compile("^([1-9])([0-9]{0,63}[0-9])?$")
+
+ state = module.params["state"]
+ if module.params["suffix"] is None:
+ suffix = "snap-" + str(
+ (datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds()
+ )
+ module.params["suffix"] = suffix.replace(".", "")
+ else:
+ if not module.params["offload"]:
+ if not (
+ pattern1.match(module.params["suffix"])
+ or pattern2.match(module.params["suffix"])
+ ) and state not in [
+ "absent",
+ "rename",
+ ]:
+ module.fail_json(
+ msg="Suffix name {0} does not conform to suffix name rules".format(
+ module.params["suffix"]
+ )
+ )
+ if state == "rename" and module.params["target"] is not None:
+ if not pattern1.match(module.params["target"]):
+ module.fail_json(
+ msg="Suffix target {0} does not conform to suffix name rules".format(
+ module.params["target"]
+ )
+ )
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if GET_SEND_API not in api_version:
+ arrayv6 = None
+ if module.params["offload"]:
+ module.fail_json(
+ msg="Purity 6.1, or higher, is required to support single volume offload snapshots"
+ )
+ if state == "rename":
+ module.fail_json(
+ msg="Purity 6.1, or higher, is required to support snapshot rename"
+ )
+ else:
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ arrayv6 = get_array(module)
+ if module.params["offload"]:
+ if not _check_offload(module, arrayv6) and not _check_target(
+ module, arrayv6
+ ):
+ module.fail_json(
+ msg="Selected offload {0} not connected.".format(
+ module.params["offload"]
+ )
+ )
+ if (
+ state == "copy"
+ and module.params["offload"]
+ and not _check_target(module, arrayv6)
+ ):
+ module.fail_json(
+ msg="Snapshot copy is not supported when an offload target is defined"
+ )
+ destroyed = False
+ array_snap = False
+ offload_snap = False
+ volume = get_volume(module, array)
+ if module.params["offload"] and not _check_target(module, arrayv6):
+ offload_snap = _check_offload_snapshot(module, arrayv6)
+ if offload_snap is None:
+ offload_snap = False
+ else:
+ offload_snap = not offload_snap.destroyed
+ else:
+ array_snap = get_snapshot(module, array)
+ snap = array_snap or offload_snap
+
+ if not snap:
+ destroyed = get_deleted_snapshot(module, array, arrayv6)
+ if state == "present" and volume and not destroyed:
+ create_snapshot(module, array, arrayv6)
+ elif state == "present" and destroyed:
+ recover_snapshot(module, array, arrayv6)
+ elif state == "rename" and volume and snap:
+ update_snapshot(module, arrayv6)
+ elif state == "copy" and snap:
+ create_from_snapshot(module, array)
+ elif state == "absent" and snap and not destroyed:
+ delete_snapshot(module, array, arrayv6)
+ elif state == "absent" and destroyed and module.params["eradicate"]:
+ eradicate_snapshot(module, array, arrayv6)
+ elif state == "absent" and not snap:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py
new file mode 100644
index 000000000..b422f4835
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_snmp
+version_added: '1.0.0'
+short_description: Configure FlashArray SNMP Managers
+description:
+- Manage SNMP managers on a Pure Storage FlashArray.
+- Changing the version of a named SNMP manager is not supported.
+- This module is not idempotent and will always modify an
+ existing SNMP manager due to hidden parameters that cannot
+ be compared to the play parameters.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of SNMP Manager
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete SNMP manager
+ type: str
+ default: present
+ choices: [ absent, present ]
+ auth_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase of 8 - 32 characters.
+ auth_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Hash algorithm to use
+ choices: [ MD5, SHA ]
+ community:
+ type: str
+ description:
+ - SNMP v2c only. Manager community ID. Between 1 and 32 characters long.
+ host:
+ type: str
+ description:
+ - IPv4 or IPv6 address or FQDN to send trap messages to.
+ user:
+ type: str
+ description:
+ - SNMP v3 only. User ID recognized by the specified SNMP manager.
+ Must be between 1 and 32 characters.
+ version:
+ type: str
+ description:
+ - Version of SNMP protocol to use for the manager.
+ choices: [ v2c, v3 ]
+ default: v2c
+ notification:
+ type: str
+ description:
+ - Action to perform on event.
+ default: trap
+ choices: [ inform, trap ]
+ privacy_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase to encrypt SNMP messages.
+ Must be between 8 and 63 non-space ASCII characters.
+ privacy_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Encryption protocol to use
+ choices: [ AES, DES ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing SNMP manager
+ purestorage.flasharray.purefa_snmp:
+ name: manager1
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Create v2c SNMP manager
+ purestorage.flasharray.purefa_snmp:
+ name: manager1
+ community: public
+ host: 10.21.22.23
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Create v3 SNMP manager
+ purestorage.flasharray.purefa_snmp:
+ name: manager2
+ version: v3
+ user: snmpuser # a placeholder; user is required when version is v3
+ auth_protocol: MD5
+ auth_passphrase: password
+ host: 10.21.22.23
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Update existing SNMP manager
+ purestorage.flasharray.purefa_snmp:
+ name: manager1
+ community: private
+ host: 10.21.22.23 # host is required when version is v2c (the default)
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def update_manager(module, array):
+ """Update SNMP Manager"""
+ changed = False
+ try:
+ mgr = array.get_snmp_manager(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to get configuration for SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ if mgr["version"] != module.params["version"]:
+ module.fail_json(msg="Changing an SNMP managers version is not supported.")
+ elif module.params["version"] == "v2c":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_snmp_manager(
+ module.params["name"],
+ community=module.params["community"],
+ notification=module.params["notification"],
+ host=module.params["host"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ if module.params["auth_protocol"] and module.params["privacy_protocol"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_snmp_manager(
+ module.params["name"],
+ auth_passphrase=module.params["auth_passphrase"],
+ auth_protocol=module.params["auth_protocol"],
+ privacy_passphrase=module.params["privacy_passphrase"],
+ privacy_protocol=module.params["privacy_protocol"],
+ notification=module.params["notification"],
+ user=module.params["user"],
+ host=module.params["host"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ elif module.params["auth_protocol"] and not module.params["privacy_protocol"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_snmp_manager(
+ module.params["name"],
+ version=module.params["version"],
+ auth_passphrase=module.params["auth_passphrase"],
+ auth_protocol=module.params["auth_protocol"],
+ notification=module.params["notification"],
+ user=module.params["user"],
+ host=module.params["host"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ elif not module.params["auth_protocol"] and module.params["privacy_protocol"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_snmp_manager(
+ module.params["name"],
+ version=module.params["version"],
+ privacy_passphrase=module.params["privacy_passphrase"],
+ privacy_protocol=module.params["privacy_protocol"],
+ notification=module.params["notification"],
+ user=module.params["user"],
+ host=module.params["host"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ elif (
+ not module.params["auth_protocol"] and not module.params["privacy_protocol"]
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_snmp_manager(
+ module.params["name"],
+ version=module.params["version"],
+ notification=module.params["notification"],
+ user=module.params["user"],
+ host=module.params["host"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Invalid parameters selected in update. Please raise issue in Ansible GitHub"
+ )
+
+ module.exit_json(changed=changed)
+
+
+def delete_manager(module, array):
+ """Delete SNMP Manager"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.delete_snmp_manager(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Delete SNMP manager {0} failed".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_manager(module, array):
+ """Create SNMP Manager"""
+ changed = True
+ if not module.check_mode:
+ if module.params["version"] == "v2c":
+ try:
+ array.create_snmp_manager(
+ module.params["name"],
+ version=module.params["version"],
+ community=module.params["community"],
+ notification=module.params["notification"],
+ host=module.params["host"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ if module.params["auth_protocol"] and module.params["privacy_protocol"]:
+ try:
+ array.create_snmp_manager(
+ module.params["name"],
+ version=module.params["version"],
+ auth_passphrase=module.params["auth_passphrase"],
+ auth_protocol=module.params["auth_protocol"],
+ privacy_passphrase=module.params["privacy_passphrase"],
+ privacy_protocol=module.params["privacy_protocol"],
+ notification=module.params["notification"],
+ user=module.params["user"],
+ host=module.params["host"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ elif (
+ module.params["auth_protocol"] and not module.params["privacy_protocol"]
+ ):
+ try:
+ array.create_snmp_manager(
+ module.params["name"],
+ version=module.params["version"],
+ auth_passphrase=module.params["auth_passphrase"],
+ auth_protocol=module.params["auth_protocol"],
+ notification=module.params["notification"],
+ user=module.params["user"],
+ host=module.params["host"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ elif (
+ not module.params["auth_protocol"] and module.params["privacy_protocol"]
+ ):
+ try:
+ array.create_snmp_manager(
+ module.params["name"],
+ version=module.params["version"],
+ privacy_passphrase=module.params["privacy_passphrase"],
+ privacy_protocol=module.params["privacy_protocol"],
+ notification=module.params["notification"],
+ user=module.params["user"],
+ host=module.params["host"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ elif (
+ not module.params["auth_protocol"]
+ and not module.params["privacy_protocol"]
+ ):
+ try:
+ array.create_snmp_manager(
+ module.params["name"],
+ version=module.params["version"],
+ notification=module.params["notification"],
+ user=module.params["user"],
+ host=module.params["host"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Invalid parameters selected in create. Please raise issue in Ansible GitHub"
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ host=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ user=dict(type="str"),
+ notification=dict(type="str", choices=["inform", "trap"], default="trap"),
+ auth_passphrase=dict(type="str", no_log=True),
+ auth_protocol=dict(type="str", choices=["MD5", "SHA"]),
+ privacy_passphrase=dict(type="str", no_log=True),
+ privacy_protocol=dict(type="str", choices=["AES", "DES"]),
+ version=dict(type="str", default="v2c", choices=["v2c", "v3"]),
+ community=dict(type="str"),
+ )
+ )
+
+ required_together = [
+ ["auth_passphrase", "auth_protocol"],
+ ["privacy_passphrase", "privacy_protocol"],
+ ]
+ required_if = [
+ ["version", "v2c", ["community", "host"]],
+ ["version", "v3", ["host", "user"]],
+ ]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ state = module.params["state"]
+ array = get_system(module)
+ mgr_configured = False
+ mgrs = array.list_snmp_managers()
+ for mgr in range(0, len(mgrs)):
+ if mgrs[mgr]["name"] == module.params["name"]:
+ mgr_configured = True
+ break
+ if module.params["version"] == "v3":
+ if module.params["auth_passphrase"] and (
+ 8 > len(module.params["auth_passphrase"]) > 32
+ ):
+ module.fail_json(msg="auth_password must be between 8 and 32 characters")
+ if module.params["privacy_passphrase"] and not (
+ 8 <= len(module.params["privacy_passphrase"]) <= 63
+ ):
+ module.fail_json(msg="privacy_passphrase must be between 8 and 63 characters")
+ if state == "absent" and mgr_configured:
+ delete_manager(module, array)
+ elif mgr_configured and state == "present":
+ update_manager(module, array)
+ elif not mgr_configured and state == "present":
+ create_manager(module, array)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py
new file mode 100644
index 000000000..b9dc8ca94
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_snmp_agent
+version_added: '1.16.0'
+short_description: Configure the FlashArray SNMP Agent
+description:
+- Manage the I(localhost) SNMP Agent on a Pure Storage FlashArray.
+- This module is not idempotent and will always modify the SNMP Agent
+ due to hidden parameters that cannot be compared to the task parameters.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ type: str
+ description:
+ - Used to set or clear the SNMP v2c community string or the SNMP v3
+ auth and privacy protocols.
+ choices: [ absent, present ]
+ default: present
+ user:
+ type: str
+ description:
+ - SNMP v3 only. User ID which must be between 1 and 32 characters.
+ version:
+ type: str
+ description:
+ - Version of SNMP protocol to use for the manager.
+ choices: [ v2c, v3 ]
+ default: v2c
+ community:
+ type: str
+ description:
+ - SNMP v2c only. Manager community ID under which Purity//FA is to
+ communicate with the specified managers.
+ - To remove the string set I(state) to I(absent) with I(version)
+ set to I(v2c)
+ auth_passphrase:
+ type: str
+ description:
+ - SNMP v3 only. Passphrase used by Purity//FA to authenticate the
+ array with the specified managers.
+ - Must be between 8 and 63 non-space ASCII characters.
+ auth_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Hash algorithm to use
+ - To remove the privacy and auth protocols set I(state) to
+ I(absent) with I(version) set to I(v3)
+ choices: [ MD5, SHA ]
+ privacy_passphrase:
+ type: str
+ description:
+ - SNMP v3 only. Passphrase to encrypt SNMP messages.
+ Must be between 8 and 63 non-space ASCII characters.
+ privacy_protocol:
+ type: str
+ description:
+ - SNMP v3 only. Encryption protocol to use
+ - To remove the privacy and auth protocols set I(state) to
+ I(absent) with I(version) set to I(v3)
+ choices: [ AES, DES ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Clear SNMP agent v2c community string
+ purestorage.flasharray.purefa_snmp_agent:
+ version: v2c
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Clear SNMP agent v3 auth and privacy protocols
+ purestorage.flasharray.purefa_snmp_agent:
+ version: v3
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Update v2c SNMP agent
+ purestorage.flasharray.purefa_snmp_agent:
+ version: v2c
+ community: public
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Update v3 SNMP agent
+ purestorage.flasharray.purefa_snmp_agent:
+ version: v3
+ user: snmpuser # a placeholder; user is required when version is v3
+ auth_protocol: MD5
+ auth_passphrase: password
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.1"
+
+
+def update_agent(module, array, agent):
+ """Update SNMP Agent"""
+ changed = False
+ if module.params["version"] == "v2c":
+ changed = True
+ if not module.check_mode:
+ if module.params["state"] == "delete":
+ community = ""
+ elif module.params["state"] == "present" and module.params["community"]:
+ community = module.params["community"]
+ else:
+ community = ""
+ res = array.patch_snmp_agents(
+ snmp_agent=flasharray.SnmpAgentPatch(
+ name="localhost",
+ version="v2c",
+ v2c=flasharray.SnmpV2c(community=community),
+ )
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update SNMP agent.Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ if module.params["state"] == "delete":
+ changed = True
+ v3 = flasharray.SnmpV3Patch(
+ auth_protocol="",
+ privacy_protocol="",
+ user=module.params["user"],
+ )
+ elif module.params["auth_protocol"] and module.params["privacy_protocol"]:
+ changed = True
+ v3 = flasharray.SnmpV3Patch(
+ auth_passphrase=module.params["auth_passphrase"],
+ auth_protocol=module.params["auth_protocol"],
+ privacy_passphrase=module.params["privacy_passphrase"],
+ privacy_protocol=module.params["privacy_protocol"],
+ user=module.params["user"],
+ )
+ elif module.params["auth_protocol"] and not module.params["privacy_protocol"]:
+ changed = True
+ v3 = flasharray.SnmpV3Patch(
+ auth_passphrase=module.params["auth_passphrase"],
+ auth_protocol=module.params["auth_protocol"],
+ user=module.params["user"],
+ )
+ elif not module.params["auth_protocol"] and module.params["privacy_protocol"]:
+ changed = True
+ v3 = flasharray.SnmpV3Patch(
+ privacy_passphrase=module.params["privacy_passphrase"],
+ privacy_protocol=module.params["privacy_protocol"],
+ user=module.params["user"],
+ )
+ elif (
+ not module.params["auth_protocol"] and not module.params["privacy_protocol"]
+ ):
+ changed = True
+ v3 = flasharray.SnmpV3Patch(user=module.params["user"])
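+ # Note: only the protocol settings actually supplied are included in the
+ # patch; state=absent clears both protocols by sending empty strings.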
+
+ if not module.check_mode:
+ res = array.patch_snmp_agents(
+ snmp_agent=flasharray.SnmpAgentPatch(
+ name="localhost",
+ version=module.params["version"],
+ v3=v3,
+ )
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update SNMP agent.Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ user=dict(type="str"),
+ auth_passphrase=dict(type="str", no_log=True),
+ auth_protocol=dict(type="str", choices=["MD5", "SHA"]),
+ privacy_passphrase=dict(type="str", no_log=True),
+ privacy_protocol=dict(type="str", choices=["AES", "DES"]),
+ version=dict(type="str", default="v2c", choices=["v2c", "v3"]),
+ community=dict(type="str"),
+ )
+ )
+
+ required_together = [
+ ["auth_passphrase", "auth_protocol"],
+ ["privacy_passphrase", "privacy_protocol"],
+ ]
+ required_if = [
+ ["version", "v3", ["user"]],
+ ]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ array = get_array(module)
+
+ agent = list(array.get_snmp_agents().items)
+ if module.params["version"] == "v3":
+ if module.params["auth_passphrase"] and (
+ 8 > len(module.params["auth_passphrase"]) > 32
+ ):
+ module.fail_json(msg="auth_password must be between 8 and 32 characters")
+ if module.params["privacy_passphrase"] and not (
+ 8 <= len(module.params["privacy_passphrase"]) <= 63
+ ):
+ module.fail_json(msg="privacy_passphrase must be between 8 and 63 characters")
+ update_agent(module, array, agent)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py
new file mode 100644
index 000000000..c1199215f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py
@@ -0,0 +1,119 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["deprecated"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_sso
+version_added: '1.9.0'
+deprecated:
+ removed_in: '2.0.0'
+ why: Superseded by M(purestorage.flasharray.purefa_admin)
+ alternative: Use M(purestorage.flasharray.purefa_admin) instead.
+short_description: Configure Pure Storage FlashArray Single Sign-On
+description:
+- Enable or disable Single Sign-On (SSO) to give LDAP users the ability
+ to navigate seamlessly from Pure1 Manage to the current array through a
+ single login.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Enable or disable the array Single Sign-On from Pure1 Manage
+ default: present
+ type: str
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable SSO
+ purestorage.flasharray.purefa_sso:
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable SSO
+ purestorage.flasharray.purefa_sso:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import AdminSettings
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+SSO_API_VERSION = "2.2"
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ state = module.params["state"]
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ changed = False
+ if SSO_API_VERSION in api_version:
+ array = get_array(module)
+ current_sso = list(array.get_admins_settings().items)[0].single_sign_on_enabled
+ if (state == "present" and not current_sso) or (
+ state == "absent" and current_sso
+ ):
+ changed = True
+ if not module.check_mode:
+ res = array.patch_admins_settings(
+ admin_settings=AdminSettings(
+ single_sign_on_enabled=bool(state == "present")
+ )
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change Single Sign-On status. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(msg="Purity version does not support Single Sign-On")
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py
new file mode 100644
index 000000000..efce8db9e
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefa_subnet
+version_added: '1.0.0'
+short_description: Manage network subnets in a Pure Storage FlashArray
+description:
+ - This module manages the network subnets on a Pure Storage FlashArray.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Subnet name.
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete subnet.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ enabled:
+ description:
+ - Whether the subnet should be enabled or not.
+ default: true
+ type: bool
+ prefix:
+ description:
+ - Set the IPv4 or IPv6 address to be associated with the subnet.
+ required: false
+ type: str
+ gateway:
+ description:
+ - IPv4 or IPv6 address of subnet gateway.
+ required: false
+ type: str
+ mtu:
+ description:
+ - MTU size of the subnet. Range is 568 to 9000.
+ required: false
+ default: 1500
+ type: int
+ vlan:
+ description:
+ - VLAN ID. Range is 0 to 4094.
+ required: false
+ type: int
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = """
+- name: Create subnet subnet100
+ purestorage.flasharray.purefa_subnet:
+ name: subnet100
+ vlan: 100
+ gateway: 10.21.200.1
+ prefix: "10.21.200.0/24"
+ mtu: 9000
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Disable subnet subnet100
+ purestorage.flasharray.purefa_subnet:
+ name: subnet100
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Delete subnet subnet100
+ purestorage.flasharray.purefa_subnet:
+ name: subnet100
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+"""
+
+RETURN = """
+"""
+
+try:
+ from netaddr import IPNetwork
+
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def _get_subnet(module, array):
+ """Return subnet or None"""
+ subnet = {}
+ try:
+ subnet = array.get_subnet(module.params["name"])
+ except Exception:
+ return None
+ return subnet
+
+
+def update_subnet(module, array, subnet):
+ """Modify subnet settings"""
+ changed = False
+ current_state = {
+ "mtu": subnet["mtu"],
+ "vlan": subnet["vlan"],
+ "prefix": subnet["prefix"],
+ "gateway": subnet["gateway"],
+ }
+ if not module.params["prefix"]:
+ prefix = subnet["prefix"]
+ else:
+ if module.params["gateway"] and module.params["gateway"] not in IPNetwork(
+ module.params["prefix"]
+ ):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ elif (
+ not module.params["gateway"]
+ and subnet["gateway"]
+ and subnet["gateway"] not in IPNetwork(module.params["prefix"])
+ ):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ prefix = module.params["prefix"]
+ if not module.params["vlan"]:
+ vlan = subnet["vlan"]
+ else:
+ if not 0 <= module.params["vlan"] <= 4094:
+ module.fail_json(
+ msg="VLAN {0} is out of range (0 to 4094)".format(module.params["vlan"])
+ )
+ else:
+ vlan = module.params["vlan"]
+ if not module.params["mtu"]:
+ mtu = subnet["mtu"]
+ else:
+ if not 568 <= module.params["mtu"] <= 9000:
+ module.fail_json(
+ msg="MTU {0} is out of range (568 to 9000)".format(module.params["mtu"])
+ )
+ else:
+ mtu = module.params["mtu"]
+ if not module.params["gateway"]:
+ gateway = subnet["gateway"]
+ else:
+ if module.params["gateway"] not in IPNetwork(prefix):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ gateway = module.params["gateway"]
+ new_state = {"prefix": prefix, "mtu": mtu, "gateway": gateway, "vlan": vlan}
+ if new_state != current_state:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_subnet(
+ subnet["name"],
+ prefix=new_state["prefix"],
+ mtu=new_state["mtu"],
+ vlan=new_state["vlan"],
+ gateway=new_state["gateway"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change settings for subnet {0}.".format(
+ subnet["name"]
+ )
+ )
+ if subnet["enabled"] != module.params["enabled"]:
+ if module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.enable_subnet(subnet["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to enable subnet {0}.".format(subnet["name"])
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.disable_subnet(subnet["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to disable subnet {0}.".format(subnet["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_subnet(module, array):
+ """Create subnet"""
+ changed = True
+ if not module.params["prefix"]:
+ module.fail_json(msg="Prefix required when creating subnet.")
+ else:
+ if module.params["gateway"] and module.params["gateway"] not in IPNetwork(
+ module.params["prefix"]
+ ):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ prefix = module.params["prefix"]
+ if module.params["vlan"]:
+ if not 0 <= module.params["vlan"] <= 4094:
+ module.fail_json(
+ msg="VLAN {0} is out of range (0 to 4094)".format(module.params["vlan"])
+ )
+ else:
+ vlan = module.params["vlan"]
+ else:
+ vlan = 0
+ if module.params["mtu"]:
+ if not 568 <= module.params["mtu"] <= 9000:
+ module.fail_json(
+ msg="MTU {0} is out of range (568 to 9000)".format(module.params["mtu"])
+ )
+ else:
+ mtu = module.params["mtu"]
+ if module.params["gateway"]:
+ if module.params["gateway"] not in IPNetwork(prefix):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ gateway = module.params["gateway"]
+ else:
+ gateway = ""
+ if not module.check_mode:
+ try:
+ array.create_subnet(
+ module.params["name"],
+ prefix=prefix,
+ mtu=mtu,
+ vlan=vlan,
+ gateway=gateway,
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create subnet {0}.".format(module.params["name"])
+ )
+ if module.params["enabled"]:
+ if not module.check_mode:
+ try:
+ array.enable_subnet(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to enable subnet {0}.".format(module.params["name"])
+ )
+ else:
+ if not module.check_mode:
+ try:
+ array.disable_subnet(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to disable subnet {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_subnet(module, array):
+ """Delete subnet"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.delete_subnet(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete subnet {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ prefix=dict(type="str"),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ gateway=dict(type="str"),
+ enabled=dict(type="bool", default=True),
+ mtu=dict(type="int", default=1500),
+ vlan=dict(type="int"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_NETADDR:
+ module.fail_json(msg="netaddr module is required")
+ pattern = re.compile(r"[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$")
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="name must be between 1 and 63 characters in length and begin and end "
+ "with a letter or number. The name must include at least one letter or '-'."
+ )
+ state = module.params["state"]
+ array = get_system(module)
+ subnet = _get_subnet(module, array)
+ if state == "present" and not subnet:
+ create_subnet(module, array)
+ if state == "present" and subnet:
+ update_subnet(module, array, subnet)
+ elif state == "absent" and subnet:
+ delete_subnet(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py
new file mode 100644
index 000000000..adb385ca4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_syslog
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray syslog settings
+description:
+- Configure syslog configuration for Pure Storage FlashArrays.
+- Add or delete an individual syslog server from the existing
+ list of servers.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete syslog servers configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ protocol:
+ description:
+ - Protocol which server uses
+ required: true
+ type: str
+ choices: [ tcp, tls, udp ]
+ port:
+ description:
+ - Port on which the server is listening. If no port is specified,
+ the system will use 514.
+ type: str
+ address:
+ description:
+ - Syslog server address.
+ This field supports IPv4, IPv6 or FQDN.
+ An invalid IP address will cause the module to fail.
+ No validation is performed for FQDNs.
+ type: str
+ required: true
+ name:
+ description:
+ - A user-specified name.
+ The name must be locally unique and cannot be changed.
+ - Only applicable with FlashArrays running Purity//FA 6.0 or higher.
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Delete existing syslog server entries
+ purestorage.flasharray.purefa_syslog:
+ address: syslog1.com
+ protocol: tcp
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set array syslog servers
+ purestorage.flasharray.purefa_syslog:
+ state: present
+ address: syslog1.com
+ protocol: udp
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
+SYSLOG_NAME_API = "2.4"
+
+
+def delete_syslog(module, array):
+ """Delete Syslog Server"""
+ changed = False
+ noport_address = module.params["protocol"] + "://" + module.params["address"]
+
+ if module.params["port"]:
+ full_address = noport_address + ":" + module.params["port"]
+ else:
+ full_address = noport_address
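+ # Illustrative values: protocol "udp", address "syslog1.com" and port
+ # "514" give "udp://syslog1.com:514"; with no port, "udp://syslog1.com".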
+
+ address_list = array.get(syslogserver=True)["syslogserver"]
+
+ if address_list and full_address in address_list:
+ changed = True
+ if not module.check_mode:
+ address_list.remove(full_address)
+ try:
+ array.set(syslogserver=address_list)
+ except Exception:
+ module.fail_json(
+ msg="Failed to remove syslog server: {0}".format(full_address)
+ )
+
+ module.exit_json(changed=changed)
+
+
+def add_syslog(module, array, arrayv6):
+ """Add Syslog Server"""
+ changed = False
+ noport_address = module.params["protocol"] + "://" + module.params["address"]
+
+ if module.params["port"]:
+ full_address = noport_address + ":" + module.params["port"]
+ else:
+ full_address = noport_address
+
+ address_list = array.get(syslogserver=True)["syslogserver"]
+ exists = False
+
+ if address_list:
+ for address in range(0, len(address_list)):
+ if address_list[address] == full_address:
+ exists = True
+ break
+ if not exists:
+ if arrayv6 and module.params["name"]:
+ changed = True
+ if not module.check_mode:
+ res = arrayv6.post_syslog_servers(
+ names=[module.params["name"]],
+ syslog_server=flasharray.SyslogServer(
+ name=module.params["name"], uri=full_address
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Adding syslog server {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ try:
+ address_list.append(full_address)
+ array.set(syslogserver=address_list)
+ except Exception:
+ module.fail_json(
+ msg="Failed to add syslog server: {0}".format(full_address)
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(type="str", required=True),
+ protocol=dict(type="str", choices=["tcp", "tls", "udp"], required=True),
+ port=dict(type="str"),
+ name=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ array = get_system(module)
+
+ if module.params["name"] and not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ api_version = array._list_available_rest_versions()
+
+ if SYSLOG_NAME_API in api_version and module.params["name"]:
+ arrayv6 = get_array(module)
+ else:
+ arrayv6 = None
+
+ if module.params["state"] == "absent":
+ delete_syslog(module, array)
+ else:
+ add_syslog(module, array, arrayv6)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py
new file mode 100644
index 000000000..fce6dffa3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_syslog_settings
+version_added: '1.10.0'
+short_description: Manage FlashArray syslog servers settings
+description:
+- Manage FlashArray syslog servers settings
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ severity:
+ description:
+ - Logging severity threshold for which events will be forwarded to the
+ configured syslog servers.
+ default: info
+ choices: [ debug, info, notice ]
+ type: str
+ ca_certificate:
+ type: str
+ description:
+ - The text of the CA certificate for configured syslog servers.
+ - Includes the "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----" lines.
+ - Does not exceed 3000 characters in length.
+ - To delete the existing CA certificate use the special string `DELETE`.
+ tls_audit:
+ type: bool
+ default: true
+ description:
+ - Whether messages necessary to audit TLS negotiations
+ performed by the array are forwarded to the syslog servers.
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Change syslog servers settings
+ purestorage.flasharray.purefa_syslog_settings:
+ tls_audit: false
+ severity: debug
+ ca_certificate: "{{lookup('file', 'example.crt') }}"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete existing CA certificate for syslog servers settings
+ purestorage.flasharray.purefa_syslog_settings:
+ ca_certificate: DELETE
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ get_array,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.9"
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ severity=dict(
+ type="str",
+ default="info",
+ choices=["info", "debug", "notice"],
+ ),
+ tls_audit=dict(type="bool", default=True),
+ ca_certificate=dict(type="str", no_log=True),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="Purity//FA version not supported. Minimum version required: 6.2.0"
+ )
+
+ array = get_array(module)
+ changed = cert_change = False
+ if module.params["ca_certificate"] and len(module.params["ca_certificate"]) > 3000:
+ module.fail_json(msg="Certificate exceeds 3000 characters")
+ current = list(array.get_syslog_servers_settings().items)[0]
+ try:
+ if current.ca_certificate:
+ pass
+ except AttributeError:
+ current.ca_certificate = None
+ if current.tls_audit_enabled != module.params["tls_audit"]:
+ changed = True
+ new_tls = module.params["tls_audit"]
+ else:
+ new_tls = current.tls_audit_enabled
+ if current.logging_severity != module.params["severity"]:
+ changed = True
+ new_sev = module.params["severity"]
+ else:
+ new_sev = current.logging_severity
+ if module.params["ca_certificate"]:
+ if module.params["ca_certificate"].upper() == "DELETE":
+ if current.ca_certificate:
+ cert_change = changed = True
+ new_cert = ""
+ elif current.ca_certificate != module.params["ca_certificate"]:
+ cert_change = changed = True
+ new_cert = module.params["ca_certificate"]
+ if changed and not module.check_mode:
+ if cert_change:
+ res = array.patch_syslog_servers_settings(
+ syslog_server_settings=flasharray.SyslogServerSettings(
+ ca_certificate=new_cert,
+ tls_audit_enabled=new_tls,
+ logging_severity=new_sev,
+ )
+ )
+ else:
+ res = array.patch_syslog_servers_settings(
+ syslog_server_settings=flasharray.SyslogServerSettings(
+ tls_audit_enabled=new_tls, logging_severity=new_sev
+ )
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Changing syslog settings failed. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py
new file mode 100644
index 000000000..e5d041fa3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_timeout.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_timeout
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashArray GUI idle timeout
+description:
+- Configure GUI idle timeout for Pure Storage FlashArrays.
+- This does not affect existing GUI sessions.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set or disable the GUI idle timeout
+ default: present
+ type: str
+ choices: [ present, absent ]
+ timeout:
+ description:
+ - Minutes for idle timeout.
+ type: int
+ default: 30
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Set GUI idle timeout to 25 minutes
+ purestorage.flasharray.purefa_timeout:
+ timeout: 25
+ state: present
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable idle timeout
+ purestorage.flasharray.purefa_timeout:
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def set_timeout(module, array):
+ """Set GUI idle timeout"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set(idle_timeout=module.params["timeout"])
+ except Exception:
+ module.fail_json(msg="Failed to set GUI idle timeout")
+
+ module.exit_json(changed=changed)
+
+
+def disable_timeout(module, array):
+ """Disable idle timeout"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set(idle_timeout=0)
+ except Exception:
+ module.fail_json(msg="Failed to disable GUI idle timeout")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ timeout=dict(type="int", default=30),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ if 5 < module.params["timeout"] > 180 and module.params["timeout"] != 0:
+ module.fail_json(msg="Timeout value must be between 5 and 180 minutes")
+ array = get_system(module)
+ current_timeout = array.get(idle_timeout=True)["idle_timeout"]
+ if state == "present" and current_timeout != module.params["timeout"]:
+ set_timeout(module, array)
+ elif state == "absent" and current_timeout != 0:
+ disable_timeout(module, array)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py
new file mode 100644
index 000000000..fa66fe308
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_token
+version_added: '1.0.0'
+short_description: Create or delete an API token for an existing admin user
+description:
+- Create or delete an API token for an existing admin user.
+- Uses username/password to create/delete the API token.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete API token
+ type: str
+ default: present
+ choices: [ present, absent ]
+ recreate:
+ description:
+ - Recreates the API token, overwriting the existing API token if present
+ type: bool
+ default: false
+ username:
+ description:
+ - Username of the admin user to create API token for
+ type: str
+ password:
+ description:
+ - Password of the admin user to create API token for.
+ type: str
+ fa_url:
+ description:
+ - FlashArray management IPv4 address or Hostname.
+ type: str
+ timeout:
+ description:
+ - The duration of API token validity.
+    - Valid values are weeks (w), days (d), hours (h), minutes (m) and seconds (s).
+ type: str
+"""
+
+EXAMPLES = r"""
+- name: Create API token with no expiration
+  purestorage.flasharray.purefa_token:
+ username: pureuser
+ password: secret
+ state: present
+ fa_url: 10.10.10.2
+- name: Create API token with 23 days expiration
+  purestorage.flasharray.purefa_token:
+ username: pureuser
+ password: secret
+ state: present
+ timeout: 23d
+ fa_url: 10.10.10.2
+- name: Delete API token
+  purestorage.flasharray.purefa_token:
+ username: pureuser
+ password: secret
+ state: absent
+ fa_url: 10.10.10.2
+"""
+
+RETURN = r"""
+purefa_token:
+ description: API token for user
+ returned: changed
+ type: str
+ sample: e649f439-49be-3806-f774-a35cbbc4c2d2
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+)
+from os import environ
+import platform
+
+VERSION = 1.0
+USER_AGENT_BASE = "Ansible_token"
+TIMEOUT_API_VERSION = "2.2"
+
+HAS_PURESTORAGE = True
+try:
+ from purestorage import purestorage
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+def _convert_time_to_millisecs(timeout):
+    if timeout[-1:].lower() not in ["w", "d", "h", "m", "s"]:
+        return 0
+    try:
+        if timeout[-1:].lower() == "w":
+            return int(timeout[:-1]) * 7 * 86400000
+        elif timeout[-1:].lower() == "d":
+            return int(timeout[:-1]) * 86400000
+        elif timeout[-1:].lower() == "h":
+            return int(timeout[:-1]) * 3600000
+        elif timeout[-1:].lower() == "m":
+            return int(timeout[:-1]) * 60000
+        # seconds is the only remaining documented unit
+        return int(timeout[:-1]) * 1000
+    except Exception:
+        return 0
+
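+# Worked example (illustrative only, not used by the module): with the
+# documented units, _convert_time_to_millisecs("2w") returns 1209600000,
+# "23d" -> 1987200000, "30m" -> 1800000 and "45s" -> 45000; an unrecognised
+# unit returns 0, which the module treats as "leave the lifetime unset".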
+
+def get_session(module):
+ """Return System Object or Fail"""
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
+
+ array_name = module.params["fa_url"]
+ username = module.params["username"]
+ password = module.params["password"]
+
+ if HAS_PURESTORAGE:
+ if array_name and username and password:
+ system = purestorage.FlashArray(
+ array_name, username=username, password=password, user_agent=user_agent
+ )
+        elif (
+            environ.get("PUREFA_URL")
+            and environ.get("PUREFA_USERNAME")
+            and environ.get("PUREFA_PASSWORD")
+        ):
+            url = environ.get("PUREFA_URL")
+            username = environ.get("PUREFA_USERNAME")
+            password = environ.get("PUREFA_PASSWORD")
+            system = purestorage.FlashArray(
+                url, username=username, password=password, user_agent=user_agent
+            )
+        else:
+            module.fail_json(
+                msg="You must set the PUREFA_URL, PUREFA_USERNAME and PUREFA_PASSWORD "
+                "environment variables or the fa_url, username and password "
+                "module arguments"
+            )
+ try:
+ system.get()
+ except Exception:
+ module.fail_json(
+ msg="Pure Storage FlashArray authentication failed. Check your credentials"
+ )
+ else:
+ module.fail_json(msg="purestorage SDK is not installed.")
+ return system
+
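+# Usage note (derived from get_session() above): when fa_url, username and
+# password are not supplied as module arguments, credentials are read from
+# the environment instead, e.g.
+#   export PUREFA_URL=10.10.10.2
+#   export PUREFA_USERNAME=pureuser
+#   export PUREFA_PASSWORD=secret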
+
+def main():
+ argument_spec = dict(
+ fa_url=dict(required=False),
+ username=dict(type="str", required=False),
+ password=dict(no_log=True, required=False),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ recreate=dict(type="bool", default=False),
+ timeout=dict(type="str"),
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=False)
+ array = get_session(module)
+ changed = False
+
+ if module.params["username"]:
+ username = module.params["username"]
+ else:
+ username = environ.get("PUREFA_USERNAME")
+ state = module.params["state"]
+ recreate = module.params["recreate"]
+
+ result = array.get_api_token(admin=username)
+ api_version = array._list_available_rest_versions()
+ if state == "present" and result["api_token"] is None:
+ result = array.create_api_token(admin=username)
+ changed = True
+ elif state == "present" and recreate:
+ result = array.delete_api_token(admin=username)
+ result = array.create_api_token(admin=username)
+ changed = True
+ elif state == "absent" and result["api_token"]:
+ result = array.delete_api_token(admin=username)
+ changed = True
+
+ api_token = result["api_token"]
+
+ if (
+ TIMEOUT_API_VERSION in api_version
+ and module.params["timeout"]
+ and state == "present"
+ ):
+ module.params["api_token"] = api_token
+ array6 = get_array(module)
+ ttl = _convert_time_to_millisecs(module.params["timeout"])
+ if ttl != 0:
+ changed = True
+ array6.delete_admins_api_tokens(names=[username])
+ res = array6.post_admins_api_tokens(names=[username], timeout=ttl)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set token lifetime. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ api_token = list(res.items)[0].api_token.token
+ module.exit_json(changed=changed, purefa_token=api_token)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py
new file mode 100644
index 000000000..8544c5393
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_user.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_user
+version_added: '1.0.0'
+short_description: Create, modify or delete FlashArray local user account
+description:
+- Create, modify or delete local users on a Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create, delete or update local user account
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - The name of the local user account
+ type: str
+ required: true
+ role:
+ description:
+ - Sets the local user's access level to the array
+ type: str
+ choices: [ readonly, ops_admin, storage_admin, array_admin ]
+ password:
+ description:
+ - Password for the local user.
+ type: str
+ old_password:
+ description:
+ - If changing an existing password, you must provide the old password for security
+ type: str
+ api:
+ description:
+ - Define whether to create an API token for this user
+ - Token can be exposed using the I(debug) module
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new user ansible with API token
+ purestorage.flasharray.purefa_user:
+ name: ansible
+ password: apassword
+ role: storage_admin
+ api: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: result
+
+- debug:
+    msg: "API Token: {{ result['user_info']['user_api'] }}"
+
+- name: Change role type for existing user
+ purestorage.flasharray.purefa_user:
+ name: ansible
+ role: array_admin
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Change password type for existing user (NOT IDEMPOTENT)
+ purestorage.flasharray.purefa_user:
+ name: ansible
+ password: anewpassword
+ old_password: apassword
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Change API token for existing user
+ purestorage.flasharray.purefa_user:
+ name: ansible
+ api: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: result
+
+- debug:
+    msg: "API Token: {{ result['user_info']['user_api'] }}"
+"""
+
+RETURN = r"""
+"""
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "1.14"
+
+
+def get_user(module, array):
+ """Return Local User Account or None"""
+ user = None
+ users = array.list_admins()
+ for acct in range(0, len(users)):
+ if users[acct]["name"] == module.params["name"]:
+ user = users[acct]
+ return user
+
+
+def create_user(module, array):
+ """Create or Update Local User Account"""
+ changed = api_changed = role_changed = passwd_changed = False
+ user = get_user(module, array)
+ role = module.params["role"]
+ user_token = {}
+ if not user:
+ changed = True
+ if not module.check_mode:
+ try:
+ if not role:
+ role = "readonly"
+ array.create_admin(
+ module.params["name"], role=role, password=module.params["password"]
+ )
+ if module.params["api"]:
+ try:
+ user_token["user_api"] = array.create_api_token(
+ module.params["name"]
+ )["api_token"]
+ except Exception:
+ array.delete_user(module.params["name"])
+ module.fail_json(
+ msg="Local User {0}: Creation failed".format(
+ module.params["name"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Local User {0}: Creation failed".format(module.params["name"])
+ )
+ else:
+ if module.params["password"] and not module.params["old_password"]:
+ module.exit_json(changed=changed)
+ if module.params["password"] and module.params["old_password"]:
+ if module.params["old_password"] and (
+ module.params["password"] != module.params["old_password"]
+ ):
+ passwd_changed = True
+ if not module.check_mode:
+ try:
+ array.set_admin(
+ module.params["name"],
+ password=module.params["password"],
+ old_password=module.params["old_password"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Local User {0}: Password reset failed. "
+ "Check old password.".format(module.params["name"])
+ )
+ else:
+ module.fail_json(
+ msg="Local User Account {0}: Password change failed - "
+ "Check both old and new passwords".format(module.params["name"])
+ )
+ if module.params["api"]:
+ try:
+            if array.get_api_token(module.params["name"])["api_token"] is not None:
+ if not module.check_mode:
+ array.delete_api_token(module.params["name"])
+ api_changed = True
+ if not module.check_mode:
+ user_token["user_api"] = array.create_api_token(
+ module.params["name"]
+ )["api_token"]
+ except Exception:
+ module.fail_json(
+ msg="Local User {0}: API token change failed".format(
+ module.params["name"]
+ )
+ )
+ if module.params["role"] and module.params["role"] != user["role"]:
+ if module.params["name"] != "pureuser":
+ role_changed = True
+ if not module.check_mode:
+ try:
+ array.set_admin(
+ module.params["name"], role=module.params["role"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Local User {0}: Role changed failed".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.warn("Role for 'pureuser' cannot be modified.")
+ changed = bool(passwd_changed or role_changed or api_changed)
+ module.exit_json(changed=changed, user_info=user_token)
+
+
+def delete_user(module, array):
+ """Delete Local User Account"""
+ changed = False
+ if get_user(module, array):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.delete_admin(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Object Store Account {0}: Deletion failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True, type="str"),
+ role=dict(
+ type="str",
+ choices=["readonly", "ops_admin", "storage_admin", "array_admin"],
+ ),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ password=dict(type="str", no_log=True),
+ old_password=dict(type="str", no_log=True),
+ api=dict(type="bool", default=False),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ pattern = re.compile("^[a-z0-9]([a-z0-9-]{0,30}[a-z0-9])?$")
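+    # For illustration: this pattern accepts names such as "ansible" or
+    # "svc-account-1" and rejects "Ansible" (uppercase), "-user" (leading
+    # dash) and anything longer than 32 characters.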
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="name must contain a minimum of 1 and a maximum of 32 characters "
+ "(alphanumeric or `-`). All letters must be lowercase."
+ )
+
+ if state == "absent":
+ delete_user(module, array)
+ elif state == "present":
+ create_user(module, array)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py
new file mode 100644
index 000000000..febb0d5a2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py
@@ -0,0 +1,685 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_vg
+version_added: '1.0.0'
+short_description: Manage volume groups on Pure Storage FlashArrays
+description:
+- Create, delete or modify volume groups on Pure Storage FlashArrays.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the volume group.
+ - Multi-volume-group support available from Purity//FA 6.0.0
+ B(***NOTE***) Manual deletion or eradication of individual volume groups created
+ using multi-volume-group will cause idempotency to fail
+ - Multi-volume-group support only exists for volume group creation
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the volume group should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ eradicate:
+ description:
+ - Define whether to eradicate the volume group on delete and leave in trash.
+    type: bool
+ default: false
+ bw_qos:
+ description:
+ - Bandwidth limit for vgroup in M or G units.
+ M will set MB/s
+ G will set GB/s
+ To clear an existing QoS setting use 0 (zero)
+ type: str
+ iops_qos:
+ description:
+ - IOPs limit for vgroup - use value or K or M
+ K will mean 1000
+ M will mean 1000000
+ To clear an existing IOPs setting use 0 (zero)
+ type: str
+ count:
+ description:
+ - Number of volume groups to be created in a multiple volume group creation
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ start:
+ description:
+ - Number at which to start the multiple volume group creation index
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ default: 0
+ digits:
+ description:
+ - Number of digits to use for multiple volume group count. This
+ will pad the index number with zeros where necessary
+ - Only supported from Purity//FA v6.0.0 and higher
+ - Range is between 1 and 10
+ type: int
+ default: 1
+ suffix:
+ description:
+ - Suffix string, if required, for multiple volume group create
+    - Volume group names will be formed as I(<name>#I<suffix>), where
+      I(#) is a placeholder for the volume group index
+      (see the I(count), I(start) and I(digits) descriptions)
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: str
+ priority_operator:
+ description:
+ - DMM Priority Adjustment operator
+ type: str
+ choices: [ +, '-' ]
+ default: +
+ version_added: '1.13.0'
+ priority_value:
+ description:
+ - DMM Priority Adjustment value
+ type: int
+ choices: [ 0, 10 ]
+ default: 0
+ version_added: '1.13.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new volume group
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ bw_qos: 50M
+ iops_qos: 100
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create 10 volume groups of pattern foo#bar with QoS
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ suffix: bar
+ count: 10
+ start: 10
+ digits: 3
+ bw_qos: 50M
+ iops_qos: 100
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update volume group QoS limits
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ bw_qos: 0
+ iops_qos: 5555
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update volume group DMM Priority Adjustment (Purity//FA 6.1.2+)
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ priority_operator: '-'
+ priority_value: 10
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Destroy volume group
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Recover deleted volume group - no changes are made to the volume group on recovery
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Destroy and Eradicate volume group
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
+VGROUP_API_VERSION = "1.13"
+VG_IOPS_VERSION = "1.17"
+MULTI_VG_VERSION = "2.2"
+PRIORITY_API_VERSION = "2.11"
+
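+# QoS guard rails enforced by the range checks below: bandwidth limits
+# must fall within 1048576 to 549755813888 bytes/sec (1 MB/s to 512 GB/s)
+# and IOPs limits within 100 to 100000000.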
+
+def human_to_bytes(size):
+ """Given a human-readable byte string (e.g. 2G, 30M),
+ return the number of bytes. Will return 0 if the argument has
+ unexpected form.
+ """
+ bytes = size[:-1]
+ unit = size[-1].upper()
+ if bytes.isdigit():
+ bytes = int(bytes)
+ if unit == "P":
+ bytes *= 1125899906842624
+ elif unit == "T":
+ bytes *= 1099511627776
+ elif unit == "G":
+ bytes *= 1073741824
+ elif unit == "M":
+ bytes *= 1048576
+ elif unit == "K":
+ bytes *= 1024
+ else:
+ bytes = 0
+ else:
+ bytes = 0
+ return bytes
+
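+# Worked example (illustrative only): human_to_bytes("50M") returns
+# 52428800 (50 * 1048576) and human_to_bytes("2G") returns 2147483648;
+# a malformed value such as "50X" returns 0.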
+
+def human_to_real(iops):
+ """Given a human-readable IOPs string (e.g. 2K, 30M),
+ return the real number. Will return 0 if the argument has
+ unexpected form.
+ """
+ digit = iops[:-1]
+ unit = iops[-1].upper()
+    if unit.isdigit():
+        digit = int(iops)
+ elif digit.isdigit():
+ digit = int(digit)
+ if unit == "M":
+ digit *= 1000000
+ elif unit == "K":
+ digit *= 1000
+ else:
+ digit = 0
+ else:
+ digit = 0
+ return digit
+
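+# Worked example (illustrative only): human_to_real("2K") returns 2000,
+# human_to_real("1M") returns 1000000 and a plain number such as "5555"
+# is returned as the integer 5555.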
+
+def get_multi_vgroups(module, destroyed=False):
+ """Return True is all volume groups exist or None"""
+ names = []
+ array = get_array(module)
+ for vg_num in range(
+ module.params["start"], module.params["count"] + module.params["start"]
+ ):
+ names.append(
+ module.params["name"]
+ + str(vg_num).zfill(module.params["digits"])
+ + module.params["suffix"]
+ )
+ return bool(
+ array.get_volume_groups(names=names, destroyed=destroyed).status_code == 200
+ )
+
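+# Naming sketch (matches the EXAMPLES above): name=foo, suffix=bar,
+# count=10, start=10, digits=3 yields the volume group names
+# foo010bar, foo011bar, ... foo019bar.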
+
+def get_pending_vgroup(module, array):
+ """Get Deleted Volume Group"""
+ vgroup = None
+ for vgrp in array.list_vgroups(pending=True):
+ if vgrp["name"] == module.params["name"] and vgrp["time_remaining"]:
+ vgroup = vgrp
+ break
+
+ return vgroup
+
+
+def get_vgroup(module, array):
+ """Get Volume Group"""
+ vgroup = None
+ for vgrp in array.list_vgroups():
+ if vgrp["name"] == module.params["name"]:
+ vgroup = vgrp
+ break
+
+ return vgroup
+
+
+def make_vgroup(module, array):
+ """Create Volume Group"""
+ changed = True
+ api_version = array._list_available_rest_versions()
+    if (
+        module.params["bw_qos"] or module.params["iops_qos"]
+    ) and VG_IOPS_VERSION in api_version:
+ if module.params["bw_qos"] and not module.params["iops_qos"]:
+ if int(human_to_bytes(module.params["bw_qos"])) in range(
+ 1048576, 549755813888
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.create_vgroup(
+ module.params["name"],
+ bandwidth_limit=module.params["bw_qos"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Vgroup {0} creation failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Bandwidth QoS value {0} out of range.".format(
+ module.params["bw_qos"]
+ )
+ )
+ elif module.params["iops_qos"] and not module.params["bw_qos"]:
+ if int(human_to_real(module.params["iops_qos"])) in range(100, 100000000):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.create_vgroup(
+ module.params["name"], iops_limit=module.params["iops_qos"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Vgroup {0} creation failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="IOPs QoS value {0} out of range.".format(
+ module.params["iops_qos"]
+ )
+ )
+ else:
+ bw_qos_size = int(human_to_bytes(module.params["bw_qos"]))
+ if int(human_to_real(module.params["iops_qos"])) in range(
+ 100, 100000000
+ ) and bw_qos_size in range(1048576, 549755813888):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.create_vgroup(
+ module.params["name"],
+ iops_limit=module.params["iops_qos"],
+ bandwidth_limit=module.params["bw_qos"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Vgroup {0} creation failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(msg="IOPs or Bandwidth QoS value out of range.")
+ else:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.create_vgroup(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="creation of volume group {0} failed.".format(
+ module.params["name"]
+ )
+ )
+ if PRIORITY_API_VERSION in api_version:
+ array = get_array(module)
+ volume_group = flasharray.VolumeGroup(
+ priority_adjustment=flasharray.PriorityAdjustment(
+ priority_adjustment_operator=module.params["priority_operator"],
+ priority_adjustment_value=module.params["priority_value"],
+ ),
+ )
+ if not module.check_mode:
+ res = array.patch_volume_groups(
+ names=[module.params["name"]], volume_group=volume_group
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set priority adjustment for volume group {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def make_multi_vgroups(module, array):
+ """Create multiple Volume Groups"""
+ changed = True
+ bw_qos_size = iops_qos_size = 0
+ names = []
+ api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ for vg_num in range(
+ module.params["start"], module.params["count"] + module.params["start"]
+ ):
+ names.append(
+ module.params["name"]
+ + str(vg_num).zfill(module.params["digits"])
+ + module.params["suffix"]
+ )
+ if module.params["bw_qos"]:
+ bw_qos = int(human_to_bytes(module.params["bw_qos"]))
+ if bw_qos in range(1048576, 549755813888):
+ bw_qos_size = bw_qos
+ else:
+ module.fail_json(msg="Bandwidth QoS value out of range.")
+ if module.params["iops_qos"]:
+ iops_qos = int(human_to_real(module.params["iops_qos"]))
+ if iops_qos in range(100, 100000000):
+ iops_qos_size = iops_qos
+ else:
+ module.fail_json(msg="IOPs QoS value out of range.")
+ if bw_qos_size != 0 and iops_qos_size != 0:
+ volume_group = flasharray.VolumeGroupPost(
+ qos=flasharray.Qos(bandwidth_limit=bw_qos_size, iops_limit=iops_qos_size)
+ )
+ elif bw_qos_size == 0 and iops_qos_size == 0:
+ volume_group = flasharray.VolumeGroupPost()
+ elif bw_qos_size == 0 and iops_qos_size != 0:
+ volume_group = flasharray.VolumeGroupPost(
+ qos=flasharray.Qos(iops_limit=iops_qos_size)
+ )
+ elif bw_qos_size != 0 and iops_qos_size == 0:
+ volume_group = flasharray.VolumeGroupPost(
+ qos=flasharray.Qos(bandwidth_limit=bw_qos_size)
+ )
+ if not module.check_mode:
+ res = array.post_volume_groups(names=names, volume_group=volume_group)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Multi-Vgroup {0}#{1} creation failed: {2}".format(
+ module.params["name"],
+ module.params["suffix"],
+ res.errors[0].message,
+ )
+ )
+ if PRIORITY_API_VERSION in api_version:
+ volume_group = flasharray.VolumeGroup(
+ priority_adjustment=flasharray.PriorityAdjustment(
+ priority_adjustment_operator=module.params["priority_operator"],
+ priority_adjustment_value=module.params["priority_value"],
+ ),
+ )
+ res = array.patch_volume_groups(names=names, volume_group=volume_group)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set priority adjustments for multi-vgroup {0}#{1}. Error: {2}".format(
+ module.params["name"],
+ module.params["suffix"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_vgroup(module, array):
+ """Update Volume Group"""
+ changed = False
+ api_version = array._list_available_rest_versions()
+ if PRIORITY_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ vg_prio = list(arrayv6.get_volume_groups(names=[module.params["name"]]).items)[
+ 0
+ ].priority_adjustment
+ if (
+ module.params["priority_operator"]
+ and vg_prio.priority_adjustment_operator
+ != module.params["priority_operator"]
+ ):
+ changed = True
+ new_operator = module.params["priority_operator"]
+ else:
+ new_operator = vg_prio.priority_adjustment_operator
+ if vg_prio.priority_adjustment_value != module.params["priority_value"]:
+ changed = True
+ new_value = module.params["priority_value"]
+ else:
+ new_value = vg_prio.priority_adjustment_value
+ if changed and not module.check_mode:
+ volume_group = flasharray.VolumeGroup(
+ priority_adjustment=flasharray.PriorityAdjustment(
+ priority_adjustment_operator=new_operator,
+ priority_adjustment_value=new_value,
+ )
+ )
+ res = arrayv6.patch_volume_groups(
+ names=[module.params["name"]], volume_group=volume_group
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to changfe DMM Priority for volume group {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+    if VG_IOPS_VERSION in api_version:
+        try:
+            vg_qos = array.get_vgroup(module.params["name"], qos=True)
+        except Exception:
+            module.fail_json(
+                msg="Failed to get QoS settings for vgroup {0}.".format(
+                    module.params["name"]
+                )
+            )
+        if vg_qos["bandwidth_limit"] is None:
+            vg_qos["bandwidth_limit"] = 0
+        if vg_qos["iops_limit"] is None:
+            vg_qos["iops_limit"] = 0
+ if module.params["bw_qos"] and VG_IOPS_VERSION in api_version:
+ if human_to_bytes(module.params["bw_qos"]) != vg_qos["bandwidth_limit"]:
+ if module.params["bw_qos"] == "0":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_vgroup(module.params["name"], bandwidth_limit="")
+ except Exception:
+ module.fail_json(
+ msg="Vgroup {0} Bandwidth QoS removal failed.".format(
+ module.params["name"]
+ )
+ )
+ elif int(human_to_bytes(module.params["bw_qos"])) in range(
+ 1048576, 549755813888
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_vgroup(
+ module.params["name"],
+ bandwidth_limit=module.params["bw_qos"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Vgroup {0} Bandwidth QoS change failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Bandwidth QoS value {0} out of range.".format(
+ module.params["bw_qos"]
+ )
+ )
+ if module.params["iops_qos"] and VG_IOPS_VERSION in api_version:
+ if human_to_real(module.params["iops_qos"]) != vg_qos["iops_limit"]:
+ if module.params["iops_qos"] == "0":
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_vgroup(module.params["name"], iops_limit="")
+ except Exception:
+ module.fail_json(
+ msg="Vgroup {0} IOPs QoS removal failed.".format(
+ module.params["name"]
+ )
+ )
+ elif int(human_to_real(module.params["iops_qos"])) in range(100, 100000000):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_vgroup(
+ module.params["name"], iops_limit=module.params["iops_qos"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Vgroup {0} IOPs QoS change failed.".format(
+ module.params["name"]
+ )
+ )
+        else:
+            module.fail_json(
+                msg="IOPs QoS value {0} out of range.".format(
+                    module.params["iops_qos"]
+                )
+            )
+
+ module.exit_json(changed=changed)
+
+
+def recover_vgroup(module, array):
+ """Recover Volume Group"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.recover_vgroup(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Recovery of volume group {0} failed.".format(module.params["name"])
+ )
+
+ module.exit_json(changed=changed)
+
+
+def eradicate_vgroup(module, array):
+ """Eradicate Volume Group"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.eradicate_vgroup(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Eradicating vgroup {0} failed.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_vgroup(module, array):
+ """Delete Volume Group"""
+ changed = True
+ if not module.check_mode:
+ try:
+ array.destroy_vgroup(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Deleting vgroup {0} failed.".format(module.params["name"])
+ )
+ if module.params["eradicate"]:
+ eradicate_vgroup(module, array)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ bw_qos=dict(type="str"),
+ iops_qos=dict(type="str"),
+ count=dict(type="int"),
+ start=dict(type="int", default=0),
+ digits=dict(type="int", default=1),
+ suffix=dict(type="str"),
+ priority_operator=dict(type="str", choices=["+", "-"], default="+"),
+ priority_value=dict(type="int", choices=[0, 10], default=0),
+ eradicate=dict(type="bool", default=False),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+ if VGROUP_API_VERSION not in api_version:
+ module.fail_json(msg="API version does not support volume groups.")
+
+ vgroup = get_vgroup(module, array)
+ xvgroup = get_pending_vgroup(module, array)
+
+ if module.params["count"]:
+ if not HAS_PURESTORAGE:
+ module.fail_json(
+ msg="py-pure-client sdk is required to support 'count' parameter"
+ )
+ if MULTI_VG_VERSION not in api_version:
+ module.fail_json(
+ msg="'count' parameter is not supported until Purity//FA 6.0.0 or higher"
+ )
+ if module.params["digits"] and module.params["digits"] not in range(1, 10):
+ module.fail_json(msg="'digits' must be in the range of 1 to 10")
+ if module.params["start"] < 0:
+ module.fail_json(msg="'start' must be a positive number")
+ vgroup = get_multi_vgroups(module)
+ if state == "present" and not vgroup:
+ make_multi_vgroups(module, array)
+ elif state == "absent" and not vgroup:
+ module.exit_json(changed=False)
+ else:
+ module.warn("Method not yet supported for multi-vgroup")
+ else:
+ if xvgroup and state == "present":
+ recover_vgroup(module, array)
+ elif vgroup and state == "absent":
+ delete_vgroup(module, array)
+ elif xvgroup and state == "absent" and module.params["eradicate"]:
+ eradicate_vgroup(module, array)
+ elif not vgroup and not xvgroup and state == "present":
+ make_vgroup(module, array)
+ elif vgroup and state == "present":
+ update_vgroup(module, array)
+ elif vgroup is None and state == "absent":
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py
new file mode 100644
index 000000000..e804e334d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vlan.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefa_vlan
+version_added: '1.0.0'
+short_description: Manage network VLAN interfaces in a Pure Storage FlashArray
+description:
+ - This module manages the VLAN network interfaces on a Pure Storage FlashArray.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+      - Interface name, including controller identifier.
+      - VLANs are only supported on iSCSI, NVMe-RoCE, NVMe-TCP and file
+        physical interfaces
+ required: true
+ type: str
+ state:
+ description:
+ - State of existing interface (on/off).
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ enabled:
+ description:
+ - Define if VLAN interface is enabled or not.
+ required: false
+ default: true
+ type: bool
+ address:
+ description:
+ - IPv4 or IPv6 address of interface.
+ required: false
+ type: str
+ subnet:
+ description:
+      - Name of the subnet with which the interface is associated.
+ required: true
+ type: str
+extends_documentation_fragment:
+ - purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = """
+- name: Configure and enable VLAN interface ct0.eth8 for subnet test
+ purestorage.flasharray.purefa_vlan:
+ name: ct0.eth8
+ subnet: test
+ address: 10.21.200.18
+ state: present
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Disable VLAN interface for subnet test on ct1.eth2
+ purestorage.flasharray.purefa_vlan:
+ name: ct1.eth2
+ subnet: test
+ enabled: false
+ fa_url: 10.10.10.2
+ api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+
+- name: Delete VLAN interface for subnet test on ct0.eth4
+ purestorage.flasharray.purefa_vlan:
+ name: ct0.eth4
+ subnet: test
+ state: absent
+ fa_url: 10.10.10.2
+    api_token: c6033033-fe69-2515-a9e8-966bb7fe4b40
+"""
+
+RETURN = """
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+def _get_subnet(module, array):
+ """Return subnet or None"""
+ subnet = {}
+ try:
+ subnet = array.get_subnet(module.params["subnet"])
+ except Exception:
+ return None
+ return subnet
+
+
+def _get_interface(module, array):
+ """Return Interface or None"""
+ interface = {}
+ if "ct" in module.params["name"]:
+ try:
+ interfaces = array.list_network_interfaces()
+ except Exception:
+ return None
+ for ints in range(0, len(interfaces)):
+ if interfaces[ints]["name"] == module.params["name"]:
+ interface = interfaces[ints]
+ break
+ return interface
+
+
+def _get_vif(array, interface, subnet):
+ """Return VLAN Interface or None"""
+ vif_info = {}
+ vif_name = interface["name"] + "." + str(subnet["vlan"])
+ try:
+ interfaces = array.list_network_interfaces()
+ except Exception:
+ return None
+ for ints in range(0, len(interfaces)):
+ if interfaces[ints]["name"] == vif_name:
+ vif_info = interfaces[ints]
+ break
+ return vif_info
+
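+# Naming note (derived from the helpers above): a VLAN interface is named
+# <physical interface>.<subnet VLAN ID>, so interface ct0.eth8 on a subnet
+# with VLAN ID 200 becomes ct0.eth8.200.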
+
+def create_vif(module, array, interface, subnet):
+ """Create VLAN Interface"""
+ changed = True
+ if not module.check_mode:
+ vif_name = interface["name"] + "." + str(subnet["vlan"])
+ if module.params["address"]:
+ try:
+ array.create_vlan_interface(
+ vif_name, module.params["subnet"], address=module.params["address"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create VLAN interface {0}.".format(vif_name)
+ )
+ else:
+ try:
+ array.create_vlan_interface(vif_name, module.params["subnet"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to create VLAN interface {0}.".format(vif_name)
+ )
+ if not module.params["enabled"]:
+ try:
+ array.set_network_interface(vif_name, enabled=False)
+ except Exception:
+ module.fail_json(
+ msg="Failed to disable VLAN interface {0} on creation.".format(
+ vif_name
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_vif(module, array, interface, subnet):
+ """Modify VLAN Interface settings"""
+ changed = False
+ vif_info = _get_vif(array, interface, subnet)
+ vif_name = vif_info["name"]
+ if module.params["address"]:
+ if module.params["address"] != vif_info["address"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_network_interface(
+ vif_name, address=module.params["address"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change IP address for VLAN interface {0}.".format(
+                            vif_name
+ )
+ )
+
+ if module.params["enabled"] != vif_info["enabled"]:
+ if module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_network_interface(vif_name, enabled=True)
+ except Exception:
+ module.fail_json(
+ msg="Failed to enable VLAN interface {0}.".format(vif_name)
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_network_interface(vif_name, enabled=False)
+ except Exception:
+ module.fail_json(
+ msg="Failed to disable VLAN interface {0}.".format(vif_name)
+ )
+
+ module.exit_json(changed=changed)
+
+
+def delete_vif(module, array, subnet):
+ """Delete VLAN Interface"""
+ changed = True
+ if not module.check_mode:
+ vif_name = module.params["name"] + "." + str(subnet["vlan"])
+ try:
+ array.delete_vlan_interface(vif_name)
+ except Exception:
+ module.fail_json(msg="Failed to delete VLAN inerface {0}".format(vif_name))
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ subnet=dict(type="str", required=True),
+ enabled=dict(type="bool", default=True),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ address=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ array = get_system(module)
+ subnet = _get_subnet(module, array)
+ interface = _get_interface(module, array)
+ if not subnet:
+ module.fail_json(msg="Invalid subnet specified.")
+ if not interface:
+ module.fail_json(msg="Invalid interface specified.")
+ if ("iscsi" or "nvme-roce" or "nvme-tcp" or "file") not in interface["services"]:
+ module.fail_json(
+ msg="Invalid interface specified - must have service type of iSCSI, NVMe-RoCE, NVMe-TCP or file enabled."
+ )
+ if subnet["vlan"]:
+ vif_name = module.params["name"] + "." + str(subnet["vlan"])
+ vif = bool(vif_name in subnet["interfaces"])
+
+ if state == "present" and not vif:
+ create_vif(module, array, interface, subnet)
+ elif state == "present" and vif:
+ update_vif(module, array, interface, subnet)
+ elif state == "absent" and vif:
+ delete_vif(module, array, subnet)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py
new file mode 100644
index 000000000..48e154c77
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_vnc
+version_added: '1.0.0'
+short_description: Enable or disable VNC port for installed apps
+description:
+- Enable or disable VNC access for installed apps.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of VNC
+ type: str
+ default: present
+ choices: [ present, absent ]
+ name:
+ description:
+    - Name of the app
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable VNC for application test
+ purestorage.flasharray.purefa_vnc:
+ name: test
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable VNC for application test
+ purestorage.flasharray.purefa_vnc:
+ name: test
+ state: absent
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+vnc:
+ description: VNC port information for application
+ type: dict
+ returned: success
+ contains:
+ status:
+ description: Status of application
+ type: str
+ sample: 'healthy'
+ index:
+ description: Application index number
+ type: int
+ version:
+ description: Application version installed
+ type: str
+ sample: '5.2.1'
+ vnc:
+ description: IP address and port number for VNC connection
+ type: dict
+ sample: ['10.21.200.34:5900']
+ name:
+ description: Application name
+ type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "1.17"
+
+
+def enable_vnc(module, array, app):
+ """Enable VNC port"""
+ changed = False
+ vnc_fact = []
+ if not app["vnc_enabled"]:
+ try:
+ if not module.check_mode:
+ array.enable_app_vnc(module.params["name"])
+ vnc_fact = array.get_app_node(module.params["name"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Enabling VNC for {0} failed".format(module.params["name"])
+ )
+ module.exit_json(changed=changed, vnc=vnc_fact)
+
+
+def disable_vnc(module, array, app):
+ """Disable VNC port"""
+ changed = False
+ if app["vnc_enabled"]:
+ try:
+ if not module.check_mode:
+ array.disable_app_vnc(module.params["name"])
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Disabling VNC for {0} failed".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ name=dict(type="str", required=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ try:
+ app = array.get_app(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Selected application {0} does not exist".format(module.params["name"])
+ )
+ if not app["enabled"]:
+ module.fail_json(
+ msg="Application {0} is not enabled".format(module.params["name"])
+ )
+ if module.params["state"] == "present":
+ enable_vnc(module, array, app)
+ else:
+ disable_vnc(module, array, app)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
new file mode 100644
index 000000000..c3c92f6d4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
@@ -0,0 +1,1726 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_volume
+version_added: '1.0.0'
+short_description: Manage volumes on Pure Storage FlashArrays
+description:
+- Create, delete or extend the capacity of a volume on Pure Storage FlashArray.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the volume.
+ - Volume could be created in a POD with this syntax POD_NAME::VOLUME_NAME.
+ - Volume could be created in a volume group with this syntax VG_NAME/VOLUME_NAME.
+ - Multi-volume support available from Purity//FA 6.0.0
+ B(***NOTE***) Manual deletion or eradication of individual volumes created
+ using multi-volume will cause idempotency to fail
+ - Multi-volume support only exists for volume creation
+ type: str
+ required: true
+ target:
+ description:
+ - The name of the target volume, if copying.
+ type: str
+ state:
+ description:
+ - Define whether the volume should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ eradicate:
+ description:
+ - Define whether to eradicate the volume on delete or leave in trash.
+ type: bool
+ default: false
+ overwrite:
+ description:
+    - Define whether to overwrite a target volume if it already exists.
+ type: bool
+ default: false
+ size:
+ description:
+ - Volume size in M, G, T or P units.
+ type: str
+ count:
+ description:
+ - Number of volumes to be created in a multiple volume creation
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ start:
+ description:
+ - Number at which to start the multiple volume creation index
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: int
+ default: 0
+ digits:
+ description:
+ - Number of digits to use for multiple volume count. This
+ will pad the index number with zeros where necessary
+ - Only supported from Purity//FA v6.0.0 and higher
+ - Range is between 1 and 10
+ type: int
+ default: 1
+ suffix:
+ description:
+ - Suffix string, if required, for multiple volume create
+    - Volume names will be formed as I(<name>#I<suffix>), where
+      I(#) is a placeholder for the volume index
+      (see the I(count), I(start) and I(digits) descriptions)
+ - Only supported from Purity//FA v6.0.0 and higher
+ type: str
+ bw_qos:
+ description:
+ - Bandwidth limit for volume in M or G units.
+ M will set MB/s
+ G will set GB/s
+ To clear an existing QoS setting use 0 (zero)
+ type: str
+ aliases: [ qos ]
+ iops_qos:
+ description:
+ - IOPs limit for volume - use value or K or M
+ K will mean 1000
+ M will mean 1000000
+ To clear an existing IOPs setting use 0 (zero)
+ type: str
+ move:
+ description:
+ - Move a volume in and out of a pod or vgroup
+ - Provide the name of pod or vgroup to move the volume to
+ - Pod and Vgroup names must be unique in the array
+ - To move to the local array, specify C(local)
+ - This is not idempotent - use C(ignore_errors) in the play
+ type: str
+ rename:
+ description:
+ - Value to rename the specified volume to.
+    - Rename only applies to the container the current volume is in.
+ - There is no requirement to specify the pod or vgroup name as this is implied.
+ type: str
+ pgroup:
+ description:
+    - Name of existing, not deleted, protection group to add volume to
+    - Only applicable at volume creation
+    - Superseded from Purity//FA 6.3.4 by I(add_to_pgs)
+ type: str
+ version_added: 1.8.0
+ priority_operator:
+ description:
+ - DMM Priority Adjustment operator
+ type: str
+ choices: [ '=', '+', '-' ]
+ version_added: '1.13.0'
+ priority_value:
+ description:
+ - DMM Priority Adjustment value
+ type: int
+ choices: [ -10, 0, 10 ]
+ version_added: '1.13.0'
+ with_default_protection:
+ description:
+ - Whether to add the default container protection groups to
+      those specified in I(add_to_pgs) as the initial protection
+ of a new volume.
+ type: bool
+ default: true
+ version_added: '1.14.0'
+ add_to_pgs:
+ description:
+ - A new volume will be added to the specified protection groups
+ on creation
+ type: list
+ elements: str
+ version_added: '1.14.0'
+ promotion_state:
+ description:
+ - Promote or demote the volume so that the volume starts or
+ stops accepting write requests.
+ type: str
+ choices: [ promoted, demoted ]
+ version_added: '1.16.0'
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new volume named foo with a QoS limit
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ size: 1T
+ bw_qos: 58M
+ iops_qos: 23K
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create new volume named foo with a DMM priority (Purity//FA 6.1.2+)
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ size: 1T
+ priority_operator: +
+    priority_value: 10
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create new volume named foo in pod bar in protection group pg1
+ purestorage.flasharray.purefa_volume:
+ name: bar::foo
+    pgroup: pg1
+ size: 1T
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create 10 volumes with index starting at 10 but padded with 3 digits
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ size: 1T
+ suffix: bar
+ count: 10
+ start: 10
+ digits: 3
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Extend the size of an existing volume named foo
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ size: 2T
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete and eradicate volume named foo
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ eradicate: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Create clone of volume bar named foo
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ target: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Overwrite volume bar with volume foo
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ target: bar
+ overwrite: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Clear volume QoS from volume foo
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ bw_qos: 0
+ iops_qos: 0
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Move local volume foo from local array to pod bar
+ purestorage.flasharray.purefa_volume:
+ name: foo
+ move: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Move volume foo in pod bar to local array
+ purestorage.flasharray.purefa_volume:
+ name: bar::foo
+ move: local
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Move volume foo in pod bar to vgroup fin
+ purestorage.flasharray.purefa_volume:
+ name: bar::foo
+ move: fin
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+volume:
+ description: A dictionary describing the changed volume. Only some
+ attributes below will be returned with various actions.
+ type: dict
+ returned: success
+ contains:
+ source:
+ description: Volume name of source volume used for volume copy
+ type: str
+ serial:
+ description: Volume serial number
+ type: str
+ sample: '361019ECACE43D83000120A4'
+ nvme_nguid:
+ description: Volume NVMe namespace globally unique identifier
+ type: str
+ sample: 'eui.00cd6b99ef25864724a937c5000be684'
+ page83_naa:
+ description: Volume NAA canonical name
+ type: str
+ sample: 'naa.624a9370361019ecace43db3000120a4'
+ created:
+ description: Volume creation time
+ type: str
+ sample: '2019-03-13T22:49:24Z'
+ name:
+ description: Volume name
+ type: str
+ size:
+ description: Volume size in bytes
+ type: int
+ bandwidth_limit:
+ description: Volume bandwidth limit in bytes/sec
+ type: int
+ iops_limit:
+ description: Volume IOPs limit
+ type: int
+ priority_operator:
+ description: DMM Priority Adjustment operator
+ type: str
+ priority_value:
+ description: DMM Priority Adjustment value
+ type: int
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ get_system,
+ purefa_argument_spec,
+)
+
+
+QOS_API_VERSION = "1.14"
+VGROUPS_API_VERSION = "1.13"
+POD_API_VERSION = "1.13"
+AC_QOS_VERSION = "1.16"
+OFFLOAD_API_VERSION = "1.16"
+IOPS_API_VERSION = "1.17"
+MULTI_VOLUME_VERSION = "2.2"
+PROMOTE_API_VERSION = "1.19"
+PURE_OUI = "naa.624a9370"
+PRIORITY_API_VERSION = "2.11"
+DEFAULT_API_VERSION = "2.16"
+VOLUME_PROMOTION_API_VERSION = "2.2"
+
+
+def _create_nguid(serial):
+ nguid = "eui.00" + serial[0:14] + "24a937" + serial[-10:]
+ return nguid
+
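+# Worked example (illustrative only): for the serial shown in RETURN,
+# "361019ECACE43D83000120A4", the NGUID is assembled as
+# "eui.00" + "361019ECACE43D" + "24a937" + "83000120A4".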
+
+def get_pod(module, array):
+ """Get ActiveCluster Pod"""
+ pod_name = module.params["pgroup"].split("::")[0]
+ try:
+ return array.get_pod(pod=pod_name)
+ except Exception:
+ return None
+
+
+def get_pending_pgroup(module, array):
+ """Get Protection Group"""
+ pgroup = None
+ if ":" in module.params["pgroup"]:
+ if "::" not in module.params["pgroup"]:
+ for pgrp in array.list_pgroups(pending=True, on="*"):
+ if pgrp["name"] == module.params["pgroup"] and pgrp["time_remaining"]:
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups(pending=True):
+ if pgrp["name"] == module.params["pgroup"] and pgrp["time_remaining"]:
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups(pending=True):
+ if pgrp["name"] == module.params["pgroup"] and pgrp["time_remaining"]:
+ pgroup = pgrp
+ break
+
+ return pgroup
+
+
+def get_pgroup(module, array):
+ """Get Protection Group"""
+ pgroup = None
+ if ":" in module.params["pgroup"]:
+ if "::" not in module.params["pgroup"]:
+ for pgrp in array.list_pgroups(on="*"):
+ if pgrp["name"] == module.params["pgroup"]:
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups():
+ if pgrp["name"] == module.params["pgroup"]:
+ pgroup = pgrp
+ break
+ else:
+ for pgrp in array.list_pgroups():
+ if pgrp["name"] == module.params["pgroup"]:
+ pgroup = pgrp
+ break
+
+ return pgroup
+
+
+def human_to_bytes(size):
+ """Given a human-readable byte string (e.g. 2G, 30M),
+ return the number of bytes. Will return 0 if the argument has
+ unexpected form.
+ """
+ bytes = size[:-1]
+ unit = size[-1].upper()
+ if bytes.isdigit():
+ bytes = int(bytes)
+ if unit == "P":
+ bytes *= 1125899906842624
+ elif unit == "T":
+ bytes *= 1099511627776
+ elif unit == "G":
+ bytes *= 1073741824
+ elif unit == "M":
+ bytes *= 1048576
+ elif unit == "K":
+ bytes *= 1024
+ else:
+ bytes = 0
+ else:
+ bytes = 0
+ return bytes
+
+
+def human_to_real(iops):
+ """Given a human-readable IOPs string (e.g. 2K, 30M),
+ return the real number. Will return 0 if the argument has
+ unexpected form.
+ """
+ digit = iops[:-1]
+ unit = iops[-1].upper()
+    if unit.isdigit():
+        digit = int(iops)
+ elif digit.isdigit():
+ digit = int(digit)
+ if unit == "M":
+ digit *= 1000000
+ elif unit == "K":
+ digit *= 1000
+ else:
+ digit = 0
+ else:
+ digit = 0
+ return digit
+
+
+def get_multi_volumes(module, destroyed=False):
+ """Return True is all volumes exist or None"""
+ names = []
+ array = get_array(module)
+ for vol_num in range(
+ module.params["start"], module.params["count"] + module.params["start"]
+ ):
+ names.append(
+ module.params["name"]
+ + str(vol_num).zfill(module.params["digits"])
+ + module.params["suffix"]
+ )
+ return bool(array.get_volumes(names=names, destroyed=destroyed).status_code == 200)
+
+
+def get_volume(module, array):
+ """Return Volume or None"""
+ try:
+ return array.get_volume(module.params["name"])
+ except Exception:
+ return None
+
+
+def get_endpoint(name, array):
+ """Return Endpoint or None"""
+ try:
+ return array.get_volume(name, pending=True, protocol_endpoint=True)
+ except Exception:
+ return None
+
+
+def get_destroyed_volume(vol, array):
+ """Return Destroyed Volume or None"""
+ try:
+ return bool(array.get_volume(vol, pending=True)["time_remaining"] != "")
+ except Exception:
+ return False
+
+
+def get_destroyed_endpoint(vol, array):
+ """Return Destroyed Endpoint or None"""
+ try:
+ return bool(
+ array.get_volume(vol, protocol_endpoint=True, pending=True)[
+ "time_remaining"
+ ]
+ != ""
+ )
+ except Exception:
+ return False
+
+
+def get_target(module, array):
+ """Return Volume or None"""
+ try:
+ return array.get_volume(module.params["target"])
+ except Exception:
+ return None
+
+
+def check_vgroup(module, array):
+ """Check is the requested VG to create volume in exists"""
+ vg_exists = False
+ api_version = array._list_available_rest_versions()
+ if VGROUPS_API_VERSION in api_version:
+ vg_name = module.params["name"].split("/")[0]
+ try:
+ vgs = array.list_vgroups()
+ except Exception:
+ module.fail_json(msg="Failed to get volume groups list. Check array.")
+ for vgroup in range(0, len(vgs)):
+ if vg_name == vgs[vgroup]["name"]:
+ vg_exists = True
+ break
+ else:
+ module.fail_json(
+ msg="VG volumes are not supported. Please upgrade your FlashArray."
+ )
+ return vg_exists
+
+
+def check_pod(module, array):
+ """Check is the requested pod to create volume in exists"""
+ pod_exists = False
+ api_version = array._list_available_rest_versions()
+ if POD_API_VERSION in api_version:
+ pod_name = module.params["name"].split("::")[0]
+ try:
+ pods = array.list_pods()
+ except Exception:
+ module.fail_json(msg="Failed to get pod list. Check array.")
+ for pod in range(0, len(pods)):
+ if pod_name == pods[pod]["name"]:
+ pod_exists = True
+ break
+ else:
+ module.fail_json(
+ msg="Pod volumes are not supported. Please upgrade your FlashArray."
+ )
+ return pod_exists
+
+
+def create_volume(module, array):
+ """Create Volume"""
+ volfact = []
+ changed = False
+ api_version = array._list_available_rest_versions()
+ if "/" in module.params["name"] and not check_vgroup(module, array):
+ module.fail_json(
+ msg="Failed to create volume {0}. Volume Group does not exist.".format(
+ module.params["name"]
+ )
+ )
+ if "::" in module.params["name"]:
+ if not check_pod(module, array):
+ module.fail_json(
+ msg="Failed to create volume {0}. Pod does not exist".format(
+ module.params["name"]
+ )
+ )
+ pod_name = module.params["name"].split("::")[0]
+ if PROMOTE_API_VERSION in api_version:
+ if array.get_pod(pod_name)["promotion_status"] == "demoted":
+ module.fail_json(msg="Volume cannot be created in a demoted pod")
+ if module.params["bw_qos"] or module.params["iops_qos"]:
+ if AC_QOS_VERSION not in api_version:
+ module.warn(
+ "Pods cannot cannot contain volumes with QoS settings. Ignoring..."
+ )
+ module.params["bw_qos"] = module.params["iops_qos"] = None
+ if not module.params["size"]:
+ module.fail_json(msg="Size for a new volume must be specified")
+ if module.params["bw_qos"] or module.params["iops_qos"]:
+ if module.params["bw_qos"] and QOS_API_VERSION in api_version:
+ if module.params["iops_qos"] and IOPS_API_VERSION in api_version:
+ if module.params["bw_qos"] and not module.params["iops_qos"]:
+ if int(human_to_bytes(module.params["bw_qos"])) in range(
+ 1048576, 549755813888
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.create_volume(
+ module.params["name"],
+ module.params["size"],
+ bandwidth_limit=module.params["bw_qos"],
+ )
+ volfact["page83_naa"] = (
+ PURE_OUI + volfact["serial"].lower()
+ )
+ volfact["nvme_nguid"] = _create_nguid(
+ volfact["serial"].lower()
+ )
+ except Exception:
+ module.fail_json(
+ msg="Volume {0} creation failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Bandwidth QoS value {0} out of range.".format(
+ module.params["bw_qos"]
+ )
+ )
+ elif module.params["iops_qos"] and not module.params["bw_qos"]:
+ if (
+ 100000000
+ >= int(human_to_real(module.params["iops_qos"]))
+ >= 100
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.create_volume(
+ module.params["name"],
+ module.params["size"],
+ iops_limit=module.params["iops_qos"],
+ )
+ volfact["page83_naa"] = (
+ PURE_OUI + volfact["serial"].lower()
+ )
+ volfact["nvme_nguid"] = _create_nguid(
+ volfact["serial"].lower()
+ )
+ except Exception:
+ module.fail_json(
+ msg="Volume {0} creation failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="IOPs QoS value {0} out of range.".format(
+ module.params["iops_qos"]
+ )
+ )
+ else:
+ bw_qos_size = int(human_to_bytes(module.params["bw_qos"]))
+ if int(human_to_real(module.params["iops_qos"])) in range(
+ 100, 100000000
+ ) and bw_qos_size in range(1048576, 549755813888):
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.create_volume(
+ module.params["name"],
+ module.params["size"],
+ iops_limit=module.params["iops_qos"],
+ bandwidth_limit=module.params["bw_qos"],
+ )
+ volfact["page83_naa"] = (
+ PURE_OUI + volfact["serial"].lower()
+ )
+ volfact["nvme_nguid"] = _create_nguid(
+ volfact["serial"].lower()
+ )
+ except Exception:
+ module.fail_json(
+ msg="Volume {0} creation failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="IOPs or Bandwidth QoS value out of range."
+ )
+ else:
+ if module.params["bw_qos"]:
+ if int(human_to_bytes(module.params["bw_qos"])) in range(
+ 1048576, 549755813888
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.create_volume(
+ module.params["name"],
+ module.params["size"],
+ bandwidth_limit=module.params["bw_qos"],
+ )
+ volfact["page83_naa"] = (
+ PURE_OUI + volfact["serial"].lower()
+ )
+ volfact["nvme_nguid"] = _create_nguid(
+ volfact["serial"].lower()
+ )
+ except Exception:
+ module.fail_json(
+ msg="Volume {0} creation failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Bandwidth QoS value {0} out of range.".format(
+ module.params["bw_qos"]
+ )
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.create_volume(
+ module.params["name"], module.params["size"]
+ )
+ volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
+ volfact["nvme_nguid"] = _create_nguid(
+ volfact["serial"].lower()
+ )
+ except Exception:
+ module.fail_json(
+ msg="Volume {0} creation failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.create_volume(
+ module.params["name"], module.params["size"]
+ )
+ volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
+ volfact["nvme_nguid"] = _create_nguid(volfact["serial"].lower())
+ except Exception:
+ module.fail_json(
+ msg="Volume {0} creation failed.".format(module.params["name"])
+ )
+ if VOLUME_PROMOTION_API_VERSION in api_version and module.params["promotion_state"]:
+ arrayv6 = get_array(module)
+ volume = flasharray.VolumePatch(
+ requested_promotion_state=module.params["promotion_state"]
+ )
+ changed = True
+ if not module.check_mode:
+ res = arrayv6.patch_volumes(names=[module.params["name"]], volume=volume)
+ if res.status_code != 200:
+ arrayv6.patch_volumes(
+ names=[module.params["name"]],
+ volume=flasharray.VolumePatch(destroyed=True),
+ )
+ arrayv6.delete_volumes(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to set Promotion State for volume {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ volfact["promotion_state"] = module.params["promotion_state"]
+ if PRIORITY_API_VERSION in api_version and module.params["priority_operator"]:
+ arrayv6 = get_array(module)
+ volume = flasharray.VolumePatch(
+ priority_adjustment=flasharray.PriorityAdjustment(
+ priority_adjustment_operator=module.params["priority_operator"],
+ priority_adjustment_value=module.params["priority_value"],
+ )
+ )
+        changed = True
+        if not module.check_mode:
+            res = arrayv6.patch_volumes(names=[module.params["name"]], volume=volume)
+            if res.status_code != 200:
+                arrayv6.patch_volumes(
+                    names=[module.params["name"]],
+                    volume=flasharray.VolumePatch(destroyed=True),
+                )
+                arrayv6.delete_volumes(names=[module.params["name"]])
+                module.fail_json(
+                    msg="Failed to set DMM Priority Adjustment on volume {0}. Error: {1}".format(
+                        module.params["name"], res.errors[0].message
+                    )
+                )
+            else:
+                volfact["priority_operator"] = module.params["priority_operator"]
+                volfact["priority_value"] = module.params["priority_value"]
+ if module.params["pgroup"] and DEFAULT_API_VERSION not in api_version:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.set_pgroup(
+ module.params["pgroup"], addvollist=[module.params["name"]]
+ )
+ except Exception:
+                module.warn(
+ "Failed to add {0} to protection group {1}.".format(
+ module.params["name"], module.params["pgroup"]
+ )
+ )
+
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def create_multi_volume(module, array, single=False):
+ """Create Volume"""
+ volfact = {}
+ changed = True
+ api_version = array._list_available_rest_versions()
+ bw_qos_size = iops_qos_size = 0
+ names = []
+ if "/" in module.params["name"] and not check_vgroup(module, array):
+ module.fail_json(
+ msg="Multi-volume create failed. Volume Group {0} does not exist.".format(
+ module.params["name"].split("/")[0]
+ )
+ )
+ if "::" in module.params["name"]:
+ if not check_pod(module, array):
+ module.fail_json(
+ msg="Multi-volume create failed. Pod {0} does not exist".format(
+ module.params["name"].split(":")[0]
+ )
+ )
+ pod_name = module.params["name"].split("::")[0]
+ if PROMOTE_API_VERSION in api_version:
+ if array.get_pod(pod_name)["promotion_status"] == "demoted":
+ module.fail_json(msg="Volume cannot be created in a demoted pod")
+ array = get_array(module)
+ if not single:
+ for vol_num in range(
+ module.params["start"], module.params["count"] + module.params["start"]
+ ):
+ names.append(
+ module.params["name"]
+ + str(vol_num).zfill(module.params["digits"])
+ + module.params["suffix"]
+ )
+ else:
+ names.append(module.params["name"])
+ if module.params["bw_qos"]:
+ bw_qos = int(human_to_bytes(module.params["bw_qos"]))
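+        # QoS bounds as encoded in the range checks below: bandwidth from
+        # 1 MB/s (1048576) up to 512 GB/s (549755813888) and IOPs from 100 up
+        # to 100 million.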
+ if bw_qos in range(1048576, 549755813888):
+ bw_qos_size = bw_qos
+ else:
+ module.fail_json(msg="Bandwidth QoS value out of range.")
+ if module.params["iops_qos"]:
+ iops_qos = int(human_to_real(module.params["iops_qos"]))
+ if iops_qos in range(100, 100000000):
+ iops_qos_size = iops_qos
+ else:
+ module.fail_json(msg="IOPs QoS value out of range.")
+ if bw_qos_size != 0 and iops_qos_size != 0:
+ vols = flasharray.VolumePost(
+ provisioned=human_to_bytes(module.params["size"]),
+ qos=flasharray.Qos(bandwidth_limit=bw_qos_size, iops_limit=iops_qos_size),
+ subtype="regular",
+ )
+ elif bw_qos_size == 0 and iops_qos_size == 0:
+ vols = flasharray.VolumePost(
+ provisioned=human_to_bytes(module.params["size"]), subtype="regular"
+ )
+ elif bw_qos_size == 0 and iops_qos_size != 0:
+ vols = flasharray.VolumePost(
+ provisioned=human_to_bytes(module.params["size"]),
+ qos=flasharray.Qos(iops_limit=iops_qos_size),
+ subtype="regular",
+ )
+ elif bw_qos_size != 0 and iops_qos_size == 0:
+ vols = flasharray.VolumePost(
+ provisioned=human_to_bytes(module.params["size"]),
+ qos=flasharray.Qos(bandwidth_limit=bw_qos_size),
+ subtype="regular",
+ )
+ if not module.check_mode:
+ if DEFAULT_API_VERSION in api_version:
+ if module.params["add_to_pgs"]:
+ add_to_pgs = []
+ for add_pg in range(0, len(module.params["add_to_pgs"])):
+ add_to_pgs.append(
+ flasharray.FixedReference(
+ name=module.params["add_to_pgs"][add_pg]
+ )
+ )
+ res = array.post_volumes(
+ names=names,
+ volume=vols,
+ with_default_protection=module.params["with_default_protection"],
+ add_to_protection_groups=add_to_pgs,
+ )
+ else:
+ res = array.post_volumes(
+ names=names,
+ volume=vols,
+ with_default_protection=module.params["with_default_protection"],
+ )
+ else:
+ res = array.post_volumes(names=names, volume=vols)
+        if res.status_code != 200:
+            module.fail_json(
+                msg="Multi-Volume {0}#{1} creation failed: {2}".format(
+                    module.params["name"],
+                    module.params["suffix"],
+                    res.errors[0].message,
+                )
+            )
+        else:
+            # Only read back patch responses that were actually issued, so an
+            # unset promotion_state or priority_operator cannot raise NameError.
+            prom_temp = []
+            prio_temp = []
+            if (
+                VOLUME_PROMOTION_API_VERSION in api_version
+                and module.params["promotion_state"]
+            ):
+                volume = flasharray.VolumePatch(
+                    requested_promotion_state=module.params["promotion_state"]
+                )
+                prom_res = array.patch_volumes(names=names, volume=volume)
+                if prom_res.status_code != 200:
+                    array.patch_volumes(
+                        names=names,
+                        volume=flasharray.VolumePatch(destroyed=True),
+                    )
+                    array.delete_volumes(names=names)
+                    module.warn(
+                        "Failed to set promotion status on volumes. Error: {0}".format(
+                            prom_res.errors[0].message
+                        )
+                    )
+                else:
+                    prom_temp = list(prom_res.items)
+            if (
+                PRIORITY_API_VERSION in api_version
+                and module.params["priority_operator"]
+            ):
+                volume = flasharray.VolumePatch(
+                    priority_adjustment=flasharray.PriorityAdjustment(
+                        priority_adjustment_operator=module.params["priority_operator"],
+                        priority_adjustment_value=module.params["priority_value"],
+                    )
+                )
+                prio_res = array.patch_volumes(names=names, volume=volume)
+                if prio_res.status_code != 200:
+                    array.patch_volumes(
+                        names=names,
+                        volume=flasharray.VolumePatch(destroyed=True),
+                    )
+                    array.delete_volumes(names=names)
+                    module.fail_json(
+                        msg="Failed to set DMM Priority Adjustment on volumes. Error: {0}".format(
+                            prio_res.errors[0].message
+                        )
+                    )
+                prio_temp = list(prio_res.items)
+            temp = list(res.items)
+            for count in range(0, len(temp)):
+                vol_name = temp[count].name
+                volfact[vol_name] = {
+                    "size": temp[count].provisioned,
+                    "serial": temp[count].serial,
+                    "created": time.strftime(
+                        "%Y-%m-%d %H:%M:%S", time.localtime(temp[count].created / 1000)
+                    ),
+                    "page83_naa": PURE_OUI + temp[count].serial.lower(),
+                    "nvme_nguid": _create_nguid(temp[count].serial.lower()),
+                }
+                if bw_qos_size != 0:
+                    volfact[vol_name]["bandwidth_limit"] = temp[
+                        count
+                    ].qos.bandwidth_limit
+                if iops_qos_size != 0:
+                    volfact[vol_name]["iops_limit"] = temp[count].qos.iops_limit
+                if prom_temp:
+                    volfact[vol_name]["promotion_status"] = prom_temp[
+                        count
+                    ].promotion_status
+                if prio_temp:
+                    volfact[vol_name]["priority_operator"] = prio_temp[
+                        count
+                    ].priority_adjustment.priority_adjustment_operator
+                    volfact[vol_name]["priority_value"] = prio_temp[
+                        count
+                    ].priority_adjustment.priority_adjustment_value
+
+ if module.params["pgroup"] and DEFAULT_API_VERSION not in api_version:
+ if not module.check_mode:
+ res = array.post_protection_groups_volumes(
+ group_names=[module.params["pgroup"]], member_names=names
+ )
+ if res.status_code != 200:
+ module.warn(
+ "Failed to add {0} to protection group {1}.".format(
+ module.params["name"], module.params["pgroup"]
+ )
+ )
+
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def copy_from_volume(module, array):
+ """Create Volume Clone"""
+ volfact = []
+ changed = False
+ tgt = get_target(module, array)
+ api_version = array._list_available_rest_versions()
+ arrayv6 = get_array(module)
+ if tgt is None:
+ changed = True
+ if not module.check_mode:
+ if DEFAULT_API_VERSION in api_version:
+ if module.params["add_to_pgs"]:
+ add_to_pgs = []
+ for add_pg in range(0, len(module.params["add_to_pgs"])):
+ add_to_pgs.append(
+ flasharray.FixedReference(
+ name=module.params["add_to_pgs"][add_pg]
+ )
+ )
+ res = arrayv6.post_volumes(
+ with_default_protection=module.params[
+ "with_default_protection"
+ ],
+ add_to_protection_groups=add_to_pgs,
+ names=[module.params["target"]],
+ volume=flasharray.VolumePost(
+ source=flasharray.Reference(name=module.params["name"])
+ ),
+ )
+ else:
+ res = arrayv6.post_volumes(
+ with_default_protection=module.params[
+ "with_default_protection"
+ ],
+ names=[module.params["target"]],
+ volume=flasharray.VolumePost(
+ source=flasharray.Reference(name=module.params["name"])
+ ),
+ )
+
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to copy volume {0} to {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["target"],
+ res.errors[0].message,
+ )
+ )
+ vol_data = list(res.items)
+ volfact = {
+ "size": vol_data[0].provisioned,
+ "serial": vol_data[0].serial,
+ "created": time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(vol_data[0].created / 1000)
+ ),
+ "page83_naa": PURE_OUI + vol_data[0].serial.lower(),
+ "nvme_nguid": _create_nguid(vol_data[0].serial.lower()),
+ }
+ else:
+ try:
+ volfact = array.copy_volume(
+ module.params["name"], module.params["target"]
+ )
+ volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
+ volfact["nvme_nguid"] = _create_nguid(volfact["serial"].lower())
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Copy volume {0} to volume {1} failed.".format(
+ module.params["name"], module.params["target"]
+ )
+ )
+ elif tgt is not None and module.params["overwrite"]:
+ changed = True
+ if not module.check_mode:
+ if DEFAULT_API_VERSION not in api_version:
+ try:
+ volfact = array.copy_volume(
+ module.params["name"],
+ module.params["target"],
+ overwrite=module.params["overwrite"],
+ )
+ volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
+ volfact["nvme_nguid"] = _create_nguid(volfact["serial"].lower())
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Copy volume {0} to volume {1} failed.".format(
+ module.params["name"], module.params["target"]
+ )
+ )
+ else:
+ res = arrayv6.post_volumes(
+ overwrite=module.params["overwrite"],
+ names=[module.params["target"]],
+ volume=flasharray.VolumePost(
+ source=flasharray.Reference(name=module.params["name"])
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to copy volume {0} to {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["target"],
+ res.errors[0].message,
+ )
+ )
+ vol_data = list(res.items)
+ volfact = {
+ "size": vol_data[0].provisioned,
+ "serial": vol_data[0].serial,
+ "created": time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(vol_data[0].created / 1000)
+ ),
+ "page83_naa": PURE_OUI + vol_data[0].serial.lower(),
+ "nvme_nguid": _create_nguid(vol_data[0].serial.lower()),
+ }
+
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def update_volume(module, array):
+ """Update Volume size and/or QoS"""
+ volfact = {}
+ changed = False
+ arrayv6 = None
+ api_version = array._list_available_rest_versions()
+ if MULTI_VOLUME_VERSION in api_version:
+ arrayv6 = get_array(module)
+ vol = array.get_volume(module.params["name"])
+ vol_qos = array.get_volume(module.params["name"], qos=True)
+ if QOS_API_VERSION in api_version:
+ if vol_qos["bandwidth_limit"] is None:
+ vol_qos["bandwidth_limit"] = 0
+ if IOPS_API_VERSION in api_version:
+ if vol_qos["iops_limit"] is None:
+ vol_qos["iops_limit"] = 0
+ if "::" in module.params["name"]:
+ if module.params["bw_qos"] or module.params["iops_qos"]:
+ if AC_QOS_VERSION not in api_version:
+ module.warn(
+ "Pods cannot cannot contain volumes with QoS settings. Ignoring..."
+ )
+ module.params["bw_qos"] = module.params["iops_qos"] = None
+ if module.params["size"]:
+ if human_to_bytes(module.params["size"]) != vol["size"]:
+ if human_to_bytes(module.params["size"]) > vol["size"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.extend_volume(
+ module.params["name"], module.params["size"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Volume {0} resize failed.".format(
+ module.params["name"]
+ )
+ )
+ if module.params["bw_qos"] and QOS_API_VERSION in api_version:
+ if int(human_to_bytes(module.params["bw_qos"])) != int(
+ vol_qos["bandwidth_limit"]
+ ):
+ if module.params["bw_qos"] == "0":
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.set_volume(
+ module.params["name"], bandwidth_limit=""
+ )
+ except Exception:
+ module.fail_json(
+ msg="Volume {0} Bandwidth QoS removal failed.".format(
+ module.params["name"]
+ )
+ )
+ elif int(human_to_bytes(module.params["bw_qos"])) in range(
+ 1048576, 549755813888
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.set_volume(
+ module.params["name"],
+ bandwidth_limit=module.params["bw_qos"],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Volume {0} Bandwidth QoS change failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Bandwidth QoS value {0} out of range.".format(
+ module.params["bw_qos"]
+ )
+ )
+ if module.params["iops_qos"] and IOPS_API_VERSION in api_version:
+ if int(human_to_real(module.params["iops_qos"])) != int(vol_qos["iops_limit"]):
+ if module.params["iops_qos"] == "0":
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.set_volume(module.params["name"], iops_limit="")
+ except Exception:
+ module.fail_json(
+ msg="Volume {0} IOPs QoS removal failed.".format(
+ module.params["name"]
+ )
+ )
+ elif int(human_to_real(module.params["iops_qos"])) in range(100, 100000000):
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.set_volume(
+ module.params["name"], iops_limit=module.params["iops_qos"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Volume {0} IOPs QoS change failed.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Bandwidth QoS value {0} out of range.".format(
+ module.params["bw_qos"]
+ )
+ )
+ if VOLUME_PROMOTION_API_VERSION in api_version and module.params["promotion_state"]:
+ vol6 = list(arrayv6.get_volumes(names=[module.params["name"]]).items)[0]
+ if module.params["promotion_state"] != vol6.promotion_status:
+ volume_patch = flasharray.VolumePatch(
+ requested_promotion_state=module.params["promotion_state"]
+ )
+ changed = True
+ if not module.check_mode:
+ prom_res = arrayv6.patch_volumes(
+ names=[module.params["name"]], volume=volume_patch
+ )
+ if prom_res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change promotion status for volume {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ if not volfact:
+ volfact = array.get_volume(module.params["name"])
+ volfact["promotion_status"] = module.params["promotion_state"]
+ if PRIORITY_API_VERSION in api_version and module.params["priority_operator"]:
+ volv6 = list(arrayv6.get_volumes(names=[module.params["name"]]).items)[0]
+ change_prio = False
+ if (
+ module.params["priority_operator"]
+ != volv6.priority_adjustment.priority_adjustment_operator
+ ):
+ change_prio = True
+ newop = module.params["priority_operator"]
+ else:
+ newop = volv6.priority_adjustment.priority_adjustment_operator
+ if (
+ module.params["priority_value"]
+ and module.params["priority_value"]
+ != volv6.priority_adjustment.priority_adjustment_value
+ ):
+ change_prio = True
+ newval = module.params["priority_value"]
+ elif (
+ not module.params["priority_value"]
+ and volv6.priority_adjustment.priority_adjustment_value != 0
+ ):
+ change_prio = True
+ newval = 0
+ else:
+ newval = volv6.priority_adjustment.priority_adjustment_value
+ volumepatch = flasharray.VolumePatch(
+ priority_adjustment=flasharray.PriorityAdjustment(
+ priority_adjustment_operator=newop,
+ priority_adjustment_value=newval,
+ )
+ )
+        if change_prio:
+            changed = True
+            if not module.check_mode:
+                prio_res = arrayv6.patch_volumes(
+                    names=[module.params["name"]], volume=volumepatch
+                )
+                if prio_res.status_code != 200:
+                    module.fail_json(
+                        msg="Failed to change DMM Priority Adjustment for {0}. Error: {1}".format(
+                            module.params["name"], prio_res.errors[0].message
+                        )
+                    )
+                else:
+                    if not volfact:
+                        volfact = array.get_volume(module.params["name"])
+                    volfact["priority_operator"] = module.params["priority_operator"]
+                    volfact["priority_value"] = module.params["priority_value"]
+ if MULTI_VOLUME_VERSION in api_version:
+ volume_data = list(arrayv6.get_volumes(names=[module.params["name"]]).items)[0]
+ updatefacts = {
+ "name": volume_data.name,
+ "size": volume_data.provisioned,
+ "serial": volume_data.serial,
+ "created": time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(volume_data.created / 1000)
+ ),
+ "page83_naa": PURE_OUI + volume_data.serial.lower(),
+ "nvme_nguid": _create_nguid(volume_data.serial.lower()),
+ }
+ else:
+ updatefacts = array.get_volume(module.params["name"])
+ vol_fact = {**volfact, **updatefacts}
+ module.exit_json(changed=changed, volume=vol_fact)
+
+
+def rename_volume(module, array):
+ """Rename volume within a container, ie pod, vgroup or local array"""
+ volfact = []
+ changed = False
+ pod_name = ""
+ vgroup_name = ""
+ target_name = module.params["rename"]
+ target_exists = False
+ if "::" in module.params["name"]:
+ pod_name = module.params["name"].split("::")[0]
+ target_name = pod_name + "::" + module.params["rename"]
+ try:
+ array.get_volume(target_name, pending=True)
+ target_exists = True
+ except Exception:
+ target_exists = False
+ elif "/" in module.params["name"]:
+ vgroup_name = module.params["name"].split("/")[0]
+ target_name = vgroup_name + "/" + module.params["rename"]
+ try:
+ array.get_volume(target_name, pending=True)
+ target_exists = True
+ except Exception:
+ target_exists = False
+ else:
+ try:
+ array.get_volume(target_name, pending=True)
+ target_exists = True
+ except Exception:
+ target_exists = False
+ if target_exists and get_endpoint(target_name, array):
+ module.fail_json(
+ msg="Target volume {0} is a protocol-endpoinnt".format(target_name)
+ )
+ if not target_exists:
+ if get_destroyed_endpoint(target_name, array):
+ module.fail_json(
+ msg="Target volume {0} is a destroyed protocol-endpoinnt".format(
+ target_name
+ )
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.rename_volume(
+ module.params["name"], module.params["rename"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Rename volume {0} to {1} failed.".format(
+ module.params["name"], module.params["rename"]
+ )
+ )
+ else:
+ module.fail_json(msg="Target volume {0} already exists.".format(target_name))
+
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def move_volume(module, array):
+ """Move volume between pods, vgroups or local array"""
+ volfact = []
+ changed = vgroup_exists = target_exists = pod_exists = False
+ api_version = array._list_available_rest_versions()
+ pod_name = ""
+ vgroup_name = ""
+ volume_name = module.params["name"]
+ if "::" in module.params["name"]:
+ volume_name = module.params["name"].split("::")[1]
+ pod_name = module.params["name"].split("::")[0]
+ if "/" in module.params["name"]:
+ volume_name = module.params["name"].split("/")[1]
+ vgroup_name = module.params["name"].split("/")[0]
+ if module.params["move"] == "local":
+ target_location = ""
+ if "::" not in module.params["name"]:
+ if "/" not in module.params["name"]:
+ module.fail_json(
+ msg="Source and destination [local] cannot be the same."
+ )
+ try:
+ target_exists = array.get_volume(volume_name, pending=True)
+ except Exception:
+ target_exists = False
+ if target_exists:
+ module.fail_json(msg="Target volume {0} already exists".format(volume_name))
+ else:
+ try:
+ pod_exists = array.get_pod(module.params["move"])
+ if len(pod_exists["arrays"]) > 1:
+ module.fail_json(msg="Volume cannot be moved into a stretched pod")
+ if pod_exists["link_target_count"] != 0:
+ module.fail_json(msg="Volume cannot be moved into a linked source pod")
+ if PROMOTE_API_VERSION in api_version:
+ if pod_exists["promotion_status"] == "demoted":
+ module.fail_json(msg="Volume cannot be moved into a demoted pod")
+ pod_exists = bool(pod_exists)
+ except Exception:
+ pod_exists = False
+ if pod_exists:
+ try:
+ target_exists = bool(
+ array.get_volume(
+ module.params["move"] + "::" + volume_name, pending=True
+ )
+ )
+ except Exception:
+ target_exists = False
+ try:
+ vgroup_exists = bool(array.get_vgroup(module.params["move"]))
+ except Exception:
+ vgroup_exists = False
+ if vgroup_exists:
+ try:
+ target_exists = bool(
+ array.get_volume(
+ module.params["move"] + "/" + volume_name, pending=True
+ )
+ )
+ except Exception:
+ target_exists = False
+ if target_exists:
+ module.fail_json(msg="Volume of same name already exists in move location")
+ if pod_exists and vgroup_exists:
+ module.fail_json(
+ msg="Move location {0} matches both a pod and a vgroup. Please rename one of these.".format(
+ module.params["move"]
+ )
+ )
+ if not pod_exists and not vgroup_exists:
+ module.fail_json(
+ msg="Move location {0} does not exist.".format(module.params["move"])
+ )
+ if "::" in module.params["name"]:
+ pod = array.get_pod(module.params["move"])
+ if len(pod["arrays"]) > 1:
+ module.fail_json(msg="Volume cannot be moved out of a stretched pod")
+ if pod["linked_target_count"] != 0:
+ module.fail_json(
+ msg="Volume cannot be moved out of a linked source pod"
+ )
+ if PROMOTE_API_VERSION in api_version:
+ if pod["promotion_status"] == "demoted":
+ module.fail_json(msg="Volume cannot be moved out of a demoted pod")
+ if "/" in module.params["name"]:
+ if (
+ vgroup_name == module.params["move"]
+ or pod_name == module.params["move"]
+ ):
+ module.fail_json(msg="Source and destination cannot be the same")
+ target_location = module.params["move"]
+ if get_endpoint(target_location, array):
+ module.fail_json(
+ msg="Target volume {0} is a protocol-endpoinnt".format(target_location)
+ )
+ changed = True
+ if not module.check_mode:
+ try:
+ volfact = array.move_volume(module.params["name"], target_location)
+ except Exception:
+ if target_location == "":
+ target_location = "[local]"
+ module.fail_json(
+ msg="Move of volume {0} to {1} failed.".format(
+ module.params["name"], target_location
+ )
+ )
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def delete_volume(module, array):
+ """Delete Volume"""
+ changed = True
+ volfact = []
+ if not module.check_mode:
+ try:
+ array.destroy_volume(module.params["name"])
+ if module.params["eradicate"]:
+ try:
+ volfact = array.eradicate_volume(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Eradicate volume {0} failed.".format(module.params["name"])
+ )
+ except Exception:
+ module.fail_json(
+ msg="Delete volume {0} failed.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def eradicate_volume(module, array):
+ """Eradicate Deleted Volume"""
+ changed = True
+ volfact = []
+ if not module.check_mode:
+ if module.params["eradicate"]:
+ try:
+ array.eradicate_volume(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Eradication of volume {0} failed".format(module.params["name"])
+ )
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def recover_volume(module, array):
+ """Recover Deleted Volume"""
+ changed = True
+ volfact = []
+ if not module.check_mode:
+ try:
+ array.recover_volume(module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Recovery of volume {0} failed".format(module.params["name"])
+ )
+ volfact = array.get_volume(module.params["name"])
+ volfact["page83_naa"] = PURE_OUI + volfact["serial"].lower()
+ volfact["nvme_nguid"] = _create_nguid(volfact["serial"].lower())
+ module.exit_json(changed=changed, volume=volfact)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ target=dict(type="str"),
+ move=dict(type="str"),
+ rename=dict(type="str"),
+ overwrite=dict(type="bool", default=False),
+ eradicate=dict(type="bool", default=False),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ bw_qos=dict(type="str", aliases=["qos"]),
+ iops_qos=dict(type="str"),
+ pgroup=dict(type="str"),
+ count=dict(type="int"),
+ start=dict(type="int", default=0),
+ digits=dict(type="int", default=1),
+ suffix=dict(type="str"),
+ priority_operator=dict(type="str", choices=["+", "-", "="]),
+ priority_value=dict(type="int", choices=[-10, 0, 10]),
+ size=dict(type="str"),
+ with_default_protection=dict(type="bool", default=True),
+ add_to_pgs=dict(type="list", elements="str"),
+ promotion_state=dict(type="str", choices=["promoted", "demoted"]),
+ )
+ )
+
+    mutually_exclusive = [
+        ["size", "target"],
+        ["move", "rename", "target", "eradicate"],
+    ]
+ required_together = [["priority_operator", "priority_value"]]
+
+ module = AnsibleModule(
+ argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_together=required_together,
+ supports_check_mode=True,
+ )
+
+ size = module.params["size"]
+ bw_qos = module.params["bw_qos"]
+ iops_qos = module.params["iops_qos"]
+ state = module.params["state"]
+ destroyed = False
+ array = get_system(module)
+ volume = get_volume(module, array)
+ api_version = array._list_available_rest_versions()
+ endpoint = get_endpoint(module.params["name"], array)
+
+ if endpoint:
+ module.fail_json(
+ msg="Volume {0} is an endpoint. Use purefa_endpoint module.".format(
+ module.params["name"]
+ )
+ )
+
+ if module.params["pgroup"]:
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$")
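+        # The accepted convention (per the regex above): 1 to 63 characters,
+        # starting and ending with a letter or digit, with dashes allowed in
+        # between; e.g. "pg-1" is valid, "-pg1" and "pg1-" are not.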
+ if ":" in module.params["pgroup"] and OFFLOAD_API_VERSION not in api_version:
+ module.fail_json(
+ msg="API version does not support offload protection groups."
+ )
+ if "::" in module.params["pgroup"] and POD_API_VERSION not in api_version:
+ module.fail_json(
+ msg="API version does not support ActiveCluster protection groups."
+ )
+ if ":" in module.params["pgroup"]:
+ if "::" in module.params["pgroup"]:
+ pgname = module.params["pgroup"].split("::")[1]
+ else:
+ pgname = module.params["pgroup"].split(":")[1]
+ if not pattern.match(pgname):
+ module.fail_json(
+ msg="Protection Group name {0} does not conform to naming convention".format(
+ pgname
+ )
+ )
+ else:
+ if not pattern.match(module.params["pgroup"]):
+ module.fail_json(
+ msg="Protection Group name {0} does not conform to naming convention".format(
+                        module.params["pgroup"]
+ )
+ )
+ pgroup = get_pgroup(module, array)
+ xpgroup = get_pending_pgroup(module, array)
+ if "::" in module.params["pgroup"]:
+ if not get_pod(module, array):
+ module.fail_json(
+ msg="Pod {0} does not exist.".format(
+ module.params["pgroup"].split("::")[0]
+ )
+ )
+ if not pgroup:
+ if xpgroup:
+ module.fail_json(
+ msg="Protection Group {0} is currently deleted. Please restore to use.".format(
+ module.params["pgroup"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Protection Group {0} does not exist.".format(
+ module.params["pgroup"]
+ )
+ )
+
+ if not volume:
+ destroyed = get_destroyed_volume(module.params["name"], array)
+ target = get_target(module, array)
+ if module.params["count"]:
+ if not HAS_PURESTORAGE:
+ module.fail_json(
+ msg="py-pure-client sdk is required to support 'count' parameter"
+ )
+ if MULTI_VOLUME_VERSION not in api_version:
+ module.fail_json(
+ msg="'count' parameter is not supported until Purity//FA 6.0.0 or higher"
+ )
+ if module.params["digits"] and module.params["digits"] not in range(1, 10):
+ module.fail_json(msg="'digits' must be in the range of 1 to 10")
+ if module.params["start"] < 0:
+ module.fail_json(msg="'start' must be a positive number")
+ volume = get_multi_volumes(module)
+ if state == "present" and not volume and size:
+ create_multi_volume(module, array)
+ elif state == "present" and not volume and not size:
+ module.fail_json(msg="Size must be specified to create a new volume")
+ elif state == "absent" and not volume:
+ module.exit_json(changed=False)
+ else:
+ module.warn("Method not yet supported for multi-volume")
+ else:
+ if state == "present" and not volume and not destroyed and size:
+ if DEFAULT_API_VERSION in api_version:
+ create_multi_volume(module, array, True)
+ else:
+ create_volume(module, array)
+        elif (
+            state == "present"
+            and volume
+            and (
+                size
+                or bw_qos
+                or iops_qos
+                or module.params["promotion_state"]
+                or module.params["priority_operator"]
+            )
+        ):
+ update_volume(module, array)
+ elif state == "present" and not volume and module.params["move"]:
+ module.fail_json(
+ msg="Volume {0} cannot be moved - does not exist (maybe deleted)".format(
+ module.params["name"]
+ )
+ )
+ elif state == "present" and volume and module.params["move"]:
+ move_volume(module, array)
+ elif state == "present" and volume and module.params["rename"]:
+ rename_volume(module, array)
+ elif (
+ state == "present"
+ and destroyed
+ and not module.params["move"]
+ and not module.params["rename"]
+ ):
+ recover_volume(module, array)
+ elif state == "present" and destroyed and module.params["move"]:
+ module.fail_json(
+ msg="Volume {0} exists, but in destroyed state".format(
+ module.params["name"]
+ )
+ )
+ elif state == "present" and volume and target:
+ copy_from_volume(module, array)
+ elif state == "present" and volume and not target:
+ copy_from_volume(module, array)
+ elif state == "absent" and volume:
+ delete_volume(module, array)
+ elif state == "absent" and destroyed:
+ eradicate_volume(module, array)
+ elif state == "present":
+ if not volume and not size:
+ module.fail_json(msg="Size must be specified to create a new volume")
+ elif state == "absent" and not volume:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py
new file mode 100644
index 000000000..e9c7fdb7c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume_tags.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_volume_tags
+version_added: '1.0.0'
+short_description: Manage volume tags on Pure Storage FlashArrays
+description:
+- Manage volume tags for volumes on Pure Storage FlashArray.
+- Requires a minimum of Purity 6.0.0
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the volume.
+ type: str
+ required: true
+ namespace:
+ description:
+    - The name of the tag namespace
+ default: default
+ type: str
+ copyable:
+ description:
+ - Define whether the volume tags are inherited on volume copies.
+ default: true
+ type: bool
+ kvp:
+ description:
+ - List of key value pairs to assign to the volume.
+    - Separate the key from the value using a colon (:) only.
+ - All items in list will use I(namespace) and I(copyable) settings.
+    - Maximum of 5 tags per volume.
+    - See examples for exact formatting requirements.
+ type: list
+ elements: str
+ required: true
+ state:
+ description:
+ - Define whether the volume tag(s) should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Create new tags in namespace test for volume foo
+ purestorage.flasharray.purefa_volume_tags:
+ name: foo
+ namespace: test
+ copyable: false
+ kvp:
+ - 'key1:value1'
+ - 'key2:value2'
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Remove an existing tag in namespace test for volume foo
+ purestorage.flasharray.purefa_volume_tags:
+ name: foo
+ namespace: test
+ kvp:
+ - 'key1:value1'
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Update an existing tag in namespace test for volume foo
+ purestorage.flasharray.purefa_volume_tags:
+ name: foo
+ namespace: test
+ kvp:
+ - 'key1:value2'
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_system,
+ purefa_argument_spec,
+)
+
+
+TAGS_API_VERSION = "1.19"
+
+
+def get_volume(module, array):
+ """Return Volume or None"""
+ try:
+ return array.get_volume(module.params["name"], pending=True)
+ except Exception:
+ return None
+
+
+def get_endpoint(name, array):
+ """Return Endpoint or None"""
+ try:
+ return array.get_volume(name, pending=True, protocol_endpoint=True)
+ except Exception:
+ return None
+
+
+def create_tag(module, array):
+ """Create Volume Tag"""
+ changed = True
+ if not module.check_mode:
+ for tag in range(0, len(module.params["kvp"])):
+ try:
+ array.add_tag_to_volume(
+ module.params["name"],
+ copyable=module.params["copyable"],
+ namespace=module.params["namespace"],
+ key=module.params["kvp"][tag].split(":")[0],
+ value=module.params["kvp"][tag].split(":")[1],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to add tag KVP {0} to volume {1}".format(
+ module.params["kvp"][tag], module.params["name"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def update_tag(module, array, current_tags):
+ """Update Volume Tag"""
+ changed = False
+ for tag in range(0, len(module.params["kvp"])):
+ tag_exists = False
+ for current_tag in range(0, len(current_tags)):
+ if (
+ module.params["kvp"][tag].split(":")[0]
+ == current_tags[current_tag]["key"]
+ and module.params["namespace"] == current_tags[current_tag]["namespace"]
+ ):
+ tag_exists = True
+ if (
+ module.params["kvp"][tag].split(":")[1]
+ != current_tags[current_tag]["value"]
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.add_tag_to_volume(
+ module.params["name"],
+ namespace=module.params["namespace"],
+ key=module.params["kvp"][tag].split(":")[0],
+ value=module.params["kvp"][tag].split(":")[1],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update tag '{0}' from volume {1}".format(
+ module.params["kvp"][tag].split(":")[0],
+ module.params["name"],
+ )
+ )
+
+ if not tag_exists:
+ changed = True
+ if not module.check_mode:
+ try:
+ array.add_tag_to_volume(
+ module.params["name"],
+ namespace=module.params["namespace"],
+ key=module.params["kvp"][tag].split(":")[0],
+ value=module.params["kvp"][tag].split(":")[1],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to add tag KVP {0} to volume {1}".format(
+ module.params["kvp"][tag].split(":")[0],
+ module.params["name"],
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_tag(module, array, current_tags):
+ """Delete Tag"""
+ changed = False
+ for tag in range(0, len(module.params["kvp"])):
+ for current_tag in range(0, len(current_tags)):
+ if (
+ module.params["kvp"][tag].split(":")[0]
+ == current_tags[current_tag]["key"]
+ and module.params["namespace"] == current_tags[current_tag]["namespace"]
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ array.remove_tag_from_volume(
+ module.params["name"],
+ namespace=module.params["namespace"],
+ key=module.params["kvp"][tag].split(":")[0],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to remove tag KVP '{0}' from volume {1}".format(
+ module.params["kvp"][tag], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ copyable=dict(type="bool", default=True),
+ namespace=dict(type="str", default="default"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ kvp=dict(type="list", elements="str", required=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ if module.params["kvp"] is not None:
+ module.params["kvp"] = sorted(module.params["kvp"][0:5])
+ else:
+ module.fail_json(msg="No KVPs specified. Minimum of 1 is required.")
+ array = get_system(module)
+ api_version = array._list_available_rest_versions()
+
+ if TAGS_API_VERSION not in api_version:
+ module.fail_json(
+ msg="Volume tags are not supported. Purity 6.0.0, or higher, is required."
+ )
+
+ volume = get_volume(module, array)
+ endpoint = get_endpoint(module.params["name"], array)
+
+ if not volume:
+ module.fail_json(msg="Volume {0} does not exist.".format(module.params["name"]))
+ if endpoint:
+ module.fail_json(
+ msg="Volume {0} is an endpoint. Tags not allowed.".format(
+ module.params["name"]
+ )
+ )
+ if "." in module.params["name"]:
+ current_tags = array.get_volume(
+ module.params["name"],
+ snap=True,
+ pending=True,
+ tags=True,
+ namespace=module.params["namespace"],
+ )
+ else:
+ current_tags = array.get_volume(
+ module.params["name"],
+ pending=True,
+ tags=True,
+ namespace=module.params["namespace"],
+ )
+
+ if state == "present" and not current_tags:
+ create_tag(module, array)
+ elif state == "absent" and not current_tags:
+ module.exit_json(changed=False)
+ elif state == "present" and current_tags:
+ update_tag(module, array, current_tags)
+ elif state == "absent" and current_tags:
+ delete_tag(module, array, current_tags)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/requirements.txt b/ansible_collections/purestorage/flasharray/requirements.txt
new file mode 100644
index 000000000..3cf5d0672
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/requirements.txt
@@ -0,0 +1,7 @@
+purestorage
+py-pure-client
+python >= 3.6
+netaddr
+requests
+pycountry
+packaging
diff --git a/ansible_collections/purestorage/flasharray/roles/.keep b/ansible_collections/purestorage/flasharray/roles/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/roles/.keep
diff --git a/ansible_collections/purestorage/flasharray/settings.json b/ansible_collections/purestorage/flasharray/settings.json
new file mode 100644
index 000000000..f4311dd89
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/settings.json
@@ -0,0 +1,8 @@
+{
+ "id": "FlashArray-Collection",
+ "name": "Ansible Collection for FlashArray",
+ "filter": "devops",
+ "image": "http://code.purestorage.com/images/33_fa_collection.png",
+ "featured": 1,
+ "priority": 3
+}
diff --git a/ansible_collections/purestorage/flashblade/.git-blame-ignore-revs b/ansible_collections/purestorage/flashblade/.git-blame-ignore-revs
new file mode 100644
index 000000000..6d4d90c52
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.git-blame-ignore-revs
@@ -0,0 +1,2 @@
+# Migrate code style to Black
+6c785d5453095cd0dfa4088f28b3fd4feeaafc6a
diff --git a/ansible_collections/purestorage/flashblade/.github/CONTRIBUTING.md b/ansible_collections/purestorage/flashblade/.github/CONTRIBUTING.md
new file mode 100644
index 000000000..48dc0d566
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/CONTRIBUTING.md
@@ -0,0 +1,19 @@
+# WELCOME TO PURE STORAGE FLASHBLADE ANSIBLE COLLECTION GITHUB
+
+Hi! Nice to see you here!
+
+## QUESTIONS ?
+
+The GitHub issue tracker is not the best place for questions for various reasons, but the [mailing list](mailto:pure-ansible-team@purestorage.com) is a very helpful place for those things.
+
+## CONTRIBUTING ?
+
+By contributing you agree that these contributions are your own (or approved by your employer) and you grant a full, complete, irrevocable copyright license to all users and developers of the project, present and future, pursuant to the license of the project.
+
+## BUG TO REPORT ?
+
+You can report bugs or make enhancement requests at the [FlashBlade Collection GitHub issue page](http://github.com/Pure-Storage-Ansible/FlashBlade-Collection/issues/new/choose) by filling out the issue template that will be presented.
+
+Also please make sure you are testing on the latest released version of Ansible or the development branch; see the [Installation Guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for details.
+
+Thanks!
diff --git a/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/bug_report.md b/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 000000000..dd84ea782
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,38 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Browser [e.g. chrome, safari]
+ - Version [e.g. 22]
+
+**Smartphone (please complete the following information):**
+ - Device: [e.g. iPhone6]
+ - OS: [e.g. iOS8.1]
+ - Browser [e.g. stock browser, safari]
+ - Version [e.g. 22]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/feature_request.md b/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 000000000..bbcbbe7d6
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/ansible_collections/purestorage/flashblade/.github/pull_request_template.md b/ansible_collections/purestorage/flashblade/.github/pull_request_template.md
new file mode 100644
index 000000000..27079cb18
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/pull_request_template.md
@@ -0,0 +1,25 @@
+##### SUMMARY
+<!--- Describe the change below, including rationale and design decisions -->
+
+<!--- HINT: Include "Fixes #nnn" if you are fixing an existing issue -->
+
+##### ISSUE TYPE
+<!--- Pick one below and delete the rest -->
+- Bugfix Pull Request
+- Docs Pull Request
+- Feature Pull Request
+- New Module Pull Request
+- New Role Pull Request
+
+##### COMPONENT NAME
+<!--- Write the short name of the module, plugin, task or feature below -->
+
+##### ADDITIONAL INFORMATION
+<!--- Include additional information to help people understand the change here -->
+<!--- A step-by-step reproduction of the problem is helpful if there is no related issue -->
+- All new PRs must include a changelog fragment
+- Details of naming convention and format can be found [here](https://docs.ansible.com/ansible/latest/community/development_process.html#creating-a-changelog-fragment)
+<!--- Paste verbatim command output below, e.g. before and after your change -->
+```paste below
+
+```
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml b/ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml
new file mode 100644
index 000000000..0b2102184
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml
@@ -0,0 +1,10 @@
+name: Ansible Lint # feel free to pick your own name
+on: [push, pull_request]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: Run ansible-lint
+ uses: ansible-community/ansible-lint-action@main
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/black.yaml b/ansible_collections/purestorage/flashblade/.github/workflows/black.yaml
new file mode 100644
index 000000000..e5f9711f6
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/black.yaml
@@ -0,0 +1,11 @@
+name: Lint
+
+on: [push, pull_request]
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v3
+ - uses: psf/black@stable
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/main.yml b/ansible_collections/purestorage/flashblade/.github/workflows/main.yml
new file mode 100644
index 000000000..e66ce2991
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/main.yml
@@ -0,0 +1,62 @@
+name: Pure Storage Ansible CI
+
+on:
+ pull_request:
+ push:
+ schedule:
+ - cron: '25 10 * * *'
+
+jobs:
+ build:
+ name: Build flashblade on Ansible ${{ matrix.ansible }} (Python ${{ matrix.python-version }})
+ runs-on: ubuntu-20.04
+ strategy:
+ matrix:
+ ansible:
+ - stable-2.11
+ - stable-2.12
+ - stable-2.13
+ - stable-2.14
+ - stable-2.15
+ - devel
+ python-version:
+ - 3.8
+ - 3.9
+ - "3.10"
+ - "3.11"
+ exclude:
+ - python-version: "3.11"
+ ansible: stable-2.11
+ - python-version: "3.11"
+ ansible: stable-2.12
+ - python-version: "3.11"
+ ansible: stable-2.13
+ - python-version: "3.10"
+ ansible: stable-2.11
+ - python-version: 3.8
+ ansible: stable-2.14
+ - python-version: 3.8
+ ansible: stable-2.15
+ - python-version: 3.8
+ ansible: devel
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v3
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v3
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+        python -m pip install --upgrade pip
+        python -m pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
+
+ - name: Run sanity tests
+ run: |
+ pwd
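+        # ansible-test expects the collection under an
+        # ansible_collections/<namespace>/<name> tree, so stage a copy there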
+ mkdir -p ansible_collections/purestorage/flashblade
+        rsync -av . ansible_collections/purestorage/flashblade --exclude ansible_collections/purestorage/flashblade
+ cd ansible_collections/purestorage/flashblade
+ ansible-test sanity -v --color --python ${{ matrix.python-version }} --docker
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/stale.yml b/ansible_collections/purestorage/flashblade/.github/workflows/stale.yml
new file mode 100644
index 000000000..7bbc0505b
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/stale.yml
@@ -0,0 +1,19 @@
+name: Mark stale issues and pull requests
+
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+jobs:
+ stale:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/stale@v1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: 'Stale issue message'
+ stale-pr-message: 'Stale pull request message'
+ stale-issue-label: 'no-issue-activity'
+ stale-pr-label: 'no-pr-activity'
diff --git a/ansible_collections/purestorage/flashblade/.gitignore b/ansible_collections/purestorage/flashblade/.gitignore
new file mode 100644
index 000000000..c53f26278
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.gitignore
@@ -0,0 +1,4 @@
+*.tar.gz
+*.pyc
+.pylintrc
+collections/ansible_collections/purestorage/flashblade/tests/output/*
diff --git a/ansible_collections/purestorage/flashblade/.pylintrc b/ansible_collections/purestorage/flashblade/.pylintrc
new file mode 100644
index 000000000..9570a2b59
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.pylintrc
@@ -0,0 +1,587 @@
+[MASTER]
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Specify a configuration file.
+#rcfile=
+
+# When enabled, pylint will attempt to guess common misconfigurations and emit
+# user-friendly hints instead of false-positive error messages.
+suggestion-mode=yes
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W".
+disable=
+ abstract-method,
+ access-member-before-definition,
+ ansible-deprecated-version,
+ arguments-differ,
+ assignment-from-no-return,
+ assignment-from-none,
+ attribute-defined-outside-init,
+ bad-continuation,
+ bad-indentation,
+ bad-mcs-classmethod-argument,
+ broad-except,
+ c-extension-no-member,
+ cell-var-from-loop,
+ chained-comparison,
+ comparison-with-callable,
+ consider-iterating-dictionary,
+ consider-merging-isinstance,
+ consider-using-dict-comprehension,
+ consider-using-enumerate,
+ consider-using-get,
+ consider-using-in,
+ consider-using-set-comprehension,
+ consider-using-ternary,
+ deprecated-lambda,
+ deprecated-method,
+ deprecated-module,
+ eval-used,
+ exec-used,
+ expression-not-assigned,
+ fixme,
+ function-redefined,
+ global-statement,
+ global-variable-undefined,
+ import-error,
+ import-self,
+ inconsistent-return-statements,
+ invalid-envvar-default,
+ invalid-name,
+ invalid-sequence-index,
+ keyword-arg-before-vararg,
+ len-as-condition,
+ line-too-long,
+ literal-comparison,
+ locally-disabled,
+ method-hidden,
+ misplaced-comparison-constant,
+ missing-docstring,
+ no-else-raise,
+ no-else-return,
+ no-init,
+ no-member,
+ no-name-in-module,
+ no-self-use,
+ no-value-for-parameter,
+ non-iterator-returned,
+ not-a-mapping,
+ not-an-iterable,
+ not-callable,
+ old-style-class,
+ pointless-statement,
+ pointless-string-statement,
+ possibly-unused-variable,
+ protected-access,
+ redefined-argument-from-local,
+ redefined-builtin,
+ redefined-outer-name,
+ redefined-variable-type,
+ reimported,
+ relative-import,
+ signature-differs,
+ simplifiable-if-expression,
+ simplifiable-if-statement,
+ subprocess-popen-preexec-fn,
+ super-init-not-called,
+ superfluous-parens,
+ too-few-public-methods,
+ too-many-ancestors,
+ too-many-arguments,
+ too-many-boolean-expressions,
+ too-many-branches,
+ too-many-function-args,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-public-methods,
+ too-many-return-statements,
+ too-many-statements,
+ trailing-comma-tuple,
+ try-except-raise,
+ unbalanced-tuple-unpacking,
+ undefined-loop-variable,
+ unexpected-keyword-arg,
+ ungrouped-imports,
+ unidiomatic-typecheck,
+ unnecessary-pass,
+ unsubscriptable-object,
+ unsupported-assignment-operation,
+ unsupported-delete-operation,
+ unsupported-membership-test,
+ unused-argument,
+ unused-import,
+ unused-variable,
+ used-before-assignment,
+ useless-object-inheritance,
+ useless-return,
+ useless-super-delegation,
+ wrong-import-order,
+ wrong-import-position,
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=c-extension-no-member
+
+
+[REPORTS]
+
+# Python expression which should return a score less than or equal to 10 (10 is
+# the highest score). You have access to the variables 'error', 'warning',
+# 'refactor', 'convention' and 'statement', which respectively contain the
+# number of error, warning, refactor and convention messages, and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio). You can also give a reporter class, e.g.
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Activate the evaluation score.
+score=yes
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+# Complete name of functions that never return. When checking for
+# inconsistent-return-statements, if a never-returning function is called then
+# it will be considered as an explicit return statement and no message will be
+# printed.
+never-returning-functions=optparse.Values,sys.exit
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,
+ _cb
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored. Defaults to names
+# with a leading underscore.
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,io,builtins
+
+
+[BASIC]
+
+# Naming style matching correct argument names
+argument-naming-style=snake_case
+
+# Regular expression matching correct argument names. Overrides argument-
+# naming-style
+#argument-rgx=
+
+# Naming style matching correct attribute names
+attr-naming-style=snake_case
+
+# Regular expression matching correct attribute names. Overrides attr-naming-
+# style
+#attr-rgx=
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata,
+ _,
+
+# Naming style matching correct class attribute names
+class-attribute-naming-style=any
+
+# Regular expression matching correct class attribute names. Overrides class-
+# attribute-naming-style
+#class-attribute-rgx=
+
+# Naming style matching correct class names
+class-naming-style=PascalCase
+
+# Regular expression matching correct class names. Overrides class-naming-style
+#class-rgx=
+
+# Naming style matching correct constant names
+const-naming-style=UPPER_CASE
+
+# Regular expression matching correct constant names. Overrides const-naming-
+# style
+#const-rgx=
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+# Naming style matching correct function names
+function-naming-style=snake_case
+
+# Regular expression matching correct function names. Overrides function-
+# naming-style
+#function-rgx=
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,
+ j,
+ k,
+ f,
+ e,
+ ex,
+ Run,
+ C,
+ __metaclass__,
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Naming style matching correct inline iteration names
+inlinevar-naming-style=any
+
+# Regular expression matching correct inline iteration names. Overrides
+# inlinevar-naming-style
+#inlinevar-rgx=
+
+# Naming style matching correct method names
+method-naming-style=snake_case
+
+# Regular expression matching correct method names. Overrides method-naming-
+# style
+#method-rgx=
+
+# Naming style matching correct module names
+module-naming-style=snake_case
+
+# Regular expression matching correct module names. Overrides module-naming-
+# style
+#module-rgx=
+module-rgx=[a-z_][a-z0-9_-]{2,40}$
+method-rgx=[a-z_][a-z0-9_]{2,40}$
+function-rgx=[a-z_][a-z0-9_]{2,40}$
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+property-classes=abc.abstractproperty
+
+# Naming style matching correct variable names
+variable-naming-style=snake_case
+
+# Regular expression matching correct variable names. Overrides variable-
+# naming-style
+#variable-rgx=
+
+
+[SPELLING]
+
+# Limits count of emitted suggestions for spelling mistakes
+max-spelling-suggestions=4
+
+# Spelling dictionary name. Available dictionaries: none. To make this work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Maximum number of characters on a single line.
+max-line-length=160
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,
+ dict-separator
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+#generated-members=PurityFb.*
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference
+# can return multiple potential results while evaluating a Python object, but
+# some branches might not be evaluated, which results in partial inference. In
+# that case, it might be useful to still emit no-member and other checks for
+# the rest of the inferred objects.
+ignore-on-opaque-inference=yes
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis).
+# It supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+ _MovedItems,
+
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken into consideration
+# when showing a hint for a missing member.
+missing-member-max-choices=1
+
+
+[SIMILARITIES]
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+# Minimum number of lines for a similarity.
+min-similarity-lines=4
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take into consideration, separated by a comma.
+notes=FIXME,
+ XXX,
+ TODO
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,
+ __new__,
+ setUp
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,
+ _fields,
+ _replace,
+ _source,
+ _make
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in an if statement
+max-bool-expr=5
+
+# Maximum number of branches for a function / method body
+max-branches=12
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[IMPORTS]
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,
+ TERMIOS,
+ Bastion,
+ rexec
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
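
The rcfile above can be exercised from CI; a minimal GitHub Actions sketch that runs pylint with this configuration (the job layout, trigger and `plugins/` path are assumptions, not part of this commit):

```yaml
name: Pylint

on: [pull_request]

jobs:
  pylint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v3
        with:
          python-version: "3.11"
      - name: Run pylint with the collection's rcfile
        run: |
          python -m pip install --upgrade pip pylint
          # hypothetical target path; point at the collection's Python sources
          pylint --rcfile=.pylintrc plugins/
```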
diff --git a/ansible_collections/purestorage/flashblade/.yamllint b/ansible_collections/purestorage/flashblade/.yamllint
new file mode 100644
index 000000000..6c19f43f7
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/.yamllint
@@ -0,0 +1,7 @@
+extends: default
+
+rules:
+ document-start: disable
+ indentation: disable
+ line-length:
+ max: 200
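
Under this configuration yamllint accepts files without the leading `---` document-start marker and allows lines up to 200 characters; for example, a playbook such as the following (hypothetical content) passes:

```yaml
- name: Example play accepted by the relaxed .yamllint
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Long task names and long lines up to 200 characters are allowed
      ansible.builtin.debug:
        msg: "no '---' document start required"
```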
diff --git a/ansible_collections/purestorage/flashblade/CHANGELOG.rst b/ansible_collections/purestorage/flashblade/CHANGELOG.rst
new file mode 100644
index 000000000..c252af127
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/CHANGELOG.rst
@@ -0,0 +1,263 @@
+====================================
+Purestorage.Flashblade Release Notes
+====================================
+
+.. contents:: Topics
+
+
+v1.11.0
+=======
+
+Minor Changes
+-------------
+
+- purefb_info - Added `encryption` and `support_keys` information.
+- purefb_info - Added bucket quota and safemode information per bucket
+- purefb_info - Added security update version for Purity//FB 4.0.2 or higher
+- purefb_info - Updated object store account information
+- purefb_inventory - Added `part_number` to hardware item information.
+- purefb_policy - Added support for multiple rules in snapshot policies
+- purefb_proxy - Added new boolean parameter `secure`. Default of true (for backwards compatibility) sets the protocol to `https://`. False sets `http://` (see the sketch below)
+- purefb_s3acc - Added support for default bucket quotas and hard limits
+- purefb_s3acc - Added support for object account quota and hard limit
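
A minimal task sketch using the new `secure` parameter (host, port and connection details are hypothetical):

```yaml
- name: Configure the phone home proxy over plain HTTP
  purestorage.flashblade.purefb_proxy:
    host: proxy.example.com   # hypothetical proxy host
    port: 8080                # hypothetical port
    secure: false             # new in v1.11.0; selects http:// instead of the https:// default
    fb_url: 10.10.10.2        # hypothetical FlashBlade management address
    api_token: T-xxxxxxxx     # hypothetical API token
```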
+
+Bugfixes
+--------
+
+- purefb_info - Fixed issue when more than 10 buckets have lifecycle rules.
+- purefb_s3user - Fix incorrect response when bad key/secret pair provided for new user
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_pingtrace - Employ the internal FlashBlade ping and trace mechanisms
+
+v1.10.0
+=======
+
+Minor Changes
+-------------
+
+- All - Update documentation examples with FQCNs
+- purefb_ad - Allow service to be a list
+- purefb_bucket - Allow setting of bucket type to support VSO - requires Purity//FB 3.3.3 or higher
+- purefb_certs - Fix several misspellings of certificate
+- purefb_info - Added filesystem default, user and group quotas where available
+- purefb_info - Expose object store bucket type from Purity//FB 3.3.3
+- purefb_info - Show information for current timezone
+- purefb_policy - Allow rename of NFS Export Policies from Purity//FB 3.3.3
+- purefb_tz - Add support for FlashBlade timezone management
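
For example, the new timezone support written with an FQCN, as the updated documentation examples now do (all values hypothetical):

```yaml
- name: Set the FlashBlade timezone using the fully qualified collection name
  purestorage.flashblade.purefb_tz:
    timezone: America/New_York   # assumed parameter name and value
    fb_url: 10.10.10.2           # hypothetical management address
    api_token: T-xxxxxxxx        # hypothetical API token
```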
+
+Bugfixes
+--------
+
+- purefb_connect - Resolve connection issues between two FBs that are throttling capable
+- purefb_policy - Fix incorrect API call for NFS export policy rule creation
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_messages - List FlashBlade Alert Messages
+- purestorage.flashblade.purefb_tz - Configure Pure Storage FlashBlade timezone
+
+v1.9.0
+======
+
+Minor Changes
+-------------
+
+- purefb_admin - New module to manage global admin settings
+- purefb_connect - Add support for array connections to have bandwidth throttling defined
+- purefb_fs - Add support for NFS export policies
+- purefb_info - Add NFS export policies and rules
+- purefb_info - Show array connections bandwidth throttle information
+- purefb_policy - Add NFS export policies, with rules, as a new policy type
+- purefb_policy - Add support for Object Store Access Policies, associated rules and user grants
+- purefb_policy - New parameter `policy_type` added. For backwards compatibility, defaults to `snapshot` if not provided (sketched below).
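
A sketch of selecting the new policy type (policy name and connection details are hypothetical):

```yaml
- name: Create an NFS export policy
  purestorage.flashblade.purefb_policy:
    name: export-policy          # hypothetical policy name
    policy_type: nfs             # assumed value; omitting policy_type defaults to snapshot
    fb_url: 10.10.10.2           # hypothetical management address
    api_token: T-xxxxxxxx        # hypothetical API token
```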
+
+v1.8.1
+======
+
+Minor Changes
+-------------
+
+- purefb.py - Use latest `pypureclient` SDK with fix for "best fit". No longer requires double login to negotiate best API version.
+
+v1.8.0
+======
+
+Minor Changes
+-------------
+
+- purefb.py - Add check to ensure FlashBlade uses the latest REST version possible for Purity version installed
+- purefb_info - Add object lifecycles rules to bucket subset
+- purefb_lifecycle - Add support for updated object lifecycle rules. See documentation for details of new parameters.
+- purefb_lifecycle - Change `keep_for` parameter to `keep_previous_for`. `keep_for` is deprecated and will be removed in a later version (see the sketch after this list).
+- purefb_user - Add support for managing user public key and user unlock
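
A sketch of a lifecycle rule written against the renamed parameter (rule, bucket and connection values are hypothetical):

```yaml
- name: Update a bucket lifecycle rule with the renamed parameter
  purestorage.flashblade.purefb_lifecycle:
    name: daily-cleanup          # hypothetical rule name
    bucket: logs                 # hypothetical bucket name
    keep_previous_for: 7d        # replaces the deprecated keep_for
    fb_url: 10.10.10.2           # hypothetical management address
    api_token: T-xxxxxxxx        # hypothetical API token
```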
+
+Known Issues
+------------
+
+- purefb_lag - The mac_address field in the response is not populated. This will be fixed in a future FlashBlade update.
+
+v1.7.0
+======
+
+Minor Changes
+-------------
+
+- purefb_groupquota - New module to manage individual filesystem group quotas
+- purefb_lag - Add support for LAG management
+- purefb_snap - Add support for immediate snapshots to a remote connected FlashBlade
+- purefb_subnet - Add support for multiple LAGs.
+- purefb_userquota - New module to manage individual filesystem user quotas
+
+Bugfixes
+--------
+
+- purefb_fs - Fix bug where changing the state of both NFS v3 and v4.1 at the same time ignored one of these.
+- purefb_s3acc - Ensure S3 Account Name is always lowercase
+- purefb_s3user - Ensure S3 Account Name is always lowercase
+- purefb_subnet - Allow subnet creation with no gateway
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_groupquota - Manage filesystem group quotas
+- purestorage.flashblade.purefb_lag - Manage FlashBlade Link Aggregation Groups
+- purestorage.flashblade.purefb_userquota - Manage filesystem user quotas
+
+v1.6.0
+======
+
+Minor Changes
+-------------
+
+- purefb_apiclient - New module to manage API Clients
+- purefb_ad - New module to manage Active Directory Account
+- purefb_eula - New module to sign EULA
+- purefb_info - Add Active Directory, Kerberos and Object Store Account information
+- purefb_info - Add extra info for Purity//FB 3.2+ systems
+- purefb_keytabs - New module to manage Kerberos Keytabs
+- purefb_s3user - Add access policy option to user creation
+- purefb_timeout - Add module to set GUI idle timeout
+- purefb_userpolicy - New module to manage object store user access policies
+- purefb_virtualhost - New module to manage Object Store Virtual Hosts
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_ad - Manage FlashBlade Active Directory Account
+- purestorage.flashblade.purefb_apiclient - Manage FlashBlade API Clients
+- purestorage.flashblade.purefb_eula - Sign Pure Storage FlashBlade EULA
+- purestorage.flashblade.purefb_keytabs - Manage FlashBlade Kerberos Keytabs
+- purestorage.flashblade.purefb_timeout - Configure Pure Storage FlashBlade GUI idle timeout
+- purestorage.flashblade.purefb_userpolicy - Manage FlashBlade Object Store User Access Policies
+- purestorage.flashblade.purefb_virtualhost - Manage FlashBlade Object Store Virtual Hosts
+
+v1.5.0
+======
+
+Minor Changes
+-------------
+
+- purefb_certs - Add update functionality for array cert
+- purefb_fs - Add multiprotocol ACL support
+- purefb_info - Add information regarding filesystem multiprotocol (where available)
+- purefb_info - Add new parameter to provide details on admin users
+- purefb_info - Add replication performance statistics
+- purefb_s3user - Add ability to remove an S3 user's existing access key
+
+Bugfixes
+--------
+
+- purefb_* - Return a correct value for `changed` in all modules when in check mode
+- purefb_dns - Deprecate search parameter
+- purefb_dsrole - Resolve idempotency issue
+- purefb_lifecycle - Fix error when creating new bucket lifecycle rule.
+- purefb_policy - Ensure undeclared variables are set correctly
+- purefb_s3user - Fix maximum access_key count logic
+
+v1.4.0
+======
+
+Minor Changes
+-------------
+
+- purefb_banner - Module to manage the GUI and SSH login message
+- purefb_certgrp - Module to manage FlashBlade Certificate Groups
+- purefb_certs - Module to create and delete SSL certificates
+- purefb_connect - Support idempotency when existing connection is incoming
+- purefb_fs - Add new options for filesystem control (https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/pull/81)
+- purefb_fs - Default filesystem size on creation changes from 32G to ``unlimited``
+- purefb_fs - Fix error in deletion and eradication of filesystem
+- purefb_fs_replica - Remove condition to attach/detach policies on unhealthy replica-link
+- purefb_info - Add support to list filesystem policies
+- purefb_lifecycle - Module to manage FlashBlade Bucket Lifecycle Rules
+- purefb_s3user - Add support for imported user access keys
+- purefb_syslog - Module to manage syslog server configuration
+
+Bugfixes
+--------
+
+- purefb_policy - Resolve multiple issues related to incorrect use of timezones
+- purefb_connect - Ensure changing encryption status on array connection is performed correctly
+- purefb_connect - Fix breaking change created in purity_fb SDK 1.9.2 for deletion of array connections
+- purefb_connect - Hide target array API token
+- purefb_ds - Ensure updating directory service configurations completes correctly
+- purefb_info - Fix issue getting array info when encrypted connection exists
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_banner - Configure Pure Storage FlashBlade GUI and SSH MOTD message
+- purestorage.flashblade.purefb_certgrp - Manage FlashBlade Certificate Groups
+- purestorage.flashblade.purefb_certs - Manage FlashBlade SSL Certificates
+- purestorage.flashblade.purefb_lifecycle - Manage FlashBlade object lifecycles
+- purestorage.flashblade.purefb_syslog - Configure Pure Storage FlashBlade syslog settings
+
+v1.3.0
+======
+
+Release Summary
+---------------
+
+| Release Date: 2020-08-08
+| This changelog describes all changes made to the modules and plugins included in this collection since Ansible 2.9.0
+
+
+Major Changes
+-------------
+
+- purefb_alert - manage alert email settings on a FlashBlade
+- purefb_bladename - manage FlashBlade name
+- purefb_bucket_replica - manage bucket replica links on a FlashBlade
+- purefb_connect - manage connections between FlashBlades
+- purefb_dns - manage DNS settings on a FlashBlade
+- purefb_fs_replica - manage filesystem replica links on a FlashBlade
+- purefb_inventory - get information about the hardware inventory of a FlashBlade
+- purefb_ntp - manage the NTP settings for a FlashBlade
+- purefb_phonehome - manage the phone home settings for a FlashBlade
+- purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+- purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+- purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+- purefb_snmp_agent - modify the FlashBlade SNMP Agent
+- purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+- purefb_target - manage remote S3-capable targets for a FlashBlade
+- purefb_user - manage local ``pureuser`` account password on a FlashBlade
+
+Minor Changes
+-------------
+
+- purefb_bucket - Versioning support added
+- purefb_info - new options added for information collection
+- purefb_network - Add replication service type
+- purefb_s3user - Limit ``access_key`` recreation to 3 times
+- purefb_s3user - return dict changed from ``ansible_facts`` to ``s3user_info``
+
+Bugfixes
+--------
+
+- purefb_bucket - Add warning message if ``state`` is ``absent`` without ``eradicate``
+- purefb_fs - Add graceful exit when ``state`` is ``absent`` and filesystem not eradicated
+- purefb_fs - Add warning message if ``state`` is ``absent`` without ``eradicate``
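
The ``eradicate`` warnings above reflect the two-step destroy-then-eradicate flow; a sketch of removing a filesystem completely in one task (names and connection details are hypothetical):

```yaml
- name: Delete and eradicate a filesystem
  purestorage.flashblade.purefb_fs:
    name: scratch            # hypothetical filesystem name
    state: absent
    eradicate: true          # without this the filesystem is only destroyed, not eradicated
    fb_url: 10.10.10.2       # hypothetical management address
    api_token: T-xxxxxxxx    # hypothetical API token
```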
diff --git a/ansible_collections/purestorage/flashblade/COPYING.GPLv3 b/ansible_collections/purestorage/flashblade/COPYING.GPLv3
new file mode 100644
index 000000000..94a9ed024
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/COPYING.GPLv3
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
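As a concrete illustration of the notice placement described above, here is a minimal, hypothetical Python module header of the kind a collection such as this one might carry; the filename, year, and author are placeholders, not taken from the repository:

    #!/usr/bin/python
    # -*- coding: utf-8 -*-

    # frobnicate.py - <one line to give the program's name and a brief idea of what it does.>
    # Copyright (C) <year> <name of author>
    #
    # This program is free software: you can redistribute it and/or modify it
    # under the terms of the GNU General Public License as published by the
    # Free Software Foundation, either version 3 of the License, or (at your
    # option) any later version. See <http://www.gnu.org/licenses/> for the
    # full notice.

Ansible source files commonly compress the same statement to a single line, for example "# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)".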
diff --git a/ansible_collections/purestorage/flashblade/FILES.json b/ansible_collections/purestorage/flashblade/FILES.json
new file mode 100644
index 000000000..5cfa68659
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/FILES.json
@@ -0,0 +1,1279 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.9.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.13.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.16.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.14.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.11.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.12.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.15.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.10.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c429527b799623f57e6363e14ff8a319844c9120f4dfa18bcea3849cdc07128",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/black.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fb3e0af2e41fb0618586a2990e6645fb9b29d1a7b64b7168c5d27af320569c8",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/ansible-lint.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c85688d98b71e3a6594530a362cd5d2cf83842ceaccd0e0fc76e233777c1cef",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/stale.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bdef4889afabcd627fc30711a0809c7468b8c9e64cbcebe1334f794a41e7bd9",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f7d9b7fc9ac71a4ff36243422b04f4cf163a254c52e8ab647fb84807bc3ea21",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/feature_request.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f48c52f209a971b8e7eae4120144d28fcf8ee38a7778a7b4d8cf1ab356617d2",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/bug_report.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c8d64f29fb4536513653bf8c97da30f3340e2041b91c8952db1515d6b23a7b3",
+ "format": 1
+ },
+ {
+ "name": ".github/pull_request_template.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "565ead1b588caaa10cd6f2ed1bb6c809eb2ad93bf75da3a198690cac778432d6",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df18179bb2f5447a56ac92261a911649b96821c0b2c08eea62d5cc6b0195203f",
+ "format": 1
+ },
+ {
+ "name": ".yamllint",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2970fa4875092f99825ac0da3c82d2413ce973087b9945e68fdfa7b3b1e2012e",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c4d2257a4a25daf934a2b149aaa3397371d32f99f0b7042ca51a1a5fe981917",
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87cb6471722fa1096099f228091480939c5b7c3ac39c2819543324a7701e66a3",
+ "format": 1
+ },
+ {
+ "name": "playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/templates/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/files/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/vars/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f92bbcfdf90122b0ffdbe430cd0ff9b2a3b1e3cd1c099e0436b251de8674d74",
+ "format": 1
+ },
+ {
+ "name": "settings.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f64528ffd800423e1d49a3c79cdd3892548a57177ea1a1caacbbcd275390b792",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3e019033a4ff6d651103704d47629e6d911cb949652bd5e6121d7a918dbc480",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1b77eeb2d9f7242075e746537713be29e397fe6954f13a1caf8b10695434b9b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/.plugin-cache.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b528379cbf853914f8e8192b15e34bad21ea8c2b4de7faaab4f045fe1921fa4b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "728d1a92a9effec8bd73c032a3bd53fc8eb4d9029c824a2b6e1179b6922bf488",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/186_add_tz.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44209d75080c5e4f437f409bb37e0f16c662658a6243fa890339fc076dfa7cd3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/81_purefb_fs_new_options.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "abb817b52fdfa70b538ca9efce8d642282383b6961c47bde20ce0a023d2b941d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/101_fix_policy_and_timezone_error.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e9c5c95b8333fee22646f4e83e9034172182b1e99c084725f08df48e45d3d47",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/163_admin_key.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd290345ed66c0809e6be94cabb6f1823b7e0b3f61d6a88a13f16ae849ce4399",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/220_s3user_key_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae00607f47b12b62456cb037b31474be8b7de0820b46ced24fc4a96b43f0eb76",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/211_change_booleans.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f04fd18a42e321cb3818a579e14cc50a6d27935196ff04632e2db44f7b807322",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/217_inventory.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4832bed915e1a18327ab9d7c15c65f55094f08215a26028d426ca694a90c2ae7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/174_access_policies.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25f5a86a2a977555359c8088fab65902f1ee2b0cc3bc417a7383d5d5176d4802",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/152_s3acc_lowercase.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a02995d6eeb1ac3968e952c61a552e5fc2feeef62ef7642d5f8714157da7d41",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/76_default_fs_size.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d8689e8f46ab7d3286b7d3ee46dfa13a8bf0585cc9b197a5ca8271c9dd9590e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/111_dsrole_update_idempotency.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "632e24ce20734ad2ee8d7938aaff910a2073fcd7f4fc633d27009ee7a16eff33",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/90_imported_keys.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad1078e90875745edce8071846183eed39c3878156d14f96b5db78ab1c5be973",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/150_fix_joint_nfs_version_change.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e1a7b9242317cf785fa07608c5a661bad07fc79e8fd187264d9263dc0609479",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/164_add_admin.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53b89a2de09c79fcb3fdbdf82917985124d53f793046f1164c04a8578adb7df9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/129-virtualhost.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0af56f02e1b7ad1ea585b3bbce897022faf28b448b69ea755951be3b5da40f7e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/105_max_access_key.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb9f5707e7466fe7c94479891f218bacd04ae45a37c2f207dcf51ac756fb7259",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/169_pypureclient_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb6e7bfc1c816ec77dadf6bd4ab040a8089b98a1c9c75ec15603d407c27ce3f2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/183_v2_connections.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "700e1509315604807c70d5b186542e74e058e4f912b1fe796df41c3d8a125d57",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/96_fix_update_connection.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "828cc0c94acf44d1d373402a0cc657527d9fce8ac744319fbe0d8035684932b4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/200_proxy.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26631d7434c86b739bcd75c8905f8f668555217610cafb47f11a6e24937c7eb8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/159_add_lag.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b1d95e41e550ed7b8bdda62f09e9ae883915afd1b547d5f5bb863b21b803df3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/115_multiprotocol.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51375d2aac996039ee4d338cbb7cc8f9d77f423f8f519ab6f84012ff021812ae",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/85_add_banner.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee600c3bcae632d7450ff3447192f8ca2d1622eecd67bc87c59fdd3dd8326bc6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/185_nfs_export_rule.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f53ac3485ed3849ca99fee6015e2767f636c1186a368b3d4e91ba6076afd7d4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/128_add_32_to_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b18c7cf868d5699e4ad67e2d924c7a6323353147f8850757f7f2c4c7dda877c8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/161_add_lifecycle_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8c87e250274f2b5007ce0898c9bb6d79129faedaa8427a52377f86c24c6e90f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/138_add_ad_module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "972d7c56c40a909882eeb3c199f4b7dfd05b080d8b159d2f4915c3d86beb055d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/202_multiple_snap_rules.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ed9e6c99d409df00b7cd2cb4a60bee536b9e0608c107a0944fb3a738ec0bd9f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/213_sec_update.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b71174c00e982cada0d051fae5e28c853207ec6d0f42a783db35a9519733769",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/218_object_account_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef0f569461747bfcb2f294a8317d113b829323f9e6994e652d4344b2590099fa",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/80_support_reverse_replica_link.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3504f5e1acadaf52bd9d420373b7edce2015435232e5fa53282455361bcd440",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/84_add_cert.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d286bf0fe3301a898bcdcad0bf70955732608eb51468097ca6d70ae269654d8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/123_lifecycle_rule_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87a3f72b0ac11e72103dfb4766faecdd2b0c1fe5fad379e322c910c5134f7025",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/83_add_certgrp.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7513178564ee1707090e4b3df65af56f28a71119e0ebf73b074dc9d2c0e1d65",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e140fbfc3ac4eaab3dd9c482e3beb37efd98ad4c3892b36f93ffb00d89c9283f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/136_add_s3user_policy.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b97c8a102be108e8d74c9ec6d9aa73ec151fe7a77c676452d7b96cf75a4ecf6b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/114_certificate_update.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce77387c64b0714a4abe011d4eabc7b1a803058c1e7b407646ceb8249545e8aa",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/162_new_lifecycle.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd6214f7380736e34ed7a21396f1842c6796afba6c3b7413536522d4b6d0b531",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/132_add_timeout.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8aea8125471f4717c0efa211756fb2086542362d9bee50295686dbce9ba86db7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/154_add_snap_now.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bde815114a219fd03941a080c2e6acebd5ef748e7f67503e8c3ef5f81decd54",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/158_support_lags.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68b3e104addfa10fb7f2f974bff2e5dad2c950e261c603f37409f42ab7afed02",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/92_fix_ds_update.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8befcbbddf6fc2db62ff48b4f3a1030fe115fb7ababfc9b03c8e693628087337",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/86_add_syslog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e42ee9ea2a2bffa465347a52a3fcf4bfaa51f377e7f33bf4a405eb46ae507442",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/191_add_quota_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58ae5507364e9af847ac1806d27d6497bd36967ef3bdb34e3716cc294c178440",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/188_bucket_type.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c8485b792ba73283807489b10a7b6df8298c5f932aaeec7b6b841b2f504464a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/109_update_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "857bb23faa48e2d894f432cca4219681d7b3dab68473b3502dfe9319d751a3e1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/121_replication_perf.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "372e2b49c1b2fb2f637e01023dd3a5146ee61171adbf619062ceb5e53a5d3e96",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/153_add_quota.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b2517ea362d7128333d6fab7f99f6b70c4253d2807eae3ec417aa4451b3ae6c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/v1.3.0_summary.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64bd3d32085373ce61a414518c2ed87bfd003d163d3002d087f41f4a54b0b1a0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/113_policy_cleanup.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11023f4d159bc146016fe9e9f40d18edb659518cb9dbc733750146e00de2b05c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/216_extra_bucket_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf88a27b9c51eefd78e80b587012be110c967d0185597cac22cf5de86b73b053",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/212_object_account_quota.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d9dd6bbb0f690de495ad9416117baf213d1d60f164fbcaedafa5f941ebeba28",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/215_encrypt_sec_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6915aa0ddabb1f73dbced52d0114b84317958f29a2ef7ea4dcd72a10952f8017",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/131-apiclient.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92dd9507a2a0476d24f0c1e7a5342925be49b4a84142fe8e33f4a76f422283c3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/107_add_remove_s3user_key.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a2bb28b43962c08ea8916db02a401f8bd7b4989bd1aa04f201ed8c602d94124",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/88_add_lifecycle.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fdc6c425f03ffc0b4a008230f290f6ef37874a270909cb2ee311843dc08909f6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/147_no_gateway.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ca2ad2e1c1d60b110b87b2b37013bae6ee9daff64056f1dea691f2376cb8448",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/78_update_filesystem_replica_link.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57a7b5ed892c4ea2f5149023b2bdde9481eb8c0a7593e4e76a4603e706971100",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/176_nfs_export_policies.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36fc1c990afd6fb48068d113d6e4a6846368ad32523554acc9b9d9e5ba861161",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/112_fix_check_mode.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11f8266ad857ed327ddbe8ef65f810a54e6c57df7ef24d1ec1d4c132abaa23a7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/79_hide_connect_api.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4cd3cbdb65de6b71cfbe179d56a42be2afbf6486e1ce0df9fdd3a7042bd57b0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/167_fix_logins.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "426451dd9cb0925943b74eae2fe37702574efc7974f630a049737bfa74991ff3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/194_lists_for_service.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e139b9ea88f7700071e57500cff497a6be300d8425b4a4ddaba77c36a8dc128",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/175_throttle_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "738e0e9c2f7789b1c931b5563416ca436fd0e04401232a502e6ce59fd03da28f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/139_add_keytabs.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4d64b50797e36e3861e530b3e7c080277ebceb17ac5f58d4a08b8ac59c14d10",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/135_add_user_policies.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0b78f5b1a5be3bfb87a00a4e638fad67600b0bab4cfddd72b3bfa4d2e217e3f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/77_filesystem_policies_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c7090d551cb59c49622a89c0ed25f12ad89104a9e2ab6708a01fc01fce9e049",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/127_add_eula.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b092f3766cf4309ac60ab77c2e51142ffbc81eb4bfa4da581d531ee2de633ac",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/187_rename_nfs_policy.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8b9f4112fea72954805eca3c01cf04524d5bd02a5b2559cdfef68c09d616e49",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/140_more_32_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e57a10a71ab3dd1c151a6867c0da118a21e13df2ef8b9d2fbb799108ddebcd4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/205_fix_multi_lifecycle.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4080535eeb4ad5e56715dc1dd7683679072d027a65bce93a49adb4b56b68618",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/166_lag_mac_note.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b639987ccd53708ee210a1812bd8c6af30292a3a1b6b42c7b839dd7120967e13",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/90_delete_conn_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "787138033d123fa59a9d3cdb424dc093183a020eebf1e76b46cbf059006e18e5",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/108_dns_search_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "056e8181176826dc43b62100b6c50c8770680f0fcc37cf73737848233382b2e8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/184_certificate_typos.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "827c27fb0d7c31d13e89e829db35890c97a16cf437149264074c1c6fa52be9be",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/179_fqcn.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4c60f377dd4cd40de9c777a7d54f6d185afa785fdc45a751d67f2baccf9efdf",
+ "format": 1
+ },
+ {
+ "name": "LICENSE",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "format": 1
+ },
+ {
+ "name": "COPYING.GPLv3",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_bladename.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b21f650ae77744ba23b47de5b5bcf220ee68c77b127f569908c48eba08a8f24",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_proxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42514c4241a3e3f254d0cd0fd8a27f394a417990aed0dcc4888efc93fb2a2b7c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_admin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76c2ce2781241b7338e05f4d443090cb5fd5e7cb6fc1845ae5f78e9a0f9f5002",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f00d3920d4dadb950764884c97c5ff3b64f8cc0fb7760d04f328843658a33cc1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ebd127691bb88001865cba5e1813f0895111b8806c3c5fbfef5a21c24954bdb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_tz.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d08c8115e92f613d74e1bbf53a59a379f95513e3a7d231a9f745a9dfe1d23d5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_alert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80d6d4747cf607c7f73ac70a70a7c5f71c527c628f928e49b21de377f5cdbc25",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_remote_cred.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51baa72db5641ac2a00f98b07cc626fc65d11412ae11c24e7c5f2a381d2b63df",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_ad.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40baf6272707344af09ee6c329457532462df5fedf087fc58662e295847444df",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_connect.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78d93cd41217bfcca2d6cc900b560fb0a03d16e502162e52eb89c0e432b08820",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_fs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dfee64d096d76c62d7b9081845b29b4f924bc2d6e6e699c3ff2b0ceb1b3c5714",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21398dfcfc59ad0c094ea608027bd44c121ecffc8fbff9ae96fde4f61ba65774",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_target.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47eea0605e82c442152c801f95a3f55e31f816720bde09b7153caa4d9c58228f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_certgrp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "117c7e4ad499d72b90e13ee5dd72788e78f266832b670b4830154030e0b69e5d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_ds.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "756950f76e59b5099a8a331bb9afa80976cd7e37c605791f517af6442b9040b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_userpolicy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e9fe1856db864f057d4eb3bafb1107dce0d7c429acc4deeb25dfba991e510f0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_certs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b79151ea9333e6bde34361ab8a8e18b8d961ed6ed18c601c0b574d12020fa35f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_snmp_agent.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2da4ecae583c8c94c55046e4a72a9437ac1f01aefa83e77d315e02792edf4a2c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_subnet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ff34ed58891cf1dcca1757f2d2a2d79a21f40e61195cc2d509fc56108560409",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_apiclient.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2cc1381512d001748885bd41104f8215397c74f464b696c216368de7598e47bb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_inventory.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "391dedb1a7265a3f57b2193ee5efa254e981d3f4be1c6425adb036c6ddb7cf6b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_syslog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe39dc9131937befc223fd3efd96a369238fa320618e77323fedaa8c7f2e7621",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_lag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "911181fd37fedbb616cb2d2cc6b94c070a04ca56f4a69b97299ccff40be2c803",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_messages.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a0bcd83ebb86063ed9fb3db1bacbda9a89d4d82f11590b1d2cbfd978cd1c198",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_banner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5daf1a121e8086c3ce3b510c9a52119ba256e49591932f4a575484fc7230b1f9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_snmp_mgr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ff095c16f369a129dff76ab9c2660ba2f45d0bc62b2c07bcbf58d62067addfd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_ra.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a9172183c8afdd07d3eb854f466a6c687ea881f6978053909ad9908f76db71b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_groupquota.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb933221f221bc66e49534594bd0ed6c06f3d83fe57b1ec45bfda80ec593becd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_s3acc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff4391301e7e1a21329460afd11d73b60fec6dbab050bea8ab0d8c740f571218",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45900eaeaafc923ea85e88c1dc87d2948e5a07f3ccb3aa2a4767c69fb2da3ac9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_s3user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e3221ed572489da65f749e185123f662047918a8f9b8b9391f665d343e6acf4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_pingtrace.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "facfd9bbb4ec84cca4c6dc3608da73a2ab8af7a9b5b1f139fbcf6f91b4f83612",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_snap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0ccbd3a590ee10c35445717c2f0378abb36078d3fbb5908e195e40022eaa802",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_smtp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76d37be7050f2e57b7fa09cae4b7555fe8b644c031ae7b93a3de5af2cbe19781",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_keytabs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e68ef5023904b2b70f95567ef69356b43ed4324ab18fd080cc054c217326445",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_dsrole.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d625a7248695e857cc0eaf32beb340de4772c406278de8b3c81b1ce2740854c3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_bucket.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b7e76b4c8be29be79485ec99cf01ce365e725801f7467931d6eb656c5f64120",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_ntp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3df2990a95399fb343b3d9733534ffe3cef10b5546b939924aa17d04fb10fdd2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6cbb2b5f7a2bbbebefc28ab19d06344fdf43f316a31839a440f2f29b652d130b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_bucket_replica.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8ad0c4a4506527009dbb28920c81b8cef6dddde65382af33e47c22522d27332",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_virtualhost.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "37d614801411069d3c3aab20c018daf17496832bc73e59976b5bc25f8f5cddc2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_userquota.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf1a39e2b307e395b54c2a6ced7335971cf127f03ca6f1bd8af17a2aff28b9c2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_fs_replica.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ef60aaaa8d397ecbef11da23f16d707829db7613811a3142f426076b2e8d577",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_timeout.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c25d12eff522c44580b77e457c0496368e877bfe72cb41f1a9402a96ad18418",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_phonehome.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53bcb5901f85f1938f06ef36f36ed37537b5ec2997b596c3906971ee016a3b9f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_eula.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d06a41aeae5febbc2d1fecd64b888e5947f14b0944f473c3c5d1d46e50acfc4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_lifecycle.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "beff3e20624460b82775e554a8c27cfd6b345d3a5a787f96df582a7026e23449",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/purestorage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb96797756b79883247778bbf7c9ed0c9a34e3e6f14d97b753e3d6401ec25f0f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/purefb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a7a9657951dec2667ad720e965452a0003924cd36fe260527c01f83948d0473",
+ "format": 1
+ },
+ {
+ "name": "README.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9eca16f5db9ebc48387f94f50a9762c57fcb6a6eb4cd6c258f13b0a9a371be8e",
+ "format": 1
+ },
+ {
+ "name": ".pylintrc",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75d8dc97586bc956a906be2aa0b86ec465eb78ce48d3d651ea1ddad3935d27cf",
+ "format": 1
+ },
+ {
+ "name": ".git-blame-ignore-revs",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "272d9a8e8654881cd42bb4108716e720bc634065d74064fb09f29d0e6e817e21",
+ "format": 1
+ }
+ ],
+ "format": 1
+}
\ No newline at end of file
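For reference, the "chksum_sha256" values in the manifest above are hex SHA-256 digests of each file's contents; the value repeated for every ".keep" placeholder, e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, is the well-known digest of a zero-byte file. A minimal sketch of how an installed copy could be verified against this manifest, using only the Python standard library (the working directory is assumed to be the collection root):

    import hashlib
    import json

    def sha256_of(path):
        """Return the hex SHA-256 digest of a file's bytes."""
        digest = hashlib.sha256()
        with open(path, "rb") as handle:
            for chunk in iter(lambda: handle.read(65536), b""):
                digest.update(chunk)
        return digest.hexdigest()

    with open("FILES.json") as manifest:
        entries = json.load(manifest)["files"]

    for entry in entries:
        if entry["ftype"] == "file":  # directory entries carry null checksums
            match = sha256_of(entry["name"]) == entry["chksum_sha256"]
            print(entry["name"], "ok" if match else "MISMATCH")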
diff --git a/ansible_collections/purestorage/flashblade/LICENSE b/ansible_collections/purestorage/flashblade/LICENSE
new file mode 100644
index 000000000..f288702d2
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/purestorage/flashblade/MANIFEST.json b/ansible_collections/purestorage/flashblade/MANIFEST.json
new file mode 100644
index 000000000..c111f1bf6
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/MANIFEST.json
@@ -0,0 +1,37 @@
+{
+ "collection_info": {
+ "namespace": "purestorage",
+ "name": "flashblade",
+ "version": "1.11.0",
+ "authors": [
+ "Pure Storage Ansible Team <pure-ansible-team@purestorage.com>"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "purestorage",
+ "flashblade",
+ "storage",
+ "object",
+ "nfs"
+ ],
+ "description": "Collection of modules to manage Pure Storage FlashBlades",
+ "license": [
+ "GPL-3.0-or-later",
+ "BSD-2-Clause"
+ ],
+ "license_file": null,
+ "dependencies": {},
+ "repository": "https://github.com/Pure-Storage-Ansible/FlashBlade-Collection",
+ "documentation": "https://docs.ansible.com/ansible/latest/collections/purestorage/flashblade/index.html#plugins-in-purestorage-flashblade",
+ "homepage": null,
+ "issues": "https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bb9f78982cdd6334e7f063927e0a32f11b5d6c6940b0cd253d3311be4717cda",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/purestorage/flashblade/README.md b/ansible_collections/purestorage/flashblade/README.md
new file mode 100644
index 000000000..7972158bc
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/README.md
@@ -0,0 +1,98 @@
+<a href="https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/releases/latest"><img src="https://img.shields.io/github/v/tag/Pure-Storage-Ansible/FlashBlade-Collection?label=release">
+<a href="COPYING.GPLv3"><img src="https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg"></a>
+<img src="https://cla-assistant.io/readme/badge/Pure-Storage-Ansible/FlashBlade-Collection">
+<img src="https://github.com/Pure-Storage-Ansible/FLashBlade-Collection/workflows/Pure%20Storage%20Ansible%20CI/badge.svg">
+<a href="https://github.com/psf/black"><img src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
+
+# Pure Storage FlashBlade Collection
+
+The Pure Storage FlashBlade collection consists of the latest versions of the FlashBlade modules.
+
+## Supported Platforms
+
+- Pure Storage FlashBlade with Purity//FB 2.1.2 or later
+- Certain modules and functionality require higher versions of Purity//FB. A module will report an error if your Purity//FB version is not high enough to support it.
+
+## Prerequisites
+
+- Ansible 2.9 or later
+- Pure Storage FlashBlade system running Purity//FB 2.1.2 or later
+ - some modules require higher versions of Purity//FB
+- purity_fb >= 1.12.2
+- py-pure-client >= 1.27.0
+- python >= 3.6
+- netaddr
+- datetime
+- pytz
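+
+The Python dependencies (other than `datetime`, which is part of the Python standard library) can be installed with Ansible itself. A minimal sketch, assuming the SDKs are published on PyPI under the names listed above:
+
+```
+- name: Install FlashBlade collection prerequisites
+  hosts: localhost
+  tasks:
+    - name: Install the Pure Storage Python SDKs and helper libraries
+      ansible.builtin.pip:
+        name:
+          - purity-fb>=1.12.2
+          - py-pure-client>=1.27.0
+          - netaddr
+          - pytz
+```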
+
+## Idempotency
+
+All modules are idempotent with the exception of modules that change or set passwords. Due to security requirements, existing passwords cannot be validated against, so these modules always report a change, even if the password is the same.
+
+## Available Modules
+
+- purefb_ad - manage Active Directory account on FlashBlade
+- purefb_alert - manage alert email settings on a FlashBlade
+- purefb_apiclient - manage API clients for FlashBlade
+- purefb_banner - manage FlashBlade login banner
+- purefb_bladename - manage FlashBlade name
+- purefb_bucket - manage S3 buckets on a FlashBlade
+- purefb_bucket_replica - manage bucket replica links on a FlashBlade
+- purefb_certgrp - manage FlashBlade certificate groups
+- purefb_certs - manage FlashBlade SSL certificates
+- purefb_connect - manage connections between FlashBlades
+- purefb_dns - manage DNS settings on a FlashBlade
+- purefb_ds - manage Directory Services settings on a FlashBlade
+- purefb_dsrole - manage Directory Service Roles on a FlashBlade
+- purefb_eula - manage EULA on FlashBlade
+- purefb_fs - manage filesystems on a FlashBlade
+- purefb_fs_replica - manage filesystem replica links on a FlashBlade
+- purefb_groupquota - manage individual group quotas on FlashBlade filesystems
+- purefb_info - get information about the configuration of a FlashBlade
+- purefb_inventory - get information about the hardware inventory of a FlashBlade
+- purefb_keytabs - manage FlashBlade Kerberos keytabs
+- purefb_lag - manage FlashBlade Link Aggregation Groups
+- purefb_lifecycle - manage FlashBlade Bucket Lifecycle Rules
+- purefb_messages - list FlashBlade alert messages
+- purefb_network - manage the network settings for a FlashBlade
+- purefb_ntp - manage the NTP settings for a FlashBlade
+- purefb_phonehome - manage the phone home settings for a FlashBlade
+- purefb_pingtrace - perform FlashBlade network diagnostics
+- purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+- purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+- purefb_ra - manage the Remote Assist connections on a FlashBlade
+- purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+- purefb_s3acc - manage the object store accounts on a FlashBlade
+- purefb_s3user - manage the object store users on a FlashBlade
+- purefb_smtp - manage SMTP settings on a FlashBlade
+- purefb_snap - manage filesystem snapshots on a FlashBlade
+- purefb_snmp_agent - modify the FlashBlade SNMP Agent
+- purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+- purefb_subnet - manage network subnets on a FlashBlade
+- purefb_syslog - manage FlashBlade syslog server configuration
+- purefb_target - manage remote S3-capable targets for a FlashBlade
+- purefb_timeout - manage FlashBlade GUI timeout
+- purefb_user - manage local *pureuser* account password on a FlashBlade
+- purefb_userpolicy - manage FlashBlade Object Store User Access Policies
+- purefb_userquota - manage individual user quotas on FlashBlade filesystems
+- purefb_virtualhost - manage FlashBlade Object Store Virtual Hosts
+
+## Instructions
+
+Install the Pure Storage FlashBlade collection on your Ansible management host.
+
+- Using ansible-galaxy (Ansible 2.9 or later):
+```
+ansible-galaxy collection install purestorage.flashblade -p ~/.ansible/collections
+```
+
+All servers that execute the modules must have the appropriate Pure Storage Python SDK installed on the host.
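+
+Once installed, reference the modules by their fully qualified collection name. A minimal sketch of a playbook; the `fb_url` and `api_token` values are placeholders for your own FlashBlade management address and API token:
+
+```
+- name: Collect FlashBlade configuration information
+  hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: Get information from the FlashBlade
+      purestorage.flashblade.purefb_info:
+        fb_url: 10.10.10.2
+        api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+      register: blade_info
+
+    - name: Show the collected information
+      ansible.builtin.debug:
+        var: blade_info
+```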
+
+## License
+
+[BSD-2-Clause](https://directory.fsf.org/wiki?title=License:FreeBSD)
+[GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0.en.html)
+
+## Author
+
+This collection was created in 2019 by [Simon Dodsley](https://github.com/sdodsley) for, and on behalf of, the [Pure Storage Ansible Team](mailto:pure-ansible-team@purestorage.com)
diff --git a/ansible_collections/purestorage/flashblade/README.rst b/ansible_collections/purestorage/flashblade/README.rst
new file mode 100644
index 000000000..a7d062a8a
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/README.rst
@@ -0,0 +1,19 @@
+|License| |CLA-Assistant| |Pure-Storage-Ansible-CI| |Code-style-black|
+
+|Build history for master branch|
+
+=====================
+FlashBlade-Collection
+=====================
+
+Ansible Collection for Pure Storage FlashBlade.
+
+.. |License| image:: https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg
+ :target: COPYING.GPLv3
+ :alt: Repository License
+.. |CLA-Assistant| image:: https://cla-assistant.io/readme/badge/Pure-Storage-Ansible/FlashBlade-Collection
+.. |Pure-Storage-Ansible-CI| image:: https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/workflows/Pure%20Storage%20Ansible%20CI/badge.svg
+.. |Code-style-black| image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+.. |Build history for master branch| image:: https://buildstats.info/github/chart/Pure-Storage-Ansible/FlashBlade-Collection?branch=master&buildCount=50&includeBuildsFromPullRequest=false&showstats=false
+ :target: https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/actions?query=branch%3Amaster
diff --git a/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml b/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml
new file mode 100644
index 000000000..9834bdfed
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml
@@ -0,0 +1,254 @@
+objects:
+ role: {}
+plugins:
+ become: {}
+ cache: {}
+ callback: {}
+ cliconf: {}
+ connection: {}
+ filter: {}
+ httpapi: {}
+ inventory: {}
+ lookup: {}
+ module:
+ purefb_ad:
+ description: Manage FlashBlade Active Directory Account
+ name: purefb_ad
+ namespace: ''
+ version_added: 1.6.0
+ purefb_admin:
+ description: Configure Pure Storage FlashBlade Global Admin settings
+ name: purefb_admin
+ namespace: ''
+ version_added: 1.8.0
+ purefb_alert:
+ description: Configure Pure Storage FlashBlade alert email settings
+ name: purefb_alert
+ namespace: ''
+ version_added: 1.0.0
+ purefb_apiclient:
+ description: Manage FlashBlade API Clients
+ name: purefb_apiclient
+ namespace: ''
+ version_added: 1.6.0
+ purefb_banner:
+ description: Configure Pure Storage FlashBlade GUI and SSH MOTD message
+ name: purefb_banner
+ namespace: ''
+ version_added: 1.4.0
+ purefb_bladename:
+ description: Configure Pure Storage FlashBlade name
+ name: purefb_bladename
+ namespace: ''
+ version_added: 1.0.0
+ purefb_bucket:
+ description: Manage Object Store Buckets on a Pure Storage FlashBlade.
+ name: purefb_bucket
+ namespace: ''
+ version_added: 1.0.0
+ purefb_bucket_replica:
+ description: Manage bucket replica links between Pure Storage FlashBlades
+ name: purefb_bucket_replica
+ namespace: ''
+ version_added: 1.0.0
+ purefb_certgrp:
+ description: Manage FlashBlade Certificate Groups
+ name: purefb_certgrp
+ namespace: ''
+ version_added: 1.4.0
+ purefb_certs:
+ description: Manage FlashBlade SSL Certificates
+ name: purefb_certs
+ namespace: ''
+ version_added: 1.4.0
+ purefb_connect:
+ description: Manage replication connections between two FlashBlades
+ name: purefb_connect
+ namespace: ''
+ version_added: 1.0.0
+ purefb_dns:
+ description: Configure Pure Storage FlashBlade DNS settings
+ name: purefb_dns
+ namespace: ''
+ version_added: 1.0.0
+ purefb_ds:
+ description: Configure FlashBlade Directory Service
+ name: purefb_ds
+ namespace: ''
+ version_added: 1.0.0
+ purefb_dsrole:
+ description: Configure FlashBlade Management Directory Service Roles
+ name: purefb_dsrole
+ namespace: ''
+ version_added: 1.0.0
+ purefb_eula:
+ description: Sign Pure Storage FlashBlade EULA
+ name: purefb_eula
+ namespace: ''
+ version_added: 1.6.0
+ purefb_fs:
+ description: Manage filesystems on Pure Storage FlashBlade
+ name: purefb_fs
+ namespace: ''
+ version_added: 1.0.0
+ purefb_fs_replica:
+ description: Manage filesystem replica links between Pure Storage FlashBlades
+ name: purefb_fs_replica
+ namespace: ''
+ version_added: 1.0.0
+ purefb_groupquota:
+ description: Manage filesystem group quotas
+ name: purefb_groupquota
+ namespace: ''
+ version_added: 1.7.0
+ purefb_info:
+ description: Collect information from Pure Storage FlashBlade
+ name: purefb_info
+ namespace: ''
+ version_added: 1.0.0
+ purefb_inventory:
+ description: Collect information from Pure Storage FlashBlade
+ name: purefb_inventory
+ namespace: ''
+ version_added: 1.0.0
+ purefb_keytabs:
+ description: Manage FlashBlade Kerberos Keytabs
+ name: purefb_keytabs
+ namespace: ''
+ version_added: 1.6.0
+ purefb_lag:
+ description: Manage FlashBlade Link Aggregation Groups
+ name: purefb_lag
+ namespace: ''
+ version_added: 1.7.0
+ purefb_lifecycle:
+ description: Manage FlashBlade object lifecycles
+ name: purefb_lifecycle
+ namespace: ''
+ version_added: 1.4.0
+ purefb_messages:
+ description: List FlashBlade Alert Messages
+ name: purefb_messages
+ namespace: ''
+ version_added: 1.10.0
+ purefb_network:
+ description: Manage network interfaces in a Pure Storage FlashBlade
+ name: purefb_network
+ namespace: ''
+ version_added: 1.0.0
+ purefb_ntp:
+ description: Configure Pure Storage FlashBlade NTP settings
+ name: purefb_ntp
+ namespace: ''
+ version_added: 1.0.0
+ purefb_phonehome:
+ description: Enable or Disable Pure Storage FlashBlade Phone Home
+ name: purefb_phonehome
+ namespace: ''
+ version_added: 1.0.0
+ purefb_pingtrace:
+ description: Employ the internal FlashBlade ping and trace mechanisms
+ name: purefb_pingtrace
+ namespace: ''
+ version_added: 1.11.0
+ purefb_policy:
+ description: Manage FlashBlade policies
+ name: purefb_policy
+ namespace: ''
+ version_added: 1.0.0
+ purefb_proxy:
+ description: Configure FlashBlade phonehome HTTPS proxy settings
+ name: purefb_proxy
+ namespace: ''
+ version_added: 1.0.0
+ purefb_ra:
+ description: Enable or Disable Pure Storage FlashBlade Remote Assist
+ name: purefb_ra
+ namespace: ''
+ version_added: 1.0.0
+ purefb_remote_cred:
+ description: Create, modify and delete FlashBlade object store remote credentials
+ name: purefb_remote_cred
+ namespace: ''
+ version_added: 1.0.0
+ purefb_s3acc:
+ description: Create or delete FlashBlade Object Store accounts
+ name: purefb_s3acc
+ namespace: ''
+ version_added: 1.0.0
+ purefb_s3user:
+ description: Create or delete FlashBlade Object Store account users
+ name: purefb_s3user
+ namespace: ''
+ version_added: 1.0.0
+ purefb_smtp:
+ description: Configure SMTP for Pure Storage FlashBlade
+ name: purefb_smtp
+ namespace: ''
+ version_added: 1.0.0
+ purefb_snap:
+ description: Manage filesystem snapshots on Pure Storage FlashBlades
+ name: purefb_snap
+ namespace: ''
+ version_added: 1.0.0
+ purefb_snmp_agent:
+ description: Configure the FlashBlade SNMP Agent
+ name: purefb_snmp_agent
+ namespace: ''
+ version_added: 1.0.0
+ purefb_snmp_mgr:
+ description: Configure FlashBlade SNMP Managers
+ name: purefb_snmp_mgr
+ namespace: ''
+ version_added: 1.0.0
+ purefb_subnet:
+ description: Manage network subnets in a Pure Storage FlashBlade
+ name: purefb_subnet
+ namespace: ''
+ version_added: 1.0.0
+ purefb_syslog:
+ description: Configure Pure Storage FlashBlade syslog settings
+ name: purefb_syslog
+ namespace: ''
+ version_added: 1.4.0
+ purefb_target:
+ description: Manage remote S3-capable targets for a FlashBlade
+ name: purefb_target
+ namespace: ''
+ version_added: 1.0.0
+ purefb_timeout:
+ description: Configure Pure Storage FlashBlade GUI idle timeout
+ name: purefb_timeout
+ namespace: ''
+ version_added: 1.6.0
+ purefb_tz:
+ description: Configure Pure Storage FlashBlade timezone
+ name: purefb_tz
+ namespace: ''
+ version_added: 1.10.0
+ purefb_user:
+ description: Modify FlashBlade user accounts
+ name: purefb_user
+ namespace: ''
+ version_added: 1.0.0
+ purefb_userpolicy:
+ description: Manage FlashBlade Object Store User Access Policies
+ name: purefb_userpolicy
+ namespace: ''
+ version_added: 1.6.0
+ purefb_userquota:
+ description: Manage filesystem user quotas
+ name: purefb_userquota
+ namespace: ''
+ version_added: 1.7.0
+ purefb_virtualhost:
+ description: Manage FlashBlade Object Store Virtual Hosts
+ name: purefb_virtualhost
+ namespace: ''
+ version_added: 1.6.0
+ netconf: {}
+ shell: {}
+ strategy: {}
+ test: {}
+ vars: {}
+version: 1.11.0
diff --git a/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml b/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml
new file mode 100644
index 000000000..9995182fa
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml
@@ -0,0 +1,329 @@
+ancestor: null
+releases:
+ 1.10.0:
+ changes:
+ bugfixes:
+ - purefb_connect - Resolve connection issues between two FBs that are throttling
+ capable
+ - purefb_policy - Fix incorrect API call for NFS export policy rule creation
+ minor_changes:
+ - All - Update documentation examples with FQCNs
+ - purefb_ad - Allow service to be a list
+ - purefb_bucket - Allow setting of bucket type to support VSO - requires Purity//FB
+ 3.3.3 or higher
+ - purefb_certs - Fix several misspellings of certificate
+ - purefb_info - Added filesystem default, user and group quotas where available
+ - purefb_info - Expose object store bucket type from Purity//FB 3.3.3
+ - purefb_info - Show information for current timezone
+ - purefb_policy - Allow rename of NFS Export Policies from Purity//FB 3.3.3
+ - purefb_tz - Add support for FlashBlade timezone management
+ fragments:
+ - 179_fqcn.yaml
+ - 183_v2_connections.yaml
+ - 184_certificate_typos.yaml
+ - 185_nfs_export_rule.yaml
+ - 186_add_tz.yaml
+ - 187_rename_nfs_policy.yaml
+ - 188_bucket_type.yaml
+ - 191_add_quota_info.yaml
+ - 194_lists_for_service.yaml
+ modules:
+ - description: List FlashBlade Alert Messages
+ name: purefb_messages
+ namespace: ''
+ - description: Configure Pure Storage FlashBlade timezone
+ name: purefb_tz
+ namespace: ''
+ release_date: '2022-09-12'
+ 1.11.0:
+ changes:
+ bugfixes:
+ - purefb_info - Fixed issue when more than 10 buckets have lifecycle rules.
+ - purefb_s3user - Fix incorrect response when bad key/secret pair provided for
+ new user
+ minor_changes:
+ - purefb_info - Added `encryption` and `support_keys` information.
+ - purefb_info - Added bucket quota and safemode information per bucket
+ - purefb_info - Added security update version for Purity//FB 4.0.2, or higher
+ - purefb_info - Updated object store account information
+ - purefb_inventory - Added `part_number` to hardware item information.
+ - purefb_policy - Added support for multiple rules in snapshot policies
+ - purefb_proxy - Added new boolean parameter `secure`. Default of true (for
+ backwards compatibility) sets the protocol to be `https://`. False sets `http://`
+ - purefb_s3acc - Added support for default bucket quotas and hard limits
+ - purefb_s3acc - Added support for object account quota and hard limit
+ fragments:
+ - 200_proxy.yaml
+ - 202_multiple_snap_rules.yaml
+ - 205_fix_multi_lifecycle.yaml
+ - 211_change_booleans.yaml
+ - 212_object_account_quota.yaml
+ - 213_sec_update.yaml
+ - 215_encrypt_sec_info.yaml
+ - 216_extra_bucket_info.yaml
+ - 217_inventory.yaml
+ - 218_object_account_info.yaml
+ - 220_s3user_key_fix.yaml
+ modules:
+ - description: Employ the internal FlashBlade ping and trace mechanisms
+ name: purefb_pingtrace
+ namespace: ''
+ release_date: '2023-04-13'
+ 1.3.0:
+ changes:
+ bugfixes:
+ - purefb_bucket - Add warning message if ``state`` is ``absent`` without ``eradicate:``
+ - purefb_fs - Add graceful exit when ``state`` is ``absent`` and filesystem
+ not eradicated
+ - purefb_fs - Add warning message if ``state`` is ``absent`` without ``eradicate``
+ major_changes:
+ - purefb_alert - manage alert email settings on a FlashBlade
+ - purefb_bladename - manage FlashBlade name
+ - purefb_bucket_replica - manage bucket replica links on a FlashBlade
+ - purefb_connect - manage connections between FlashBlades
+ - purefb_dns - manage DNS settings on a FlashBlade
+ - purefb_fs_replica - manage filesystem replica links on a FlashBlade
+ - purefb_inventory - get information about the hardware inventory of a FlashBlade
+ - purefb_ntp - manage the NTP settings for a FlashBlade
+ - purefb_phonehome - manage the phone home settings for a FlashBlade
+ - purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+ - purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+ - purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+ - purefb_snmp_agent - modify the FlashBlade SNMP Agent
+ - purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+ - purefb_target - manage remote S3-capable targets for a FlashBlade
+ - purefb_user - manage local ``pureuser`` account password on a FlashBlade
+ minor_changes:
+ - purefb_bucket - Versioning support added
+ - purefb_info - new options added for information collection
+ - purefb_network - Add replication service type
+ - purefb_s3user - Limit ``access_key`` recreation to 3 times
+ - purefb_s3user - return dict changed from ``ansible_facts`` to ``s3user_info``
+ release_summary: '| Release Date: 2020-08-08
+
+ | This changelog describes all changes made to the modules and plugins included
+ in this collection since Ansible 2.9.0
+
+ '
+ fragments:
+ - v1.3.0_summary.yaml
+ release_date: '2020-08-06'
+ 1.4.0:
+ changes:
+ bugfixes:
+ - purefb_policy - Resolve multiple issues related to incorrect use of timezones
+ - purefb_connect - Ensure changing encryption status on array connection is
+ performed correctly
+ - purefb_connect - Fix breaking change created in purity_fb SDK 1.9.2 for deletion
+ of array connections
+ - purefb_connect - Hide target array API token
+ - purefb_ds - Ensure updating directory service configurations completes correctly
+ - purefb_info - Fix issue getting array info when encrypted connection exists
+ minor_changes:
+ - purefb_banner - Module to manage the GUI and SSH login message
+ - purefb_certgrp - Module to manage FlashBlade Certificate Groups
+ - purefb_certs - Module to create and delete SSL certificates
+ - purefb_connect - Support idempotency when existing connection is incoming
+ - purefb_fs - Add new options for filesystem control (https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/pull/81)
+ - purefb_fs - Default filesystem size on creation changes from 32G to ``unlimited``
+ - purefb_fs - Fix error in deletion and eradication of filesystem
+ - purefb_fs_replica - Remove condition to attach/detach policies on unhealthy
+ replica-link
+ - purefb_info - Add support to list filesystem policies
+ - purefb_lifecycle - Module to manage FlashBlade Bucket Lifecycle Rules
+ - purefb_s3user - Add support for imported user access keys
+ - purefb_syslog - Module to manage syslog server configuration
+ fragments:
+ - 101_fix_policy_and_timezone_error.yaml
+ - 76_default_fs_size.yaml
+ - 77_filesystem_policies_info.yaml
+ - 78_update_filesystem_replica_link.yaml
+ - 79_hide_connect_api.yaml
+ - 80_support_reverse_replica_link.yaml
+ - 81_purefb_fs_new_options.yaml
+ - 83_add_certgrp.yml
+ - 84_add_cert.yaml
+ - 85_add_banner.yaml
+ - 86_add_syslog.yaml
+ - 88_add_lifecycle.yml
+ - 90_delete_conn_fix.yaml
+ - 90_imported_keys.yaml
+ - 92_fix_ds_update.yaml
+ - 96_fix_update_connection.yaml
+ - 97_fix_encrpyted_array_connection_info.yaml
+ modules:
+ - description: Configure Pure Storage FlashBlade GUI and SSH MOTD message
+ name: purefb_banner
+ namespace: ''
+ - description: Manage FlashBlade Certificate Groups
+ name: purefb_certgrp
+ namespace: ''
+ - description: Manage FlashBlade SSL Certificates
+ name: purefb_certs
+ namespace: ''
+ - description: Manage FlashBlade object lifecycles
+ name: purefb_lifecycle
+ namespace: ''
+ - description: Configure Pure Storage FlashBlade syslog settings
+ name: purefb_syslog
+ namespace: ''
+ release_date: '2020-10-14'
+ 1.5.0:
+ changes:
+ bugfixes:
+ - purefb_* - Return a correct value for `changed` in all modules when in check
+ mode
+ - purefb_dns - Deprecate search parameter
+ - purefb_dsrole - Resolve idempotency issue
+ - purefb_lifecycle - Fix error when creating new bucket lifecycle rule.
+ - purefb_policy - Ensure undeclared variables are set correctly
+ - purefb_s3user - Fix maximum access_key count logic
+ minor_changes:
+ - purefb_certs - Add update functionality for array cert
+ - purefb_fs - Add multiprotocol ACL support
+ - purefb_info - Add information regarding filesystem multiprotocol (where available)
+ - purefb_info - Add new parameter to provide details on admin users
+ - purefb_info - Add replication performance statistics
+ - purefb_s3user - Add ability to remove an S3 user's existing access key
+ fragments:
+ - 105_max_access_key.yaml
+ - 107_add_remove_s3user_key.yaml
+ - 108_dns_search_fix.yaml
+ - 109_update_info.yaml
+ - 111_dsrole_update_idempotency.yaml
+ - 112_fix_check_mode.yaml
+ - 113_policy_cleanup.yaml
+ - 114_certificate_update.yaml
+ - 115_multiprotocol.yaml
+ - 121_replication_perf.yaml
+ - 123_lifecycle_rule_fix.yaml
+ release_date: '2021-03-30'
+ 1.6.0:
+ changes:
+ minor_changes:
+ - purefb_apiclient - New module to manage API Clients
+ - purefb_ad - New module to manage Active Directory Account
+ - purefb_eula - New module to sign EULA
+ - purefb_info - Add Active Directory, Kerberos and Object Store Account information
+ - purefb_info - Add extra info for Purity//FB 3.2+ systems
+ - purefb_keytabs - New module to manage Kerberos Keytabs
+ - purefb_s3user - Add access policy option to user creation
+ - purefb_timeout - Add module to set GUI idle timeout
+ - purefb_userpolicy - New module to manage object store user access policies
+ - purefb_virtualhost - New module to manage Object Store Virtual Hosts
+ fragments:
+ - 127_add_eula.yaml
+ - 128_add_32_to_info.yaml
+ - 129-virtualhost.yaml
+ - 131-apiclient.yaml
+ - 132_add_timeout.yaml
+ - 135_add_user_policies.yaml
+ - 136_add_s3user_policy.yaml
+ - 138_add_ad_module.yaml
+ - 139_add_keytabs.yaml
+ - 140_more_32_info.yaml
+ modules:
+ - description: Manage FlashBlade Active Directory Account
+ name: purefb_ad
+ namespace: ''
+ - description: Manage FlashBlade API Clients
+ name: purefb_apiclient
+ namespace: ''
+ - description: Sign Pure Storage FlashBlade EULA
+ name: purefb_eula
+ namespace: ''
+ - description: Manage FlashBlade Kerberos Keytabs
+ name: purefb_keytabs
+ namespace: ''
+ - description: Configure Pure Storage FlashBlade GUI idle timeout
+ name: purefb_timeout
+ namespace: ''
+ - description: Manage FlashBlade Object Store User Access Policies
+ name: purefb_userpolicy
+ namespace: ''
+ - description: Manage FlashBlade Object Store Virtual Hosts
+ name: purefb_virtualhost
+ namespace: ''
+ release_date: '2021-04-21'
+ 1.7.0:
+ changes:
+ bugfixes:
+ - purefb_fs - Fix bug where changing the state of both NFS v3 and v4.1 at the
+ same time ignored one of these.
+ - purefb_s3acc - Ensure S3 Account Name is always lowercase
+ - purefb_s3user - Ensure S3 Account Name is always lowercase
+ - purefb_subnet - Allow subnet creation with no gateway
+ minor_changes:
+ - purefb_groupquota - New module to manage individual filesystem group quotas
+ - purefb_lag - Add support for LAG management
+ - purefb_snap - Add support for immediate snapshot to remote connected FlashBlade
+ - purefb_subnet - Add support for multiple LAGs.
+ - purefb_userquota - New module to manage individual filesystem user quotas
+ fragments:
+ - 147_no_gateway.yaml
+ - 150_fix_joint_nfs_version_change.yaml
+ - 152_s3acc_lowercase.yaml
+ - 153_add_quota.yaml
+ - 154_add_snap_now.yaml
+ - 158_support_lags.yaml
+ - 159_add_lag.yaml
+ modules:
+ - description: Manage filesystem group quotas
+ name: purefb_groupquota
+ namespace: ''
+ - description: Manage FlashBlade Link Aggregation Groups
+ name: purefb_lag
+ namespace: ''
+ - description: Manage filesystem user quotas
+ name: purefb_userquota
+ namespace: ''
+ release_date: '2021-09-27'
+ 1.8.0:
+ changes:
+ known_issues:
+ - purefb_lag - The mac_address field in the response is not populated. This
+ will be fixed in a future FlashBlade update.
+ minor_changes:
+ - purefb.py - Add check to ensure FlashBlade uses the latest REST version possible
+ for Purity version installed
+ - purefb_info - Add object lifecycles rules to bucket subset
+ - purefb_lifecycle - Add support for updated object lifecycle rules. See documentation
+ for details of new parameters.
+ - purefb_lifecycle - Change `keep_for` parameter to be `keep_previous_for`.
+ `keep_for` is deprecated and will be removed in a later version.
+ - purefb_user - Add support for managing user public key and user unlock
+ fragments:
+ - 161_add_lifecycle_info.yaml
+ - 162_new_lifecycle.yaml
+ - 163_admin_key.yaml
+ - 166_lag_mac_note.yaml
+ - 167_fix_logins.yaml
+ release_date: '2021-11-08'
+ 1.8.1:
+ changes:
+ minor_changes:
+ - purefb.py - Use latest `pypureclient` SDK with fix for "best fit". No longer
+ requires double login to negotiate best API version.
+ fragments:
+ - 169_pypureclient_fix.yaml
+ release_date: '2021-11-11'
+ 1.9.0:
+ changes:
+ minor_changes:
+ - purefb_admin - New module to manage global admin settings
+ - purefb_connect - Add support for array connections to have bandwidth throttling
+ defined
+ - purefb_fs - Add support for NFS export policies
+ - purefb_info - Add NFS export policies and rules
+ - purefb_info - Show array connections bandwidth throttle information
+ - purefb_policy - Add NFS export policies, with rules, as a new policy type
+ - purefb_policy - Add support for Object Store Access Policies, associated rules
+ and user grants
+ - purefb_policy - New parameter `policy_type` added. For backwards compatibility,
+ default to `snapshot` if not provided.
+ fragments:
+ - 164_add_admin.yaml
+ - 174_access_policies.yaml
+ - 175_throttle_support.yaml
+ - 176_nfs_export_policies.yaml
+ release_date: '2021-12-17'
diff --git a/ansible_collections/purestorage/flashblade/changelogs/config.yaml b/ansible_collections/purestorage/flashblade/changelogs/config.yaml
new file mode 100644
index 000000000..f5466368f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/config.yaml
@@ -0,0 +1,31 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: true
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+ - - major_changes
+ - Major Changes
+ - - minor_changes
+ - Minor Changes
+ - - breaking_changes
+ - Breaking Changes / Porting Guide
+ - - deprecated_features
+ - Deprecated Features
+ - - removed_features
+ - Removed Features (previously deprecated)
+ - - security_fixes
+ - Security Fixes
+ - - bugfixes
+ - Bugfixes
+ - - known_issues
+ - Known Issues
+title: Purestorage.Flashblade
+trivial_section_name: trivial
+use_fqcn: true
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml
new file mode 100644
index 000000000..e6c1ea64d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_policy - Resolve multiple issues related to incorrect use of timezones
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/105_max_access_key.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/105_max_access_key.yaml
new file mode 100644
index 000000000..8e673dad8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/105_max_access_key.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_s3user - Fix maximum access_key count logic
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/107_add_remove_s3user_key.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/107_add_remove_s3user_key.yaml
new file mode 100644
index 000000000..4567aedcd
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/107_add_remove_s3user_key.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_s3user - Add ability to remove an S3 user's existing access key
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/108_dns_search_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/108_dns_search_fix.yaml
new file mode 100644
index 000000000..974bf0e8d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/108_dns_search_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_dns - Deprecate search parameter
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/109_update_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/109_update_info.yaml
new file mode 100644
index 000000000..83f2c1923
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/109_update_info.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_info - Add new parameter to provide details on admin users
+ - purefb_info - Add information regarding filesystem multiprotocol (where available)
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/111_dsrole_update_idempotency.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/111_dsrole_update_idempotency.yaml
new file mode 100644
index 000000000..f136b3617
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/111_dsrole_update_idempotency.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_dsrole - Resolve idempotency issue
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/112_fix_check_mode.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/112_fix_check_mode.yaml
new file mode 100644
index 000000000..321ba1bea
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/112_fix_check_mode.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_* - Return a correct value for `changed` in all modules when in check mode
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/113_policy_cleanup.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/113_policy_cleanup.yaml
new file mode 100644
index 000000000..7e075ea02
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/113_policy_cleanup.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_policy - Ensure undeclared variables are set correctly
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/114_certificate_update.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/114_certificate_update.yaml
new file mode 100644
index 000000000..27b27bb64
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/114_certificate_update.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_certs - Add update functionality for array cert
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/115_multiprotocol.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/115_multiprotocol.yaml
new file mode 100644
index 000000000..f261d9899
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/115_multiprotocol.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_fs - Add multiprotocol ACL support
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/121_replication_perf.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/121_replication_perf.yaml
new file mode 100644
index 000000000..475a4ba15
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/121_replication_perf.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Add replication performance statistics
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/123_lifecycle_rule_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/123_lifecycle_rule_fix.yaml
new file mode 100644
index 000000000..db689a69e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/123_lifecycle_rule_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_lifecycle - Fix error when creating new bucket lifecycle rule.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/127_add_eula.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/127_add_eula.yaml
new file mode 100644
index 000000000..0b5ca328d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/127_add_eula.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_eula - New module to sign EULA
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/128_add_32_to_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/128_add_32_to_info.yaml
new file mode 100644
index 000000000..9b4c9f480
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/128_add_32_to_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Add extra info for Purity//FB 3.2+ systems
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/129-virtualhost.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/129-virtualhost.yaml
new file mode 100644
index 000000000..da52304ea
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/129-virtualhost.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_virtualhost - New module to manage Object Store Virtual Hosts
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml
new file mode 100644
index 000000000..7a3f021b5
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_apiclient - New module to manage API Clients
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/132_add_timeout.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/132_add_timeout.yaml
new file mode 100644
index 000000000..83bde3906
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/132_add_timeout.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_timeout - Add module to set GUI idle timeout
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/135_add_user_policies.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/135_add_user_policies.yaml
new file mode 100644
index 000000000..8716047c1
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/135_add_user_policies.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_userpolicy - New module to manage object store user access policies
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/136_add_s3user_policy.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/136_add_s3user_policy.yaml
new file mode 100644
index 000000000..b2351a885
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/136_add_s3user_policy.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_s3user - Add access policy option to user creation
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/138_add_ad_module.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/138_add_ad_module.yaml
new file mode 100644
index 000000000..fb2e2277e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/138_add_ad_module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_ad - New module to manage Active Directory Account
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/139_add_keytabs.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/139_add_keytabs.yaml
new file mode 100644
index 000000000..98e3e75ca
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/139_add_keytabs.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_keytabs - New module to manage Kerberos Keytabs
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/140_more_32_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/140_more_32_info.yaml
new file mode 100644
index 000000000..ac0687e29
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/140_more_32_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Add Active Directory, Kerberos and Object Store Account information
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/147_no_gateway.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/147_no_gateway.yaml
new file mode 100644
index 000000000..00fe81efe
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/147_no_gateway.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_subnet - Allow subnet creation with no gateway
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/150_fix_joint_nfs_version_change.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/150_fix_joint_nfs_version_change.yaml
new file mode 100644
index 000000000..818b49f2c
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/150_fix_joint_nfs_version_change.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_fs - Fix bug where changing the state of both NFS v3 and v4.1 at the same time ignored one of these.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/152_s3acc_lowercase.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/152_s3acc_lowercase.yaml
new file mode 100644
index 000000000..a2214ca56
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/152_s3acc_lowercase.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefb_s3acc - Ensure S3 Account Name is always lowercase
+ - purefb_s3user - Ensure S3 Account Name is always lowercase
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/153_add_quota.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/153_add_quota.yaml
new file mode 100644
index 000000000..e415fd23b
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/153_add_quota.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+  - purefb_groupquota - New module to manage individual filesystem group quotas
+  - purefb_userquota - New module to manage individual filesystem user quotas
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/154_add_snap_now.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/154_add_snap_now.yaml
new file mode 100644
index 000000000..c037d06bd
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/154_add_snap_now.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_snap - Add support for immediate snapshot to remote connected FlashBlade
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/158_support_lags.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/158_support_lags.yaml
new file mode 100644
index 000000000..c2c2b3a97
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/158_support_lags.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_subnet - Add support for multiple LAGs.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/159_add_lag.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/159_add_lag.yaml
new file mode 100644
index 000000000..7d10b895c
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/159_add_lag.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_lag - Add support for LAG management
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/161_add_lifecycle_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/161_add_lifecycle_info.yaml
new file mode 100644
index 000000000..1d85adae0
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/161_add_lifecycle_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_info - Add object lifecycle rules to bucket subset
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/162_new_lifecycle.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/162_new_lifecycle.yaml
new file mode 100644
index 000000000..4584f521f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/162_new_lifecycle.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_lifecycle - Add support for updated object lifecycle rules. See documentation for details of new parameters.
+ - purefb_lifecycle - Change `keep_for` parameter to be `keep_previous_for`. `keep_for` is deprecated and will be removed in a later version.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/163_admin_key.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/163_admin_key.yaml
new file mode 100644
index 000000000..c4c785737
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/163_admin_key.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_user - Add support for managing user public key and user unlock
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/164_add_admin.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/164_add_admin.yaml
new file mode 100644
index 000000000..6f6432a86
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/164_add_admin.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_admin - New module to manage global admin settings
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/166_lag_mac_note.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/166_lag_mac_note.yaml
new file mode 100644
index 000000000..49b1ddac7
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/166_lag_mac_note.yaml
@@ -0,0 +1,2 @@
+known_issues:
+ - purefb_lag - The mac_address field in the response is not populated. This will be fixed in a future FlashBlade update.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/167_fix_logins.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/167_fix_logins.yaml
new file mode 100644
index 000000000..ff2b70056
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/167_fix_logins.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb.py - Add check to ensure FlashBlade uses the latest REST version possible for Purity version installed
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/169_pypureclient_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/169_pypureclient_fix.yaml
new file mode 100644
index 000000000..671c0d3ff
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/169_pypureclient_fix.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb.py - Use latest `pypureclient` SDK with fix for "best fit". No longer requires double login to negotiate best API version.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/174_access_policies.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/174_access_policies.yaml
new file mode 100644
index 000000000..ab241fe39
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/174_access_policies.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_policy - Add support for Object Store Access Policies, associated rules and user grants
+  - purefb_policy - New parameter `policy_type` added. For backwards compatibility, defaults to `snapshot` if not provided.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/175_throttle_support.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/175_throttle_support.yaml
new file mode 100644
index 000000000..e075475cf
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/175_throttle_support.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_connect - Add support for array connections to have bandwidth throttling defined
+ - purefb_info - Show array connections bandwidth throttle information
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/176_nfs_export_policies.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/176_nfs_export_policies.yaml
new file mode 100644
index 000000000..2332d1c7f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/176_nfs_export_policies.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - purefb_policy - Add NFS export policies, with rules, as a new policy type
+ - purefb_info - Add NFS export policies and rules
+ - purefb_fs - Add support for NFS export policies
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/179_fqcn.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/179_fqcn.yaml
new file mode 100644
index 000000000..5d3ba4592
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/179_fqcn.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - All - Update documentation examples with FQCNs
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/183_v2_connections.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/183_v2_connections.yaml
new file mode 100644
index 000000000..267a19f10
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/183_v2_connections.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_connect - Resolve connection issues between two FBs that are throttling capable
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/184_certificate_typos.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/184_certificate_typos.yaml
new file mode 100644
index 000000000..a416d6a2e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/184_certificate_typos.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_certs - Fix several misspellings of certificate
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/185_nfs_export_rule.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/185_nfs_export_rule.yaml
new file mode 100644
index 000000000..d7dc7fa59
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/185_nfs_export_rule.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_policy - Fix incorrect API call for NFS export policy rule creation
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/186_add_tz.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/186_add_tz.yaml
new file mode 100644
index 000000000..074428b9e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/186_add_tz.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_tz - Add support for FlashBlade timezone management
+ - purefb_info - Show information for current timezone
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/187_rename_nfs_policy.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/187_rename_nfs_policy.yaml
new file mode 100644
index 000000000..d0caf092a
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/187_rename_nfs_policy.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_policy - Allow rename of NFS Export Policies from Purity//FB 3.3.3
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/188_bucket_type.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/188_bucket_type.yaml
new file mode 100644
index 000000000..ee681e33d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/188_bucket_type.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_info - Expose object store bucket type from Purity//FB 3.3.3
+ - purefb_bucket - Allow setting of bucket type to support VSO - requires Purity//FB 3.3.3 or higher
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/191_add_quota_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/191_add_quota_info.yaml
new file mode 100644
index 000000000..604296c5f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/191_add_quota_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Added filesystem default, user and group quotas where available
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/194_lists_for_service.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/194_lists_for_service.yaml
new file mode 100644
index 000000000..031a9bd9e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/194_lists_for_service.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_ad - Allow service to be a list
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/200_proxy.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/200_proxy.yaml
new file mode 100644
index 000000000..612534990
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/200_proxy.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_proxy - Added new boolean parameter `secure`. The default of `true` (for backwards compatibility) sets the protocol to `https://`; `false` sets `http://`
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/202_multiple_snap_rules.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/202_multiple_snap_rules.yaml
new file mode 100644
index 000000000..aca61dee8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/202_multiple_snap_rules.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_policy - Added support for multiple rules in snapshot policies
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml
new file mode 100644
index 000000000..b6810884b
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+  - purefb_info - Fixed issue when more than 10 buckets have lifecycle rules.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/211_change_booleans.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/211_change_booleans.yaml
new file mode 100644
index 000000000..84c3cb521
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/211_change_booleans.yaml
@@ -0,0 +1,2 @@
+trivial:
+ - various modules - Adjust booleans from ``yes``/``no`` to ``true``/``false`` in docs
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/212_object_account_quota.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/212_object_account_quota.yaml
new file mode 100644
index 000000000..7922ea515
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/212_object_account_quota.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_s3acc - Added support for object account quota and hard limit
+ - purefb_s3acc - Added support for default bucket quotas and hard limits
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/213_sec_update.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/213_sec_update.yaml
new file mode 100644
index 000000000..eaa250f7e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/213_sec_update.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_info - Added security update version for Purity//FB 4.0.2 or higher
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/215_encrypt_sec_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/215_encrypt_sec_info.yaml
new file mode 100644
index 000000000..778b39f3e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/215_encrypt_sec_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Added `encryption` and `support_keys` information.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/216_extra_bucket_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/216_extra_bucket_info.yaml
new file mode 100644
index 000000000..acebab758
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/216_extra_bucket_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Added bucket quota and safemode information per bucket
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/217_inventory.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/217_inventory.yaml
new file mode 100644
index 000000000..a95b5c44f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/217_inventory.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_inventory - Added `part_number` to hardware item information.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/218_object_account_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/218_object_account_info.yaml
new file mode 100644
index 000000000..54dac9dac
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/218_object_account_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Updated object store account information
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/220_s3user_key_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/220_s3user_key_fix.yaml
new file mode 100644
index 000000000..636cfadf2
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/220_s3user_key_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_s3user - Fix incorrect response when bad key/secret pair provided for new user
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml
new file mode 100644
index 000000000..b899c31f3
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/76_default_fs_size.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefb_fs - Default filesystem size on creation changes from 32G to ``unlimited``
+ - purefb_fs - Fix error in deletion and eradication of filesystem
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml
new file mode 100644
index 000000000..c4d84070f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/77_filesystem_policies_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Add support to list filesystem policies
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml
new file mode 100644
index 000000000..09bc6c3a2
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/78_update_filesystem_replica_link.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_fs_replica - Remove condition to attach/detach policies on unhealthy replica-link
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml
new file mode 100644
index 000000000..d6dcb9fe8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/79_hide_connect_api.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_connect - Hide target array API token
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml
new file mode 100644
index 000000000..42d8f1fe3
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/80_support_reverse_replica_link.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+  - purefb_connect - Support idempotency when existing connection is incoming
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml
new file mode 100644
index 000000000..a6eb75c04
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/81_purefb_fs_new_options.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_fs - Add new options for filesystem control (https://github.com/Pure-Storage-Ansible/FlashBlade-Collection/pull/81)
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml b/ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml
new file mode 100644
index 000000000..4f87b305e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/83_add_certgrp.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_certgrp - Module to manage FlashBlade Certificate Groups
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml
new file mode 100644
index 000000000..1470d302e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/84_add_cert.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_certs - Module to create and delete SSL certificates
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml
new file mode 100644
index 000000000..279173ccc
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/85_add_banner.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_banner - Module to manage the GUI and SSH login message
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml
new file mode 100644
index 000000000..0cde34ca5
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/86_add_syslog.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_syslog - Module to manage syslog server configuration
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml b/ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml
new file mode 100644
index 000000000..3caa436a5
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/88_add_lifecycle.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_lifecycle - Module to manage FlashBlade Bucket Lifecycle Rules
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml
new file mode 100644
index 000000000..93876fede
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/90_delete_conn_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_connect - Fix breaking change created in purity_fb SDK 1.9.2 for deletion of array connections
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml
new file mode 100644
index 000000000..af012f746
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/90_imported_keys.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_s3user - Add support for imported user access keys
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml
new file mode 100644
index 000000000..c4d52cab4
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/92_fix_ds_update.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_ds - Ensure updating directory service configurations completes correctly
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml
new file mode 100644
index 000000000..87bfbeeef
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/96_fix_update_connection.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_connect - Ensure changing encryption status on array connection is performed correctly
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml
new file mode 100644
index 000000000..5019c18e2
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_info - Fix issue getting array info when encrypted connection exists
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml
new file mode 100644
index 000000000..35cff95f9
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/v1.3.0_summary.yaml
@@ -0,0 +1,33 @@
+release_summary: |
+ | Release Date: 2020-08-08
+  | This changelog describes all changes made to the modules and plugins included in this collection since Ansible 2.9.0
+
+major_changes:
+ - purefb_alert - manage alert email settings on a FlashBlade
+ - purefb_bladename - manage FlashBlade name
+ - purefb_bucket_replica - manage bucket replica links on a FlashBlade
+ - purefb_connect - manage connections between FlashBlades
+ - purefb_dns - manage DNS settings on a FlashBlade
+ - purefb_fs_replica - manage filesystem replica links on a FlashBlade
+ - purefb_inventory - get information about the hardware inventory of a FlashBlade
+ - purefb_ntp - manage the NTP settings for a FlashBlade
+ - purefb_phonehome - manage the phone home settings for a FlashBlade
+ - purefb_policy - manage the filesystem snapshot policies for a FlashBlade
+ - purefb_proxy - manage the phone home HTTP proxy settings for a FlashBlade
+ - purefb_remote_cred - manage the Object Store Remote Credentials on a FlashBlade
+ - purefb_snmp_agent - modify the FlashBlade SNMP Agent
+ - purefb_snmp_mgr - manage SNMP Managers on a FlashBlade
+ - purefb_target - manage remote S3-capable targets for a FlashBlade
+ - purefb_user - manage local ``pureuser`` account password on a FlashBlade
+
+minor_changes:
+ - purefb_s3user - return dict changed from ``ansible_facts`` to ``s3user_info``
+ - purefb_s3user - Limit ``access_key`` recreation to 3 times
+ - purefb_info - new options added for information collection
+ - purefb_bucket - Versioning support added
+ - purefb_network - Add replication service type
+
+bugfixes:
+  - purefb_fs - Add graceful exit when ``state`` is ``absent`` and filesystem not eradicated
+ - purefb_fs - Add warning message if ``state`` is ``absent`` without ``eradicate``
+  - purefb_bucket - Add warning message if ``state`` is ``absent`` without ``eradicate``
diff --git a/ansible_collections/purestorage/flashblade/meta/runtime.yml b/ansible_collections/purestorage/flashblade/meta/runtime.yml
new file mode 100644
index 000000000..2ee3c9fa9
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/meta/runtime.yml
@@ -0,0 +1,2 @@
+---
+requires_ansible: '>=2.9.10'
diff --git a/ansible_collections/purestorage/flashblade/playbooks/.keep b/ansible_collections/purestorage/flashblade/playbooks/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/.keep
diff --git a/ansible_collections/purestorage/flashblade/playbooks/files/.keep b/ansible_collections/purestorage/flashblade/playbooks/files/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/files/.keep
diff --git a/ansible_collections/purestorage/flashblade/playbooks/roles/.keep b/ansible_collections/purestorage/flashblade/playbooks/roles/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/roles/.keep
diff --git a/ansible_collections/purestorage/flashblade/playbooks/tasks/.keep b/ansible_collections/purestorage/flashblade/playbooks/tasks/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/tasks/.keep
diff --git a/ansible_collections/purestorage/flashblade/playbooks/templates/.keep b/ansible_collections/purestorage/flashblade/playbooks/templates/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/templates/.keep
diff --git a/ansible_collections/purestorage/flashblade/playbooks/vars/.keep b/ansible_collections/purestorage/flashblade/playbooks/vars/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/playbooks/vars/.keep
diff --git a/ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py b/ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py
new file mode 100644
index 000000000..a3e5c735a
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/doc_fragments/purestorage.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley <simon@purestorage.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard Pure Storage documentation fragment
+ DOCUMENTATION = r"""
+options:
+ - See separate platform section for more details
+requirements:
+ - See separate platform section for more details
+notes:
+ - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade
+"""
+
+ # Documentation fragment for FlashBlade
+ FB = r"""
+options:
+ fb_url:
+ description:
+ - FlashBlade management IP address or Hostname.
+ type: str
+ api_token:
+ description:
+ - FlashBlade API token for admin privileged user.
+ type: str
+notes:
+ - This module requires the C(purity_fb) Python library
+ - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables
+ if I(fb_url) and I(api_token) arguments are not passed to the module directly
+requirements:
+ - python >= 2.7
+ - purity_fb >= 1.9
+ - netaddr
+ - pytz
+"""
diff --git a/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py b/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
new file mode 100644
index 000000000..cf987a3e5
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PurityFb
+except ImportError:
+ HAS_PURITY_FB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+    HAS_PYPURECLIENT = False
+
+from os import environ
+import platform
+
+VERSION = "1.4"
+USER_AGENT_BASE = "Ansible"
+API_AGENT_VERSION = "1.5"
+
+
+def get_blade(module):
+ """Return System Object or Fail"""
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
+ blade_name = module.params["fb_url"]
+ api = module.params["api_token"]
+
+ if HAS_PURITY_FB:
+ if blade_name and api:
+ blade = PurityFb(blade_name)
+ blade.disable_verify_ssl()
+ try:
+ blade.login(api)
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except Exception:
+ module.fail_json(
+ msg="Pure Storage FlashBlade authentication failed. Check your credentials"
+ )
+ elif environ.get("PUREFB_URL") and environ.get("PUREFB_API"):
+ blade = PurityFb(environ.get("PUREFB_URL"))
+ blade.disable_verify_ssl()
+ try:
+ blade.login(environ.get("PUREFB_API"))
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except Exception:
+ module.fail_json(
+ msg="Pure Storage FlashBlade authentication failed. Check your credentials"
+ )
+ else:
+ module.fail_json(
+ msg="You must set PUREFB_URL and PUREFB_API environment variables "
+ "or the fb_url and api_token module arguments"
+ )
+ else:
+ module.fail_json(msg="purity_fb SDK not installed.")
+ return blade
+
+
+def get_system(module):
+ """Return System Object or Fail"""
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
+ blade_name = module.params["fb_url"]
+ api = module.params["api_token"]
+
+ if HAS_PYPURECLIENT:
+ if blade_name and api:
+ system = flashblade.Client(
+ target=blade_name,
+ api_token=api,
+ user_agent=user_agent,
+ )
+ elif environ.get("PUREFB_URL") and environ.get("PUREFB_API"):
+ system = flashblade.Client(
+ target=(environ.get("PUREFB_URL")),
+ api_token=(environ.get("PUREFB_API")),
+ user_agent=user_agent,
+ )
+ else:
+ module.fail_json(
+ msg="You must set PUREFB_URL and PUREFB_API environment variables "
+ "or the fb_url and api_token module arguments"
+ )
+ res = system.get_hardware()
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Pure Storage FlashBlade authentication failed. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(msg="pypureclient SDK not installed.")
+ return system
+
+
+def purefb_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return dict(
+ fb_url=dict(),
+ api_token=dict(no_log=True),
+ )
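
Taken together, purefb_argument_spec() supplies the shared fb_url/api_token options and get_system() returns an authenticated pypureclient client (validated with a throwaway get_hardware() call), so a module body reduces to a few lines. A minimal sketch of a hypothetical read-only module built on these helpers, shown only to illustrate the call pattern and not part of the collection:

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
        get_system,
        purefb_argument_spec,
    )

    def main():
        # The shared spec provides fb_url and api_token; extend it with
        # module-specific options via argument_spec.update() as needed.
        module = AnsibleModule(purefb_argument_spec(), supports_check_mode=True)
        blade = get_system(module)  # calls fail_json() itself on bad credentials
        module.exit_json(changed=False, rest_versions=list(blade.get_versions().items))

    if __name__ == "__main__":
        main()
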
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ad.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ad.py
new file mode 100644
index 000000000..ccd1f5d92
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ad.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_ad
+version_added: '1.6.0'
+short_description: Manage FlashBlade Active Directory Account
+description:
+- Add or delete FlashBlade Active Directory Account
+- FlashBlade allows the creation of one AD computer account, or joining of an
+ existing AD computer account.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the AD account
+ type: str
+ required: true
+ existing:
+ description:
+ - Does the account I(name) already exist in the AD environment
+ type: bool
+ default: false
+ state:
+ description:
+    - Define whether the AD account is deleted or not
+ default: present
+ choices: [ absent, present ]
+ type: str
+ computer:
+ description:
+ - The common name of the computer account to be created in the Active Directory domain.
+ - If not specified, defaults to the name of the Active Directory configuration.
+ type: str
+ domain:
+ description:
+ - The Active Directory domain to join
+ type: str
+ username:
+ description:
+ - A user capable of creating a computer account within the domain
+ type: str
+ password:
+ description:
+ - Password string for I(username)
+ type: str
+ encryption:
+ description:
+ - The encryption types that will be supported for use by clients for Kerberos authentication
+ type: list
+ elements: str
+ choices: [ aes256-sha1, aes128-sha1, arcfour-hmac]
+ default: aes256-sha1
+ join_ou:
+ description:
+ - Location where the Computer account will be created. e.g. OU=Arrays,OU=Storage.
+ - If left empty, defaults to B(CN=Computers).
+ type: str
+ directory_servers:
+ description:
+ - A list of directory servers that will be used for lookups related to user authorization
+ - Accepted server formats are IP address and DNS name
+ - All specified servers must be registered to the domain appropriately in the array
+ configured DNS and will only be communicated with over the secure LDAP (LDAPS) protocol.
+ If not specified, servers are resolved for the domain in DNS
+ - The specified list can have a maximum length of 5. If more are provided only the first
+ 5 are used.
+ type: list
+ elements: str
+ kerberos_servers:
+ description:
+ - A list of key distribution servers to use for Kerberos protocol
+ - Accepted server formats are IP address and DNS name
+ - All specified servers must be registered to the domain appropriately in the array
+ configured DNS. If not specified, servers are resolved for the domain in DNS.
+ - The specified list can have a maximum length of 5. If more are provided only the first
+ 5 are used.
+ type: list
+ elements: str
+ service_principals:
+ description:
+ - A list of either FQDNs or SPNs for registering services with the domain.
+ - If not specified B(Computer Name.Domain) is used
+ type: list
+ elements: str
+ service:
+ description:
+ - Service protocol for Active Directory principals
+ - Refer to FlashBlade User Guide for more details
+ type: list
+ elements: str
+ choices: ['nfs', 'cifs', 'HOST']
+ default: nfs
+ local_only:
+ description:
+ - Do a local-only delete of an active directory account
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create new AD account
+ purestorage.flashblade.purefb_ad:
+ name: ad_account
+ computer: FLASHBLADE
+ domain: acme.com
+ username: Administrator
+ password: Password
+ join_ou: "CN=FakeOU"
+ encryption:
+ - aes128-cts-hmac-sha1-96
+ - aes256-cts-hmac-sha1-96
+ kerberos_servers:
+ - kdc.acme.com
+ directory_servers:
+ - ldap.acme.com
+ service_principals:
+ - vip1.flashblade.acme.com
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Connect to existing AD account
+ purestorage.flashblade.purefb_ad:
+ name: ad_account
+ computer: FLASHBLADE
+ domain: acme.com
+ username: Administrator
+ password: Password
+ existing: true
+ kerberos_servers:
+ - kdc.acme.com
+ directory_servers:
+ - ldap.acme.com
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Update existing AD account
+ purestorage.flashblade.purefb_ad:
+ name: ad_account
+ encryption:
+ - aes256-cts-hmac-sha1-96
+ kerberos_servers:
+ - kdc.acme.com
+ directory_servers:
+ - ldap.acme.com
+ service_principals:
+ - vip1.flashblade.acme.com
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete local AD account
+ purestorage.flashblade.purefb_ad:
+ name: ad_account
+ local_only: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Fully delete AD account
+ purestorage.flashblade.purefb_ad:
+ name: ad_account
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flashblade import ActiveDirectoryPost, ActiveDirectoryPatch
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def delete_account(module, blade):
+ """Delete Active directory Account"""
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_active_directory(
+ names=[module.params["name"]], local_only=module.params["local_only"]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete AD Account {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_account(module, blade):
+ """Create Active Directory Account"""
+ changed = True
+ if not module.params["existing"]:
+ ad_config = ActiveDirectoryPost(
+ computer_name=module.params["computer"],
+ directory_servers=module.params["directory_servers"],
+ kerberos_servers=module.params["kerberos_servers"],
+ domain=module.params["domain"],
+ encryption_types=module.params["encryption"],
+ fqdns=module.params["service_principals"],
+ join_ou=module.params["join_ou"],
+ user=module.params["username"],
+ password=module.params["password"],
+ )
+ if not module.check_mode:
+ res = blade.post_active_directory(
+ names=[module.params["name"]], active_directory=ad_config
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add Active Directory Account {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ ad_config = ActiveDirectoryPost(
+ computer_name=module.params["computer"],
+ directory_servers=module.params["directory_servers"],
+ kerberos_servers=module.params["kerberos_servers"],
+ domain=module.params["domain"],
+ encryption_types=module.params["encryption"],
+ user=module.params["username"],
+ password=module.params["password"],
+ )
+ if not module.check_mode:
+ res = blade.post_active_directory(
+ names=[module.params["name"]],
+ active_directory=ad_config,
+ join_existing_account=True,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add Active Directory Account {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_account(module, blade):
+ """Update Active Directory Account"""
+ changed = False
+ mod_ad = False
+ current_ad = list(blade.get_active_directory(names=[module.params["name"]]).items)[
+ 0
+ ]
+ attr = {}
+ if (
+ module.params["join_ou"] != current_ad.join_ou
+        and sorted(module.params["encryption"]) != sorted(current_ad.encryption_types)
+ ):
+ module.fail_json(msg="Cannot make changes to OU when changing encryption types")
+ if module.params["directory_servers"]:
+ if current_ad.directory_servers:
+ if set(module.params["directory_servers"]) != set(
+ current_ad.directory_servers
+ ):
+ attr["directory_servers"] = module.params["directory_servers"]
+ mod_ad = True
+ if module.params["kerberos_servers"]:
+ if current_ad.kerberos_servers:
+ if set(module.params["kerberos_servers"]) != set(
+ current_ad.kerberos_servers
+ ):
+ attr["kerberos_servers"] = module.params["kerberos_servers"]
+ mod_ad = True
+ if module.params["join_ou"] != current_ad.join_ou:
+ attr["join_ou"] = module.params["join_ou"]
+ mod_ad = True
+ if set(module.params["encryption"]) != set(current_ad.encryption_types):
+ attr["encryption_types"] = module.params["encryption"]
+ mod_ad = True
+ if module.params["service_principals"]:
+ if current_ad.service_principal_names:
+ full_spns = []
+            for principal in module.params["service_principals"]:
+                for service in module.params["service"]:
+                    full_spns.append(service + "/" + principal)
+ if set(current_ad.service_principal_names) != set(full_spns):
+ attr["service_principal_names"] = full_spns
+ mod_ad = True
+ if mod_ad:
+ changed = True
+ if not module.check_mode:
+ ad_attr = ActiveDirectoryPatch(**attr)
+ res = blade.patch_active_directory(
+ names=[module.params["name"]], active_directory=ad_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update Active Directory Account {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ username=dict(type="str"),
+ password=dict(type="str", no_log=True),
+ name=dict(type="str", required=True),
+ service=dict(
+ type="list",
+ elements="str",
+ default="nfs",
+ choices=["nfs", "cifs", "HOST"],
+ ),
+ computer=dict(type="str"),
+ existing=dict(type="bool", default=False),
+ local_only=dict(type="bool", default=False),
+ domain=dict(type="str"),
+ join_ou=dict(type="str"),
+ directory_servers=dict(type="list", elements="str"),
+ kerberos_servers=dict(type="list", elements="str"),
+ service_principals=dict(type="list", elements="str"),
+ encryption=dict(
+ type="list",
+ elements="str",
+ choices=["aes256-sha1", "aes128-sha1", "arcfour-hmac"],
+ default=["aes256-sha1"],
+ ),
+ )
+ )
+
+ required_if = [["state", "present", ["username", "password", "domain"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+ module.params["encryption"] = [
+ crypt.replace("aes256-sha1", "aes256-cts-hmac-sha1-96").replace(
+ "aes128-sha1", "aes128-cts-hmac-sha1-96"
+ )
+ for crypt in module.params["encryption"]
+ ]
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ state = module.params["state"]
+ exists = bool(blade.get_active_directory().total_item_count == 1)
+
+ # TODO: Check SMB mode.
+    # If SMB mode is adapter, only allow nfs
+    # Only allow cifs or HOST if SMB mode is native
+
+ if not module.params["computer"]:
+ module.params["computer"] = module.params["name"].replace("_", "-")
+ if module.params["kerberos_servers"]:
+ module.params["kerberos_servers"] = module.params["kerberos_servers"][0:5]
+ if module.params["directory_servers"]:
+ module.params["directory_servers"] = module.params["directory_servers"][0:5]
+
+ if not exists and state == "present":
+ create_account(module, blade)
+ elif exists and state == "present":
+ update_account(module, blade)
+ elif exists and state == "absent":
+ delete_account(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
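
Two details of the parameter handling in purefb_ad.py are easy to miss: every entry in service_principals is expanded into one SPN per protocol listed in service, and the short encryption aliases are rewritten to their full Kerberos names before any comparison. A standalone sketch of both transformations, mirroring the module logic above (the function names are illustrative only):

    ENCRYPTION_ALIASES = {
        "aes256-sha1": "aes256-cts-hmac-sha1-96",
        "aes128-sha1": "aes128-cts-hmac-sha1-96",
    }

    def expand_encryption(choices):
        # arcfour-hmac has no long form, so unmapped values pass through unchanged.
        return [ENCRYPTION_ALIASES.get(crypt, crypt) for crypt in choices]

    def expand_spns(services, principals):
        # One "service/principal" SPN per combination, as update_account() builds them.
        return [
            service + "/" + principal
            for principal in principals
            for service in services
        ]

    print(expand_spns(["nfs", "cifs"], ["vip1.flashblade.acme.com"]))
    # ['nfs/vip1.flashblade.acme.com', 'cifs/vip1.flashblade.acme.com']
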
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_admin.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_admin.py
new file mode 100644
index 000000000..3ee87bca1
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_admin.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_admin
+version_added: '1.8.0'
+short_description: Configure Pure Storage FlashBlade Global Admin settings
+description:
+- Set global admin settings for the FlashBlade
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ max_login:
+ description:
+ - Maximum number of failed logins before account is locked
+ type: int
+ min_password:
+ description:
+ - Minimum user password length
+ - Range between 1 and 100
+ default: 1
+ type: int
+ lockout:
+ description:
+ - Account lockout duration, in seconds, after max_login exceeded
+ - Range between 1 second and 90 days (7776000 seconds)
+ type: int
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set global login parameters
+ purestorage.flashblade.purefb_admin:
+ max_login: 5
+ min_password: 10
+ lockout: 300
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flashblade import AdminSetting
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_API_VERSION = "2.3"
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ max_login=dict(type="int"),
+ min_password=dict(type="int", default=1, no_log=False),
+ lockout=dict(type="int"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ if module.params["lockout"] and not 1 <= module.params["lockout"] <= 7776000:
+ module.fail_json(msg="Lockout must be between 1 and 7776000 seconds")
+ if not 1 <= module.params["min_password"] <= 100:
+ module.fail_json(msg="Minimum password length must be between 1 and 100")
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+ changed = False
+ if MIN_API_VERSION in api_version:
+ current_settings = list(blade.get_admins_settings().items)[0]
+ lockout = getattr(current_settings, "lockout_duration", None)
+ max_login = getattr(current_settings, "max_login_attempts", None)
+ min_password = getattr(current_settings, "min_password_length", 1)
+ if min_password != module.params["min_password"]:
+ changed = True
+ min_password = module.params["min_password"]
+        if lockout and module.params["lockout"] and lockout != module.params["lockout"] * 1000:
+ changed = True
+ lockout = module.params["lockout"] * 1000
+ elif not lockout and module.params["lockout"]:
+ changed = True
+ lockout = module.params["lockout"] * 1000
+        if max_login and module.params["max_login"] and max_login != module.params["max_login"]:
+ changed = True
+ max_login = module.params["max_login"]
+ elif not max_login and module.params["max_login"]:
+ changed = True
+ max_login = module.params["max_login"]
+
+ if changed and not module.check_mode:
+ admin = AdminSetting(
+ min_password_length=min_password,
+ max_login_attempts=max_login,
+ lockout_duration=lockout,
+ )
+
+ res = blade.patch_admins_settings(admin_setting=admin)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change Global Admin settings. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(msg="Purity version does not support Global Admin settings")
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
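
One subtlety in purefb_admin.py: the REST API reports lockout_duration in milliseconds, while the module's lockout parameter is given in seconds, hence the * 1000 in the comparison and assignment above. A minimal sketch of that idempotency check in isolation (the helper name is illustrative, not part of the module):

    def lockout_needs_update(current_ms, requested_s):
        # API value is milliseconds; module input is seconds.
        if requested_s is None:
            return False  # parameter not supplied, so leave the setting alone
        return current_ms != requested_s * 1000

    assert lockout_needs_update(300000, 300) is False  # 300 s already configured
    assert lockout_needs_update(None, 300) is True     # no lockout set yet
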
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py
new file mode 100644
index 000000000..406fe1c39
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_alert.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_alert
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade alert email settings
+description:
+- Configure alert email settings for Pure Storage FlashBlades.
+- Add or delete an individual alert email address from the existing
+ list of alert watchers.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ type: str
+ description:
+ - Create or delete alert email
+ default: present
+ choices: [ absent, present ]
+ address:
+ type: str
+ description:
+ - Email address (valid format required)
+ required: true
+ enabled:
+ type: bool
+ default: true
+ description:
+ - Set specified email address to be enabled or disabled
+ severity:
+ type: str
+ description:
+ - The minimum severity that an alert must have in order for
+ emails to be sent to the array's alert watchers
+ default: info
+ choices: [ info, warning, critical ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Add new email recipient and enable, or enable existing email
+ purestorage.flashblade.purefb_alert:
+ address: "user@domain.com"
+ enabled: true
+ state: present
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Delete existing email recipient
+ purestorage.flashblade.purefb_alert:
+ state: absent
+ address: "user@domain.com"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
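+# An additional illustrative example; address and token are placeholders:
+- name: Set minimum alert severity to critical for an existing recipient
+ purestorage.flashblade.purefb_alert:
+ address: "user@domain.com"
+ severity: critical
+ state: present
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592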
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import AlertWatcher
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def create_alert(module, blade):
+ """Create Alert Email"""
+ changed = True
+ if not module.check_mode:
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION in api_version:
+ watcher_settings = AlertWatcher(
+ minimum_notification_severity=module.params["severity"]
+ )
+ try:
+ blade.alert_watchers.create_alert_watchers(
+ names=[module.params["address"]], watcher_settings=watcher_settings
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+ else:
+ try:
+ blade.alert_watchers.create_alert_watchers(
+ names=[module.params["address"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+ if not module.params["enabled"]:
+ watcher_settings = AlertWatcher(enabled=module.params["enabled"])
+ try:
+ blade.alert_watchers.update_alert_watchers(
+ names=[module.params["address"]], watcher_settings=watcher_settings
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to disable during create alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_alert(module, blade):
+ """Update alert Watcher"""
+ api_version = blade.api_version.list_versions().versions
+ mod_alert = False
+ try:
+ alert = blade.alert_watchers.list_alert_watchers(
+ names=[module.params["address"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to get information for alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+ current_state = {
+ "enabled": alert.items[0].enabled,
+ "severity": alert.items[0].minimum_notification_severity,
+ }
+ if current_state["enabled"] != module.params["enabled"]:
+ mod_alert = True
+ if MIN_REQUIRED_API_VERSION in api_version:
+ if current_state["severity"] != module.params["severity"]:
+ mod_alert = True
+ if mod_alert:
+ changed = True
+ if not module.check_mode:
+ if MIN_REQUIRED_API_VERSION in api_version:
+ watcher_settings = AlertWatcher(
+ enabled=module.params["enabled"],
+ minimum_notification_severity=module.params["severity"],
+ )
+ else:
+ watcher_settings = AlertWatcher(enabled=module.params["enabled"])
+ try:
+ blade.alert_watchers.update_alert_watchers(
+ names=[module.params["address"]], watcher_settings=watcher_settings
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update alert email: {0}".format(
+ module.params["address"]
+ )
+ )
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def delete_alert(module, blade):
+ """Delete Alert Email"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.alert_watchers.delete_alert_watchers(names=[module.params["address"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete alert email: {0}".format(module.params["address"])
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(type="str", required=True),
+ enabled=dict(type="bool", default=True),
+ severity=dict(
+ type="str", default="info", choices=["info", "warning", "critical"]
+ ),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
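+ # Lightweight sanity check only (local part, "@", dotted domain); this is
+ # not full RFC 5322 address validation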
+ pattern = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$")
+ if not pattern.match(module.params["address"]):
+ module.fail_json(msg="Valid email address not provided.")
+
+ blade = get_blade(module)
+
+ exists = False
+ try:
+ emails = blade.alert_watchers.list_alert_watchers()
+ except Exception:
+ module.fail_json(msg="Failed to get exisitng email list")
+ for email in range(0, len(emails.items)):
+ if emails.items[email].name == module.params["address"]:
+ exists = True
+ break
+ if module.params["state"] == "present" and not exists:
+ create_alert(module, blade)
+ elif module.params["state"] == "present" and exists:
+ update_alert(module, blade)
+ elif module.params["state"] == "absent" and exists:
+ delete_alert(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_apiclient.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_apiclient.py
new file mode 100644
index 000000000..6a4755a95
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_apiclient.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_apiclient
+version_added: '1.6.0'
+short_description: Manage FlashBlade API Clients
+description:
+- Enable or disable FlashBlade API Clients
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the API Client
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the API client should exist or not.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ role:
+ description:
+ - The maximum role allowed for ID Tokens issued by this API client
+ type: str
+ choices: [readonly, ops_admin, storage_admin, array_admin]
+ issuer:
+ description:
+ - The name of the identity provider that will be issuing ID Tokens for this API client
+ - If not specified, defaults to the API client name, I(name).
+ type: str
+ public_key:
+ description:
+ - The API client's PEM formatted (Base64 encoded) RSA public key.
+ - Include the I(-----BEGIN PUBLIC KEY-----) and I(-----END PUBLIC KEY-----) lines
+ type: str
+ token_ttl:
+ description:
+ - Time To Live length in seconds for the exchanged access token
+ - Range is 1 second to 1 day (86400 seconds)
+ type: int
+ default: 86400
+ enabled:
+ description:
+ - State of the API Client
+ type: bool
+ default: true
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create API token ansible-token
+ purestorage.flashblade.purefb_apiclient:
+ name: ansible_token
+ issuer: "Pure_Storage"
+ token_ttl: 3000
+ role: array_admin
+ public_key: "{{lookup('file', 'public_pem_file') }}"
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Disable API Client
+ purestorage.flashblade.purefb_apiclient:
+ name: ansible_token
+ enabled: false
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Enable API Client
+ purestorage.flashblade.purefb_apiclient:
+ name: ansible_token
+ enabled: true
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Delete API Client
+ purestorage.flashblade.purefb_apiclient:
+ state: absent
+ name: ansible_token
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PURESTORAGE = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def delete_client(module, blade):
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.delete_api_clients(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete API Client {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_client(module, blade, client):
+ """Update API Client"""
+ changed = False
+ if client.enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_api_clients(
+ names=[module.params["name"]],
+ api_clients=flashblade.ApiClient(enabled=module.params["enabled"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update API Client {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_client(module, blade):
+ """Create API Client"""
+ changed = True
+ if not 1 <= module.params["token_ttl"] <= 86400:
+ module.fail_json(msg="token_ttl parameter is out of range (1 to 86400)")
+ else:
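+ # The REST API expresses the token TTL in milliseconds; convert from seconds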
+ token_ttl = module.params["token_ttl"] * 1000
+ if not module.params["issuer"]:
+ module.params["issuer"] = module.params["name"]
+ if not module.check_mode:
+ api_client = flashblade.ApiClientsPost(
+ max_role={"name": module.params["role"]},
+ issuer=module.params["issuer"],
+ access_token_ttl_in_ms=token_ttl,
+ public_key=module.params["public_key"],
+ )
+ res = blade.post_api_clients(
+ names=[module.params["name"]], api_client=api_client
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create API Client {0}. Error message: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if module.params["enabled"]:
+ attr = flashblade.ApiClient(enabled=True)
+ res = blade.patch_api_clients(
+ api_clients=attr, names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ module.warn(
+ "API Client {0} created by enable failed. Please investigate.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ enabled=dict(type="bool", default=True),
+ name=dict(type="str", required=True),
+ role=dict(
+ type="str",
+ choices=["readonly", "ops_admin", "storage_admin", "array_admin"],
+ ),
+ public_key=dict(type="str", no_log=True),
+ token_ttl=dict(type="int", default=86400, no_log=False),
+ issuer=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
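+ # Client and issuer names: 1 to 56 characters, alphanumeric plus "_",
+ # starting and ending with an alphanumeric character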
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9_]{0,54}[a-zA-Z0-9])?$")
+ if module.params["issuer"]:
+ if not pattern.match(module.params["issuer"]):
+ module.fail_json(
+ msg="API Client Issuer name {0} does not conform to required naming convention".format(
+ module.params["issuer"]
+ )
+ )
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="Object Store Virtual Host name {0} does not conform to required naming convention".format(
+ module.params["name"]
+ )
+ )
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ state = module.params["state"]
+
+ exists = bool(
+ blade.get_api_clients(names=[module.params["name"]]).status_code == 200
+ )
+ if exists:
+ client = list(blade.get_api_clients(names=[module.params["name"]]).items)[0]
+
+ if not exists and state == "present":
+ create_client(module, blade)
+ elif exists and state == "present":
+ update_client(module, blade, client)
+ elif exists and state == "absent":
+ delete_client(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py
new file mode 100644
index 000000000..739c2ab9a
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_banner.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_banner
+version_added: '1.4.0'
+short_description: Configure Pure Storage FlashBlade GUI and SSH MOTD message
+description:
+- Configure MOTD for Pure Storage FlashBlades.
+- This will be shown during an SSH or GUI login to the system.
+- Multiple line messages can be achieved using \\n.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set or delete the MOTD
+ default: present
+ type: str
+ choices: [ present, absent ]
+ banner:
+ description:
+ - Banner text, or MOTD, to use
+ type: str
+ default: "Welcome to the machine..."
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set new banner text
+ purestorage.flashblade.purefb_banner:
+ banner: "Banner over\ntwo lines"
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete banner text
+ purestorage.flashblade.purefb_banner:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PureArray
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.10"
+
+
+def set_banner(module, blade):
+ """Set MOTD banner text"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if not module.params["banner"]:
+ module.fail_json(msg="Invalid MOTD banner given")
+ blade_settings = PureArray(banner=module.params["banner"])
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(msg="Failed to set MOTD banner text")
+
+ module.exit_json(changed=changed)
+
+
+def delete_banner(module, blade):
+ """Delete MOTD banner text"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade_settings = PureArray(banner="")
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(msg="Failed to delete current MOTD banner text")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ banner=dict(type="str", default="Welcome to the machine..."),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ required_if = [("state", "present", ["banner"])]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+ current_banner = blade.login_banner.list_login_banner().login_banner
+
+ # set banner if empty value or value differs
+ if state == "present" and (
+ not current_banner or current_banner != module.params["banner"]
+ ):
+ set_banner(module, blade)
+ # clear banner if it has a value
+ elif state == "absent" and current_banner:
+ delete_banner(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py
new file mode 100644
index 000000000..0e0b5c755
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bladename.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_bladename
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade name
+description:
+- Configure name of Pure Storage FlashBlades.
+- Ideal for Day 0 initial configuration.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set the FlashBlade name
+ type: str
+ default: present
+ choices: [ present ]
+ name:
+ description:
+ - Name of the FlashBlade. Must conform to correct naming schema.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set new FlashBlade name
+ purestorage.flashblade.purefb_bladename:
+ name: new-flashblade-name
+ state: present
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PureArray
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def update_name(module, blade):
+ """Change aray name"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade_settings = PureArray(name=module.params["name"])
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(
+ msg="Failed to change array name to {0}".format(module.params["name"])
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
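+ # Array names: 1 to 56 characters, alphanumeric plus "-", starting and
+ # ending with an alphanumeric character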
+ pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,54}[a-zA-Z0-9])?$")
+ if not pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="FlashBlade name {0} does not conform to array name rules".format(
+ module.params["name"]
+ )
+ )
+ if module.params["name"] != blade.arrays.list_arrays().items[0].name:
+ update_name(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
new file mode 100644
index 000000000..67b6b1545
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
@@ -0,0 +1,398 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_bucket
+version_added: "1.0.0"
+short_description: Manage Object Store Buckets on a Pure Storage FlashBlade.
+description:
+ - This module manages object store (S3) buckets on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Bucket Name.
+ required: true
+ type: str
+ account:
+ description:
+ - Object Store Account for Bucket.
+ required: true
+ type: str
+ versioning:
+ description:
+ - State of S3 bucket versioning
+ required: false
+ default: absent
+ type: str
+ choices: [ "enabled", "suspended", "absent" ]
+ state:
+ description:
+ - Create, delete or modify a bucket.
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ eradicate:
+ description:
+ - Define whether to eradicate the bucket on delete or leave in trash.
+ required: false
+ type: bool
+ default: false
+ mode:
+ description:
+ - The type of bucket to be created. Also referred to as VSO Mode.
+ - Requires Purity//FB 3.3.3 or higher
+ - I(multi-site) type can only be used after feature is
+ enabled by Pure Technical Support
+ type: str
+ choices: [ "classic", "multi-site" ]
+ version_added: '1.10.0'
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new bucket named foo in account bar
+ purestorage.flashblade.purefb_bucket:
+ name: foo
+ account: bar
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete bucket named foo in account bar
+ purestorage.flashblade.purefb_bucket:
+ name: foo
+ account: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Change bucket versioning state
+ purestorage.flashblade.purefb_bucket:
+ name: foo
+ account: bar
+ versioning: enabled
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Recover deleted bucket named foo in account bar
+ purestorage.flashblade.purefb_bucket:
+ name: foo
+ account: bar
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Eradicate bucket named foo in account bar
+ purestorage.flashblade.purefb_bucket:
+ name: foo
+ account: bar
+ state: absent
+ eradicate: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
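+
+# An illustrative VSO example; requires Purity//FB 3.3.3 or higher, and the
+# multi-site type must first be enabled by Pure Technical Support:
+- name: Create new multi-site bucket named foo in account bar
+ purestorage.flashblade.purefb_bucket:
+ name: foo
+ account: bar
+ mode: multi-site
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641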
+"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Bucket, Reference, BucketPatch, BucketPost
+except ImportError:
+ HAS_PURITY_FB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.5"
+VERSIONING_VERSION = "1.9"
+VSO_VERSION = "2.4"
+
+
+def get_s3acc(module, blade):
+ """Return Object Store Account or None"""
+ s3acc = None
+ accts = blade.object_store_accounts.list_object_store_accounts()
+ for acct in range(0, len(accts.items)):
+ if accts.items[acct].name == module.params["account"]:
+ s3acc = accts.items[acct]
+ return s3acc
+
+
+def get_bucket(module, blade):
+ """Return Bucket or None"""
+ s3bucket = None
+ buckets = blade.buckets.list_buckets()
+ for bucket in range(0, len(buckets.items)):
+ if buckets.items[bucket].name == module.params["name"]:
+ s3bucket = buckets.items[bucket]
+ return s3bucket
+
+
+def create_bucket(module, blade):
+ """Create bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = blade.api_version.list_versions().versions
+ if VSO_VERSION in api_version and module.params["mode"]:
+ bladev2 = get_system(module)
+ res = bladev2.post_buckets(
+ names=[module.params["name"]],
+ bucket=flashblade.BucketPost(
+ account=flashblade.Reference(name=module.params["account"]),
+ bucket_type=module.params["mode"],
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Object Store Bucket {0} creation failed. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ elif VERSIONING_VERSION in api_version:
+ attr = BucketPost()
+ attr.account = Reference(name=module.params["account"])
+ blade.buckets.create_buckets(names=[module.params["name"]], bucket=attr)
+ else:
+ attr = Bucket()
+ attr.account = Reference(name=module.params["account"])
+ blade.buckets.create_buckets(
+ names=[module.params["name"]], account=attr
+ )
+ if (
+ module.params["versioning"] != "absent"
+ and VERSIONING_VERSION in api_version
+ ):
+ try:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]],
+ bucket=BucketPatch(versioning=module.params["versioning"]),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0} Created but versioning state failed".format(
+ module.params["name"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Creation failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_bucket(module, blade):
+ """Delete Bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = blade.api_version.list_versions().versions
+ if VERSIONING_VERSION in api_version:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]], bucket=BucketPatch(destroyed=True)
+ )
+ else:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]], destroyed=Bucket(destroyed=True)
+ )
+ if module.params["eradicate"]:
+ try:
+ blade.buckets.delete_buckets(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Eradication failed".format(
+ module.params["name"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Deletion failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def recover_bucket(module, blade):
+ """Recover Bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = blade.api_version.list_versions().versions
+ if VERSIONING_VERSION in api_version:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]], bucket=BucketPatch(destroyed=False)
+ )
+ else:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]], destroyed=Bucket(destroyed=False)
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Recovery failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_bucket(module, blade, bucket):
+ """Update Bucket"""
+ changed = False
+ api_version = blade.api_version.list_versions().versions
+ if VSO_VERSION in api_version:
+ if module.params["mode"]:
+ bladev2 = get_system(module)
+ bucket_detail = bladev2.get_buckets(names=[module.params["name"]])
+ if list(bucket_detail.items)[0].bucket_type != module.params["mode"]:
+ module.warn("Changing bucket type is not permitted.")
+
+ if VERSIONING_VERSION in api_version:
+ if bucket.versioning != "none":
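+ # Once enabled, bucket versioning can never return to "none", so a
+ # request for "absent" is applied as "suspended"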
+ if module.params["versioning"] == "absent":
+ versioning = "suspended"
+ else:
+ versioning = module.params["versioning"]
+ if bucket.versioning != versioning:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]],
+ bucket=BucketPatch(versioning=versioning),
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Versioning change failed".format(
+ module.params["name"]
+ )
+ )
+ elif module.params["versioning"] != "absent":
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]],
+ bucket=BucketPatch(versioning=module.params["versioning"]),
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Versioning change failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def eradicate_bucket(module, blade):
+ """Eradicate Bucket"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.buckets.delete_buckets(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0}: Eradication failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ account=dict(required=True),
+ eradicate=dict(default="false", type="bool"),
+ mode=dict(type="str", choices=["classic", "multi-site"]),
+ versioning=dict(
+ default="absent", choices=["enabled", "suspended", "absent"]
+ ),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+ if module.params["mode"]:
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required to support VSO mode")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+ if module.params["mode"] and VSO_VERSION not in api_version:
+ module.fail_json(msg="VSO mode requires Purity//FB 3.3.3 or higher.")
+
+ bucket = get_bucket(module, blade)
+ if not get_s3acc(module, blade):
+ module.fail_json(
+ msg="Object Store Account {0} does not exist.".format(
+ module.params["account"]
+ )
+ )
+
+ if module.params["eradicate"] and state == "present":
+ module.warn("Eradicate flag ignored without state=absent")
+
+ if state == "present" and not bucket:
+ create_bucket(module, blade)
+ elif state == "present" and bucket and bucket.destroyed:
+ recover_bucket(module, blade)
+ elif state == "absent" and bucket and not bucket.destroyed:
+ delete_bucket(module, blade)
+ elif state == "present" and bucket:
+ update_bucket(module, blade, bucket)
+ elif (
+ state == "absent" and bucket and bucket.destroyed and module.params["eradicate"]
+ ):
+ eradicate_bucket(module, blade)
+ elif state == "absent" and not bucket:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
new file mode 100644
index 000000000..6ac3775ae
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_bucket_replica
+version_added: '1.0.0'
+short_description: Manage bucket replica links between Pure Storage FlashBlades
+description:
+ - This module manages bucket replica links between Pure Storage FlashBlades.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Local Bucket Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or modifies a bucket replica link
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ target:
+ description:
+ - Remote array or target name to create replica on.
+ required: false
+ type: str
+ target_bucket:
+ description:
+ - Name of the target bucket
+ - If not supplied, will default to I(name).
+ type: str
+ required: false
+ paused:
+ description:
+ - State of the bucket replica link
+ type: bool
+ default: false
+ credential:
+ description:
+ - Name of the remote credential to use.
+ required: false
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new bucket replica from foo to bar on arrayB
+ purestorage.flashblade.purefb_bucket_replica:
+ name: foo
+ target: arrayB
+ target_bucket: bar
+ credential: cred_1
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Pause existing bucket replica link
+ purestorage.flashblade.purefb_bucket_replica:
+ name: foo
+ paused: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete bucket replica link foo
+ purestorage.flashblade.purefb_bucket_replica:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import BucketReplicaLink, ObjectStoreRemoteCredentials
+except ImportError:
+ HAS_PURITY_FB = False
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def get_local_bucket(module, blade):
+ """Return Bucket or None"""
+ try:
+ res = blade.buckets.list_buckets(names=[module.params["name"]])
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_remote_cred(module, blade, target):
+ """Return Remote Credential or None"""
+ try:
+ res = (
+ blade.object_store_remote_credentials.list_object_store_remote_credentials(
+ names=[target + "/" + module.params["credential"]]
+ )
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_local_rl(module, blade):
+ """Return Bucket Replica Link or None"""
+ try:
+ res = blade.bucket_replica_links.list_bucket_replica_links(
+ local_bucket_names=[module.params["name"]]
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_connected(module, blade):
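+ # A valid replication target is either a connected FlashBlade array or a
+ # connected object store target, so check both lists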
+ connected_blades = blade.array_connections.list_array_connections()
+ for target in range(0, len(connected_blades.items)):
+ if (
+ connected_blades.items[target].remote.name == module.params["target"]
+ or connected_blades.items[target].management_address
+ == module.params["target"]
+ ) and connected_blades.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_blades.items[target].remote.name
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].name == module.params[
+ "target"
+ ] and connected_targets.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_targets.items[target].name
+ return None
+
+
+def create_rl(module, blade, remote_cred):
+ """Create Bucket Replica Link"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if not module.params["target_bucket"]:
+ module.params["target_bucket"] = module.params["name"]
+ else:
+ module.params["target_bucket"] = module.params["target_bucket"].lower()
+ blade.bucket_replica_links.create_bucket_replica_links(
+ local_bucket_names=[module.params["name"]],
+ remote_bucket_names=[module.params["target_bucket"]],
+ remote_credentials_names=[remote_cred.name],
+ bucket_replica_link=BucketReplicaLink(paused=module.params["paused"]),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create bucket replica link {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_rl_policy(module, blade, local_replica_link):
+ """Update Bucket Replica Link"""
+ changed = False
+ new_cred = local_replica_link.remote.name + "/" + module.params["credential"]
+ if (
+ local_replica_link.paused != module.params["paused"]
+ or local_replica_link.remote_credentials.name != new_cred
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ module.warn("{0}".format(local_replica_link))
+ blade.bucket_replica_links.update_bucket_replica_links(
+ local_bucket_names=[module.params["name"]],
+ remote_bucket_names=[local_replica_link.remote_bucket.name],
+ remote_names=[local_replica_link.remote.name],
+ bucket_replica_link=BucketReplicaLink(
+ paused=module.params["paused"],
+ remote_credentials=ObjectStoreRemoteCredentials(name=new_cred),
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update bucket replica link {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_rl_policy(module, blade, local_replica_link):
+ """Delete Bucket Replica Link"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.bucket_replica_links.delete_bucket_replica_links(
+ remote_names=[local_replica_link.remote.name],
+ local_bucket_names=[module.params["name"]],
+ remote_bucket_names=[local_replica_link.remote_bucket.name],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete bucket replica link {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ target=dict(type="str"),
+ target_bucket=dict(type="str"),
+ paused=dict(type="bool", default=False),
+ credential=dict(type="str"),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ module.params["name"] = module.params["name"].lower()
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ local_bucket = get_local_bucket(module, blade)
+ local_replica_link = get_local_rl(module, blade)
+ target = get_connected(module, blade)
+
+ if not target:
+ module.fail_json(
+ msg="Selected target {0} is not connected.".format(module.params["target"])
+ )
+
+ if local_replica_link and not module.params["credential"]:
+ module.params["credential"] = local_replica_link.remote_credentials.name.split(
+ "/"
+ )[1]
+ remote_cred = get_remote_cred(module, blade, target)
+ if not remote_cred:
+ module.fail_json(
+ msg="Selected remote credential {0} does not exist for target {1}.".format(
+ module.params["credential"], module.params["target"]
+ )
+ )
+
+ if not local_bucket:
+ module.fail_json(
+ msg="Selected local bucket {0} does not exist.".format(
+ module.params["name"]
+ )
+ )
+
+ if local_replica_link:
+ if local_replica_link.status == "unhealthy":
+ module.fail_json(msg="Replica Link unhealthy - please check target")
+
+ if state == "present" and not local_replica_link:
+ create_rl(module, blade, remote_cred)
+ elif state == "present" and local_replica_link:
+ update_rl_policy(module, blade, local_replica_link)
+ elif state == "absent" and local_replica_link:
+ delete_rl_policy(module, blade, local_replica_link)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py
new file mode 100644
index 000000000..2308b6f16
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certgrp.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_certgrp
+version_added: '1.4.0'
+short_description: Manage FlashBlade Certificate Groups
+description:
+- Manage certificate groups for FlashBlades
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete certificate group
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - Name of the certificate group
+ type: str
+ certificates:
+ description:
+ - List of certificates to add to the group on creation
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create a certificate group
+ purestorage.flashblade.purefb_certgrp:
+ name: test_grp
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a certificate group and add existing certificates
+ purestorage.flashblade.purefb_certgrp:
+ name: test_grp
+ certificates:
+ - cert1
+ - cert2
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a certificate from a group
+ purestorage.flashblade.purefb_certgrp:
+ name: test_grp
+ certificates:
+ - cert2
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a certificate group
+ purestorage.flashblade.purefb_certgrp:
+ name: test_grp
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def delete_certgrp(module, blade):
+ """Delete certifcate group"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.delete_certificate_groups(
+ names=[module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete certifcate group {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_certgrp(module, blade):
+ """Create certifcate group"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.create_certificate_groups(
+ names=[module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create certificate group {0}.".format(
+ module.params["name"]
+ )
+ )
+ if module.params["certificates"]:
+ try:
+ blade.certificate_groups.add_certificate_group_certificates(
+ certificate_names=module.params["certificates"],
+ certificate_group_names=[module.params["name"]],
+ )
+ except Exception:
+ blade.certificate_groups.delete_certificate_groups(
+ names=[module.params["name"]]
+ )
+ module.fail_json(
+ msg="Failed to add certifcates {0}. "
+ "Please check they all exist".format(module.params["certificates"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_certgrp(module, blade):
+ """Update certificate group"""
+ changed = False
+ try:
+ certs = blade.certificate_groups.list_certificate_group_certificates(
+ certificate_group_names=[module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to get certifates list for group {0}.".format(
+ module.params["name"]
+ )
+ )
+ if not certs:
+ if module.params["state"] == "present":
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.add_certificate_group_certificates(
+ certificate_names=module.params["certificates"],
+ certificate_group_names=[module.params["name"]],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to add certifcates {0}. "
+ "Please check they all exist".format(
+ module.params["certificates"]
+ )
+ )
+ else:
+ current = []
+ for cert in range(0, len(certs.items)):
+ current.append(certs.items[cert].member.name)
+ for new_cert in range(0, len(module.params["certificates"])):
+ certificate = module.params["certificates"][new_cert]
+ if certificate in current:
+ if module.params["state"] == "absent":
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.remove_certificate_group_certificates(
+ certificate_names=[certificate],
+ certificate_group_names=[module.params["name"]],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete certifcate {0} from group {1}.".format(
+ certificate, module.params["name"]
+ )
+ )
+ else:
+ if module.params["state"] == "present":
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificate_groups.add_certificate_group_certificates(
+ certificate_names=[certificate],
+ certificate_group_names=[module.params["name"]],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to add certifcate {0} to group {1}".format(
+ certificate, module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str"),
+ certificates=dict(type="list", elements="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ try:
+ certgrp = blade.certificate_groups.list_certificate_groups(
+ names=[module.params["name"]]
+ ).items[0]
+ except Exception:
+ certgrp = None
+
+ if certgrp and state == "present" and module.params["certificates"]:
+ update_certgrp(module, blade)
+ elif state == "present" and not certgrp:
+ create_certgrp(module, blade)
+ elif state == "absent" and certgrp:
+ if module.params["certificates"]:
+ update_certgrp(module, blade)
+ else:
+ delete_certgrp(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py
new file mode 100644
index 000000000..b9a2c76f7
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_certs.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_certs
+version_added: '1.4.0'
+short_description: Manage FlashBlade SSL Certificates
+description:
+- Manage SSL certificates for FlashBlades
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete certificate
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - Name of the certificate
+ type: str
+ contents:
+ description:
+ - SSL certificate text
+ type: str
+ private_key:
+ description:
+ - SSL certificate private key text
+ type: str
+ passphrase:
+ description:
+ - Passphrase for the private_key
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create an SSL certificate
+ purestorage.flashblade.purefb_certs:
+ name: test_cert
+ contents: "{{lookup('file', 'certificate_file_name') }}"
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete an SSL certificate
+ purestorage.flashblade.purefb_certs:
+ name: test_cert
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Update SSL certificate
+ purestorage.flashblade.purefb_certs:
+ name: global
+ contents: "{{ lookup('file', 'certificate_file_name') }}"
+ private_key: "{{ lookup('file', 'certificate_key_file_name') }}"
+ passphrase: 'mypassword'
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import Certificate, CertificatePost
+except ImportError:
+ HAS_PURITYFB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def delete_cert(module, blade):
+ """Delete certificate"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.certificates.delete_certificates(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete certificate {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_cert(module, blade):
+ """Create certificate"""
+ changed = True
+ if not module.check_mode:
+ try:
+ body = CertificatePost(
+ certificate=module.params["contents"], certificate_type="external"
+ )
+ blade.certificates.create_certificates(
+ names=[module.params["name"]], certificate=body
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create certificate {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_cert(module, blade, cert):
+ """Update certificate"""
+ changed = False
+ if cert.certificate_type == "external":
+ module.fail_json(msg="External certificates cannot be modified")
+
+ if not module.params["private_key"]:
+ module.fail_json(msg="private_key must be specified for the global certificate")
+
+ if cert.certificate.strip() != module.params["contents"].strip():
+ changed = True
+ if not module.check_mode:
+ try:
+ body = Certificate(
+ certificate=module.params["contents"],
+ private_key=module.params["private_key"],
+ )
+ if module.params["passphrase"]:
+ body.passphrase = module.params["passphrase"]
+ blade.certificates.update_certificates(
+ names=[module.params["name"]], certificate=body
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create certificate {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str"),
+ contents=dict(type="str", no_log=True),
+ private_key=dict(type="str", no_log=True),
+ passphrase=dict(type="str", no_log=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ try:
+ cert = blade.certificates.list_certificates(names=[module.params["name"]])
+ except Exception:
+ cert = None
+
+ if not cert and state == "present":
+ create_cert(module, blade)
+ elif state == "present":
+ update_cert(module, blade, cert.items[0])
+ elif state == "absent" and cert:
+ delete_cert(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
new file mode 100644
index 000000000..508c6a322
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
@@ -0,0 +1,574 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_connect
+version_added: '1.0.0'
+short_description: Manage replication connections between two FlashBlades
+description:
+- Manage replication connections to specified remote FlashBlade system
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete replication connection
+ default: present
+ type: str
+ choices: [ absent, present ]
+ encrypted:
+ description:
+ - Define if replication connection is encrypted
+ type: bool
+ default: false
+ target_url:
+ description:
+ - Management IP address of target FlashBlade system
+ type: str
+ required: true
+ target_api:
+ description:
+ - API token for target FlashBlade system
+ type: str
+ target_repl:
+ description:
+ - Replication IP address of target FlashBlade system
+ - If not set at time of connection creation, will default to
+ all the replication addresses available on the target array
+ at the time of connection creation.
+ type: list
+ elements: str
+ version_added: "1.9.0"
+ default_limit:
+ description:
+ - Default maximum bandwidth threshold for outbound traffic in bytes.
+ - B, K, M, or G units. See examples.
+ - Must be 0 or between 5MB and 28GB
+ - Once exceeded, bandwidth throttling occurs
+ type: str
+ version_added: "1.9.0"
+ window_limit:
+ description:
+ - Maximum bandwidth threshold for outbound traffic during the specified
+ time range in bytes.
+ - B, K, M, or G units. See examples.
+ - Must be 0 or between 5MB and 28GB
+ - Once exceeded, bandwidth throttling occurs
+ type: str
+ version_added: "1.9.0"
+ window_start:
+ description:
+ - The window start time.
+ - The time must be set to the hour.
+ type: str
+ version_added: "1.9.0"
+ window_end:
+ description:
+ - The window end time.
+ - The time must be set to the hour.
+ type: str
+ version_added: "1.9.0"
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create a connection to remote FlashBlade system
+ purestorage.flashblade.purefb_connect:
+ target_url: 10.10.10.20
+ target_api: T-b3275b1c-8958-4190-9052-eb46b0bd09f8
+ fb_url: 10.10.10.2
+ api_token: T-91528421-fe42-47ee-bcb1-47eefb0a9220
+- name: Create a connection to remote FlashBlade system with bandwidth limits
+ purestorage.flashblade.purefb_connect:
+ target_url: 10.10.10.20
+ target_api: T-b3275b1c-8958-4190-9052-eb46b0bd09f8
+ window_limit: 28G
+ window_start: 1AM
+ window_end: 7AM
+ default_limit: 5M
+ fb_url: 10.10.10.2
+ api_token: T-91528421-fe42-47ee-bcb1-47eefb0a9220
+- name: Delete connection to target FlashBlade system
+ purestorage.flashblade.purefb_connect:
+ state: absent
+ target_url: 10.10.10.20
+ target_api: T-b3275b1c-8958-4190-9052-eb46b0bd09f8
+ fb_url: 10.10.10.2
+ api_token: T-91528421-fe42-47ee-bcb1-47eefb0a9220
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import PurityFb, ArrayConnection, ArrayConnectionPost
+except ImportError:
+ HAS_PURITYFB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flashblade
+ from pypureclient.flashblade import ArrayConnection, ArrayConnectionPost
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
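+# Replication limits enforced below: fan-in is not supported (the remote
+# may not already have a connection) and REST 2 arrays allow at most
+# three outbound connections (fan-out).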
+FAN_IN_MAXIMUM = 1
+FAN_OUT_MAXIMUM = 3
+MIN_REQUIRED_API_VERSION = "1.9"
+THROTTLE_API_VERSION = "2.3"
+
+
+def _convert_to_millisecs(hour):
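+    """Convert an hour string such as "1AM" or "7PM" to milliseconds since
+    midnight, e.g. "12AM" -> 0, "1AM" -> 3600000, "7PM" -> 68400000."""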
+ if hour[-2:] == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:] == "AM":
+ return int(hour[:-2]) * 3600000
+ elif hour[-2:] == "PM" and hour[:2] == "12":
+ return 43200000
+ return (int(hour[:-2]) + 12) * 3600000
+
+
+def _check_connected(module, blade):
+ connected_blades = blade.array_connections.list_array_connections()
+ for target in range(0, len(connected_blades.items)):
+ if connected_blades.items[target].management_address is None:
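+            # No management address means the remote array formed the connection;
+            # log in to the remote and match by array name instead.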
+ try:
+ remote_system = PurityFb(module.params["target_url"])
+ remote_system.login(module.params["target_api"])
+ remote_array = remote_system.arrays.list_arrays().items[0].name
+ if connected_blades.items[target].remote.name == remote_array:
+ return connected_blades.items[target]
+ except Exception:
+ module.fail_json(
+ msg="Failed to connect to remote array {0}.".format(
+ module.params["target_url"]
+ )
+ )
+ if connected_blades.items[target].management_address == module.params[
+ "target_url"
+ ] and connected_blades.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_blades.items[target]
+ return None
+
+
+def break_connection(module, blade, target_blade):
+ """Break connection between arrays"""
+ changed = True
+ if not module.check_mode:
+ source_blade = blade.arrays.list_arrays().items[0].name
+ try:
+ if target_blade.management_address is None:
+ module.fail_json(
+ msg="Disconnect can only happen from the array that formed the connection"
+ )
+ blade.array_connections.delete_array_connections(
+ remote_names=[target_blade.remote.name]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to disconnect {0} from {1}.".format(
+ target_blade.remote.name, source_blade
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_connection(module, blade):
+ """Create connection between arrays"""
+ changed = True
+ if not module.check_mode:
+ remote_array = module.params["target_url"]
+ try:
+ remote_system = PurityFb(module.params["target_url"])
+ remote_system.login(module.params["target_api"])
+ remote_array = remote_system.arrays.list_arrays().items[0].name
+ remote_conn_cnt = (
+ remote_system.array_connections.list_array_connections().pagination_info.total_item_count
+ )
+ if remote_conn_cnt == FAN_IN_MAXIMUM:
+ module.fail_json(
+ msg="Remote array {0} already connected to {1} other array. Fan-In not supported".format(
+ remote_array, remote_conn_cnt
+ )
+ )
+ connection_key = (
+ remote_system.array_connections.create_array_connections_connection_keys()
+ .items[0]
+ .connection_key
+ )
+ connection_info = ArrayConnectionPost(
+ management_address=module.params["target_url"],
+ encrypted=module.params["encrypted"],
+ connection_key=connection_key,
+ )
+ blade.array_connections.create_array_connections(
+ array_connection=connection_info
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to connect to remote array {0}.".format(remote_array)
+ )
+ module.exit_json(changed=changed)
+
+
+def create_v2_connection(module, blade):
+ """Create connection between REST 2 capable arrays"""
+ changed = True
+ if blade.get_array_connections().total_item_count == FAN_OUT_MAXIMUM:
+ module.fail_json(
+ msg="FlashBlade fan-out maximum of {0} already reached".format(
+ FAN_OUT_MAXIMUM
+ )
+ )
+ try:
+ remote_system = flashblade.Client(
+ target=module.params["target_url"], api_token=module.params["target_api"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to connect to remote array {0}.".format(
+ module.params["target_url"]
+ )
+ )
+ remote_array = list(remote_system.get_arrays().items)[0].name
+ remote_conn_cnt = remote_system.get_array_connections().total_item_count
+ if remote_conn_cnt == FAN_IN_MAXIMUM:
+ module.fail_json(
+ msg="Remote array {0} already connected to {1} other array. Fan-In not supported".format(
+ remote_array, remote_conn_cnt
+ )
+ )
+ connection_key = list(remote_system.post_array_connections_connection_key().items)[
+ 0
+ ].connection_key
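+    # The remote array issues a one-time connection key that the local
+    # array presents when creating the connection.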
+
+ if module.params["default_limit"] or module.params["window_limit"]:
+ if THROTTLE_API_VERSION in list(blade.get_versions().items):
+ if THROTTLE_API_VERSION not in list(remote_system.get_versions().items):
+ module.fail_json(msg="Remote array does not support throttling")
+ if module.params["window_limit"]:
+ if not module.params["window_start"]:
+ module.params["window_start"] = "12AM"
+ if not module.params["window_end"]:
+ module.params["window_end"] = "12AM"
+ window = flashblade.TimeWindow(
+ start=_convert_to_millisecs(module.params["window_start"]),
+ end=_convert_to_millisecs(module.params["window_end"]),
+ )
+ if module.params["window_limit"] and module.params["default_limit"]:
+ throttle = flashblade.Throttle(
+ default_limit=human_to_bytes(module.params["default_limit"]),
+ window_limit=human_to_bytes(module.params["window_limit"]),
+ window=window,
+ )
+ elif module.params["window_limit"] and not module.params["default_limit"]:
+ throttle = flashblade.Throttle(
+ window_limit=human_to_bytes(module.params["window_limit"]),
+ window=window,
+ )
+ else:
+ throttle = flashblade.Throttle(
+ default_limit=human_to_bytes(module.params["default_limit"]),
+ )
+ connection_info = ArrayConnectionPost(
+ management_address=module.params["target_url"],
+ replication_addresses=module.params["target_repl"],
+ encrypted=module.params["encrypted"],
+ connection_key=connection_key,
+ throttle=throttle,
+ )
+ else:
+ connection_info = ArrayConnectionPost(
+ management_address=module.params["target_url"],
+ replication_addresses=module.params["target_repl"],
+ encrypted=module.params["encrypted"],
+ connection_key=connection_key,
+ )
+ else:
+ connection_info = ArrayConnectionPost(
+ management_address=module.params["target_url"],
+ replication_addresses=module.params["target_repl"],
+ encrypted=module.params["encrypted"],
+ connection_key=connection_key,
+ )
+ if not module.check_mode:
+ res = blade.post_array_connections(array_connection=connection_info)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to connect to remote array {0}. Error: {1}".format(
+ remote_array, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_connection(module, blade, target_blade):
+ """Update array connection - only encryption currently"""
+ changed = False
+ if target_blade.management_address is None:
+ module.fail_json(
+ msg="Update can only happen from the array that formed the connection"
+ )
+ if module.params["encrypted"] != target_blade.encrypted:
+ if (
+ module.params["encrypted"]
+ and blade.file_system_replica_links.list_file_system_replica_links().pagination_info.total_item_count
+ != 0
+ ):
+ module.fail_json(
+ msg="Cannot turn array connection encryption on if file system replica links exist"
+ )
+ new_attr = ArrayConnection(encrypted=module.params["encrypted"])
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.array_connections.update_array_connections(
+ remote_names=[target_blade.remote.name],
+ array_connection=new_attr,
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change encryption setting for array connection."
+ )
+ module.exit_json(changed=changed)
+
+
+def update_v2_connection(module, blade):
+ """Update REST 2 based array connection"""
+ changed = False
+ versions = list(blade.get_versions().items)
+ remote_blade = flashblade.Client(
+ target=module.params["target_url"], api_token=module.params["target_api"]
+ )
+ remote_name = list(remote_blade.get_arrays().items)[0].name
+ remote_connection = list(
+ blade.get_array_connections(filter="remote.name='" + remote_name + "'").items
+ )[0]
+ if remote_connection.management_address is None:
+ module.fail_json(
+ msg="Update can only happen from the array that formed the connection"
+ )
+ if module.params["encrypted"] != remote_connection.encrypted:
+ if (
+ module.params["encrypted"]
+ and blade.get_file_system_replica_links().total_item_count != 0
+ ):
+ module.fail_json(
+ msg="Cannot turn array connection encryption on if file system replica links exist"
+ )
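+    # Build comparable snapshots of the current and requested settings;
+    # any difference below triggers a PATCH of the connection.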
+ current_connection = {
+ "encrypted": remote_connection.encrypted,
+ "replication_addresses": sorted(remote_connection.replication_addresses),
+ "throttle": [],
+ }
+ if (
+ not remote_connection.throttle.default_limit
+ and not remote_connection.throttle.window_limit
+ ):
+ if (
+ module.params["default_limit"] or module.params["window_limit"]
+ ) and blade.get_bucket_replica_links().total_item_count != 0:
+ module.fail_json(
+ msg="Cannot set throttle when bucket replica links already exist"
+ )
+ if THROTTLE_API_VERSION in versions:
+ current_connection["throttle"] = {
+ "default_limit": remote_connection.throttle.default_limit,
+ "window_limit": remote_connection.throttle.window_limit,
+ "start": remote_connection.throttle.window.start,
+ "end": remote_connection.throttle.window.end,
+ }
+ if module.params["encrypted"]:
+ encryption = module.params["encrypted"]
+ else:
+ encryption = remote_connection.encrypted
+ if module.params["target_repl"]:
+ target_repl = sorted(module.params["target_repl"])
+ else:
+ target_repl = remote_connection.replication_addresses
+ if module.params["default_limit"]:
+ default_limit = human_to_bytes(module.params["default_limit"])
+ if default_limit == 0:
+ default_limit = None
+ else:
+ default_limit = remote_connection.throttle.default_limit
+ if module.params["window_limit"]:
+ window_limit = human_to_bytes(module.params["window_limit"])
+ else:
+ window_limit = remote_connection.throttle.window_limit
+ if module.params["window_start"]:
+ start = _convert_to_millisecs(module.params["window_start"])
+ else:
+ start = remote_connection.throttle.window.start
+ if module.params["window_end"]:
+ end = _convert_to_millisecs(module.params["window_end"])
+ else:
+ end = remote_connection.throttle.window.end
+
+ new_connection = {
+ "encrypted": encryption,
+ "replication_addresses": target_repl,
+ "throttle": [],
+ }
+ if THROTTLE_API_VERSION in versions:
+ new_connection["throttle"] = {
+ "default_limit": default_limit,
+ "window_limit": window_limit,
+ "start": start,
+ "end": end,
+ }
+ if new_connection != current_connection:
+ changed = True
+ if not module.check_mode:
+ if THROTTLE_API_VERSION in versions:
+ window = flashblade.TimeWindow(
+ start=new_connection["throttle"]["start"],
+ end=new_connection["throttle"]["end"],
+ )
+ throttle = flashblade.Throttle(
+ default_limit=new_connection["throttle"]["default_limit"],
+ window_limit=new_connection["throttle"]["window_limit"],
+ window=window,
+ )
+ connection_info = ArrayConnectionPost(
+ replication_addresses=new_connection["replication_addresses"],
+ encrypted=new_connection["encrypted"],
+ throttle=throttle,
+ )
+ else:
+ connection_info = ArrayConnection(
+ replication_addresses=new_connection["replication_addresses"],
+ encrypted=new_connection["encrypted"],
+ )
+ res = blade.patch_array_connections(
+ remote_names=[remote_name], array_connection=connection_info
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update connection to remote array {0}. Error: {1}".format(
+ remote_name, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ encrypted=dict(type="bool", default=False),
+ target_url=dict(type="str", required=True),
+ target_api=dict(type="str", no_log=True),
+ target_repl=dict(type="list", elements="str"),
+ default_limit=dict(type="str"),
+ window_limit=dict(type="str"),
+ window_start=dict(type="str"),
+ window_end=dict(type="str"),
+ )
+ )
+
+ required_if = [("state", "present", ["target_api"])]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITYFB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+ if "2.0" in versions:
+ bladev2 = get_system(module)
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ v2_connection = True
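+        # Limits must be 0 (no throttle) or between 5MB (5242880 bytes)
+        # and 28GB (30064771072 bytes).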
+ if module.params["default_limit"]:
+        if human_to_bytes(module.params["default_limit"]) != 0 and not (
+            5242880 <= human_to_bytes(module.params["default_limit"]) <= 30064771072
+        ):
+            module.fail_json(msg="Default Bandwidth must be 0 or between 5MB and 28GB")
+ if module.params["window_limit"]:
+        if human_to_bytes(module.params["window_limit"]) != 0 and not (
+            5242880 <= human_to_bytes(module.params["window_limit"]) <= 30064771072
+        ):
+            module.fail_json(msg="Window Bandwidth must be 0 or between 5MB and 28GB")
+ else:
+ if module.params["target_repl"]:
+ module.warn(
+ "Target Replication addresses can only be set for systems"
+ " that support REST 2.0 and higher"
+ )
+ v2_connection = False
+
+ target_blade = _check_connected(module, blade)
+ if state == "present" and not target_blade:
+ # REST 1 does not support fan-out for replication
+ # REST 2 has a limit which we can check
+ if v2_connection:
+ create_v2_connection(module, bladev2)
+ else:
+ if (
+ blade.array_connections.list_array_connections().pagination_info.total_item_count
+ == 1
+ ):
+ module.fail_json(
+ msg="Source FlashBlade already connected to another array. Fan-Out not supported"
+ )
+ create_connection(module, blade)
+ elif state == "present" and target_blade:
+ if v2_connection:
+ update_v2_connection(module, bladev2)
+ else:
+ update_connection(module, blade, target_blade)
+ elif state == "absent" and target_blade:
+ break_connection(module, blade, target_blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py
new file mode 100644
index 000000000..b5abd9289
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dns.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_dns
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade DNS settings
+description:
+- Set or erase DNS configuration for Pure Storage FlashBlades.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete DNS servers configuration
+ type: str
+ default: present
+ choices: [ absent, present ]
+ domain:
+ description:
+    - Domain suffix to be appended when performing DNS lookups.
+ type: str
+ nameservers:
+ description:
+    - List of up to 3 unique DNS server IP addresses. These can be
+      IPv4 or IPv6 - no validation of the addresses is performed.
+ type: list
+ elements: str
+ search:
+ description:
+ - Ordered list of domain names to search
+    - Deprecated option. Will be removed in Collection v1.6.0. There is no replacement for this.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing DNS settings
+ purestorage.flashblade.purefb_dns:
+ state: absent
+    fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Set DNS settings
+ purestorage.flashblade.purefb_dns:
+ domain: purestorage.com
+ nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+ search:
+ - purestorage.com
+ - acme.com
+    fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Dns
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def remove(duplicate):
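+    """Return the list with duplicate entries removed, preserving order."""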
+ final_list = []
+ for num in duplicate:
+ if num not in final_list:
+ final_list.append(num)
+ return final_list
+
+
+def delete_dns(module, blade):
+ """Delete DNS Settings"""
+ changed = True
+ if not module.check_mode:
+ changed = False
+ current_dns = blade.dns.list_dns()
+ if current_dns.items[0].domain or current_dns.items[0].nameservers != []:
+ try:
+ blade.dns.update_dns(dns_settings=Dns(domain="", nameservers=[]))
+ changed = True
+ except Exception:
+ module.fail_json(msg="Deletion of DNS settings failed")
+ module.exit_json(changed=changed)
+
+
+def update_dns(module, blade):
+ """Set DNS Settings"""
+ changed = False
+ current_dns = blade.dns.list_dns()
+ if module.params["domain"]:
+ if current_dns.items[0].domain != module.params["domain"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.dns.update_dns(
+ dns_settings=Dns(domain=module.params["domain"])
+ )
+ except Exception:
+ module.fail_json(msg="Update of DNS domain failed")
+ if module.params["nameservers"]:
+ if sorted(module.params["nameservers"]) != sorted(
+ current_dns.items[0].nameservers
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.dns.update_dns(
+ dns_settings=Dns(nameservers=module.params["nameservers"])
+ )
+ except Exception:
+ module.fail_json(msg="Update of DNS nameservers failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ nameservers=dict(type="list", elements="str"),
+ search=dict(type="list", elements="str"),
+ domain=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
+
+ if module.params["state"] == "absent":
+ delete_dns(module, blade)
+ elif module.params["state"] == "present":
+ if module.params["nameservers"]:
+ module.params["nameservers"] = remove(module.params["nameservers"])
+ if module.params["search"]:
+ module.warn(
+ "'search' parameter is deprecated and will be removed in Collection v1.6.0"
+ )
+ update_dns(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
new file mode 100644
index 000000000..6433d3d9d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
@@ -0,0 +1,470 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_ds
+version_added: '1.0.0'
+short_description: Configure FlashBlade Directory Service
+description:
+- Create, modify or erase directory services configurations. There is no
+  facility to manage SSL certificates at this time. Use the FlashBlade GUI
+  for this additional configuration work.
+- If updating a directory service and I(bind_password) is provided this
+  will always cause a change, even if the password given isn't different
+  from the current one. This makes this part of the module non-idempotent.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete directory service configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ dstype:
+ description:
+ - The type of directory service to work on
+ choices: [ management, nfs, smb ]
+ type: str
+ required: true
+ enable:
+ description:
+ - Whether to enable or disable directory service support.
+ default: false
+ type: bool
+ uri:
+ description:
+ - A list of up to 30 URIs of the directory servers. Each URI must include
+ the scheme ldap:// or ldaps:// (for LDAP over SSL), a hostname, and a
+ domain name or IP address. For example, ldap://ad.company.com configures
+ the directory service with the hostname "ad" in the domain "company.com"
+ while specifying the unencrypted LDAP protocol.
+ type: list
+ elements: str
+ base_dn:
+ description:
+ - Sets the base of the Distinguished Name (DN) of the directory service
+ groups. The base should consist of only Domain Components (DCs). The
+ base_dn will populate with a default value when a URI is entered by
+ parsing domain components from the URI. The base DN should specify DC=
+ for each domain component and multiple DCs should be separated by commas.
+ type: str
+ bind_password:
+ description:
+ - Sets the password of the bind_user user name account.
+ type: str
+ bind_user:
+ description:
+ - Sets the user name that can be used to bind to and query the directory.
+ - For Active Directory, enter the username - often referred to as
+ sAMAccountName or User Logon Name - of the account that is used to
+ perform directory lookups.
+ - For OpenLDAP, enter the full DN of the user.
+ type: str
+ nis_servers:
+ description:
+ - A list of up to 30 IP addresses or FQDNs for NIS servers.
+ - This cannot be used in conjunction with LDAP configurations.
+ type: list
+ elements: str
+ nis_domain:
+ description:
+ - The NIS domain to search
+ - This cannot be used in conjunction with LDAP configurations.
+ type: str
+ join_ou:
+ description:
+ - The optional organizational unit (OU) where the machine account
+ for the directory service will be created.
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing management directory service
+ purestorage.flashblade.purefb_ds:
+ dstype: management
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create NFS directory service (disabled)
+ purestorage.flashblade.purefb_ds:
+ dstype: nfs
+ uri: "ldaps://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Enable existing SMB directory service
+ purestorage.flashblade.purefb_ds:
+    dstype: smb
+ enable: true
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Disable existing management directory service
+ purestorage.flashblade.purefb_ds:
+ dstype: management
+ enable: false
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create NFS directory service (enabled)
+ purestorage.flashblade.purefb_ds:
+ dstype: nfs
+ enable: true
+ uri: "ldaps://lab.purestorage.com"
+ base_dn: "DC=lab,DC=purestorage,DC=com"
+ bind_user: Administrator
+ bind_password: password
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
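+# Minimum REST API version supporting NIS attributes and the SMB join_ou option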
+NIS_API_VERSION = "1.7"
+HAS_PURITY_FB = True
+try:
+ from purity_fb import DirectoryService
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def enable_ds(module, blade):
+ """Enable Directory Service"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.directory_services.update_directory_services(
+ names=[module.params["dstype"]],
+ directory_service=DirectoryService(enabled=True),
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Enable {0} Directory Service failed".format(
+ module.params["dstype"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def disable_ds(module, blade):
+ """Disable Directory Service"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.directory_services.update_directory_services(
+ names=[module.params["dstype"]],
+ directory_service=DirectoryService(enabled=False),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Disable {0} Directory Service failed".format(
+ module.params["dstype"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_ds(module, blade):
+ """Delete Directory Service"""
+ changed = True
+ if not module.check_mode:
+ dirserv = blade.directory_services.list_directory_services(
+ names=[module.params["dstype"]]
+ )
+ try:
+ if module.params["dstype"] == "management":
+ if dirserv.items[0].uris:
+ dir_service = DirectoryService(
+ uris=[""],
+ base_dn="",
+ bind_user="",
+ bind_password="",
+ enabled=False,
+ )
+ else:
+ changed = False
+ elif module.params["dstype"] == "smb":
+ if dirserv.items[0].uris:
+ smb_attrs = {"join_ou": ""}
+ dir_service = DirectoryService(
+ uris=[""],
+ base_dn="",
+ bind_user="",
+ bind_password="",
+ smb=smb_attrs,
+ enabled=False,
+ )
+ else:
+ changed = False
+ elif module.params["dstype"] == "nfs":
+ if dirserv.items[0].uris:
+ dir_service = DirectoryService(
+ uris=[""],
+ base_dn="",
+ bind_user="",
+ bind_password="",
+ enabled=False,
+ )
+ elif dirserv.items[0].nfs.nis_domains:
+ nfs_attrs = {"nis_domains": [], "nis_servers": []}
+ dir_service = DirectoryService(nfs=nfs_attrs, enabled=False)
+ else:
+ changed = False
+ if changed:
+ blade.directory_services.update_directory_services(
+ names=[module.params["dstype"]], directory_service=dir_service
+ )
+ except Exception:
+ module.fail_json(
+ msg="Delete {0} Directory Service failed".format(
+ module.params["dstype"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_ds(module, blade):
+ """Update Directory Service"""
+    changed = False
+    mod_ds = False
+ attr = {}
+ try:
+ ds_now = blade.directory_services.list_directory_services(
+ names=[module.params["dstype"]]
+ ).items[0]
+ if module.params["dstype"] == "nfs" and module.params["nis_servers"]:
+ if sorted(module.params["nis_servers"]) != sorted(
+ ds_now.nfs.nis_servers
+ ) or module.params["nis_domain"] != "".join(
+ map(str, ds_now.nfs.nis_domains)
+ ):
+ attr["nfs"] = {
+ "nis_domains": [module.params["nis_domain"]],
+ "nis_servers": module.params["nis_servers"][0:30],
+ }
+ mod_ds = True
+ else:
+ if module.params["uri"]:
+ if sorted(module.params["uri"][0:30]) != sorted(ds_now.uris):
+ attr["uris"] = module.params["uri"][0:30]
+ mod_ds = True
+ if module.params["base_dn"]:
+ if module.params["base_dn"] != ds_now.base_dn:
+ attr["base_dn"] = module.params["base_dn"]
+ mod_ds = True
+ if module.params["bind_user"]:
+ if module.params["bind_user"] != ds_now.bind_user:
+ attr["bind_user"] = module.params["bind_user"]
+ mod_ds = True
+ if module.params["enable"]:
+ if module.params["enable"] != ds_now.enabled:
+ attr["enabled"] = module.params["enable"]
+ mod_ds = True
+ if module.params["bind_password"]:
+ attr["bind_password"] = module.params["bind_password"]
+ mod_ds = True
+ if module.params["dstype"] == "smb":
+ if module.params["join_ou"] != ds_now.smb.join_ou:
+ attr["smb"] = {"join_ou": module.params["join_ou"]}
+ mod_ds = True
+ if mod_ds:
+ changed = True
+ if not module.check_mode:
+ n_attr = DirectoryService(**attr)
+ try:
+ blade.directory_services.update_directory_services(
+ names=[module.params["dstype"]], directory_service=n_attr
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change {0} directory service.".format(
+ module.params["dstype"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to get current {0} directory service.".format(
+ module.params["dstype"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_ds(module, blade):
+ """Create Directory Service"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params["dstype"] == "management":
+ if module.params["uri"]:
+ dir_service = DirectoryService(
+ uris=module.params["uri"][0:30],
+ base_dn=module.params["base_dn"],
+ bind_user=module.params["bind_user"],
+ bind_password=module.params["bind_password"],
+ enabled=module.params["enable"],
+ )
+ else:
+ module.fail_json(
+ msg="Incorrect parameters provided for dstype {0}".format(
+ module.params["dstype"]
+ )
+ )
+ elif module.params["dstype"] == "smb":
+ if module.params["uri"]:
+ smb_attrs = {"join_ou": module.params["join_ou"]}
+ dir_service = DirectoryService(
+ uris=module.params["uri"][0:30],
+ base_dn=module.params["base_dn"],
+ bind_user=module.params["bind_user"],
+ bind_password=module.params["bind_password"],
+ smb=smb_attrs,
+ enabled=module.params["enable"],
+ )
+ else:
+ module.fail_json(
+ msg="Incorrect parameters provided for dstype {0}".format(
+ module.params["dstype"]
+ )
+ )
+ elif module.params["dstype"] == "nfs":
+ if module.params["nis_domain"]:
+ nfs_attrs = {
+ "nis_domains": [module.params["nis_domain"]],
+ "nis_servers": module.params["nis_servers"][0:30],
+ }
+ dir_service = DirectoryService(
+ nfs=nfs_attrs, enabled=module.params["enable"]
+ )
+ else:
+ dir_service = DirectoryService(
+ uris=module.params["uri"][0:30],
+ base_dn=module.params["base_dn"],
+ bind_user=module.params["bind_user"],
+ bind_password=module.params["bind_password"],
+ enabled=module.params["enable"],
+ )
+ blade.directory_services.update_directory_services(
+ names=[module.params["dstype"]], directory_service=dir_service
+ )
+ except Exception:
+ module.fail_json(
+ msg="Create {0} Directory Service failed".format(
+ module.params["dstype"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ uri=dict(type="list", elements="str"),
+ dstype=dict(
+ required=True, type="str", choices=["management", "nfs", "smb"]
+ ),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ enable=dict(type="bool", default=False),
+ bind_password=dict(type="str", no_log=True),
+ bind_user=dict(type="str"),
+ base_dn=dict(type="str"),
+ join_ou=dict(type="str"),
+ nis_domain=dict(type="str"),
+ nis_servers=dict(type="list", elements="str"),
+ )
+ )
+
+ required_together = [
+ ["uri", "bind_password", "bind_user", "base_dn"],
+ ["nis_servers", "nis_domain"],
+ ]
+ mutually_exclusive = [["uri", "nis_domain"]]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ )
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ ds_configured = False
+ dirserv = blade.directory_services.list_directory_services(
+ names=[module.params["dstype"]]
+ )
+ ds_enabled = dirserv.items[0].enabled
+ if dirserv.items[0].base_dn is not None:
+ ds_configured = True
+ if (module.params["nis_domain"] or module.params["join_ou"]) and (
+ NIS_API_VERSION not in api_version
+ ):
+ module.fail_json(
+ msg="NFS or SMB directory service attributes not supported by FlashBlade Purity version"
+ )
+ ldap_uri = False
+ set_ldap = False
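+    # Work out whether the existing or requested configuration uses LDAP;
+    # NIS settings cannot be combined with an LDAP configuration.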
+ for uri in range(0, len(dirserv.items[0].uris)):
+ if "ldap" in dirserv.items[0].uris[uri].lower():
+ ldap_uri = True
+ if module.params["uri"]:
+ for uri in range(0, len(module.params["uri"])):
+ if "ldap" in module.params["uri"][uri].lower():
+ set_ldap = True
+    if (not module.params["uri"] and ldap_uri) or (module.params["uri"] and set_ldap):
+ if module.params["nis_servers"] or module.params["nis_domain"]:
+ module.fail_json(
+ msg="NIS configuration not supported in an LDAP environment"
+ )
+ if state == "absent":
+ delete_ds(module, blade)
+ elif ds_configured and module.params["enable"] and ds_enabled:
+ update_ds(module, blade)
+ elif ds_configured and not module.params["enable"] and ds_enabled:
+ disable_ds(module, blade)
+ elif ds_configured and module.params["enable"] and not ds_enabled:
+ enable_ds(module, blade)
+    # Now that we have enabled the DS, let's make sure there aren't any new updates...
+ update_ds(module, blade)
+ elif not ds_configured and state == "present":
+ create_ds(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py
new file mode 100644
index 000000000..61934cc6e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_dsrole.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_dsrole
+version_added: '1.0.0'
+short_description: Configure FlashBlade Management Directory Service Roles
+description:
+- Set or erase directory services role configurations.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete directory service role
+ default: present
+ type: str
+ choices: [ absent, present ]
+ role:
+ description:
+ - The directory service role to work on
+ choices: [ array_admin, ops_admin, readonly, storage_admin ]
+ type: str
+ required: true
+ group_base:
+ description:
+ - Specifies where the configured group is located in the directory
+ tree. This field consists of Organizational Units (OUs) that combine
+ with the base DN attribute and the configured group CNs to complete
+ the full Distinguished Name of the groups. The group base should
+ specify OU= for each OU and multiple OUs should be separated by commas.
+ The order of OUs is important and should get larger in scope from left
+ to right.
+ - Each OU should not exceed 64 characters in length.
+ type: str
+ group:
+ description:
+    - Sets the Common Name (CN) of the configured directory service group
+ containing users for the FlashBlade. This name should be just the
+ Common Name of the group without the CN= specifier.
+ - Common Names should not exceed 64 characters in length.
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing array_admin directory service role
+ purestorage.flashblade.purefb_dsrole:
+ role: array_admin
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create array_admin directory service role
+ purestorage.flashblade.purefb_dsrole:
+ role: array_admin
+ group_base: "OU=PureGroups,OU=SANManagers"
+ group: pureadmins
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update ops_admin directory service role
+ purestorage.flashblade.purefb_dsrole:
+ role: ops_admin
+ group_base: "OU=PureGroups"
+ group: opsgroup
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import DirectoryServiceRole
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def update_role(module, blade):
+ """Update Directory Service Role"""
+ changed = False
+ role = blade.directory_services.list_directory_services_roles(
+ names=[module.params["role"]]
+ )
+ if (
+ role.items[0].group_base != module.params["group_base"]
+ or role.items[0].group != module.params["group"]
+ ):
+ changed = True
+ if not module.check_mode:
+ try:
+ role = DirectoryServiceRole(
+ group_base=module.params["group_base"], group=module.params["group"]
+ )
+ blade.directory_services.update_directory_services_roles(
+ names=[module.params["role"]], directory_service_role=role
+ )
+ except Exception:
+ module.fail_json(
+ msg="Update Directory Service Role {0} failed".format(
+ module.params["role"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_role(module, blade):
+ """Delete Directory Service Role"""
+ changed = True
+ if not module.check_mode:
+ try:
+ role = DirectoryServiceRole(group_base="", group="")
+ blade.directory_services.update_directory_services_roles(
+ names=[module.params["role"]], directory_service_role=role
+ )
+ except Exception:
+ module.fail_json(
+ msg="Delete Directory Service Role {0} failed".format(
+ module.params["role"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_role(module, blade):
+ """Create Directory Service Role"""
+ changed = True
+ if not module.check_mode:
+ try:
+ role = DirectoryServiceRole(
+ group_base=module.params["group_base"], group=module.params["group"]
+ )
+ blade.directory_services.update_directory_services_roles(
+ names=[module.params["role"]], directory_service_role=role
+ )
+ except Exception:
+ module.fail_json(
+ msg="Create Directory Service Role {0} failed".format(
+ module.params["role"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ role=dict(
+ required=True,
+ type="str",
+ choices=["array_admin", "ops_admin", "readonly", "storage_admin"],
+ ),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ group_base=dict(type="str"),
+ group=dict(type="str"),
+ )
+ )
+
+ required_together = [["group", "group_base"]]
+
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ role_configured = False
+ role = blade.directory_services.list_directory_services_roles(
+ names=[module.params["role"]]
+ )
+ if role.items[0].group is not None:
+ role_configured = True
+
+ if state == "absent" and role_configured:
+ delete_role(module, blade)
+ elif role_configured and state == "present":
+ update_role(module, blade)
+ elif not role_configured and state == "present":
+ create_role(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_eula.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_eula.py
new file mode 100644
index 000000000..83b5e656a
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_eula.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_eula
+version_added: '1.6.0'
+short_description: Sign Pure Storage FlashBlade EULA
+description:
+- Sign the FlashBlade EULA for Day 0 config, or change signatory.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ company:
+ description:
+ - Full legal name of the entity.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+ name:
+ description:
+ - Full legal name of the individual at the company who has the authority to accept the terms of the agreement.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+ title:
+ description:
+ - Individual's job title at the company.
+ - The value must be between 1 and 64 characters in length.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Sign EULA for FlashBlade
+ purestorage.flashblade.purefb_eula:
+ company: "ACME Storage, Inc."
+ name: "Fred Bloggs"
+ title: "Storage Manager"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Eula, EulaSignature
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+EULA_API_VERSION = "2.0"
+
+
+def set_eula(module, blade):
+ """Sign EULA"""
+ changed = False
+ if not module.check_mode:
+ current_eula = list(blade.get_arrays_eula().items)[0].signature
+ if not current_eula.accepted:
+ if (
+ current_eula.company != module.params["company"]
+ or current_eula.title != module.params["title"]
+ or current_eula.name != module.params["name"]
+ ):
+ signature = EulaSignature(
+ company=module.params["company"],
+ title=module.params["title"],
+ name=module.params["name"],
+ )
+ eula_body = Eula(signature=signature)
+                changed = True
+                rc = blade.patch_arrays_eula(eula=eula_body)
+                if rc.status_code != 200:
+                    module.fail_json(msg="Signing EULA failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ company=dict(type="str", required=True),
+ name=dict(type="str", required=True),
+ title=dict(type="str", required=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
+    api_version = blade.api_version.list_versions().versions
+ if EULA_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+ blade = get_system(module)
+ set_eula(module, blade)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
new file mode 100644
index 000000000..a07180793
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
@@ -0,0 +1,944 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_fs
+version_added: "1.0.0"
+short_description: Manage filesystems on Pure Storage FlashBlade
+description:
+ - This module manages filesystems on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Filesystem Name.
+ required: true
+ type: str
+ state:
+ description:
+      - Create, delete or modify a filesystem.
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ eradicate:
+ description:
+ - Define whether to eradicate the filesystem on delete or leave in trash.
+ required: false
+ type: bool
+ default: false
+ size:
+ description:
+ - Volume size in M, G, T or P units. See examples.
+ - If size is not set at filesystem creation time the filesystem size becomes unlimited.
+ type: str
+ required: false
+ nfsv3:
+ description:
+      - Define whether the NFSv3 protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: true
+ nfsv4:
+ description:
+      - Define whether the NFSv4.1 protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: true
+ nfs_rules:
+ description:
+ - Define the NFS rules in operation.
+ - If not set at filesystem creation time it defaults to I(*(rw,no_root_squash))
+ - Supported binary options are ro/rw, secure/insecure, fileid_32bit/no_fileid_32bit,
+ root_squash/no_root_squash, all_squash/no_all_squash and atime/noatime
+ - Supported non-binary options are anonuid=#, anongid=#, sec=(sys|krb5)
+      - Superseded by I(export_policy) if provided
+ required: false
+ type: str
+ smb:
+ description:
+      - Define whether the SMB protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ smb_aclmode:
+ description:
+ - Specify the ACL mode for the SMB protocol.
+ - Deprecated from Purity//FB 3.1.1. Use I(access_control) instead.
+ required: false
+ type: str
+ default: shared
+ choices: [ "shared", "native" ]
+ http:
+ description:
+      - Define whether the HTTP/HTTPS protocol is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ snapshot:
+ description:
+ - Define whether a snapshot directory is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ writable:
+ description:
+      - Define if a filesystem is writable.
+ required: false
+ type: bool
+ promote:
+ description:
+ - Promote/demote a filesystem.
+ - Can only demote the file-system if it is in a replica-link relationship.
+ required: false
+ type: bool
+ fastremove:
+ description:
+ - Define whether the fast remove directory is enabled for the filesystem.
+ required: false
+ type: bool
+ default: false
+ hard_limit:
+ description:
+ - Define whether the capacity for a filesystem is a hard limit.
+      - B(CAUTION) This will cause the filesystem to go Read-Only if the
+        capacity has already exceeded the logical size of the filesystem.
+ required: false
+ type: bool
+ default: false
+ user_quota:
+ description:
+ - Default quota in M, G, T or P units for a user under this file system.
+ required: false
+ type: str
+ group_quota:
+ description:
+ - Default quota in M, G, T or P units for a group under this file system.
+ required: false
+ type: str
+ policy:
+ description:
+ - Filesystem policy to assign to or remove from a filesystem.
+ required: false
+ type: str
+ policy_state:
+ description:
+ - Add or delete a policy from a filesystem
+ required: false
+ default: present
+ type: str
+ choices: [ "absent", "present" ]
+ delete_link:
+ description:
+ - Define if the filesystem can be deleted even if it has a replica link
+ required: false
+ default: false
+ type: bool
+ discard_snaps:
+ description:
+ - Allow a filesystem to be demoted.
+ required: false
+ default: false
+ type: bool
+ access_control:
+ description:
+ - The access control style that is utilized for client actions such
+ as setting file and directory ACLs.
+ - Only available from Purity//FB 3.1.1
+ type: str
+ default: shared
+ choices: [ 'nfs', 'smb', 'shared', 'independent', 'mode-bits' ]
+ safeguard_acls:
+ description:
+ - Safeguards ACLs on a filesystem.
+ - Performs different roles depending on the filesystem protocol enabled.
+ - See Purity//FB documentation for detailed description.
+ - Only available from Purity//FB 3.1.1
+ type: bool
+ default: true
+ export_policy:
+ description:
+ - Name of NFS export policy to assign to filesystem
+ - Overrides I(nfs_rules)
+ - Only valid for Purity//FB 3.3.0 or higher
+ type: str
+ version_added: "1.9.0"
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new filesystem named foo
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ size: 1T
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete filesystem named foo
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Recover filesystem named foo
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Eradicate filesystem named foo
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ state: absent
+ eradicate: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Promote filesystem named foo ready for failover
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ promote: true
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Demote filesystem named foo after failover
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ promote: false
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Modify attributes of an existing filesystem named foo
+ purestorage.flashblade.purefb_fs:
+ name: foo
+ size: 2T
+    nfsv3: false
+    nfsv4: true
+ user_quota: 10K
+ group_quota: 25M
+ nfs_rules: '10.21.200.0/24(ro)'
+ snapshot: true
+ fastremove: true
+ hard_limit: true
+ smb: true
+ state: present
+ fb_url: 10.10.10.2
+    api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import (
+ FileSystem,
+ ProtocolRule,
+ NfsRule,
+ SmbRule,
+ MultiProtocolRule,
+ rest,
+ )
+except ImportError:
+ HAS_PURITY_FB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient.flashblade import (
+ FileSystemPatch,
+ NfsPatch,
+ Reference,
+ )
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+HAS_JSON = True
+try:
+ import json
+except ImportError:
+ HAS_JSON = False
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
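+# Minimum Purity//FB REST API versions gating the features used below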
+HARD_LIMIT_API_VERSION = "1.4"
+NFSV4_API_VERSION = "1.6"
+REPLICATION_API_VERSION = "1.9"
+MULTIPROTOCOL_API_VERSION = "1.11"
+EXPORT_POLICY_API_VERSION = "2.3"
+
+
+def get_fs(module, blade):
+ """Return Filesystem or None"""
+ fsys = []
+ fsys.append(module.params["name"])
+ try:
+ res = blade.file_systems.list_file_systems(names=fsys)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_fs(module, blade):
+ """Create Filesystem"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if not module.params["nfs_rules"]:
+ module.params["nfs_rules"] = "*(rw,no_root_squash)"
+ if module.params["size"]:
+ size = human_to_bytes(module.params["size"])
+ else:
+ size = 0
+
+ if module.params["user_quota"]:
+ user_quota = human_to_bytes(module.params["user_quota"])
+ else:
+ user_quota = None
+ if module.params["group_quota"]:
+ group_quota = human_to_bytes(module.params["group_quota"])
+ else:
+ group_quota = None
+
+ api_version = blade.api_version.list_versions().versions
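+            # Build the FileSystem object to match the richest feature
+            # set the array's REST API version supports.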
+ if HARD_LIMIT_API_VERSION in api_version:
+ if NFSV4_API_VERSION in api_version:
+ if REPLICATION_API_VERSION in api_version:
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ if module.params["access_control"] == "nfs" and not (
+ module.params["nfsv3"] or module.params["nfsv4"]
+ ):
+ module.fail_json(
+ msg="Cannot set access_control to nfs when NFS is not enabled."
+ )
+ if (
+ module.params["access_control"]
+ in ["smb", "independent"]
+ and not module.params["smb"]
+ ):
+ module.fail_json(
+ msg="Cannot set access_control to smb or independent when SMB is not enabled."
+ )
+ if module.params["safeguard_acls"] and (
+ module.params["access_control"]
+ in ["mode-bits", "independent"]
+ or module.params["smb"]
+ ):
+ module.fail_json(
+ msg="ACL Safeguarding cannot be enabled with SMB or if access_control is mode-bits or independent."
+ )
+ fs_obj = FileSystem(
+ name=module.params["name"],
+ provisioned=size,
+ fast_remove_directory_enabled=module.params[
+ "fastremove"
+ ],
+ hard_limit_enabled=module.params["hard_limit"],
+ snapshot_directory_enabled=module.params["snapshot"],
+ nfs=NfsRule(
+ v3_enabled=module.params["nfsv3"],
+ v4_1_enabled=module.params["nfsv4"],
+ rules=module.params["nfs_rules"],
+ ),
+ smb=SmbRule(enabled=module.params["smb"]),
+ http=ProtocolRule(enabled=module.params["http"]),
+ multi_protocol=MultiProtocolRule(
+ safeguard_acls=module.params["safeguard_acls"],
+ access_control_style=module.params[
+ "access_control"
+ ],
+ ),
+ default_user_quota=user_quota,
+ default_group_quota=group_quota,
+ )
+ else:
+ fs_obj = FileSystem(
+ name=module.params["name"],
+ provisioned=size,
+ fast_remove_directory_enabled=module.params[
+ "fastremove"
+ ],
+ hard_limit_enabled=module.params["hard_limit"],
+ snapshot_directory_enabled=module.params["snapshot"],
+ nfs=NfsRule(
+ v3_enabled=module.params["nfsv3"],
+ v4_1_enabled=module.params["nfsv4"],
+ rules=module.params["nfs_rules"],
+ ),
+ smb=SmbRule(
+ enabled=module.params["smb"],
+ acl_mode=module.params["smb_aclmode"],
+ ),
+ http=ProtocolRule(enabled=module.params["http"]),
+ default_user_quota=user_quota,
+ default_group_quota=group_quota,
+ )
+ else:
+ fs_obj = FileSystem(
+ name=module.params["name"],
+ provisioned=size,
+ fast_remove_directory_enabled=module.params["fastremove"],
+ hard_limit_enabled=module.params["hard_limit"],
+ snapshot_directory_enabled=module.params["snapshot"],
+ nfs=NfsRule(
+ v3_enabled=module.params["nfsv3"],
+ v4_1_enabled=module.params["nfsv4"],
+ rules=module.params["nfs_rules"],
+ ),
+ smb=ProtocolRule(enabled=module.params["smb"]),
+ http=ProtocolRule(enabled=module.params["http"]),
+ default_user_quota=user_quota,
+ default_group_quota=group_quota,
+ )
+ else:
+ fs_obj = FileSystem(
+ name=module.params["name"],
+ provisioned=size,
+ fast_remove_directory_enabled=module.params["fastremove"],
+ hard_limit_enabled=module.params["hard_limit"],
+ snapshot_directory_enabled=module.params["snapshot"],
+ nfs=NfsRule(
+ enabled=module.params["nfsv3"],
+ rules=module.params["nfs_rules"],
+ ),
+ smb=ProtocolRule(enabled=module.params["smb"]),
+ http=ProtocolRule(enabled=module.params["http"]),
+ )
+ else:
+ fs_obj = FileSystem(
+ name=module.params["name"],
+ provisioned=size,
+ fast_remove_directory_enabled=module.params["fastremove"],
+ snapshot_directory_enabled=module.params["snapshot"],
+ nfs=NfsRule(
+ enabled=module.params["nfs"], rules=module.params["nfs_rules"]
+ ),
+ smb=ProtocolRule(enabled=module.params["smb"]),
+ http=ProtocolRule(enabled=module.params["http"]),
+ )
+ blade.file_systems.create_file_systems(fs_obj)
+ except rest.ApiException as err:
+ message = json.loads(err.body)["errors"][0]["message"]
+ module.fail_json(
+ msg="Failed to create filesystem {0}. Error: {1}".format(
+ module.params["name"], message
+ )
+ )
+ if REPLICATION_API_VERSION in api_version:
+ if module.params["policy"]:
+ try:
+ blade.policies.list_policies(names=[module.params["policy"]])
+ except Exception:
+ _delete_fs(module, blade)
+ module.fail_json(
+ msg="Policy {0} doesn't exist.".format(module.params["policy"])
+ )
+ try:
+ blade.policies.create_policy_filesystems(
+ policy_names=[module.params["policy"]],
+ member_names=[module.params["name"]],
+ )
+ except Exception:
+ _delete_fs(module, blade)
+ module.fail_json(
+ msg="Failed to apply policy {0} when creating filesystem {1}.".format(
+ module.params["policy"], module.params["name"]
+ )
+ )
+ if EXPORT_POLICY_API_VERSION in api_version and module.params["export_policy"]:
+ system = get_system(module)
+ export_attr = FileSystemPatch(
+ nfs=NfsPatch(
+ export_policy=Reference(name=module.params["export_policy"])
+ )
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=export_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Filesystem {0} created, but failed to assign export "
+ "policy {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["export_policy"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def modify_fs(module, blade):
+ """Modify Filesystem"""
+ changed = False
+ mod_fs = False
+ attr = {}
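+    # Accumulate attribute changes in attr; mod_fs flags that an update is required.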
+ if module.params["policy"] and module.params["policy_state"] == "present":
+ try:
+ policy = blade.policies.list_policy_filesystems(
+ policy_names=[module.params["policy"]],
+ member_names=[module.params["name"]],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Policy {0} does not exist.".format(module.params["policy"])
+ )
+ if not policy.items:
+ try:
+ blade.policies.create_policy_filesystems(
+ policy_names=[module.params["policy"]],
+ member_names=[module.params["name"]],
+ )
+ mod_fs = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to add filesystem {0} to policy {1}.".format(
+ module.params["name"], module.params["polict"]
+ )
+ )
+ if module.params["policy"] and module.params["policy_state"] == "absent":
+ try:
+ policy = blade.policies.list_policy_filesystems(
+ policy_names=[module.params["policy"]],
+ member_names=[module.params["name"]],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Policy {0} does not exist.".format(module.params["policy"])
+ )
+ if len(policy.items) == 1:
+ try:
+ blade.policies.delete_policy_filesystems(
+ policy_names=[module.params["policy"]],
+ member_names=[module.params["name"]],
+ )
+ mod_fs = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to remove filesystem {0} to policy {1}.".format(
+ module.params["name"], module.params["polict"]
+ )
+ )
+ if module.params["user_quota"]:
+ user_quota = human_to_bytes(module.params["user_quota"])
+ if module.params["group_quota"]:
+ group_quota = human_to_bytes(module.params["group_quota"])
+ fsys = get_fs(module, blade)
+ if fsys.destroyed:
+ attr["destroyed"] = False
+ mod_fs = True
+ if module.params["size"]:
+ if human_to_bytes(module.params["size"]) != fsys.provisioned:
+ attr["provisioned"] = human_to_bytes(module.params["size"])
+ mod_fs = True
+ api_version = blade.api_version.list_versions().versions
+ if NFSV4_API_VERSION in api_version:
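+        # Newer REST versions expose separate NFSv3 and NFSv4.1 toggles; older
+        # versions (the else branch below) only have a single NfsRule 'enabled' flag.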
+ v3_state = v4_state = None
+ if module.params["nfsv3"] and not fsys.nfs.v3_enabled:
+ v3_state = module.params["nfsv3"]
+ if not module.params["nfsv3"] and fsys.nfs.v3_enabled:
+ v3_state = module.params["nfsv3"]
+ if module.params["nfsv4"] and not fsys.nfs.v4_1_enabled:
+ v4_state = module.params["nfsv4"]
+ if not module.params["nfsv4"] and fsys.nfs.v4_1_enabled:
+ v4_state = module.params["nfsv4"]
+ if v3_state is not None or v4_state is not None:
+ attr["nfs"] = NfsRule(v4_1_enabled=v4_state, v3_enabled=v3_state)
+ mod_fs = True
+        # NFS export rules only apply when an NFS version has been requested
+        # and NFS is currently enabled on the filesystem.
+        if (module.params["nfsv3"] or module.params["nfsv4"]) and (
+            fsys.nfs.v3_enabled or fsys.nfs.v4_1_enabled
+        ):
+ if module.params["nfs_rules"] is not None:
+ if fsys.nfs.rules != module.params["nfs_rules"]:
+ attr["nfs"] = NfsRule(rules=module.params["nfs_rules"])
+ mod_fs = True
+ if module.params["user_quota"] and user_quota != fsys.default_user_quota:
+ attr["default_user_quota"] = user_quota
+ mod_fs = True
+ if module.params["group_quota"] and group_quota != fsys.default_group_quota:
+ attr["default_group_quota"] = group_quota
+ mod_fs = True
+ else:
+ if module.params["nfsv3"] and not fsys.nfs.enabled:
+ attr["nfs"] = NfsRule(enabled=module.params["nfsv3"])
+ mod_fs = True
+ if not module.params["nfsv3"] and fsys.nfs.enabled:
+ attr["nfs"] = NfsRule(enabled=module.params["nfsv3"])
+ mod_fs = True
+ if module.params["nfsv3"] and fsys.nfs.enabled:
+ if fsys.nfs.rules != module.params["nfs_rules"]:
+ attr["nfs"] = NfsRule(rules=module.params["nfs_rules"])
+ mod_fs = True
+ if REPLICATION_API_VERSION in api_version:
+ if module.params["smb"] and not fsys.smb.enabled:
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ attr["smb"] = SmbRule(enabled=module.params["smb"])
+ else:
+ attr["smb"] = SmbRule(
+ enabled=module.params["smb"], acl_mode=module.params["smb_aclmode"]
+ )
+ mod_fs = True
+ if not module.params["smb"] and fsys.smb.enabled:
+ attr["smb"] = ProtocolRule(enabled=module.params["smb"])
+ mod_fs = True
+ if (
+ module.params["smb"]
+ and fsys.smb.enabled
+ and MULTIPROTOCOL_API_VERSION not in api_version
+ ):
+ if fsys.smb.acl_mode != module.params["smb_aclmode"]:
+ attr["smb"] = SmbRule(
+ enabled=module.params["smb"], acl_mode=module.params["smb_aclmode"]
+ )
+ mod_fs = True
+ else:
+ if module.params["smb"] and not fsys.smb.enabled:
+ attr["smb"] = ProtocolRule(enabled=module.params["smb"])
+ mod_fs = True
+ if not module.params["smb"] and fsys.smb.enabled:
+ attr["smb"] = ProtocolRule(enabled=module.params["smb"])
+ mod_fs = True
+ if module.params["http"] and not fsys.http.enabled:
+ attr["http"] = ProtocolRule(enabled=module.params["http"])
+ mod_fs = True
+ if not module.params["http"] and fsys.http.enabled:
+ attr["http"] = ProtocolRule(enabled=module.params["http"])
+ mod_fs = True
+ if module.params["snapshot"] and not fsys.snapshot_directory_enabled:
+ attr["snapshot_directory_enabled"] = module.params["snapshot"]
+ mod_fs = True
+ if not module.params["snapshot"] and fsys.snapshot_directory_enabled:
+ attr["snapshot_directory_enabled"] = module.params["snapshot"]
+ mod_fs = True
+ if module.params["fastremove"] and not fsys.fast_remove_directory_enabled:
+ attr["fast_remove_directory_enabled"] = module.params["fastremove"]
+ mod_fs = True
+ if not module.params["fastremove"] and fsys.fast_remove_directory_enabled:
+ attr["fast_remove_directory_enabled"] = module.params["fastremove"]
+ mod_fs = True
+ if HARD_LIMIT_API_VERSION in api_version:
+ if not module.params["hard_limit"] and fsys.hard_limit_enabled:
+ attr["hard_limit_enabled"] = module.params["hard_limit"]
+ mod_fs = True
+ if module.params["hard_limit"] and not fsys.hard_limit_enabled:
+ attr["hard_limit_enabled"] = module.params["hard_limit"]
+ mod_fs = True
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ if module.params["safeguard_acls"] and not fsys.multi_protocol.safeguard_acls:
+ attr["multi_protocol"] = MultiProtocolRule(safeguard_acls=True)
+ mod_fs = True
+ if not module.params["safeguard_acls"] and fsys.multi_protocol.safeguard_acls:
+ attr["multi_protocol"] = MultiProtocolRule(safeguard_acls=False)
+ mod_fs = True
+ if module.params["access_control"] != fsys.multi_protocol.access_control_style:
+ attr["multi_protocol"] = MultiProtocolRule(
+ access_control_style=module.params["access_control"]
+ )
+ mod_fs = True
+ if REPLICATION_API_VERSION in api_version:
+ if module.params["writable"] is not None:
+ if not module.params["writable"] and fsys.writable:
+ attr["writable"] = module.params["writable"]
+ mod_fs = True
+ if (
+ module.params["writable"]
+ and not fsys.writable
+ and fsys.promotion_status == "promoted"
+ ):
+ attr["writable"] = module.params["writable"]
+ mod_fs = True
+ if module.params["promote"] is not None:
+ if module.params["promote"] and fsys.promotion_status != "promoted":
+ attr["requested_promotion_state"] = "promoted"
+ mod_fs = True
+ if not module.params["promote"] and fsys.promotion_status == "promoted":
+ # Demotion only allowed on filesystems in a replica-link
+ try:
+ blade.file_system_replica_links.list_file_system_replica_links(
+ local_file_system_names=[module.params["name"]]
+ ).items[0]
+ except Exception:
+ module.fail_json(
+ msg="Filesystem {0} not demoted. Not in a replica-link".format(
+ module.params["name"]
+ )
+ )
+ attr["requested_promotion_state"] = module.params["promote"]
+ mod_fs = True
+ if mod_fs:
+ changed = True
+ if not module.check_mode:
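+            # 'attr' holds only the changed attributes, so FileSystem(**attr)
+            # builds a sparse patch object rather than a full definition.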
+ n_attr = FileSystem(**attr)
+ if REPLICATION_API_VERSION in api_version:
+ try:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=n_attr,
+ discard_non_snapshotted_data=module.params["discard_snaps"],
+ )
+ except rest.ApiException as err:
+ message = json.loads(err.body)["errors"][0]["message"]
+ module.fail_json(
+ msg="Failed to update filesystem {0}. Error {1}".format(
+ module.params["name"], message
+ )
+ )
+ else:
+ try:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"], attributes=n_attr
+ )
+ except rest.ApiException as err:
+ message = json.loads(err.body)["errors"][0]["message"]
+ module.fail_json(
+ msg="Failed to update filesystem {0}. Error {1}".format(
+ module.params["name"], message
+ )
+ )
+ if EXPORT_POLICY_API_VERSION in api_version and module.params["export_policy"]:
+ system = get_system(module)
+ change_export = False
+ current_fs = list(
+ system.get_file_systems(filter="name='" + module.params["name"] + "'").items
+ )[0]
+ if (
+ current_fs.nfs.export_policy.name
+ and current_fs.nfs.export_policy.name != module.params["export_policy"]
+ ):
+ change_export = True
+ if not current_fs.nfs.export_policy.name and module.params["export_policy"]:
+ change_export = True
+ if change_export and not module.check_mode:
+ export_attr = FileSystemPatch(
+ nfs=NfsPatch(
+ export_policy=Reference(name=module.params["export_policy"])
+ )
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=export_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to modify export policy {1} for "
+ "filesystem {0}. Error: {2}".format(
+ module.params["name"],
+ module.params["export_policy"],
+ res.errors[0].message,
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def _delete_fs(module, blade):
+ """In module Delete Filesystem"""
+ api_version = blade.api_version.list_versions().versions
+ if NFSV4_API_VERSION in api_version:
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ multi_protocol=MultiProtocolRule(access_control_style="shared"),
+ destroyed=True,
+ ),
+ )
+ else:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ )
+ else:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ )
+
+    blade.file_systems.delete_file_systems(name=module.params["name"])
+
+
+def delete_fs(module, blade):
+ """Delete Filesystem"""
+ changed = True
+ if not module.check_mode:
+ try:
+ api_version = blade.api_version.list_versions().versions
+ if REPLICATION_API_VERSION in api_version:
+ if NFSV4_API_VERSION in api_version:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ delete_link_on_eradication=module.params["delete_link"],
+ )
+ else:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ delete_link_on_eradication=module.params["delete_link"],
+ )
+ else:
+ if NFSV4_API_VERSION in api_version:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ )
+ else:
+ blade.file_systems.update_file_systems(
+ name=module.params["name"],
+ attributes=FileSystem(
+ nfs=NfsRule(enabled=False),
+ smb=ProtocolRule(enabled=False),
+ http=ProtocolRule(enabled=False),
+ destroyed=True,
+ ),
+ )
+ if module.params["eradicate"]:
+ try:
+ blade.file_systems.delete_file_systems(name=module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete filesystem {0}.".format(
+ module.params["name"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update filesystem {0} prior to deletion.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def eradicate_fs(module, blade):
+ """Eradicate Filesystem"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.file_systems.delete_file_systems(name=module.params["name"])
+ except Exception:
+ module.fail_json(
+ msg="Failed to eradicate filesystem {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ eradicate=dict(default="false", type="bool"),
+ nfsv3=dict(default="true", type="bool"),
+ nfsv4=dict(default="true", type="bool"),
+ nfs_rules=dict(type="str"),
+ smb=dict(default="false", type="bool"),
+ http=dict(default="false", type="bool"),
+ snapshot=dict(default="false", type="bool"),
+ writable=dict(type="bool"),
+ promote=dict(type="bool"),
+ fastremove=dict(default="false", type="bool"),
+ hard_limit=dict(default="false", type="bool"),
+ user_quota=dict(type="str"),
+ policy=dict(type="str"),
+ group_quota=dict(type="str"),
+ smb_aclmode=dict(
+ type="str", default="shared", choices=["shared", "native"]
+ ),
+ policy_state=dict(default="present", choices=["present", "absent"]),
+ state=dict(default="present", choices=["present", "absent"]),
+ delete_link=dict(default=False, type="bool"),
+ discard_snaps=dict(default=False, type="bool"),
+ safeguard_acls=dict(default=True, type="bool"),
+ access_control=dict(
+ type="str",
+ default="shared",
+ choices=["nfs", "smb", "shared", "independent", "mode-bits"],
+ ),
+ size=dict(type="str"),
+ export_policy=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_JSON:
+ module.fail_json(msg="json sdk is required for this module")
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ fsys = get_fs(module, blade)
+
+ if module.params["eradicate"] and state == "present":
+ module.warn("Eradicate flag ignored without state=absent")
+
+ if state == "present" and not fsys:
+ create_fs(module, blade)
+ elif state == "present" and fsys:
+ modify_fs(module, blade)
+ elif state == "absent" and fsys and not fsys.destroyed:
+ delete_fs(module, blade)
+ elif state == "absent" and fsys and fsys.destroyed and module.params["eradicate"]:
+ eradicate_fs(module, blade)
+ elif state == "absent" and not fsys:
+ module.exit_json(changed=False)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
new file mode 100644
index 000000000..f96903788
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_fs_replica
+version_added: '1.0.0'
+short_description: Manage filesystem replica links between Pure Storage FlashBlades
+description:
+ - This module manages filesystem replica links between Pure Storage FlashBlades.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Local Filesystem Name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or modifies a filesystem replica link
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ target_array:
+ description:
+ - Remote array name to create replica on.
+ required: false
+ type: str
+ target_fs:
+ description:
+      - Name of the target filesystem.
+ - If not supplied, will default to I(name).
+ type: str
+ required: false
+ policy:
+ description:
+ - Name of filesystem snapshot policy to apply to the replica link.
+ required: false
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new filesystem replica from foo to bar on arrayB
+ purestorage.flashblade.purefb_fs_replica:
+ name: foo
+ target_array: arrayB
+ target_fs: bar
+ policy: daily
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Add new snapshot policy to existing filesystem replica link
+ purestorage.flashblade.purefb_fs_replica:
+ name: foo
+ policy: weekly
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete snapshot policy from filesystem replica foo
+ purestorage.flashblade.purefb_fs_replica:
+ name: foo
+ policy: weekly
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import FileSystemReplicaLink, LocationReference
+except ImportError:
+ HAS_PURITY_FB = False
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def get_local_fs(module, blade):
+ """Return Filesystem or None"""
+ try:
+ res = blade.file_systems.list_file_systems(names=[module.params["name"]])
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_local_rl(module, blade):
+ """Return Filesystem Replica Link or None"""
+ try:
+ res = blade.file_system_replica_links.list_file_system_replica_links(
+ local_file_system_names=[module.params["name"]]
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def _check_connected(module, blade):
+ connected_blades = blade.array_connections.list_array_connections()
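+    # Accept a match on either the remote array's name or its management
+    # address, in any connection state that can still carry replication traffic.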
+ for target in range(0, len(connected_blades.items)):
+ if (
+ connected_blades.items[target].remote.name == module.params["target_array"]
+ or connected_blades.items[target].management_address
+ == module.params["target_array"]
+ ) and connected_blades.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_blades.items[target]
+ return None
+
+
+def create_rl(module, blade):
+ """Create Filesystem Replica Link"""
+ changed = True
+ if not module.check_mode:
+ try:
+ remote_array = _check_connected(module, blade)
+ if remote_array:
+ if not module.params["target_fs"]:
+ module.params["target_fs"] = module.params["name"]
+ if not module.params["policy"]:
+ blade.file_system_replica_links.create_file_system_replica_links(
+ local_file_system_names=[module.params["name"]],
+ remote_file_system_names=[module.params["target_fs"]],
+ remote_names=[remote_array.remote.name],
+ )
+ else:
+ blade.file_system_replica_links.create_file_system_replica_links(
+ local_file_system_names=[module.params["name"]],
+ remote_file_system_names=[module.params["target_fs"]],
+ remote_names=[remote_array.remote.name],
+ file_system_replica_link=FileSystemReplicaLink(
+ policies=[LocationReference(name=module.params["policy"])]
+ ),
+ )
+ else:
+ module.fail_json(
+ msg="Target array {0} is not connected".format(
+ module.params["target_array"]
+ )
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create filesystem replica link for {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def add_rl_policy(module, blade):
+ """Add Policy to Filesystem Replica Link"""
+ changed = False
+ if not module.params["target_array"]:
+ module.params["target_array"] = (
+ blade.file_system_replica_links.list_file_system_replica_links(
+ local_file_system_names=[module.params["name"]]
+ )
+ .items[0]
+ .remote.name
+ )
+ remote_array = _check_connected(module, blade)
+ try:
+ already_a_policy = (
+ blade.file_system_replica_links.list_file_system_replica_link_policies(
+ local_file_system_names=[module.params["name"]],
+ policy_names=[module.params["policy"]],
+ remote_names=[remote_array.remote.name],
+ )
+ )
+ if not already_a_policy.items:
+ changed = True
+ if not module.check_mode:
+ blade.file_system_replica_links.create_file_system_replica_link_policies(
+ policy_names=[module.params["policy"]],
+ local_file_system_names=[module.params["name"]],
+ remote_names=[remote_array.remote.name],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to add policy {0} to replica link {1}.".format(
+ module.params["policy"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_rl_policy(module, blade):
+ """Delete Policy from Filesystem Replica Link"""
+ changed = True
+ if not module.check_mode:
+ current_policy = (
+ blade.file_system_replica_links.list_file_system_replica_link_policies(
+ local_file_system_names=[module.params["name"]],
+ policy_names=[module.params["policy"]],
+ )
+ )
+ if current_policy.items:
+ try:
+ blade.file_system_replica_links.delete_file_system_replica_link_policies(
+ policy_names=[module.params["policy"]],
+ local_file_system_names=[module.params["name"]],
+ remote_names=[current_policy.items[0].link.remote.name],
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to remove policy {0} from replica link {1}.".format(
+ module.params["policy"], module.params["name"]
+ )
+ )
+ else:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ target_fs=dict(type="str"),
+ target_array=dict(type="str"),
+ policy=dict(type="str"),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+ )
+
+ required_if = [["state", "absent", ["policy"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ local_fs = get_local_fs(module, blade)
+ local_replica_link = get_local_rl(module, blade)
+
+ if not local_fs:
+ module.fail_json(
+ msg="Selected local filesystem {0} does not exist.".format(
+ module.params["name"]
+ )
+ )
+
+ if module.params["policy"]:
+ try:
+ policy = blade.policies.list_policies(names=[module.params["policy"]])
+ except Exception:
+ module.fail_json(
+ msg="Selected policy {0} does not exist.".format(
+ module.params["policy"]
+ )
+ )
+ else:
+ policy = None
+ if state == "present" and not local_replica_link:
+ create_rl(module, blade)
+ elif state == "present" and local_replica_link and policy:
+ add_rl_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_rl_policy(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_groupquota.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_groupquota.py
new file mode 100644
index 000000000..2ae610275
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_groupquota.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_groupquota
+version_added: "1.7.0"
+short_description: Manage filesystem group quotas
+description:
+ - This module manages group quotas for filesystems on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Filesystem Name.
+ required: true
+ type: str
+ state:
+ description:
+      - Creates, deletes or modifies a quota.
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ quota:
+ description:
+ - Group quota in M, G, T or P units. This cannot be 0.
+ - This value will override the file system's default group quota.
+ type: str
+ gid:
+ description:
+ - The group id on which the quota is enforced.
+ - Cannot be combined with I(gname)
+ type: int
+ gname:
+ description:
+ - The group name on which the quota is enforced.
+ - Cannot be combined with I(gid)
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new group (using GID) quota for filesystem named foo
+ purestorage.flashblade.purefb_groupquota:
+ name: foo
+ quota: 1T
+ gid: 1234
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Create new group (using groupname) quota for filesystem named foo
+ purestorage.flashblade.purefb_groupquota:
+ name: foo
+ quota: 1T
+ gname: bar
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete group quota on filesystem foo for group by GID
+ purestorage.flashblade.purefb_groupquota:
+ name: foo
+ gid: 1234
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete group quota on filesystem foo for group by groupname
+ purestorage.flashblade.purefb_groupquota:
+ name: foo
+ gname: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Update group quota on filesystem foo for group by groupname
+  purestorage.flashblade.purefb_groupquota:
+    name: foo
+    quota: 20G
+    gname: bar
+    state: present
+    fb_url: 10.10.10.2
+    api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Update group quota on filesystem foo for group by GID
+  purestorage.flashblade.purefb_groupquota:
+    name: foo
+    quota: 20G
+    gid: 1234
+    state: present
+    fb_url: 10.10.10.2
+    api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import QuotasGroup
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
+def get_fs(module, blade):
+ """Return Filesystem or None"""
+    fsys = [module.params["name"]]
+ try:
+ res = blade.file_systems.list_file_systems(names=fsys)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_quota(module, blade):
+ """Return Filesystem User Quota or None"""
+    fsys = [module.params["name"]]
+ try:
+ if module.params["gid"]:
+ res = blade.quotas_groups.list_group_quotas(
+ file_system_names=fsys, filter="group.id=" + str(module.params["gid"])
+ )
+ else:
+ res = blade.quotas_groups.list_group_quotas(
+ file_system_names=fsys,
+ filter="group.name='" + module.params["gname"] + "'",
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_quota(module, blade):
+ """Create Filesystem User Quota"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params["gid"]:
+ blade.quotas_groups.create_group_quotas(
+ file_system_names=[module.params["name"]],
+ gids=[module.params["gid"]],
+ quota=QuotasGroup(
+ quota=int(human_to_bytes(module.params["quota"]))
+ ),
+ )
+ else:
+ blade.quotas_groups.create_group_quotas(
+ file_system_names=[module.params["name"]],
+ group_names=[module.params["gname"]],
+ quota=QuotasGroup(
+ quota=int(human_to_bytes(module.params["quota"]))
+ ),
+ )
+ except Exception:
+ if module.params["gid"]:
+ module.fail_json(
+ msg="Failed to create quote for UID {0} on filesystem {1}.".format(
+ module.params["gid"], module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to create quote for groupname {0} on filesystem {1}.".format(
+ module.params["gname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_quota(module, blade):
+ """Upodate Filesystem User Quota"""
+ changed = False
+ current_quota = get_quota(module, blade)
+ if current_quota.quota != human_to_bytes(module.params["quota"]):
+ changed = True
+ if not module.check_mode:
+ if module.params["gid"]:
+ try:
+ blade.quotas_groups.update_group_quotas(
+ file_system_names=[module.params["name"]],
+ gids=[module.params["gid"]],
+ quota=QuotasGroup(
+ quota=int(human_to_bytes(module.params["quota"]))
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update quota for UID {0} on filesystem {1}.".format(
+ module.params["gid"], module.params["name"]
+ )
+ )
+ else:
+ try:
+ blade.quotas_groups.update_group_quotas(
+ file_system_names=[module.params["name"]],
+ group_names=[module.params["gname"]],
+ quota=QuotasGroup(
+ quota=int(human_to_bytes(module.params["quota"]))
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update quota for UID {0} on filesystem {1}.".format(
+ module.params["gname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_quota(module, blade):
+ """Delete Filesystem User Quota"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params["gid"]:
+ blade.quotas_groups.delete_group_quotas(
+ file_system_names=[module.params["name"]],
+ gids=[module.params["gid"]],
+ )
+ else:
+ blade.quotas_groups.delete_group_quotas(
+ file_system_names=[module.params["name"]],
+ group_names=[module.params["gname"]],
+ )
+ except Exception:
+ if module.params["gid"]:
+ module.fail_json(
+ msg="Failed to delete quota for UID {0} on filesystem {1}.".format(
+ module.params["gid"], module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to delete quota for groupname {0} on filesystem {1}.".format(
+ module.params["gname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ gid=dict(type="int"),
+ gname=dict(type="str"),
+ state=dict(default="present", choices=["present", "absent"]),
+ quota=dict(type="str"),
+ )
+ )
+
+ mutually_exclusive = [["gid", "gname"]]
+ required_if = [["state", "present", ["quota"]]]
+ module = AnsibleModule(
+ argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+ fsys = get_fs(module, blade)
+ if not fsys:
+ module.fail_json(
+ msg="Filesystem {0} does not exist.".format(module.params["name"])
+ )
+ quota = get_quota(module, blade)
+
+ if state == "present" and not quota:
+ create_quota(module, blade)
+ elif state == "present" and quota:
+ update_quota(module, blade)
+ elif state == "absent" and quota:
+ delete_quota(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
new file mode 100644
index 000000000..8525bd8e3
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
@@ -0,0 +1,1548 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_info
+version_added: '1.0.0'
+short_description: Collect information from Pure Storage FlashBlade
+description:
+ - Collect information from a Pure Storage FlashBlade running the
+    Purity//FB operating system. By default, the module will collect basic
+    information including filesystem, snapshot, bucket, object store user
+    and account counts. Additional information can be collected
+    based on the configured set of arguments.
+author:
+ - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the information to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnets, lags, filesystems, snapshots, buckets,
+ replication, policies, arrays, accounts, admins, ad, kerberos
+ and drives.
+ required: false
+ type: list
+ elements: str
+ default: minimum
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: collect default set of info
+ purestorage.flashblade.purefb_info:
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+ register: blade_info
+- name: show default information
+ debug:
+ msg: "{{ blade_info['purefb_info']['default'] }}"
+
+- name: collect configuration and capacity info
+ purestorage.flashblade.purefb_info:
+ gather_subset:
+ - config
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+ register: blade_info
+- name: show config information
+ debug:
+ msg: "{{ blade_info['purefb_info']['config'] }}"
+
+- name: collect all info
+ purestorage.flashblade.purefb_info:
+ gather_subset:
+ - all
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+ register: blade_info
+- name: show all information
+ debug:
+ msg: "{{ blade_info['purefb_info'] }}"
+"""
+
+RETURN = r"""
+purefb_info:
+ description: Returns the information collected from the FlashBlade
+ returned: always
+ type: complex
+ sample: {
+ "admins": {
+ "pureuser": {
+ "api_token_timeout": null,
+ "local": true,
+ "public_key": null
+ },
+ "another_user": {
+ "api_token_timeout": null,
+ "local": false,
+ "public_key": null
+ },
+ },
+ "buckets": {
+ "central": {
+ "account_name": "jake",
+ "bucket_type": "classic",
+ "created": 1628900154000,
+ "data_reduction": null,
+ "destroyed": false,
+ "id": "43758f09-9e71-7bf7-5757-2028a95a2b65",
+ "lifecycle_rules": {},
+ "object_count": 0,
+ "snapshot_space": 0,
+ "time_remaining": null,
+ "total_physical_space": 0,
+ "unique_space": 0,
+ "versioning": "none",
+ "virtual_space": 0
+ },
+ "test": {
+ "account_name": "acme",
+ "bucket_type": "classic",
+ "created": 1630591952000,
+ "data_reduction": 3.6,
+ "destroyed": false,
+ "id": "d5f6149c-fbef-f3c5-58b6-8fd143110ba9",
+ "lifecycle_rules": {
+ "test": {
+ "abort_incomplete_multipart_uploads_after (days)": 1,
+ "cleanup_expired_object_delete_marker": true,
+ "enabled": true,
+ "keep_current_version_for (days)": null,
+ "keep_current_version_until": "2023-12-21",
+ "keep_previous_version_for (days)": null,
+ "prefix": "foo"
+ }
+ },
+ },
+ },
+ "capacity": {
+ "aggregate": {
+ "data_reduction": 1.1179228,
+ "snapshots": 0,
+ "total_physical": 17519748439,
+ "unique": 17519748439,
+ "virtual": 19585726464
+ },
+ "file-system": {
+ "data_reduction": 1.3642412,
+ "snapshots": 0,
+ "total_physical": 4748219708,
+ "unique": 4748219708,
+ "virtual": 6477716992
+ },
+ "object-store": {
+ "data_reduction": 1.0263462,
+ "snapshots": 0,
+ "total_physical": 12771528731,
+ "unique": 12771528731,
+ "virtual": 6477716992
+ },
+ "total": 83359896948925
+ },
+ "config": {
+ "alert_watchers": {
+ "enabled": true,
+ "name": "notify@acmestorage.com"
+ },
+ "array_management": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "management",
+ "services": [
+ "management"
+ ],
+ "uris": []
+ },
+ "directory_service_roles": {
+ "array_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "ops_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "readonly": {
+ "group": null,
+ "group_base": null
+ },
+ "storage_admin": {
+ "group": null,
+ "group_base": null
+ }
+ },
+ "dns": {
+ "domain": "demo.acmestorage.com",
+ "name": "demo-fb-1",
+ "nameservers": [
+ "8.8.8.8"
+ ],
+ "search": [
+ "demo.acmestorage.com"
+ ]
+ },
+ "nfs_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "nfs",
+ "services": [
+ "nfs"
+ ],
+ "uris": []
+ },
+ "ntp": [
+ "0.ntp.pool.org"
+ ],
+ "smb_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "smb",
+ "services": [
+ "smb"
+ ],
+ "uris": []
+ },
+ "smtp": {
+ "name": "demo-fb-1",
+ "relay_host": null,
+ "sender_domain": "acmestorage.com"
+ },
+ "ssl_certs": {
+ "certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
+ "common_name": "Acme Storage",
+ "country": "US",
+ "email": null,
+ "intermediate_certificate": null,
+ "issued_by": "Acme Storage",
+ "issued_to": "Acme Storage",
+ "key_size": 4096,
+ "locality": null,
+ "name": "global",
+ "organization": "Acme Storage",
+ "organizational_unit": "Acme Storage",
+ "passphrase": null,
+ "private_key": null,
+ "state": null,
+ "status": "self-signed",
+ "valid_from": "1508433967000",
+ "valid_to": "2458833967000"
+ }
+ },
+ "default": {
+ "blades": 15,
+ "buckets": 7,
+ "filesystems": 2,
+ "flashblade_name": "demo-fb-1",
+ "object_store_accounts": 1,
+ "object_store_users": 1,
+ "purity_version": "2.2.0",
+ "snapshots": 1,
+ "total_capacity": 83359896948925,
+ "smb_mode": "native"
+ },
+ "filesystems": {
+ "k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
+ "default_group_quota": 0,
+ "default_user_quota": 0,
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": true,
+ "nfs_rules": "10.21.255.0/24(rw,no_root_squash)",
+ "provisioned": 21474836480,
+ "snapshot_enabled": false
+ },
+ "z": {
+ "default_group_quota": 0,
+ "default_user_quota": 0,
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": false,
+ "provisioned": 1073741824,
+ "snapshot_enabled": false
+ }
+ },
+ "lag": {
+ "uplink": {
+ "lag_speed": 0,
+ "port_speed": 40000000000,
+ "ports": [
+ {
+ "name": "CH1.FM1.ETH1.1"
+ },
+ {
+ "name": "CH1.FM1.ETH1.2"
+ },
+ ],
+ "status": "healthy"
+ }
+ },
+ "network": {
+ "fm1.admin0": {
+ "address": "10.10.100.6",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "fm2.admin0": {
+ "address": "10.10.100.7",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "nfs1": {
+ "address": "10.10.100.4",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "data"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "vir0": {
+ "address": "10.10.100.5",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ }
+ },
+ "performance": {
+ "aggregate": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "http": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "nfs": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "s3": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ }
+ },
+ "snapshots": {
+ "z.188": {
+ "destroyed": false,
+ "source": "z",
+ "source_destroyed": false,
+ "suffix": "188"
+ }
+ },
+ "subnet": {
+ "new-mgmt": {
+ "gateway": "10.10.100.1",
+ "interfaces": [
+ {
+ "name": "fm1.admin0"
+ },
+ {
+ "name": "fm2.admin0"
+ },
+ {
+ "name": "nfs1"
+ },
+ {
+ "name": "vir0"
+ }
+ ],
+ "lag": "uplink",
+ "mtu": 1500,
+ "prefix": "10.10.100.0/24",
+ "services": [
+ "data",
+ "management",
+ "support"
+ ],
+ "vlan": 2200
+ }
+ }
+ }
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+from datetime import datetime
+
+
+MIN_REQUIRED_API_VERSION = "1.3"
+HARD_LIMIT_API_VERSION = "1.4"
+POLICIES_API_VERSION = "1.5"
+CERT_GROUPS_API_VERSION = "1.8"
+REPLICATION_API_VERSION = "1.9"
+MULTIPROTOCOL_API_VERSION = "1.11"
+MIN_32_API = "2.0"
+LIFECYCLE_API_VERSION = "2.1"
+SMB_MODE_API_VERSION = "2.2"
+NFS_POLICY_API_VERSION = "2.3"
+VSO_VERSION = "2.4"
+DRIVES_API_VERSION = "2.5"
+SECURITY_API_VERSION = "2.7"
+BUCKET_API_VERSION = "2.8"
+
+
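+# Convert a millisecond offset into an hour-of-day string,
+# e.g. _millisecs_to_time(28800000) -> "08:00".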
+def _millisecs_to_time(millisecs):
+ if millisecs:
+        return str(int(millisecs / 3600000 % 24)).zfill(2) + ":00"
+ return None
+
+
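+# Scale a raw bytes-per-second value to a human-readable rate,
+# e.g. _bytes_to_human(1536) -> "1.5 KB/s".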
+def _bytes_to_human(bytes_number):
+ if bytes_number:
+ labels = ["B/s", "KB/s", "MB/s", "GB/s", "TB/s", "PB/s"]
+ i = 0
+ double_bytes = bytes_number
+ while i < len(labels) and bytes_number >= 1024:
+ double_bytes = bytes_number / 1024.0
+ i += 1
+ bytes_number = bytes_number / 1024
+ return str(round(double_bytes, 2)) + " " + labels[i]
+ return None
+
+
+def generate_default_dict(module, blade):
+ default_info = {}
+ defaults = blade.arrays.list_arrays().items[0]
+ default_info["flashblade_name"] = defaults.name
+ default_info["purity_version"] = defaults.version
+ default_info["filesystems"] = len(blade.file_systems.list_file_systems().items)
+ default_info["snapshots"] = len(
+ blade.file_system_snapshots.list_file_system_snapshots().items
+ )
+ default_info["buckets"] = len(blade.buckets.list_buckets().items)
+ default_info["object_store_users"] = len(
+ blade.object_store_users.list_object_store_users().items
+ )
+ default_info["object_store_accounts"] = len(
+ blade.object_store_accounts.list_object_store_accounts().items
+ )
+ default_info["blades"] = len(blade.blade.list_blades().items)
+ default_info["certificates"] = len(blade.certificates.list_certificates().items)
+ default_info["total_capacity"] = blade.arrays.list_arrays_space().items[0].capacity
+ api_version = blade.api_version.list_versions().versions
+ default_info["api_versions"] = api_version
+ if POLICIES_API_VERSION in api_version:
+ default_info["policies"] = len(blade.policies.list_policies().items)
+ if CERT_GROUPS_API_VERSION in api_version:
+ default_info["certificate_groups"] = len(
+ blade.certificate_groups.list_certificate_groups().items
+ )
+ if REPLICATION_API_VERSION in api_version:
+ default_info["fs_replicas"] = len(
+ blade.file_system_replica_links.list_file_system_replica_links().items
+ )
+ default_info["remote_credentials"] = len(
+ blade.object_store_remote_credentials.list_object_store_remote_credentials().items
+ )
+ default_info["bucket_replicas"] = len(
+ blade.bucket_replica_links.list_bucket_replica_links().items
+ )
+ default_info["connected_arrays"] = len(
+ blade.array_connections.list_array_connections().items
+ )
+ default_info["targets"] = len(blade.targets.list_targets().items)
+ default_info["kerberos_keytabs"] = len(blade.keytabs.list_keytabs().items)
+ # This section is just for REST 2.x features
+ if MIN_32_API in api_version:
+ blade = get_system(module)
+ blade_info = list(blade.get_arrays().items)[0]
+ default_info["object_store_virtual_hosts"] = len(
+ blade.get_object_store_virtual_hosts().items
+ )
+ default_info["api_clients"] = len(blade.get_api_clients().items)
+ default_info["idle_timeout"] = int(blade_info.idle_timeout / 60000)
+ if list(blade.get_arrays_eula().items)[0].signature.accepted:
+ default_info["EULA"] = "Signed"
+ else:
+ default_info["EULA"] = "Not Signed"
+ if NFS_POLICY_API_VERSION in api_version:
+ admin_settings = list(blade.get_admins_settings().items)[0]
+ default_info["max_login_attempts"] = admin_settings.max_login_attempts
+ default_info["min_password_length"] = admin_settings.min_password_length
+ if admin_settings.lockout_duration:
+ default_info["lockout_duration"] = (
+ str(admin_settings.lockout_duration / 1000) + " seconds"
+ )
+ if NFS_POLICY_API_VERSION in api_version:
+ default_info["smb_mode"] = blade_info.smb_mode
+ if VSO_VERSION in api_version:
+ default_info["timezone"] = blade_info.time_zone
+ if DRIVES_API_VERSION in api_version:
+ default_info["product_type"] = getattr(
+ blade_info, "product_type", "Unknown"
+ )
+ if SECURITY_API_VERSION in api_version:
+ dar = blade_info.encryption.data_at_rest
+ default_info["encryption"] = {
+ "data_at_rest_enabled": dar.enabled,
+ "data_at_rest_algorithms": dar.algorithms,
+ "data_at_rest_entropy_source": dar.entropy_source,
+ }
+ keys = list(blade.get_support_verification_keys().items)
+ default_info["support_keys"] = {}
+ for key in range(0, len(keys)):
+ keyname = keys[key].name
+ default_info["support_keys"][keyname] = {keys[key].verification_key}
+ default_info["security_update"] = getattr(
+ blade_info, "security_update", None
+ )
+
+ return default_info
+
+
+def generate_perf_dict(blade):
+ perf_info = {}
+ total_perf = blade.arrays.list_arrays_performance()
+ http_perf = blade.arrays.list_arrays_performance(protocol="http")
+ s3_perf = blade.arrays.list_arrays_performance(protocol="s3")
+ nfs_perf = blade.arrays.list_arrays_performance(protocol="nfs")
+ perf_info["aggregate"] = {
+ "bytes_per_op": total_perf.items[0].bytes_per_op,
+ "bytes_per_read": total_perf.items[0].bytes_per_read,
+ "bytes_per_write": total_perf.items[0].bytes_per_write,
+ "read_bytes_per_sec": total_perf.items[0].read_bytes_per_sec,
+ "reads_per_sec": total_perf.items[0].reads_per_sec,
+ "usec_per_other_op": total_perf.items[0].usec_per_other_op,
+ "usec_per_read_op": total_perf.items[0].usec_per_read_op,
+ "usec_per_write_op": total_perf.items[0].usec_per_write_op,
+ "write_bytes_per_sec": total_perf.items[0].write_bytes_per_sec,
+ "writes_per_sec": total_perf.items[0].writes_per_sec,
+ }
+ perf_info["http"] = {
+ "bytes_per_op": http_perf.items[0].bytes_per_op,
+ "bytes_per_read": http_perf.items[0].bytes_per_read,
+ "bytes_per_write": http_perf.items[0].bytes_per_write,
+ "read_bytes_per_sec": http_perf.items[0].read_bytes_per_sec,
+ "reads_per_sec": http_perf.items[0].reads_per_sec,
+ "usec_per_other_op": http_perf.items[0].usec_per_other_op,
+ "usec_per_read_op": http_perf.items[0].usec_per_read_op,
+ "usec_per_write_op": http_perf.items[0].usec_per_write_op,
+ "write_bytes_per_sec": http_perf.items[0].write_bytes_per_sec,
+ "writes_per_sec": http_perf.items[0].writes_per_sec,
+ }
+ perf_info["s3"] = {
+ "bytes_per_op": s3_perf.items[0].bytes_per_op,
+ "bytes_per_read": s3_perf.items[0].bytes_per_read,
+ "bytes_per_write": s3_perf.items[0].bytes_per_write,
+ "read_bytes_per_sec": s3_perf.items[0].read_bytes_per_sec,
+ "reads_per_sec": s3_perf.items[0].reads_per_sec,
+ "usec_per_other_op": s3_perf.items[0].usec_per_other_op,
+ "usec_per_read_op": s3_perf.items[0].usec_per_read_op,
+ "usec_per_write_op": s3_perf.items[0].usec_per_write_op,
+ "write_bytes_per_sec": s3_perf.items[0].write_bytes_per_sec,
+ "writes_per_sec": s3_perf.items[0].writes_per_sec,
+ }
+ perf_info["nfs"] = {
+ "bytes_per_op": nfs_perf.items[0].bytes_per_op,
+ "bytes_per_read": nfs_perf.items[0].bytes_per_read,
+ "bytes_per_write": nfs_perf.items[0].bytes_per_write,
+ "read_bytes_per_sec": nfs_perf.items[0].read_bytes_per_sec,
+ "reads_per_sec": nfs_perf.items[0].reads_per_sec,
+ "usec_per_other_op": nfs_perf.items[0].usec_per_other_op,
+ "usec_per_read_op": nfs_perf.items[0].usec_per_read_op,
+ "usec_per_write_op": nfs_perf.items[0].usec_per_write_op,
+ "write_bytes_per_sec": nfs_perf.items[0].write_bytes_per_sec,
+ "writes_per_sec": nfs_perf.items[0].writes_per_sec,
+ }
+ api_version = blade.api_version.list_versions().versions
+ if REPLICATION_API_VERSION in api_version:
+ file_repl_perf = (
+ blade.array_connections.list_array_connections_performance_replication(
+ type="file-system"
+ )
+ )
+ obj_repl_perf = (
+ blade.array_connections.list_array_connections_performance_replication(
+ type="object-store"
+ )
+ )
+ if len(file_repl_perf.total):
+ perf_info["file_replication"] = {
+ "received_bytes_per_sec": file_repl_perf.total[
+ 0
+ ].periodic.received_bytes_per_sec,
+ "transmitted_bytes_per_sec": file_repl_perf.total[
+ 0
+ ].periodic.transmitted_bytes_per_sec,
+ }
+ if len(obj_repl_perf.total):
+ perf_info["object_replication"] = {
+ "received_bytes_per_sec": obj_repl_perf.total[
+ 0
+ ].periodic.received_bytes_per_sec,
+ "transmitted_bytes_per_sec": obj_repl_perf.total[
+ 0
+ ].periodic.transmitted_bytes_per_sec,
+ }
+ return perf_info
+
+
+def generate_config_dict(blade):
+ config_info = {}
+ config_info["dns"] = blade.dns.list_dns().items[0].to_dict()
+ config_info["smtp"] = blade.smtp.list_smtp().items[0].to_dict()
+ try:
+ config_info["alert_watchers"] = (
+ blade.alert_watchers.list_alert_watchers().items[0].to_dict()
+ )
+ except Exception:
+ config_info["alert_watchers"] = ""
+ api_version = blade.api_version.list_versions().versions
+ if HARD_LIMIT_API_VERSION in api_version:
+ config_info["array_management"] = (
+ blade.directory_services.list_directory_services(names=["management"])
+ .items[0]
+ .to_dict()
+ )
+ config_info["directory_service_roles"] = {}
+ roles = blade.directory_services.list_directory_services_roles()
+ for role in range(0, len(roles.items)):
+ role_name = roles.items[role].name
+ config_info["directory_service_roles"][role_name] = {
+ "group": roles.items[role].group,
+ "group_base": roles.items[role].group_base,
+ }
+ config_info["nfs_directory_service"] = (
+ blade.directory_services.list_directory_services(names=["nfs"])
+ .items[0]
+ .to_dict()
+ )
+ config_info["smb_directory_service"] = (
+ blade.directory_services.list_directory_services(names=["smb"])
+ .items[0]
+ .to_dict()
+ )
+ config_info["ntp"] = blade.arrays.list_arrays().items[0].ntp_servers
+ config_info["ssl_certs"] = blade.certificates.list_certificates().items[0].to_dict()
+ api_version = blade.api_version.list_versions().versions
+ if CERT_GROUPS_API_VERSION in api_version:
+ try:
+ config_info["certificate_groups"] = (
+ blade.certificate_groups.list_certificate_groups().items[0].to_dict()
+ )
+ except Exception:
+ config_info["certificate_groups"] = ""
+ if REPLICATION_API_VERSION in api_version:
+ config_info["snmp_agents"] = {}
+ snmp_agents = blade.snmp_agents.list_snmp_agents()
+ for agent in range(0, len(snmp_agents.items)):
+ agent_name = snmp_agents.items[agent].name
+ config_info["snmp_agents"][agent_name] = {
+ "version": snmp_agents.items[agent].version,
+ "engine_id": snmp_agents.items[agent].engine_id,
+ }
+ if config_info["snmp_agents"][agent_name]["version"] == "v3":
+ config_info["snmp_agents"][agent_name][
+ "auth_protocol"
+ ] = snmp_agents.items[agent].v3.auth_protocol
+ config_info["snmp_agents"][agent_name][
+ "privacy_protocol"
+ ] = snmp_agents.items[agent].v3.privacy_protocol
+ config_info["snmp_agents"][agent_name]["user"] = snmp_agents.items[
+ agent
+ ].v3.user
+ config_info["snmp_managers"] = {}
+ snmp_managers = blade.snmp_managers.list_snmp_managers()
+ for manager in range(0, len(snmp_managers.items)):
+ mgr_name = snmp_managers.items[manager].name
+ config_info["snmp_managers"][mgr_name] = {
+ "version": snmp_managers.items[manager].version,
+ "host": snmp_managers.items[manager].host,
+ "notification": snmp_managers.items[manager].notification,
+ }
+ if config_info["snmp_managers"][mgr_name]["version"] == "v3":
+ config_info["snmp_managers"][mgr_name][
+ "auth_protocol"
+ ] = snmp_managers.items[manager].v3.auth_protocol
+ config_info["snmp_managers"][mgr_name][
+ "privacy_protocol"
+ ] = snmp_managers.items[manager].v3.privacy_protocol
+ config_info["snmp_managers"][mgr_name]["user"] = snmp_managers.items[
+ manager
+ ].v3.user
+ return config_info
+
+
+def generate_subnet_dict(blade):
+ sub_info = {}
+ subnets = blade.subnets.list_subnets()
+ for sub in range(0, len(subnets.items)):
+ sub_name = subnets.items[sub].name
+ if subnets.items[sub].enabled:
+ sub_info[sub_name] = {
+ "gateway": subnets.items[sub].gateway,
+ "mtu": subnets.items[sub].mtu,
+ "vlan": subnets.items[sub].vlan,
+ "prefix": subnets.items[sub].prefix,
+ "services": subnets.items[sub].services,
+ }
+ sub_info[sub_name]["lag"] = subnets.items[sub].link_aggregation_group.name
+ sub_info[sub_name]["interfaces"] = []
+ for iface in range(0, len(subnets.items[sub].interfaces)):
+ sub_info[sub_name]["interfaces"].append(
+ {"name": subnets.items[sub].interfaces[iface].name}
+ )
+ return sub_info
+
+
+def generate_lag_dict(blade):
+ lag_info = {}
+ groups = blade.link_aggregation_groups.list_link_aggregation_groups()
+ for groupcnt in range(0, len(groups.items)):
+ lag_name = groups.items[groupcnt].name
+ lag_info[lag_name] = {
+ "lag_speed": groups.items[groupcnt].lag_speed,
+ "port_speed": groups.items[groupcnt].port_speed,
+ "status": groups.items[groupcnt].status,
+ }
+ lag_info[lag_name]["ports"] = []
+ for port in range(0, len(groups.items[groupcnt].ports)):
+ lag_info[lag_name]["ports"].append(
+ {"name": groups.items[groupcnt].ports[port].name}
+ )
+ return lag_info
+
+
+def generate_admin_dict(module, blade):
+ admin_info = {}
+ api_version = blade.api_version.list_versions().versions
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ admins = blade.admins.list_admins()
+ for admin in range(0, len(admins.items)):
+ admin_name = admins.items[admin].name
+ admin_info[admin_name] = {
+ "api_token_timeout": admins.items[admin].api_token_timeout,
+ "public_key": admins.items[admin].public_key,
+ "local": admins.items[admin].is_local,
+ }
+
+ if MIN_32_API in api_version:
+ bladev2 = get_system(module)
+ admins = list(bladev2.get_admins().items)
+ for admin in range(0, len(admins)):
+ admin_name = admins[admin].name
+ if admins[admin].api_token.expires_at:
+ admin_info[admin_name]["token_expires"] = datetime.fromtimestamp(
+ admins[admin].api_token.expires_at / 1000
+ ).strftime("%Y-%m-%d %H:%M:%S")
+ else:
+ admin_info[admin_name]["token_expires"] = None
+ admin_info[admin_name]["token_created"] = datetime.fromtimestamp(
+ admins[admin].api_token.created_at / 1000
+ ).strftime("%Y-%m-%d %H:%M:%S")
+ admin_info[admin_name]["role"] = admins[admin].role.name
+ if NFS_POLICY_API_VERSION in api_version:
+ admin_info[admin_name]["locked"] = admins[admin].locked
+ admin_info[admin_name]["lockout_remaining"] = admins[
+ admin
+ ].lockout_remaining
+ return admin_info
+
+
+def generate_targets_dict(blade):
+ targets_info = {}
+ targets = blade.targets.list_targets()
+ for target in range(0, len(targets.items)):
+ target_name = targets.items[target].name
+ targets_info[target_name] = {
+ "address": targets.items[target].address,
+ "status": targets.items[target].status,
+ "status_details": targets.items[target].status_details,
+ }
+ return targets_info
+
+
+def generate_remote_creds_dict(blade):
+ remote_creds_info = {}
+ remote_creds = (
+ blade.object_store_remote_credentials.list_object_store_remote_credentials()
+ )
+ for cred_cnt in range(0, len(remote_creds.items)):
+ cred_name = remote_creds.items[cred_cnt].name
+ remote_creds_info[cred_name] = {
+ "access_key": remote_creds.items[cred_cnt].access_key_id,
+ "remote_array": remote_creds.items[cred_cnt].remote.name,
+ }
+ return remote_creds_info
+
+
+def generate_file_repl_dict(blade):
+ file_repl_info = {}
+ file_links = blade.file_system_replica_links.list_file_system_replica_links()
+ for linkcnt in range(0, len(file_links.items)):
+ fs_name = file_links.items[linkcnt].local_file_system.name
+ file_repl_info[fs_name] = {
+ "direction": file_links.items[linkcnt].direction,
+ "lag": file_links.items[linkcnt].lag,
+ "status": file_links.items[linkcnt].status,
+ "remote_fs": file_links.items[linkcnt].remote.name
+ + ":"
+ + file_links.items[linkcnt].remote_file_system.name,
+ "recovery_point": file_links.items[linkcnt].recovery_point,
+ }
+ file_repl_info[fs_name]["policies"] = []
+ for policy_cnt in range(0, len(file_links.items[linkcnt].policies)):
+ file_repl_info[fs_name]["policies"].append(
+ file_links.items[linkcnt].policies[policy_cnt].display_name
+ )
+ return file_repl_info
+
+
+def generate_bucket_repl_dict(module, blade):
+ bucket_repl_info = {}
+ bucket_links = blade.bucket_replica_links.list_bucket_replica_links()
+ for linkcnt in range(0, len(bucket_links.items)):
+ bucket_name = bucket_links.items[linkcnt].local_bucket.name
+ bucket_repl_info[bucket_name] = {
+ "direction": bucket_links.items[linkcnt].direction,
+ "lag": bucket_links.items[linkcnt].lag,
+ "paused": bucket_links.items[linkcnt].paused,
+ "status": bucket_links.items[linkcnt].status,
+ "remote_bucket": bucket_links.items[linkcnt].remote_bucket.name,
+ "remote_credentials": bucket_links.items[linkcnt].remote_credentials.name,
+ "recovery_point": bucket_links.items[linkcnt].recovery_point,
+ "object_backlog": {},
+ }
+ api_version = blade.api_version.list_versions().versions
+ if SMB_MODE_API_VERSION in api_version:
+ blade = get_system(module)
+ bucket_links = list(blade.get_bucket_replica_links().items)
+ for linkcnt in range(0, len(bucket_links)):
+ bucket_name = bucket_links[linkcnt].local_bucket.name
+ bucket_repl_info[bucket_name]["object_backlog"] = {
+ "bytes_count": bucket_links[linkcnt].object_backlog.bytes_count,
+ "delete_ops_count": bucket_links[
+ linkcnt
+ ].object_backlog.delete_ops_count,
+ "other_ops_count": bucket_links[linkcnt].object_backlog.other_ops_count,
+ "put_ops_count": bucket_links[linkcnt].object_backlog.put_ops_count,
+ }
+ bucket_repl_info[bucket_name]["cascading_enabled"] = bucket_links[
+ linkcnt
+ ].cascading_enabled
+ return bucket_repl_info
+
+
+def generate_network_dict(blade):
+ net_info = {}
+ ports = blade.network_interfaces.list_network_interfaces()
+ for portcnt in range(0, len(ports.items)):
+ int_name = ports.items[portcnt].name
+ if ports.items[portcnt].enabled:
+ net_info[int_name] = {
+ "type": ports.items[portcnt].type,
+ "mtu": ports.items[portcnt].mtu,
+ "vlan": ports.items[portcnt].vlan,
+ "address": ports.items[portcnt].address,
+ "services": ports.items[portcnt].services,
+ "gateway": ports.items[portcnt].gateway,
+ "netmask": ports.items[portcnt].netmask,
+ }
+ return net_info
+
+
+def generate_capacity_dict(blade):
+ capacity_info = {}
+ total_cap = blade.arrays.list_arrays_space()
+ file_cap = blade.arrays.list_arrays_space(type="file-system")
+ object_cap = blade.arrays.list_arrays_space(type="object-store")
+ capacity_info["total"] = total_cap.items[0].capacity
+ capacity_info["aggregate"] = {
+ "data_reduction": total_cap.items[0].space.data_reduction,
+ "snapshots": total_cap.items[0].space.snapshots,
+ "total_physical": total_cap.items[0].space.total_physical,
+ "unique": total_cap.items[0].space.unique,
+ "virtual": total_cap.items[0].space.virtual,
+ }
+ capacity_info["file-system"] = {
+ "data_reduction": file_cap.items[0].space.data_reduction,
+ "snapshots": file_cap.items[0].space.snapshots,
+ "total_physical": file_cap.items[0].space.total_physical,
+ "unique": file_cap.items[0].space.unique,
+ "virtual": file_cap.items[0].space.virtual,
+ }
+ capacity_info["object-store"] = {
+ "data_reduction": object_cap.items[0].space.data_reduction,
+ "snapshots": object_cap.items[0].space.snapshots,
+ "total_physical": object_cap.items[0].space.total_physical,
+ "unique": object_cap.items[0].space.unique,
+ "virtual": file_cap.items[0].space.virtual,
+ }
+
+ return capacity_info
+
+
+def generate_snap_dict(blade):
+ snap_info = {}
+ snaps = blade.file_system_snapshots.list_file_system_snapshots()
+ api_version = blade.api_version.list_versions().versions
+ for snap in range(0, len(snaps.items)):
+ snapshot = snaps.items[snap].name
+ snap_info[snapshot] = {
+ "destroyed": snaps.items[snap].destroyed,
+ "source": snaps.items[snap].source,
+ "suffix": snaps.items[snap].suffix,
+ "source_destroyed": snaps.items[snap].source_destroyed,
+ }
+ if REPLICATION_API_VERSION in api_version:
+ snap_info[snapshot]["owner"] = snaps.items[snap].owner.name
+ snap_info[snapshot]["owner_destroyed"] = snaps.items[snap].owner_destroyed
+ snap_info[snapshot]["source_display_name"] = snaps.items[
+ snap
+ ].source_display_name
+ snap_info[snapshot]["source_is_local"] = snaps.items[snap].source_is_local
+ snap_info[snapshot]["source_location"] = snaps.items[
+ snap
+ ].source_location.name
+ return snap_info
+
+
+def generate_snap_transfer_dict(blade):
+ snap_transfer_info = {}
+ snap_transfers = blade.file_system_snapshots.list_file_system_snapshots_transfer()
+ for snap_transfer in range(0, len(snap_transfers.items)):
+ transfer = snap_transfers.items[snap_transfer].name
+ snap_transfer_info[transfer] = {
+ "completed": snap_transfers.items[snap_transfer].completed,
+ "data_transferred": snap_transfers.items[snap_transfer].data_transferred,
+ "progress": snap_transfers.items[snap_transfer].progress,
+ "direction": snap_transfers.items[snap_transfer].direction,
+ "remote": snap_transfers.items[snap_transfer].remote.name,
+ "remote_snapshot": snap_transfers.items[snap_transfer].remote_snapshot.name,
+ "started": snap_transfers.items[snap_transfer].started,
+ "status": snap_transfers.items[snap_transfer].status,
+ }
+ return snap_transfer_info
+
+
+def generate_array_conn_dict(module, blade):
+ array_conn_info = {}
+ arraysv2 = {}
+ api_version = blade.api_version.list_versions().versions
+ arrays = blade.array_connections.list_array_connections()
+ if NFS_POLICY_API_VERSION in api_version:
+ bladev2 = get_system(module)
+ arraysv2 = list(bladev2.get_array_connections().items)
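+ # REST 2.x exposes per-connection replication throttles; these are merged
+ # into the 1.x connection data below.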
+ for arraycnt in range(0, len(arrays.items)):
+ array = arrays.items[arraycnt].remote.name
+ array_conn_info[array] = {
+ "encrypted": arrays.items[arraycnt].encrypted,
+ "replication_addresses": arrays.items[arraycnt].replication_addresses,
+ "management_address": arrays.items[arraycnt].management_address,
+ "status": arrays.items[arraycnt].status,
+ "version": arrays.items[arraycnt].version,
+ "throttle": [],
+ }
+ if arrays.items[arraycnt].encrypted:
+ array_conn_info[array]["ca_certificate_group"] = arrays.items[
+ arraycnt
+ ].ca_certificate_group.name
+ for v2array in range(0, len(arraysv2)):
+ if arraysv2[v2array].remote.name == array:
+ array_conn_info[array]["throttle"] = {
+ "default_limit": _bytes_to_human(
+ arraysv2[v2array].throttle.default_limit
+ ),
+ "window_limit": _bytes_to_human(
+ arraysv2[v2array].throttle.window_limit
+ ),
+ "window_start": _millisecs_to_time(
+ arraysv2[v2array].throttle.window.start
+ ),
+ "window_end": _millisecs_to_time(
+ arraysv2[v2array].throttle.window.end
+ ),
+ }
+ return array_conn_info
+
+
+def generate_policies_dict(blade):
+ policies_info = {}
+ policies = blade.policies.list_policies()
+ for policycnt in range(0, len(policies.items)):
+ policy = policies.items[policycnt].name
+ policies_info[policy] = {}
+ policies_info[policy]["enabled"] = policies.items[policycnt].enabled
+ if policies.items[policycnt].rules:
+ policies_info[policy]["rules"] = (
+ policies.items[policycnt].rules[0].to_dict()
+ )
+ return policies_info
+
+
+def generate_bucket_dict(module, blade):
+ bucket_info = {}
+ buckets = blade.buckets.list_buckets()
+ for bckt in range(0, len(buckets.items)):
+ bucket = buckets.items[bckt].name
+ bucket_info[bucket] = {
+ "versioning": buckets.items[bckt].versioning,
+ "bucket_type": getattr(buckets.items[bckt], "bucket_type", None),
+ "object_count": buckets.items[bckt].object_count,
+ "id": buckets.items[bckt].id,
+ "account_name": buckets.items[bckt].account.name,
+ "data_reduction": buckets.items[bckt].space.data_reduction,
+ "snapshot_space": buckets.items[bckt].space.snapshots,
+ "total_physical_space": buckets.items[bckt].space.total_physical,
+ "unique_space": buckets.items[bckt].space.unique,
+ "virtual_space": buckets.items[bckt].space.virtual,
+ "created": buckets.items[bckt].created,
+ "destroyed": buckets.items[bckt].destroyed,
+ "time_remaining": buckets.items[bckt].time_remaining,
+ "lifecycle_rules": {},
+ }
+ api_version = blade.api_version.list_versions().versions
+ if LIFECYCLE_API_VERSION in api_version:
+ blade = get_system(module)
+ for bckt in range(0, len(buckets.items)):
+ if buckets.items[bckt].destroyed:
+ # skip processing buckets marked as destroyed
+ continue
+ all_rules = list(
+ blade.get_lifecycle_rules(bucket_ids=[buckets.items[bckt].id]).items
+ )
+ for rule in range(0, len(all_rules)):
+ bucket_name = all_rules[rule].bucket.name
+ rule_id = all_rules[rule].rule_id
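+ # Purity reports retention periods in milliseconds; 86400000 ms = 1 day.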
+ if all_rules[rule].keep_previous_version_for:
+ keep_previous_version_for = int(
+ all_rules[rule].keep_previous_version_for / 86400000
+ )
+ else:
+ keep_previous_version_for = None
+ if all_rules[rule].keep_current_version_for:
+ keep_current_version_for = int(
+ all_rules[rule].keep_current_version_for / 86400000
+ )
+ else:
+ keep_current_version_for = None
+ if all_rules[rule].abort_incomplete_multipart_uploads_after:
+ abort_incomplete_multipart_uploads_after = int(
+ all_rules[rule].abort_incomplete_multipart_uploads_after
+ / 86400000
+ )
+ else:
+ abort_incomplete_multipart_uploads_after = None
+ if all_rules[rule].keep_current_version_until:
+ keep_current_version_until = datetime.fromtimestamp(
+ all_rules[rule].keep_current_version_until / 1000
+ ).strftime("%Y-%m-%d")
+ else:
+ keep_current_version_until = None
+ bucket_info[bucket_name]["lifecycle_rules"][rule_id] = {
+ "keep_previous_version_for (days)": keep_previous_version_for,
+ "keep_current_version_for (days)": keep_current_version_for,
+ "keep_current_version_until": keep_current_version_until,
+ "prefix": all_rules[rule].prefix,
+ "enabled": all_rules[rule].enabled,
+ "abort_incomplete_multipart_uploads_after (days)": abort_incomplete_multipart_uploads_after,
+ "cleanup_expired_object_delete_marker": all_rules[
+ rule
+ ].cleanup_expired_object_delete_marker,
+ }
+ if VSO_VERSION in api_version:
+ buckets = list(blade.get_buckets().items)
+ for bucket in range(0, len(buckets)):
+ bucket_info[buckets[bucket].name]["bucket_type"] = buckets[
+ bucket
+ ].bucket_type
+ if BUCKET_API_VERSION in api_version:
+ for bucket in range(0, len(buckets)):
+ bucket_info[buckets[bucket].name]["retention_lock"] = buckets[
+ bucket
+ ].retention_lock
+ bucket_info[buckets[bucket].name]["quota_limit"] = buckets[
+ bucket
+ ].quota_limit
+ bucket_info[buckets[bucket].name]["object_lock_config"] = {
+ "enabled": buckets[bucket].object_lock_config.enabled,
+ "freeze_locked_objects": buckets[
+ bucket
+ ].object_lock_config.freeze_locked_objects,
+ }
+ bucket_info[buckets[bucket].name]["eradication_config"] = {
+ "eradication_delay": buckets[
+ bucket
+ ].eradication_config.eradication_delay,
+ "manual_eradication": buckets[
+ bucket
+ ].eradication_config.manual_eradication,
+ }
+ return bucket_info
+
+
+def generate_kerb_dict(blade):
+ kerb_info = {}
+ keytabs = list(blade.get_keytabs().items)
+ for ktab in range(0, len(keytabs)):
+ keytab_name = keytabs[ktab].prefix
+ kerb_info[keytab_name] = {}
+ for key in range(0, len(keytabs)):
+ if keytabs[key].prefix == keytab_name:
+ kerb_info[keytab_name][keytabs[key].suffix] = {
+ "fqdn": keytabs[key].fqdn,
+ "kvno": keytabs[key].kvno,
+ "principal": keytabs[key].principal,
+ "realm": keytabs[key].realm,
+ "encryption_type": keytabs[key].encryption_type,
+ }
+ return kerb_info
+
+
+def generate_ad_dict(blade):
+ ad_info = {}
+ active_directory = blade.get_active_directory()
+ if active_directory.total_item_count != 0:
+ ad_account = list(active_directory.items)[0]
+ ad_info[ad_account.name] = {
+ "computer": ad_account.computer_name,
+ "domain": ad_account.domain,
+ "directory_servers": ad_account.directory_servers,
+ "kerberos_servers": ad_account.kerberos_servers,
+ "service_principals": ad_account.service_principal_names,
+ "join_ou": ad_account.join_ou,
+ "encryption_types": ad_account.encryption_types,
+ }
+ return ad_info
+
+
+def generate_object_store_access_policies_dict(blade):
+ policies_info = {}
+ policies = list(blade.get_object_store_access_policies().items)
+ for policy in range(0, len(policies)):
+ policy_name = policies[policy].name
+ policies_info[policy_name] = {
+ "ARN": policies[policy].arn,
+ "description": policies[policy].description,
+ "enabled": policies[policy].enabled,
+ "local": policies[policy].is_local,
+ "rules": [],
+ }
+ for rule in range(0, len(policies[policy].rules)):
+ policies_info[policy_name]["rules"].append(
+ {
+ "actions": policies[policy].rules[rule].actions,
+ "conditions": {
+ "source_ips": policies[policy]
+ .rules[rule]
+ .conditions.source_ips,
+ "s3_delimiters": policies[policy]
+ .rules[rule]
+ .conditions.s3_delimiters,
+ "s3_prefixes": policies[policy]
+ .rules[rule]
+ .conditions.s3_prefixes,
+ },
+ "effect": policies[policy].rules[rule].effect,
+ "name": policies[policy].rules[rule].name,
+ }
+ )
+ return policies_info
+
+
+def generate_nfs_export_policies_dict(blade):
+ policies_info = {}
+ policies = list(blade.get_nfs_export_policies().items)
+ for policy in range(0, len(policies)):
+ policy_name = policies[policy].name
+ policies_info[policy_name] = {
+ "local": policies[policy].is_local,
+ "enabled": policies[policy].enabled,
+ "rules": [],
+ }
+ for rule in range(0, len(policies[policy].rules)):
+ policies_info[policy_name]["rules"].append(
+ {
+ "access": policies[policy].rules[rule].access,
+ "anongid": policies[policy].rules[rule].anongid,
+ "anonuid": policies[policy].rules[rule].anonuid,
+ "atime": policies[policy].rules[rule].atime,
+ "client": policies[policy].rules[rule].client,
+ "fileid_32bit": policies[policy].rules[rule].fileid_32bit,
+ "permission": policies[policy].rules[rule].permission,
+ "secure": policies[policy].rules[rule].secure,
+ "security": policies[policy].rules[rule].security,
+ "index": policies[policy].rules[rule].index,
+ }
+ )
+ return policies_info
+
+
+def generate_object_store_accounts_dict(blade):
+ account_info = {}
+ accounts = list(blade.get_object_store_accounts().items)
+ for account in range(0, len(accounts)):
+ acc_name = accounts[account].name
+ account_info[acc_name] = {
+ "object_count": accounts[account].object_count,
+ "data_reduction": accounts[account].space.data_reduction,
+ "snapshots_space": accounts[account].space.snapshots,
+ "total_physical_space": accounts[account].space.total_physical,
+ "unique_space": accounts[account].space.unique,
+ "virtual_space": accounts[account].space.virtual,
+ "quota_limit": getattr(accounts[account], "quota_limit", None),
+ "hard_limit_enabled": getattr(
+ accounts[account], "hard_limit_enabled", None
+ ),
+ "total_provisioned": getattr(
+ accounts[account].space, "total_provisioned", None
+ ),
+ "users": {},
+ }
+ try:
+ account_info[acc_name]["bucket_defaults"] = {
+ "hard_limit_enabled": accounts[
+ account
+ ].bucket_defaults.hard_limit_enabled,
+ "quota_limit": accounts[account].bucket_defaults.quota_limit,
+ }
+ except AttributeError:
+ pass
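+ # Object store users are namespaced as "<account>/<user>", so filter on the
+ # account name prefix.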
+ acc_users = list(
+ blade.get_object_store_users(filter='name="' + acc_name + '/*"').items
+ )
+ for acc_user in range(0, len(acc_users)):
+ user_name = acc_users[acc_user].name.split("/")[1]
+ account_info[acc_name]["users"][user_name] = {"keys": [], "policies": []}
+ if (
+ blade.get_object_store_access_keys(
+ filter='user.name="' + acc_users[acc_user].name + '"'
+ ).total_item_count
+ != 0
+ ):
+ access_keys = list(
+ blade.get_object_store_access_keys(
+ filter='user.name="' + acc_users[acc_user].name + '"'
+ ).items
+ )
+ for key in range(0, len(access_keys)):
+ account_info[acc_name]["users"][user_name]["keys"].append(
+ {
+ "name": access_keys[key].name,
+ "enabled": bool(access_keys[key].enabled),
+ }
+ )
+ if (
+ blade.get_object_store_access_policies_object_store_users(
+ member_names=[acc_users[acc_user].name]
+ ).total_item_count
+ != 0
+ ):
+ policies = list(
+ blade.get_object_store_access_policies_object_store_users(
+ member_names=[acc_users[acc_user].name]
+ ).items
+ )
+ for policy in range(0, len(policies)):
+ account_info[acc_name]["users"][user_name]["policies"].append(
+ policies[policy].policy.name
+ )
+ return account_info
+
+
+def generate_fs_dict(module, blade):
+ api_version = blade.api_version.list_versions().versions
+ if SMB_MODE_API_VERSION in api_version:
+ bladev2 = get_system(module)
+ fsys_v2 = list(bladev2.get_file_systems().items)
+ fs_info = {}
+ fsys = blade.file_systems.list_file_systems()
+ for fsystem in range(0, len(fsys.items)):
+ share = fsys.items[fsystem].name
+ fs_info[share] = {
+ "fast_remove": fsys.items[fsystem].fast_remove_directory_enabled,
+ "snapshot_enabled": fsys.items[fsystem].snapshot_directory_enabled,
+ "provisioned": fsys.items[fsystem].provisioned,
+ "destroyed": fsys.items[fsystem].destroyed,
+ "nfs_rules": fsys.items[fsystem].nfs.rules,
+ "nfs_v3": getattr(fsys.items[fsystem].nfs, "v3_enabled", False),
+ "nfs_v4_1": getattr(fsys.items[fsystem].nfs, "v4_1_enabled", False),
+ "user_quotas": {},
+ "group_quotas": {},
+ }
+ if fsys.items[fsystem].http.enabled:
+ fs_info[share]["http"] = fsys.items[fsystem].http.enabled
+ if fsys.items[fsystem].smb.enabled:
+ fs_info[share]["smb_mode"] = fsys.items[fsystem].smb.acl_mode
+ if MULTIPROTOCOL_API_VERSION in api_version:
+ fs_info[share]["multi_protocol"] = {
+ "safegaurd_acls": fsys.items[fsystem].multi_protocol.safeguard_acls,
+ "access_control_style": fsys.items[
+ fsystem
+ ].multi_protocol.access_control_style,
+ }
+ if HARD_LIMIT_API_VERSION in api_version:
+ fs_info[share]["hard_limit"] = fsys.items[fsystem].hard_limit_enabled
+ if REPLICATION_API_VERSION in api_version:
+ fs_info[share]["promotion_status"] = fsys.items[fsystem].promotion_status
+ fs_info[share]["requested_promotion_state"] = fsys.items[
+ fsystem
+ ].requested_promotion_state
+ fs_info[share]["writable"] = fsys.items[fsystem].writable
+ fs_info[share]["source"] = {
+ "is_local": fsys.items[fsystem].source.is_local,
+ "name": fsys.items[fsystem].source.name,
+ }
+ if SMB_MODE_API_VERSION in api_version:
+ for v2fs in range(0, len(fsys_v2)):
+ if fsys_v2[v2fs].name == share:
+ fs_info[share]["default_group_quota"] = fsys_v2[
+ v2fs
+ ].default_group_quota
+ fs_info[share]["default_user_quota"] = fsys_v2[
+ v2fs
+ ].default_user_quota
+ if NFS_POLICY_API_VERSION in api_version:
+ fs_info[share]["export_policy"] = fsys_v2[
+ v2fs
+ ].nfs.export_policy.name
+ if VSO_VERSION in api_version:
+ for v2fs in range(0, len(fsys_v2)):
+ if fsys_v2[v2fs].name == share:
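+ # Quota queries may fail (for example, if unsupported); tolerate errors and skip.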
+ try:
+ fs_groups = True
+ fs_group_quotas = list(
+ bladev2.get_quotas_groups(file_system_names=[share]).items
+ )
+ except Exception:
+ fs_groups = False
+ try:
+ fs_users = True
+ fs_user_quotas = list(
+ bladev2.get_quotas_users(file_system_names=[share]).items
+ )
+ except Exception:
+ fs_users = False
+ if fs_groups:
+ for group_quota in range(0, len(fs_group_quotas)):
+ group_name = fs_group_quotas[group_quota].name.rsplit("/")[
+ 1
+ ]
+ fs_info[share]["group_quotas"][group_name] = {
+ "group_id": fs_group_quotas[group_quota].group.id,
+ "group_name": fs_group_quotas[group_quota].group.name,
+ "quota": fs_group_quotas[group_quota].quota,
+ "usage": fs_group_quotas[group_quota].usage,
+ }
+ if fs_users:
+ for user_quota in range(0, len(fs_user_quotas)):
+ user_name = fs_user_quotas[user_quota].name.rsplit("/")[1]
+ fs_info[share]["user_quotas"][user_name] = {
+ "user_id": fs_user_quotas[user_quota].user.id,
+ "user_name": fs_user_quotas[user_quota].user.name,
+ "quota": fs_user_quotas[user_quota].quota,
+ "usage": fs_user_quotas[user_quota].usage,
+ }
+
+ return fs_info
+
+
+def generate_drives_dict(blade):
+ """
+ Drives information is only available for the Legend chassis.
+ The Legend chassis product_name has // in it so only bother if
+ that is the case.
+ """
+ drives_info = {}
+ drives = list(blade.get_drives().items)
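+ # Legend-generation systems report a product_type such as "FlashBlade//S200"
+ # (assumed example), hence the "//" substring test below.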
+ if "//" in list(blade.get_arrays().items)[0].product_type:
+ for drive in range(0, len(drives)):
+ name = drives[drive].name
+ drives_info[name] = {
+ "progress": getattr(drives[drive], "progress", None),
+ "raw_capacity": getattr(drives[drive], "raw_capacity", None),
+ "status": getattr(drives[drive], "status", None),
+ "details": getattr(drives[drive], "details", None),
+ }
+ return drives_info
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(gather_subset=dict(default="minimum", type="list", elements="str"))
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+ if not module.params["gather_subset"]:
+ module.params["gather_subset"] = ["minimum"]
+ subset = [test.lower() for test in module.params["gather_subset"]]
+ valid_subsets = (
+ "all",
+ "minimum",
+ "config",
+ "performance",
+ "capacity",
+ "network",
+ "subnets",
+ "lags",
+ "filesystems",
+ "snapshots",
+ "buckets",
+ "arrays",
+ "replication",
+ "policies",
+ "accounts",
+ "admins",
+ "ad",
+ "kerberos",
+ "drives",
+ )
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+ module.fail_json(
+ msg="value must gather_subset must be one or more of: %s, got: %s"
+ % (",".join(valid_subsets), ",".join(subset))
+ )
+
+ info = {}
+
+ api_version = blade.api_version.list_versions().versions
+ if "minimum" in subset or "all" in subset:
+ info["default"] = generate_default_dict(module, blade)
+ if "performance" in subset or "all" in subset:
+ info["performance"] = generate_perf_dict(blade)
+ if "config" in subset or "all" in subset:
+ info["config"] = generate_config_dict(blade)
+ if "capacity" in subset or "all" in subset:
+ info["capacity"] = generate_capacity_dict(blade)
+ if "lags" in subset or "all" in subset:
+ info["lag"] = generate_lag_dict(blade)
+ if "network" in subset or "all" in subset:
+ info["network"] = generate_network_dict(blade)
+ if "subnets" in subset or "all" in subset:
+ info["subnet"] = generate_subnet_dict(blade)
+ if "filesystems" in subset or "all" in subset:
+ info["filesystems"] = generate_fs_dict(module, blade)
+ if "admins" in subset or "all" in subset:
+ info["admins"] = generate_admin_dict(module, blade)
+ if "snapshots" in subset or "all" in subset:
+ info["snapshots"] = generate_snap_dict(blade)
+ if "buckets" in subset or "all" in subset:
+ info["buckets"] = generate_bucket_dict(module, blade)
+ if POLICIES_API_VERSION in api_version:
+ if "policies" in subset or "all" in subset:
+ info["policies"] = generate_policies_dict(blade)
+ info["snapshot_policies"] = generate_policies_dict(blade)
+ if REPLICATION_API_VERSION in api_version:
+ if "arrays" in subset or "all" in subset:
+ info["arrays"] = generate_array_conn_dict(module, blade)
+ if "replication" in subset or "all" in subset:
+ info["file_replication"] = generate_file_repl_dict(blade)
+ info["bucket_replication"] = generate_bucket_repl_dict(module, blade)
+ info["snap_transfers"] = generate_snap_transfer_dict(blade)
+ info["remote_credentials"] = generate_remote_creds_dict(blade)
+ info["targets"] = generate_targets_dict(blade)
+ if MIN_32_API in api_version:
+ # Calls for data only available from Purity//FB 3.2 and higher
+ blade = get_system(module)
+ if "accounts" in subset or "all" in subset:
+ info["accounts"] = generate_object_store_accounts_dict(blade)
+ if "ad" in subset or "all" in subset:
+ info["active_directory"] = generate_ad_dict(blade)
+ if "kerberos" in subset or "all" in subset:
+ info["kerberos"] = generate_kerb_dict(blade)
+ if "policies" in subset or "all" in subset:
+ if SMB_MODE_API_VERSION in api_version:
+ info["access_policies"] = generate_object_store_access_policies_dict(
+ blade
+ )
+ if NFS_POLICY_API_VERSION in api_version:
+ info["export_policies"] = generate_nfs_export_policies_dict(blade)
+ if "drives" in subset or "all" in subset and DRIVES_API_VERSION in api_version:
+ info["drives"] = generate_drives_dict(blade)
+ module.exit_json(changed=False, purefb_info=info)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
new file mode 100644
index 000000000..b17bc3f9e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_inventory
+version_added: '1.0.0'
+short_description: Collect information from Pure Storage FlashBlade
+description:
+ - Collect hardware inventory information from a Pure Storage FlashBlade
+ running the Purity//FB operating system, including chassis, fabric
+ modules, blades, Ethernet ports, fans and power supplies.
+author:
+ - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Collect FlashBlade inventory
+ purestorage.flashblade.purefb_inventory:
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: blade_info
+- name: Show default information
+ debug:
+ msg: "{{ blade_info['purefb_info'] }}"
+
+"""
+
+RETURN = r"""
+purefb_info:
+ description: Returns the inventory information for the FlashBlade
+ returned: always
+ type: complex
+ sample: {
+ "blades": {
+ "CH1.FB1": {
+ "model": "FB-17TB",
+ "serial": "PPCXA1942AFF5",
+ "slot": 1,
+ "status": "healthy"
+ }
+ },
+ "chassis": {
+ "CH1": {
+ "index": 1,
+ "model": null,
+ "serial": "PMPAM163402AE",
+ "slot": null,
+ "status": "healthy"
+ }
+ },
+ "controllers": {},
+ "ethernet": {
+ "CH1.FM1.ETH1": {
+ "model": "624410002",
+ "serial": "APF16360021PRV",
+ "slot": 1,
+ "speed": 40000000000,
+ "status": "healthy"
+ }
+ },
+ "fans": {
+ "CH1.FM1.FAN1": {
+ "slot": 1,
+ "status": "healthy"
+ }
+ },
+ "modules": {
+ "CH1.FM1": {
+ "model": "EFM-110",
+ "serial": "PSUFS1640002C",
+ "slot": 1,
+ "status": "healthy"
+ },
+ "CH1.FM2": {
+ "model": "EFM-110",
+ "serial": "PSUFS1640004A",
+ "slot": 2,
+ "status": "healthy"
+ }
+ },
+ "power": {
+ "CH1.PWR1": {
+ "model": "DS1600SPE-3",
+ "serial": "M0500E00D8AJZ",
+ "slot": 1,
+ "status": "healthy"
+ }
+ },
+ "switch": {}
+ }
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_API_VERSION = "2.1"
+PART_NUMBER_API_VERSION = "2.8"
+
+
+def generate_hardware_dict(module, blade, api_version):
+ hw_info = {
+ "modules": {},
+ "ethernet": {},
+ "mgmt_ports": {},
+ "fans": {},
+ "bays": {},
+ "controllers": {},
+ "blades": {},
+ "chassis": {},
+ "power": {},
+ "switch": {},
+ }
+ blade = get_system(module)
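+ # Each hardware class is fetched with a REST type filter; 'fm' is a fabric
+ # module, 'fb' a blade, 'xfm' an external fabric module (switch), 'ch' a chassis.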
+ components = list(blade.get_hardware(filter="type='fm'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["modules"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "identify": components[component].identify_enabled,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["modules"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='eth'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["ethernet"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "speed": components[component].speed,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["ethernet"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='mgmt_port'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["mgmt_ports"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "speed": components[component].speed,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["mgmt_ports"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='fan'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["fans"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "identify": components[component].identify_enabled,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["fans"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='fb'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["blades"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "temperature": components[component].temperature,
+ "identify": components[component].identify_enabled,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["blades"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='pwr'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["power"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["power"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='xfm'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["switch"][component_name] = {
+ "slot": components[component].slot,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["switch"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='ch'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["chassis"][component_name] = {
+ "slot": components[component].slot,
+ "index": components[component].index,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["chassis"][component_name]["part_number"] = components[
+ component
+ ].part_number
+ components = list(blade.get_hardware(filter="type='bay'").items)
+ for component in range(0, len(components)):
+ component_name = components[component].name
+ hw_info["bays"][component_name] = {
+ "slot": components[component].slot,
+ "index": components[component].index,
+ "status": components[component].status,
+ "serial": components[component].serial,
+ "model": components[component].model,
+ "identify": components[component].identify_enabled,
+ }
+ if PART_NUMBER_API_VERSION in api_version:
+ hw_info["bay"][component_name]["part_number"] = components[
+ component
+ ].part_number
+
+ return hw_info
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+
+ module.exit_json(
+ changed=False, purefb_info=generate_hardware_dict(module, blade, api_version)
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_keytabs.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_keytabs.py
new file mode 100644
index 000000000..7268bc01c
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_keytabs.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: purefb_keytabs
+version_added: '1.6.0'
+short_description: Manage FlashBlade Kerberos Keytabs
+description:
+- Manage Kerberos Keytabs for FlashBlades
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Manage Kerberos Keytabs
+ default: import
+ type: str
+ choices: [ absent, import, export, rotate ]
+ name:
+ description:
+ - Name of the Keytab
+ - Must include prefix and suffix
+ type: str
+ prefix:
+ description:
+ - Only required for I(import) or I(rotate)
+ - Prefix to use for naming the keytab file slots
+ - Specifying a file entry prefix is required because a single keytab file can contain
+ multiple keytab entries in multiple slots.
+ - If not provided for I(import) the current AD Account name will be used.
+ type: str
+ keytab_file:
+ description:
+ - Name of file holding Keytab
+ type: str
+ filetype:
+ description:
+ - Format of the keytab file
+ type: str
+ choices: [ binary, base64 ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Import a binary keytab
+ purestorage.flashblade.purefb_keytabs:
+ state: import
+ prefix: example
+ keytab_file: pure_krb.keytab
+ filetype: binary
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Import a base64 keytab
+ purestorage.flashblade.purefb_keytabs:
+ state: import
+ prefix: example
+ keytab_file: pure_krb.keytab.mime
+ filetype: base64
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Export a keytab
+ purestorage.flashblade.purefb_keytabs:
+ state: export
+ name: example.3
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+ register: download_file
+
+- name: Delete a keytab
+ purestorage.flashblade.purefb_keytabs:
+ state: absent
+ name: example.3
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Rotate current AD account keytabs
+ purestorage.flashblade.purefb_keytabs:
+ state: rotate
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Rotate AD account keytabs by creating new series
+ purestorage.flashblade.purefb_keytabs:
+ state: rotate
+ name: next_prefix
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+download_file:
+ description:
+ - Name of file containing exported keytab
+ returned: When using I(export) option
+ type: str
+ sample: "/tmp/pure_krb8939478070214877726.keytab"
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flashblade import KeytabPost, Reference
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def rotate_keytab(module, blade):
+ """Rotate keytab"""
+ changed = True
+ account = Reference(
+ name=list(blade.get_active_directory().items)[0].name,
+ resource_type="active-directory",
+ )
+ keytab = KeytabPost(source=account)
+ if not module.check_mode:
+ res = blade.post_keytabs(keytab=keytab, name_prefixes=module.params["prefix"])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rotate AD account keytabs, prefix {0}.".format(
+ module.params["prefix"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_keytab(module, blade):
+ """Delete keytab"""
+ changed = False
+ if blade.get_keytabs(names=[module.params["name"]]).status_code == 200:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_keytabs(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete keytab {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def import_keytab(module, blade):
+ """Import keytab"""
+ changed = True
+ if not module.check_mode:
+ if module.params["filetype"] == "binary":
+ readtype = "rb"
+ else:
+ readtype = "r"
+ with open(module.params["keytab_file"], readtype) as keytab_file:
+ keytab_data = keytab_file.read()
+ short_name = module.params["keytab_file"].split("/")[-1]
+ res = blade.post_keytabs_upload(
+ name_prefixes=module.params["prefix"], keytab_file=(short_name, keytab_data)
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to import keytab file {0}. Error: {1}".format(
+ module.params["keytab_file"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def export_keytab(module, blade):
+ """Export keytab"""
+ changed = False
+ download_file = ""
+ if blade.get_keytabs(names=[module.params["name"]]).status_code == 200:
+ changed = True
+ if not module.check_mode:
+ res = blade.get_keytabs_download(keytab_names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to export keytab {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ download_file = list(res.items)[0]
+ module.exit_json(changed=changed, download_file=download_file)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type="str",
+ default="import",
+ choices=["absent", "rotate", "import", "export"],
+ ),
+ name=dict(type="str"),
+ prefix=dict(type="str"),
+ keytab_file=dict(type="str"),
+ filetype=dict(type="str", choices=["binary", "base64"]),
+ )
+ )
+
+ required_if = [["state", "import", ["prefix"]]]
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ if not module.params["prefix"]:
+ module.params["prefix"] = list(blade.get_active_directory().items)[0].name
+
+ if state == "import":
+ import_keytab(module, blade)
+ elif state == "export":
+ export_keytab(module, blade)
+ elif state == "rotate":
+ rotate_keytab(module, blade)
+ elif state == "absent":
+ delete_keytab(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py
new file mode 100644
index 000000000..e5c46e730
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_lag
+version_added: '1.7.0'
+short_description: Manage FlashBlade Link Aggregation Groups
+description:
+- Maintain FlashBlade Link Aggregation Groups
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the Link Aggregation Group
+ type: str
+ default: uplink
+ state:
+ description:
+ - Define whether the LAG should be added or deleted
+ default: present
+ choices: [ absent, present ]
+ type: str
+ ports:
+ description:
+ - Name of network ports assigned to the LAG
+ - Format should be CHx.ETHy, where CHx is the chassis number and
+ ETHy is the ethernet port number.
+ - Matched port pairs from each Fabric Module in the Chassis will
+ be used.
+ - To modify required ports for a LAG specify only the ports required
+ by the LAG. Any ports currently used by the LAG not specified will be
+ disconnected from the LAG.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Add LAG
+ purestorage.flashblade.purefb_lag:
+ name: lag2
+ ports:
+ - ch1.eth2
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Update LAG
+ purestorage.flashblade.purefb_lag:
+ name: lag2
+ ports:
+ - ch1.eth2
+ - ch1.eth4
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Delete LAG
+ purestorage.flashblade.purefb_lag:
+ name: lag2
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+lag:
+ description: A dictionary describing the LAG.
+ type: dict
+ returned: success
+ contains:
+ lag_speed:
+ description: Combined speed of all ports in the LAG in Gb/s
+ type: str
+ port_speed:
+ description: Configured speed of each port in the LAG in Gb/s
+ type: str
+ mac_address:
+ description: Unique MAC address assigned to the LAG
+ type: str
+ status:
+ description: Health status of the LAG.
+ type: str
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def delete_lag(module, blade):
+ """Delete Link Aggregation Group"""
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_link_aggregation_groups(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete LAG {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_lag(module, blade):
+ """Update Link Aggregation Group"""
+ changed = False
+ used_ports = []
+ current_ports = []
+ lagfact = []
+ current_lag = list(
+ blade.get_link_aggregation_groups(names=[module.params["name"]]).items
+ )[0]
+ for port in range(0, len(current_lag.ports)):
+ used_ports.append(current_lag.ports[port].name)
+ for lag_port in range(0, len(module.params["ports"]), 2):
+ if (
+ not (
+ module.params["ports"][lag_port].split(".")[0].upper()
+ + ".FM1."
+ + module.params["ports"][lag_port].split(".")[1].upper()
+ )
+ in used_ports
+ ):
+ current_lags = list(blade.get_link_aggregation_groups().items)
+ for lag in range(0, len(current_lags)):
+ for port in range(0, len(current_lags[lag].ports)):
+ current_ports.append(current_lags[lag].ports[port].name)
+ for current_lag_port in range(0, len(current_ports)):
+ if (
+ module.params["ports"][lag_port].split(".")[0].upper()
+ + ".FM1."
+ + module.params["ports"][lag_port].split(".")[1].upper()
+ ) in current_ports:
+ module.fail_json(
+ msg="Selected port {0} is currently in use by another LAG.".format(
+ module.params["ports"][lag_port].upper()
+ )
+ )
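+ # Each requested port (e.g. "ch1.eth2", a hypothetical value) expands to its
+ # matched pair on both Fabric Modules: CH1.FM1.ETH2 and CH1.FM2.ETH2.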
+ new_ports = []
+ for port in range(0, len(module.params["ports"])):
+ new_ports.append(
+ module.params["ports"][port].split(".")[0].upper()
+ + ".FM1."
+ + module.params["ports"][port].split(".")[1].upper()
+ )
+ new_ports.append(
+ module.params["ports"][port].split(".")[0].upper()
+ + ".FM2."
+ + module.params["ports"][port].split(".")[1].upper()
+ )
+ ports = []
+ for final_port in range(0, len(new_ports)):
+ ports.append(flashblade.FixedReference(name=new_ports[final_port]))
+ link_aggregation_group = flashblade.LinkAggregationGroup(ports=ports)
+ if sorted(used_ports) != sorted(new_ports):
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_link_aggregation_groups(
+ names=[module.params["name"]],
+ link_aggregation_group=link_aggregation_group,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update LAG {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ response = list(res.items)[0]
+ lagfact = {
+ "mac_address": response.mac_address,
+ "port_speed": str(response.port_speed / 1000000000) + "Gb/s",
+ "lag_speed": str(response.lag_speed / 1000000000) + "Gb/s",
+ "status": response.status,
+ }
+ module.exit_json(changed=changed, lag=lagfact)
+
+
+def create_lag(module, blade):
+ """Create Link Aggregation Group"""
+ changed = True
+ used_ports = []
+ lagfact = []
+ current_lags = list(blade.get_link_aggregation_groups().items)
+ for lag in range(0, len(current_lags)):
+ for port in range(0, len(current_lags[lag].ports)):
+ used_ports.append(current_lags[lag].ports[port].name)
+ for lag_port in range(0, len(module.params["ports"])):
+ if (
+ module.params["ports"][lag_port].split(".")[0].upper()
+ + ".FM1."
+ + module.params["ports"][0].split(".")[1].upper()
+ ) in used_ports:
+ module.fail_json(
+ msg="Selected port {0} is currently in use by another LAG.".format(
+ module.params["ports"][lag_port].upper()
+ )
+ )
+ new_ports = []
+ for new_port in range(0, len(module.params["ports"])):
+ new_ports.append(
+ module.params["ports"][new_port].split(".")[0].upper()
+ + ".FM1."
+ + module.params["ports"][new_port].split(".")[1].upper()
+ )
+ new_ports.append(
+ module.params["ports"][new_port].split(".")[0].upper()
+ + ".FM2."
+ + module.params["ports"][new_port].split(".")[1].upper()
+ )
+ ports = []
+ for final_port in range(0, len(new_ports)):
+ ports.append(flashblade.FixedReference(name=new_ports[final_port]))
+ link_aggregation_group = flashblade.LinkAggregationGroup(ports=ports)
+ if not module.check_mode:
+ res = blade.post_link_aggregation_groups(
+ names=[module.params["name"]], link_aggregation_group=link_aggregation_group
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create LAG {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ response = list(res.items)[0]
+ lagfact = {
+ "mac_address": response.mac_address,
+ "port_speed": str(response.port_speed / 1000000000) + "Gb/s",
+ "lag_speed": str(response.lag_speed / 1000000000) + "Gb/s",
+ "status": response.status,
+ }
+ module.exit_json(changed=changed, lag=lagfact)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str", default="uplink"),
+ ports=dict(type="list", elements="str"),
+ )
+ )
+
+ required_if = [["state", "present", ["ports"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ state = module.params["state"]
+
+ exists = bool(
+ blade.get_link_aggregation_groups(names=[module.params["name"]]).status_code
+ == 200
+ )
+ if module.params["ports"]:
+ # Remove duplicates
+ module.params["ports"] = list(dict.fromkeys(module.params["ports"]))
+ if not exists and state == "present":
+ create_lag(module, blade)
+ elif exists and state == "present":
+ update_lag(module, blade)
+ elif exists and state == "absent":
+ if module.params["name"].lower() == "uplink":
+ module.fail_json(
+ msg="Preconfigured Link Aggregation Group cannot be deleted"
+ )
+ else:
+ delete_lag(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py
new file mode 100644
index 000000000..0403aedcb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lifecycle.py
@@ -0,0 +1,490 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_lifecycle
+version_added: '1.4.0'
+short_description: Manage FlashBlade object lifecycles
+description:
+- Manage lifecycles for object buckets
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete lifecycle rule
+ default: present
+ type: str
+ choices: [ absent, present ]
+ bucket:
+ description:
+ - Bucket the lifecycle rule applies to
+ type: str
+ required: true
+ name:
+ description:
+ - Name of the lifecycle rule
+ type: str
+ required: true
+ enabled:
+ description:
+ - State of lifecycle rule
+ type: bool
+ default: true
+ keep_previous_for:
+ aliases: [ keep_for ]
+ description:
+ - Time after which previous versions will be marked expired.
+ - Enter as days (d) or weeks (w). Range is 1 - 2147483647 days.
+ type: str
+ keep_current_for:
+ description:
+ - Time after which current versions will be marked expired.
+ - Enter as days (d) or weeks (w). Range is 1 - 2147483647 days.
+ version_added: "1.8.0"
+ type: str
+ keep_current_until:
+ description:
+ - Date after which current versions will be marked expired.
+ - Enter as date in form YYYY-MM-DD.
+ - B(Note:) setting a date in the past will delete ALL objects with
+ the value of I(prefix) as they are created.
+ version_added: "1.8.0"
+ type: str
+ abort_uploads_after:
+ description:
+ - Duration of time after which incomplete multipart uploads will be aborted.
+ - Enter as days (d) or weeks (w). Range is 1 - 2147483647 days.
+ version_added: "1.8.0"
+ type: str
+ prefix:
+ description:
+ - Object key prefix identifying one or more objects in the bucket
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create a lifecycle rule called bar for bucket foo (pre-Purity//FB 3.2.3)
+ purestorage.flashblade.purefb_lifecycle:
+ name: bar
+ bucket: foo
+ keep_previous_for: 2d
+ prefix: test
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a lifecycle rule called bar for bucket foo (post-Purity//FB 3.2.3)
+ purestorage.flashblade.purefb_lifecycle:
+ name: bar
+ bucket: foo
+ keep_previous_for: 2d
+ keep_current_for: 1w
+ abort_uploads_after: 1d
+ keep_current_until: 2020-11-23
+ prefix: test
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Modify a lifecycle rule (post-Purity//FB 3.2.3)
+ purestorage.flashblade.purefb_lifecycle:
+ name: bar
+ bucket: foo
+ keep_previous_for: 10d
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete lifecycle rule foo from bucket foo
+ purestorage.flashblade.purefb_lifecycle:
+ name: foo
+ bucket: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import LifecycleRulePost, LifecycleRulePatch, Reference
+except ImportError:
+ HAS_PURITYFB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+from datetime import datetime
+
+
+MIN_REQUIRED_API_VERSION = "1.10"
+LIFECYCLE_API_VERSION = "2.1"
+
+
+def _get_bucket(module, blade):
+ s3bucket = None
+ buckets = blade.buckets.list_buckets()
+ for bucket in range(0, len(buckets.items)):
+ if buckets.items[bucket].name == module.params["bucket"]:
+ s3bucket = buckets.items[bucket]
+ return s3bucket
+
+
+def _convert_date_to_epoch(module):
+ try:
+ unix_date = datetime.strptime(module.params["keep_current_until"], "%Y-%m-%d")
+ except ValueError:
+ module.fail_json(msg="Incorrect data format, should be YYYY-MM-DD")
+ if unix_date < datetime.utcnow():
+ module.warn(
+ "This value of `keep_current_until` will permanently delete objects "
+ "as they are created. Using this date is not recommended"
+ )
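+ # e.g. "2020-11-23" -> 1606089600000 (milliseconds since the Unix epoch,
+ # treating the date as midnight UTC)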
+ epoch_milliseconds = int((unix_date - datetime(1970, 1, 1)).total_seconds() * 1000)
+ return epoch_milliseconds
+
+
+def _convert_to_millisecs(day):
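+ # "2d" -> 2 * 86400000 = 172800000 ms; "1w" -> 7 * 86400000 = 604800000 ms;
+ # any other format falls through to 0.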
+ try:
+ if day[-1:].lower() == "w":
+ return int(day[:-1]) * 7 * 86400000
+ elif day[-1:].lower() == "d":
+ return int(day[:-1]) * 86400000
+ except Exception:
+ return 0
+ return 0
+
+
+def _findstr(text, match):
+ found = None
+ for line in text.splitlines():
+ if match in line:
+ found = line
+ return found
+
+
+def delete_rule(module, blade):
+ """Delete lifecycle rule"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.lifecycle_rules.delete_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete lifecycle rule {0} for bucket {1}.".format(
+ module.params["name"], module.params["bucket"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_rule(module, blade, bladev2=None):
+ """Create lifecycle policy"""
+ changed = True
+ if bladev2:
+ if (
+ not module.params["keep_previous_for"]
+ and not module.params["keep_current_until"]
+ and not module.params["keep_current_for"]
+ ):
+ module.fail_json(
+ msg="At least one `keep...` parameter is required to create a new lifecycle rule"
+ )
+
+ else:
+ if not module.params["keep_previous_for"] and not bladev2:
+ module.fail_json(
+ msg="'keep_previous_for' is required to create a new lifecycle rule"
+ )
+ if not module.check_mode:
+ if not bladev2:
+ try:
+ attr = LifecycleRulePost(
+ bucket=Reference(name=module.params["bucket"]),
+ rule_id=module.params["name"],
+ keep_previous_version_for=_convert_to_millisecs(
+ module.params["keep_previous_for"]
+ ),
+ prefix=module.params["prefix"],
+ )
+ blade.lifecycle_rules.create_lifecycle_rules(
+ rule=attr, confirm_date=True
+ )
+ if not module.params["enabled"]:
+ attr = LifecycleRulePatch()
+ attr.enabled = False
+ blade.lifecycle_rules.update_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]],
+ rule=attr,
+ confirm_date=True,
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create lifecycle rule {0} for bucket {1}.".format(
+ module.params["name"], module.params["bucket"]
+ )
+ )
+ else:
+ attr = flashblade.LifecycleRulePost(
+ bucket=flashblade.Reference(name=module.params["bucket"]),
+ rule_id=module.params["name"],
+ keep_previous_version_for=_convert_to_millisecs(
+ module.params["keep_previous_for"]
+ ),
+ keep_current_version_until=module.params["keep_current_until"],
+ keep_current_version_for=_convert_to_millisecs(
+ module.params["keep_current_for"]
+ ),
+ abort_incomplete_multipart_uploads_after=_convert_to_millisecs(
+ module.params["abort_uploads_after"]
+ ),
+ prefix=module.params["prefix"],
+ )
+ if attr.keep_current_version_until:
+ res = bladev2.post_lifecycle_rules(rule=attr, confirm_date=True)
+ else:
+ res = bladev2.post_lifecycle_rules(rule=attr)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create lifecycle rule {0} for bucket {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["bucket"],
+ res.errors[0].message,
+ )
+ )
+ if not module.params["enabled"]:
+ attr = flashblade.LifecycleRulePatch(enabled=module.params["enabled"])
+ res = bladev2.patch_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]],
+ lifecycle=attr,
+ )
+ if res.status_code != 200:
+ module.warn(
+ "Lifecycle Rule {0} did not enable correctly. "
+ "Please chack your FlashBlade".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def update_rule(module, blade, rule, bladev2=None):
+    """Update lifecycle rule"""
+ changed = False
+ if not bladev2:
+ current_rule = {
+ "prefix": rule.prefix,
+ "keep_previous_version_for": rule.keep_previous_version_for,
+ "enabled": rule.enabled,
+ }
+ else:
+ current_rule = {
+ "prefix": rule.prefix,
+ "abort_incomplete_multipart_uploads_after": rule.abort_incomplete_multipart_uploads_after,
+ "keep_current_version_for": rule.keep_current_version_for,
+ "keep_previous_version_for": rule.keep_previous_version_for,
+ "keep_current_version_until": rule.keep_current_version_until,
+ "enabled": rule.enabled,
+ }
+ if not module.params["prefix"]:
+ prefix = current_rule["prefix"]
+ else:
+ prefix = module.params["prefix"]
+ if not module.params["keep_previous_for"]:
+ keep_previous_for = current_rule["keep_previous_version_for"]
+ else:
+ keep_previous_for = _convert_to_millisecs(module.params["keep_previous_for"])
+ if bladev2:
+ if not module.params["keep_current_for"]:
+ keep_current_for = current_rule["keep_current_version_for"]
+ else:
+ keep_current_for = _convert_to_millisecs(module.params["keep_current_for"])
+ if not module.params["abort_uploads_after"]:
+ abort_uploads_after = current_rule[
+ "abort_incomplete_multipart_uploads_after"
+ ]
+ else:
+ abort_uploads_after = _convert_to_millisecs(
+ module.params["abort_uploads_after"]
+ )
+ if not module.params["keep_current_until"]:
+ keep_current_until = current_rule["keep_current_version_until"]
+ else:
+ keep_current_until = module.params["keep_current_until"]
+ new_rule = {
+ "prefix": prefix,
+ "abort_incomplete_multipart_uploads_after": abort_uploads_after,
+ "keep_current_version_for": keep_current_for,
+ "keep_previous_version_for": keep_previous_for,
+ "keep_current_version_until": keep_current_until,
+ "enabled": module.params["enabled"],
+ }
+ else:
+ new_rule = {
+ "prefix": prefix,
+ "keep_previous_version_for": keep_previous_for,
+ "enabled": module.params["enabled"],
+ }
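+    # The desired state above is built by overlaying any supplied module
+    # parameters on the rule's current values; a plain dict comparison then
+    # decides whether a PATCH call is needed at all.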
+ if current_rule != new_rule:
+ changed = True
+ if not module.check_mode:
+ if not bladev2:
+ try:
+ attr = LifecycleRulePatch(
+ keep_previous_version_for=new_rule["keep_previous_version_for"],
+ prefix=new_rule["prefix"],
+ )
+ attr.enabled = module.params["enabled"]
+ blade.lifecycle_rules.update_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]],
+ rule=attr,
+ confirm_date=True,
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update lifecycle rule {0} for bucket {1}.".format(
+ module.params["name"], module.params["bucket"]
+ )
+ )
+ else:
+ attr = flashblade.LifecycleRulePatch(
+ keep_previous_version_for=new_rule["keep_previous_version_for"],
+ keep_current_version_for=new_rule["keep_current_version_for"],
+ keep_current_version_until=new_rule["keep_current_version_until"],
+ abort_incomplete_multipart_uploads_after=new_rule[
+ "abort_incomplete_multipart_uploads_after"
+ ],
+ prefix=new_rule["prefix"],
+ enabled=new_rule["enabled"],
+ )
+ if attr.keep_current_version_until:
+ res = bladev2.patch_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]],
+ lifecycle=attr,
+ confirm_date=True,
+ )
+ else:
+ res = bladev2.patch_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]],
+ lifecycle=attr,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update lifecycle rule {0} for bucket {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["bucket"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ enabled=dict(type="bool", default=True),
+ bucket=dict(type="str", required=True),
+ name=dict(type="str", required=True),
+ prefix=dict(
+ type="str",
+ ),
+ keep_previous_for=dict(type="str", aliases=["keep_for"]),
+ keep_current_for=dict(type="str"),
+ keep_current_until=dict(type="str"),
+ abort_uploads_after=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURITYFB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ bladev2 = get_system(module)
+ versions = blade.api_version.list_versions().versions
+
+ if module.params["keep_previous_for"] and not module.params["keep_previous_for"][
+ -1:
+ ].lower() in ["w", "d"]:
+ module.fail_json(
+ msg="'keep_previous_for' format incorrect - specify as 'd' or 'w'"
+ )
+ if module.params["keep_current_for"] and not module.params["keep_current_for"][
+ -1:
+ ].lower() in ["w", "d"]:
+ module.fail_json(
+ msg="'keep_current_for' format incorrect - specify as 'd' or 'w'"
+ )
+ if module.params["abort_uploads_after"] and not module.params[
+ "abort_uploads_after"
+ ][-1:].lower() in ["w", "d"]:
+ module.fail_json(
+ msg="'abort_uploads_after' format incorrect - specify as 'd' or 'w'"
+ )
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ if not _get_bucket(module, blade):
+ module.fail_json(
+ msg="Specified bucket {0} does not exist".format(module.params["bucket"])
+ )
+
+ try:
+ if LIFECYCLE_API_VERSION not in versions:
+ rule = blade.lifecycle_rules.list_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]]
+ ).items[0]
+ else:
+ if module.params["keep_current_until"]:
+ module.params["keep_current_until"] = _convert_date_to_epoch(module)
+ rule = list(
+ bladev2.get_lifecycle_rules(
+ names=[module.params["bucket"] + "/" + module.params["name"]]
+ ).items
+ )[0]
+ except Exception:
+ rule = None
+
+ if rule and state == "present":
+ update_rule(module, blade, rule, bladev2)
+ elif state == "present" and not rule:
+ create_rule(module, blade, bladev2)
+ elif state == "absent" and rule:
+ delete_rule(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_messages.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_messages.py
new file mode 100644
index 000000000..bbfe57f95
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_messages.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_messages
+version_added: '1.10.0'
+short_description: List FlashBlade Alert Messages
+description:
+- List Alert messages based on filters provided
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ severity:
+ description:
+    - Severity of the alerts to show
+ type: list
+ elements: str
+ choices: [ all, critical, warning, info ]
+ default: [ all ]
+ state:
+ description:
+ - State of alerts to show
+ default: open
+ choices: [ all, open, closed ]
+ type: str
+ flagged:
+ description:
+ - Show alerts that have been acknowledged or not
+ default: false
+ type: bool
+ history:
+ description:
+ - Historical time period to show alerts for, from present time
+    - Allowed time periods are hour (h), day (d), week (w) and year (y)
+ type: str
+ default: 1w
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Show critical alerts from past 4 weeks that haven't been acknowledged
+ purefb_messages:
+ history: 4w
+    flagged: false
+ severity:
+ - critical
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
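+
+# A further illustrative filter combination (URL and token as above)
+- name: Show all open alerts of any severity from the last day
+  purefb_messages:
+    history: 1d
+    severity:
+      - all
+    fb_url: 10.10.10.2
+    api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3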
+"""
+
+RETURN = r"""
+"""
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+ALLOWED_PERIODS = ["h", "d", "w", "y"]
+# Time periods in milliseconds
+HOUR = 3600000
+DAY = HOUR * 24
+WEEK = DAY * 7
+YEAR = WEEK * 52
+
+
+def _create_time_window(window):
+ period = window[-1].lower()
+ multiple = int(window[0:-1])
+ if period == "h":
+ return HOUR * multiple
+ if period == "d":
+ return DAY * multiple
+ if period == "w":
+ return WEEK * multiple
+ if period == "y":
+ return YEAR * multiple
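+# For example, a history window of "4w" resolves to 4 * WEEK = 2419200000 ms,
+# which main() subtracts from the current epoch-millisecond time to build the
+# "notified>" filter clause.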
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="open", choices=["all", "open", "closed"]),
+ history=dict(type="str", default="1w"),
+ flagged=dict(type="bool", default=False),
+ severity=dict(
+ type="list",
+ elements="str",
+ default=["all"],
+ choices=["all", "critical", "warning", "info"],
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ time_now = int(time.time() * 1000)
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ if module.params["history"][-1].lower() not in ALLOWED_PERIODS:
+        module.fail_json(msg="historical window value is not an allowed time period")
+ since_time = str(time_now - _create_time_window(module.params["history"].lower()))
+ if module.params["flagged"]:
+ flagged = " and flagged='True'"
+ else:
+ flagged = " and flagged='False'"
+
+ multi_sev = False
+ if len(module.params["severity"]) > 1:
+ if "all" in module.params["severity"]:
+ module.params["severity"] = ["*"]
+ else:
+ multi_sev = True
+ if multi_sev:
+ severity = " and ("
+ for level in range(0, len(module.params["severity"])):
+ severity += "severity='" + str(module.params["severity"][level]) + "' or "
+ severity = severity[0:-4] + ")"
+ else:
+ if module.params["severity"] == ["all"]:
+ severity = " and severity='*'"
+ else:
+ severity = " and severity='" + str(module.params["severity"][0]) + "'"
+ messages = {}
+ if module.params["state"] == "all":
+ state = " and state='*'"
+ else:
+ state = " and state='" + module.params["state"] + "'"
+ filter_string = "notified>" + since_time + state + flagged + severity
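+    # A sample filter produced here (values purely illustrative) looks like:
+    # "notified>1690000000000 and state='open' and flagged='False'
+    # and (severity='critical' or severity='warning')"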
+    res = blade.get_alerts(filter=filter_string)
+    if res.status_code != 200:
+        module.fail_json(
+            msg="Failed to get alert messages. Error: {0}".format(res.errors[0].message)
+        )
+    alerts = list(res.items)
+ for message in range(0, len(alerts)):
+ name = alerts[message].name
+ messages[name] = {
+ "summary": alerts[message].summary,
+ "component_type": alerts[message].component_type,
+ "component_name": alerts[message].component_name,
+ "description": alerts[message].description,
+ "code": alerts[message].code,
+ "severity": alerts[message].severity,
+ "state": alerts[message].state,
+ "flagged": alerts[message].flagged,
+ "created": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(alerts[message].created / 1000),
+ )
+ + " UTC",
+ "notified": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(alerts[message].notified / 1000),
+ )
+ + " UTC",
+ "updated": time.strftime(
+ "%Y-%m-%d %H:%M:%S",
+ time.gmtime(alerts[message].updated / 1000),
+ )
+ + " UTC",
+ }
+ module.exit_json(changed=False, purefb_messages=messages)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py
new file mode 100644
index 000000000..27693e32c
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_network
+version_added: "1.0.0"
+short_description: Manage network interfaces in a Pure Storage FlashBlade
+description:
+ - This module manages network interfaces on Pure Storage FlashBlade.
+ - When creating a network interface a subnet must already exist with
+ a network prefix that covers the IP address of the interface being
+ created.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Interface Name.
+ required: true
+ type: str
+ state:
+ description:
+      - Create, delete or modify a network interface.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ address:
+ description:
+ - IP address of interface.
+ required: false
+ type: str
+ services:
+ description:
+      - Define which services are configured for the interface.
+ required: false
+ choices: [ "data", "replication" ]
+ default: data
+ type: str
+ itype:
+ description:
+ - Type of interface.
+ required: false
+ choices: [ "vip" ]
+ default: vip
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new network interface named foo
+ purestorage.flashblade.purefb_network:
+ name: foo
+ address: 10.21.200.23
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Change IP address of network interface named foo
+ purestorage.flashblade.purefb_network:
+ name: foo
+ state: present
+ address: 10.21.200.123
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete network interface named foo
+ purestorage.flashblade.purefb_network:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import NetworkInterface
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MINIMUM_API_VERSION = "1.3"
+
+
+def get_iface(module, blade):
+    """Return Network Interface or None"""
+ iface = []
+ iface.append(module.params["name"])
+ try:
+ res = blade.network_interfaces.list_network_interfaces(names=iface)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_iface(module, blade):
+ """Create Network Interface"""
+ changed = True
+ if not module.check_mode:
+ iface = []
+ services = []
+ iface.append(module.params["name"])
+ services.append(module.params["services"])
+ try:
+ blade.network_interfaces.create_network_interfaces(
+ names=iface,
+ network_interface=NetworkInterface(
+ address=module.params["address"],
+ services=services,
+ type=module.params["itype"],
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Interface creation failed. Check subnet exists for {0}".format(
+ module.params["address"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
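+# Note: only the IP address is reconciled below; this module does not attempt
+# to change 'services' or 'itype' on an existing interface.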
+def modify_iface(module, blade):
+ """Modify Network Interface IP address"""
+ changed = False
+ iface = get_iface(module, blade)
+ iface_new = []
+ iface_new.append(module.params["name"])
+ if module.params["address"] != iface.address:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.network_interfaces.update_network_interfaces(
+ names=iface_new,
+ network_interface=NetworkInterface(
+ address=module.params["address"]
+ ),
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to modify Interface {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_iface(module, blade):
+ """Delete Network Interface"""
+ changed = True
+ if not module.check_mode:
+ iface = []
+ iface.append(module.params["name"])
+ try:
+ blade.network_interfaces.delete_network_interfaces(names=iface)
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete network {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=["present", "absent"]),
+ address=dict(),
+ services=dict(default="data", choices=["data", "replication"]),
+ itype=dict(default="vip", choices=["vip"]),
+ )
+ )
+
+ required_if = [["state", "present", ["address"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MINIMUM_API_VERSION not in api_version:
+ module.fail_json(msg="Upgrade Purity//FB to enable this module")
+ iface = get_iface(module, blade)
+
+ if state == "present" and not iface:
+ create_iface(module, blade)
+ elif state == "present" and iface:
+ modify_iface(module, blade)
+ elif state == "absent" and iface:
+ delete_iface(module, blade)
+ elif state == "absent" and not iface:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py
new file mode 100644
index 000000000..7b18442c9
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ntp.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_ntp
+version_added: '1.0.0'
+short_description: Configure Pure Storage FlashBlade NTP settings
+description:
+- Set or erase NTP configuration for Pure Storage FlashBlades.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete NTP servers configuration
+ type: str
+ default: present
+ choices: [ absent, present ]
+ ntp_servers:
+ type: list
+ elements: str
+ description:
+ - A list of up to 4 alternate NTP servers. These may include IPv4,
+ IPv6 or FQDNs. Invalid IP addresses will cause the module to fail.
+ No validation is performed for FQDNs.
+ - If more than 4 servers are provided, only the first 4 unique
+ nameservers will be used.
+    - If no servers are given, a default of I(0.pool.ntp.org) will be used.
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing NTP server entries
+ purestorage.flashblade.purefb_ntp:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set array NTP servers
+ purestorage.flashblade.purefb_ntp:
+ state: present
+ ntp_servers:
+ - "0.pool.ntp.org"
+ - "1.pool.ntp.org"
+ - "2.pool.ntp.org"
+ - "3.pool.ntp.org"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PureArray
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.3"
+
+
+def remove(duplicate):
+ final_list = []
+ for num in duplicate:
+ if num not in final_list:
+ final_list.append(num)
+ return final_list
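+# e.g. remove(["0.pool.ntp.org", "1.pool.ntp.org", "0.pool.ntp.org"]) returns
+# ["0.pool.ntp.org", "1.pool.ntp.org"], deduplicating while keeping first-seen
+# order, so the first four unique servers are the ones that get applied.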
+
+
+def delete_ntp(module, blade):
+ """Delete NTP Servers"""
+ changed = True
+ if not module.check_mode:
+ if blade.arrays.list_arrays().items[0].ntp_servers != []:
+ try:
+ blade_settings = PureArray(ntp_servers=[])
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(msg="Deletion of NTP servers failed")
+ module.exit_json(changed=changed)
+
+
+def create_ntp(module, blade):
+ """Set NTP Servers"""
+ changed = True
+ if not module.check_mode:
+ if not module.params["ntp_servers"]:
+ module.params["ntp_servers"] = ["0.pool.ntp.org"]
+ try:
+ blade_settings = PureArray(ntp_servers=module.params["ntp_servers"][0:4])
+ blade.arrays.update_arrays(array_settings=blade_settings)
+ except Exception:
+ module.fail_json(msg="Update of NTP servers failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ ntp_servers=dict(type="list", elements="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ required_if = [["state", "present", ["ntp_servers"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
+
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if module.params["state"] == "absent":
+ delete_ntp(module, blade)
+ else:
+ module.params["ntp_servers"] = remove(module.params["ntp_servers"])
+ if sorted(blade.arrays.list_arrays().items[0].ntp_servers) != sorted(
+ module.params["ntp_servers"][0:4]
+ ):
+ create_ntp(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py
new file mode 100644
index 000000000..20b99e8a0
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_phonehome.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_phonehome
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashBlade Phone Home
+description:
+- Enable or Disable Remote Phone Home for a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of phone home
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Enable Remote Phone Home
+ purestorage.flashblade.purefb_phonehome:
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Disable Remote Phone Home
+ purestorage.flashblade.purefb_phonehome:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Support
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
+def enable_ph(module, blade):
+    """Enable Phone Home"""
+ changed = True
+ if not module.check_mode:
+ ph_settings = Support(phonehome_enabled=True)
+ try:
+ blade.support.update_support(support=ph_settings)
+ except Exception:
+ module.fail_json(msg="Enabling Phone Home failed")
+ module.exit_json(changed=changed)
+
+
+def disable_ph(module, blade):
+ """Disable Phone Home"""
+ changed = True
+ if not module.check_mode:
+ ph_settings = Support(phonehome_enabled=False)
+ try:
+ blade.support.update_support(support=ph_settings)
+ except Exception:
+ module.fail_json(msg="Disabling Phone Home failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    if not HAS_PURITY_FB:
+        module.fail_json(msg="purity_fb SDK is required for this module")
+
+    blade = get_blade(module)
+    api_version = blade.api_version.list_versions().versions
+    if MIN_REQUIRED_API_VERSION not in api_version:
+        module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if (
+ module.params["state"] == "present"
+ and not blade.support.list_support().items[0].phonehome_enabled
+ ):
+ enable_ph(module, blade)
+ elif (
+ module.params["state"] == "absent"
+ and blade.support.list_support().items[0].phonehome_enabled
+ ):
+ disable_ph(module, blade)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_pingtrace.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_pingtrace.py
new file mode 100644
index 000000000..e9f20a158
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_pingtrace.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_pingtrace
+version_added: '1.11.0'
+short_description: Employ the internal FlashBlade ping and trace mechanisms
+description:
+- Ping or trace a destination
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ action:
+ description:
+ - Which action is required, ping or trace
+ type: str
+ choices: [ ping, trace ]
+ default: ping
+ count:
+ description:
+ - Used by ping to specify the number of packets to send
+ type: int
+ default: 1
+ resolve:
+ description:
+ - Specify whether or not to map IP addresses to host names
+ type: bool
+    default: true
+ latency:
+ description:
+ - Specify whether or not to print the full user-to-user latency
+ type: bool
+    default: false
+ packet_size:
+ description:
+ - Used by ping to specify the number of data bytes to send per packet
+ type: int
+ default: 56
+ destination:
+ description:
+    - IP address or hostname used to run ping or trace against.
+ type: str
+ required: true
+ method:
+ description:
+ - Used by trace to specify the method to use for operations
+ type: str
+ choices: [ icmp, tcp, udp ]
+ default: udp
+ fragment:
+ description:
+ - Used by trace to specify whether or not to fragment packets
+ type: bool
+ default: true
+ discover_mtu:
+ description:
+ - Used by trace to specify whether or not to discover the MTU
+ along the path being traced
+ type: bool
+ default: false
+ port:
+ description:
+ - Used by trace to specify a destination port
+ type: str
+ source:
+ description:
+ - IP address or hostname used by ping and trace to specify where
+ to start to run the specified operation
+ - If not specified will use all available sources
+ type: str
+ component:
+ description:
+ - Used by ping and trace to specify where to run the operation.
+ - Valid values are controllers and blades from hardware list.
+ - If not specified defaults to all available controllers and selected blades
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: ping Google DNS server
+ purestorage.flashblade.purefb_pingtrace:
+ destination: 8.8.8.8
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: trace to Google DNS server from CH1.FM0
+ purestorage.flashblade.purefb_pingtrace:
+ action: trace
+ destination: 8.8.8.8
+    fragment: true
+ source: CH1.FM0
+ discover_mtu: true
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.6"
+
+
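+# The ping and trace helpers below issue the same SDK call in four variants,
+# simply omitting the optional 'source' and/or 'component' keyword arguments
+# when the corresponding module parameters were not supplied.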
+def run_ping(module, blade):
+ """Run network ping"""
+ ping_fact = {}
+ if module.params["source"] and module.params["component"]:
+ res = blade.get_network_interfaces_ping(
+ destination=module.params["destination"],
+ component=module.params["component"],
+ source=module.params["source"],
+ packet_size=module.params["packet_size"],
+ count=module.params["count"],
+ print_latency=module.params["latency"],
+ resolve_hostname=module.params["resolve"],
+ )
+ elif module.params["source"] and not module.params["component"]:
+ res = blade.get_network_interfaces_ping(
+ destination=module.params["destination"],
+ source=module.params["source"],
+ packet_size=module.params["packet_size"],
+ count=module.params["count"],
+ print_latency=module.params["latency"],
+ resolve_hostname=module.params["resolve"],
+ )
+ elif not module.params["source"] and module.params["component"]:
+ res = blade.get_network_interfaces_ping(
+ destination=module.params["destination"],
+ component=module.params["component"],
+ packet_size=module.params["packet_size"],
+ count=module.params["count"],
+ print_latency=module.params["latency"],
+ resolve_hostname=module.params["resolve"],
+ )
+ else:
+ res = blade.get_network_interfaces_ping(
+ destination=module.params["destination"],
+ packet_size=module.params["packet_size"],
+ count=module.params["count"],
+ print_latency=module.params["latency"],
+ resolve_hostname=module.params["resolve"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to run ping. Error: {0}".format(res.errors[0].message)
+ )
+ else:
+ responses = list(res.items)
+ for resp in range(0, len(responses)):
+ comp_name = responses[resp].component_name.replace(".", "_")
+ ping_fact[comp_name] = {
+ "details": responses[resp].details,
+ }
+
+ module.exit_json(changed=False, pingfact=ping_fact)
+
+
+def run_trace(module, blade):
+ """Run network trace"""
+ trace_fact = {}
+ if module.params["source"] and module.params["component"]:
+ res = blade.get_network_interfaces_trace(
+ port=module.params["port"],
+ destination=module.params["destination"],
+ component=module.params["component"],
+ discover_mtu=module.params["discover_mtu"],
+ source=module.params["source"],
+ fragment_packet=module.params["fragment"],
+ method=module.params["method"],
+ resolve_hostname=module.params["resolve"],
+ )
+ elif module.params["source"] and not module.params["component"]:
+ res = blade.get_network_interfaces_trace(
+ port=module.params["port"],
+ destination=module.params["destination"],
+ discover_mtu=module.params["discover_mtu"],
+ source=module.params["source"],
+ fragment_packet=module.params["fragment"],
+ method=module.params["method"],
+ resolve_hostname=module.params["resolve"],
+ )
+ elif not module.params["source"] and module.params["component"]:
+ res = blade.get_network_interfaces_trace(
+ port=module.params["port"],
+ destination=module.params["destination"],
+ discover_mtu=module.params["discover_mtu"],
+ component=module.params["component"],
+ fragment_packet=module.params["fragment"],
+ method=module.params["method"],
+ resolve_hostname=module.params["resolve"],
+ )
+ else:
+ res = blade.get_network_interfaces_trace(
+ port=module.params["port"],
+ destination=module.params["destination"],
+ discover_mtu=module.params["discover_mtu"],
+ fragment_packet=module.params["fragment"],
+ method=module.params["method"],
+ resolve_hostname=module.params["resolve"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to run trace. Error: {0}".format(res.errors[0].message)
+ )
+ else:
+ responses = list(res.items)
+ for resp in range(0, len(responses)):
+ comp_name = responses[resp].component_name.replace(".", "_")
+ trace_fact[comp_name] = {
+ "details": responses[resp].details,
+ }
+
+ module.exit_json(changed=False, tracefact=trace_fact)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ action=dict(type="str", choices=["ping", "trace"], default="ping"),
+ method=dict(type="str", choices=["icmp", "tcp", "udp"], default="udp"),
+ destination=dict(type="str", required=True),
+ source=dict(type="str"),
+ component=dict(type="str"),
+ port=dict(type="str"),
+ count=dict(type="int", default=1),
+ packet_size=dict(type="int", default=56),
+ resolve=dict(type="bool", default=True),
+ fragment=dict(type="bool", default=True),
+ latency=dict(type="bool", default=False),
+ discover_mtu=dict(type="bool", default=False),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ if module.params["action"] == "ping":
+ run_ping(module, blade)
+ else:
+ run_trace(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
new file mode 100644
index 000000000..273166de8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
@@ -0,0 +1,2079 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_policy
+version_added: '1.0.0'
+short_description: Manage FlashBlade policies
+description:
+- Manage policies for filesystem, file replica links and object store access.
+- To update an existing snapshot policy rule, you must first delete the
+  original rule and then add the new rule to replace it. Purity's best-fit
+  will try to ensure that any required snapshots deleted on the deletion of
+  the first rule will be recovered, as long as the replacement rule is added
+  before the snapshot eradication period is exceeded (usually 24 hours).
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete policy.
+ - Copy is applicable only to Object Store Access Policies Rules
+ default: present
+ type: str
+ choices: [ absent, present, copy ]
+ target:
+ description:
+ - Name of policy to copy rule to
+ type: str
+ version_added: "1.9.0"
+ target_rule:
+ description:
+    - Name of the rule to copy the existing rule to.
+ - If not defined the existing rule name is used.
+ type: str
+ version_added: "1.9.0"
+ policy_type:
+ description:
+ - Type of policy
+ default: snapshot
+ type: str
+ choices: [ snapshot, access, nfs ]
+ version_added: "1.9.0"
+ account:
+ description:
+ - Name of Object Store account policy applies to.
+ - B(Special Case) I(pure policy) is used for the system-wide S3 policies
+ type: str
+ version_added: "1.9.0"
+ rule:
+ description:
+ - Name of the rule for the Object Store Access Policy
+ - Rules in system wide policies cannot be deleted or modified
+ type: str
+ version_added: "1.9.0"
+ effect:
+ description:
+ - Allow S3 requests that match all of the I(actions) item selected.
+ Rules are additive.
+ type: str
+ default: allow
+ choices: [ allow ]
+ version_added: "1.9.0"
+ actions:
+ description:
+ - List of permissions to grant.
+ - System-wide policy rules cannot be deleted or modified
+ type: list
+ elements: str
+ choices:
+ - s3:*
+ - s3:AbortMultipartUpload
+ - s3:CreateBucket
+ - s3:DeleteBucket
+ - s3:DeleteObject
+ - s3:DeleteObjectVersion
+ - s3:ExtendSafemodeRetentionPeriod
+ - s3:GetBucketAcl
+ - s3:GetBucketLocation
+ - s3:GetBucketVersioning
+ - s3:GetLifecycleConfiguration
+ - s3:GetObject
+ - s3:GetObjectAcl
+ - s3:GetObjectVersion
+ - s3:ListAllMyBuckets
+ - s3:ListBucket
+ - s3:ListBucketMultipartUploads
+ - s3:ListBucketVersions
+ - s3:ListMultipartUploadParts
+ - s3:PutBucketVersioning
+ - s3:PutLifecycleConfiguration
+ - s3:PutObject
+ version_added: "1.9.0"
+ object_resources:
+ description:
+ - List of bucket names and object paths, with a wildcard (*) to
+ specify objects in a bucket; e.g., bucket1, bucket1/*, bucket2,
+ bucket2/*.
+ - System-wide policy rules cannot be deleted or modified
+ type: list
+ elements: str
+ version_added: "1.9.0"
+ source_ips:
+ description:
+ - List of IPs and subnets from which this rule should allow requests;
+ e.g., 10.20.30.40, 10.20.30.0/24, 2001:DB8:1234:5678::/64.
+ - System-wide policy rules cannot be deleted or modified
+ type: list
+ elements: str
+ version_added: "1.9.0"
+ s3_prefixes:
+ description:
+ - List of 'folders' (object key prefixes) for which object listings
+ may be requested.
+ - System-wide policy rules cannot be deleted or modified
+ type: list
+ elements: str
+ version_added: "1.9.0"
+ s3_delimiters:
+ description:
+ - List of delimiter characters allowed in object list requests.
+ - Grants permissions to list 'folder names' (prefixes ending in a
+ delimiter) instead of object keys.
+ - System-wide policy rules cannot be deleted or modified
+ type: list
+ elements: str
+ version_added: "1.9.0"
+ ignore_enforcement:
+ description:
+ - Certain combinations of actions and other rule elements are inherently
+ ignored if specified together in a rule.
+ - If set to true, operations which attempt to set these combinations will fail.
+ - If set to false, such operations will instead be allowed.
+ type: bool
+ default: true
+ version_added: "1.9.0"
+ user:
+ description:
+ - User in the I(account) that the policy is granted to.
+ type: str
+ version_added: "1.9.0"
+ force_delete:
+ description:
+    - Force the deletion of an Object Store Access Policy if it
+      has attached users.
+ - WARNING This can have undesired side-effects.
+ - System-wide policies cannot be deleted
+ type: bool
+ default: false
+ version_added: "1.9.0"
+ name:
+ description:
+ - Name of the policy
+ type: str
+ enabled:
+ description:
+ - State of policy
+ type: bool
+ default: true
+ every:
+ description:
+ - Interval between snapshots in seconds
+ - Range available 300 - 31536000 (equates to 5m to 365d)
+ type: int
+ keep_for:
+ description:
+ - How long to keep snapshots for
+ - Range available 300 - 31536000 (equates to 5m to 365d)
+ - Must not be set less than I(every)
+ type: int
+ at:
+ description:
+    - Provide a time in 12-hour AM/PM format, e.g. 11AM
+ type: str
+ timezone:
+ description:
+ - Time Zone used for the I(at) parameter
+ - If not provided, the module will attempt to get the current local timezone from the server
+ type: str
+ filesystem:
+ description:
+ - List of filesystems to add to a policy on creation
+ - To amend policy members use the I(purestorage.flashblade.purefb_fs) module
+ type: list
+ elements: str
+ replica_link:
+ description:
+ - List of filesystem replica links to add to a policy on creation
+ - To amend policy members use the I(purestorage.flashblade.purefb_fs_replica) module
+ type: list
+ elements: str
+ access:
+ description:
+ - Specifies access control for the export policy rule
+ type: str
+ choices: [ root-squash, all-squash, no-squash ]
+ default: root-squash
+ version_added: "1.9.0"
+ anonuid:
+ description:
+ - Any user whose UID is affected by an I(access) of `root_squash` or `all_squash`
+ will have their UID mapped to anonuid.
+      The default is null, which means 65534.
+ Use "" to clear.
+ type: str
+ version_added: "1.9.0"
+ anongid:
+ description:
+ - Any user whose GID is affected by an I(access) of `root_squash` or `all_squash`
+ will have their GID mapped to anongid.
+ The default anongid is null, which means 65534.
+ Use "" to clear.
+ type: str
+ version_added: "1.9.0"
+ atime:
+ description:
+ - After a read operation has occurred, the inode access time is updated only if any
+ of the following conditions is true; the previous access time is less than the
+ inode modify time, the previous access time is less than the inode change time,
+ or the previous access time is more than 24 hours ago.
+ - If set to false, disables the update of inode access times after read operations.
+ type: bool
+ default: true
+ version_added: "1.9.0"
+ client:
+ description:
+ - Specifies the clients that will be permitted to access the export.
+ - Accepted notation is a single IP address, subnet in CIDR notation, netgroup, or
+ anonymous (*).
+ type: str
+ default: "*"
+ version_added: "1.9.0"
+ fileid_32bit:
+ description:
+ - Whether the file id is 32 bits or not.
+ type: bool
+ default: false
+ version_added: "1.9.0"
+ permission:
+ description:
+ - Specifies which read-write client access permissions are allowed for the export.
+ type: str
+ choices: [ rw, ro ]
+ default: ro
+ version_added: "1.9.0"
+ secure:
+ description:
+ - If true, this prevents NFS access to client connections coming from non-reserved ports.
+ - If false, allows NFS access to client connections coming from non-reserved ports.
+ - Applies to NFSv3, NFSv4.1, and auxiliary protocols MOUNT and NLM.
+ type: bool
+ default: false
+ version_added: "1.9.0"
+ security:
+ description:
+ - The security flavors to use for accessing files on this mount point.
+ - If the server does not support the requested flavor, the mount operation fails.
+    - I(sys) trusts the client to specify the user's identity.
+    - I(krb) provides cryptographic proof of a user's identity in each RPC request.
+ - I(krb5i) adds integrity checking to krb5, to ensure the data has not been tampered with.
+ - I(krb5p) adds integrity checking and encryption to krb5.
+ type: list
+ elements: str
+ choices: [ sys, krb5, krb5i, krb5p ]
+ default: sys
+ version_added: "1.9.0"
+ before_rule:
+ description:
+ - The index of the client rule to insert or move a client rule before.
+ type: int
+ version_added: "1.9.0"
+ rename:
+ description:
+ - New name for export policy
+ - Only applies to NFS export policies
+ type: str
+ version_added: "1.10.0"
+ destroy_snapshots:
+ description:
+ - This parameter must be set to true in order to modify a policy such that local or remote snapshots would be destroyed.
+ type: bool
+ version_added: '1.11.0'
+ default: false
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create a simple snapshot policy with no rules
+ purestorage.flashblade.purefb_policy:
+ name: test_policy
+ policy_type: snapshot
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a snapshot policy and connect to existing filesystems and filesystem replica links
+ purestorage.flashblade.purefb_policy:
+ name: test_policy_with_members
+ policy_type: snapshot
+ filesystem:
+ - fs1
+ - fs2
+ replica_link:
+ - rl1
+ - rl2
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a snapshot policy with rules
+ purestorage.flashblade.purefb_policy:
+ name: test_policy2
+ policy_type: snapshot
+ at: 11AM
+ keep_for: 86400
+ every: 86400
+ timezone: Asia/Shanghai
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a snapshot policy
+ purestorage.flashblade.purefb_policy:
+ name: test_policy
+ policy_type: snapshot
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an empty object store access policy
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy
+ account: test
+ policy_type: access
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an empty object store access policy and assign user
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy
+ account: test
+ policy_type: access
+ user: fred
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a object store access policy with simple rule
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ policy_type: access
+ account: test
+ rule: rule1
+ actions: "s3:*"
+ object_resources: "*"
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an empty NFS export policy
+ purestorage.flashblade.purefb_policy:
+ name: test_nfs_export
+ policy_type: nfs
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an NFS export policy with a client rule
+ purestorage.flashblade.purefb_policy:
+ name: test_nfs_export
+ policy_type: nfs
+ atime: true
+ client: "10.0.1.0/24"
+ secure: true
+ security: [sys, krb5]
+ permission: rw
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create a new rule for an existing NFS export policy
+ purestorage.flashblade.purefb_policy:
+ name: test_nfs_export
+ policy_type: nfs
+ atime: true
+ client: "10.0.2.0/24"
+ security: sys
+ permission: ro
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a client rule from an NFS export policy
+ purestorage.flashblade.purefb_policy:
+ name: test_nfs_export
+ client: "10.0.1.0/24"
+ policy_type: nfs
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete an NFS export policy and all associated rules
+ purestorage.flashblade.purefb_policy:
+ name: test_nfs_export
+ state: absent
+ policy_type: nfs
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a rule from an object store access policy
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ account: test
+ policy_type: access
+ rule: rule1
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete a user from an object store access policy
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ account: test
+ user: fred
+ policy_type: access
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete an object store access policy with attached users (USE WITH CAUTION)
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ account: test
+ policy_type: access
+ force_delete: true
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Delete an object store access policy with no attached users
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ account: test
+ policy_type: access
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Copy an object store access policy rule to another existing policy
+ purestorage.flashblade.purefb_policy:
+ name: test_os_policy_rule
+ policy_type: access
+ account: test
+ target: "account2/anotherpolicy"
+ target_rule: new_rule1
+ state: copy
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Rename an NFS Export Policy
+ purestorage.flashblade.purefb_policy:
+ name: old_name
+ policy_type: nfs
+ rename: new_name
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import Policy, PolicyRule, PolicyPatch
+except ImportError:
+ HAS_PURITYFB = False
+
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient.flashblade import (
+ PolicyRuleObjectAccessCondition,
+ PolicyRuleObjectAccessPost,
+ PolicyRuleObjectAccess,
+ NfsExportPolicy,
+ NfsExportPolicyRule,
+ Policy,
+ PolicyRule,
+ )
+except ImportError:
+ HAS_PYPURECLIENT = False
+
+HAS_PYTZ = True
+try:
+ import pytz
+except ImportError:
+    HAS_PYTZ = False
+
+import os
+import re
+import platform
+
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+SNAPSHOT_POLICY_API_VERSION = "2.1"
+ACCESS_POLICY_API_VERSION = "2.2"
+NFS_POLICY_API_VERSION = "2.3"
+NFS_RENAME_API_VERSION = "2.4"
+
+
+def _convert_to_millisecs(hour):
+ if hour[-2:] == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:] == "AM":
+ return int(hour[:-2]) * 3600000
+ elif hour[-2:] == "PM" and hour[:2] == "12":
+ return 43200000
+ return (int(hour[:-2]) + 12) * 3600000
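+# Reference conversions for the 12-hour parser above: "12AM" -> 0,
+# "11AM" -> 11 * 3600000 = 39600000, "12PM" -> 43200000 and
+# "3PM" -> (3 + 12) * 3600000 = 54000000 (milliseconds since midnight).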
+
+
+def _findstr(text, match):
+ for line in text.splitlines():
+ if match in line:
+ found = line
+ return found
+
+
+def _get_local_tz(module, timezone="UTC"):
+ """
+ We will attempt to get the local timezone of the server running the module and use that.
+ If we can't get the timezone then we will set the default to be UTC
+
+    Linux has been tested and other operating systems should be OK.
+ Failures cause assumption of UTC
+
+ Windows is not supported and will assume UTC
+ """
+ if platform.system() == "Linux":
+ timedatectl = get_bin_path("timedatectl")
+ if timedatectl is not None:
+ rcode, stdout, stderr = module.run_command(timedatectl)
+ if rcode == 0 and stdout:
+ line = _findstr(stdout, "Time zone")
+ full_tz = line.split(":", 1)[1].rstrip()
+ timezone = full_tz.split()[0]
+ return timezone
+ else:
+ module.warn("Incorrect timedatectl output. Timezone will be set to UTC")
+ else:
+ if os.path.exists("/etc/timezone"):
+ timezone = get_file_content("/etc/timezone")
+ else:
+ module.warn("Could not find /etc/timezone. Assuming UTC")
+
+ elif platform.system() == "SunOS":
+ if os.path.exists("/etc/default/init"):
+ for line in get_file_content("/etc/default/init", "").splitlines():
+ if line.startswith("TZ="):
+ timezone = line.split("=", 1)[1]
+ return timezone
+ else:
+ module.warn("Could not find /etc/default/init. Assuming UTC")
+
+ elif re.match("^Darwin", platform.platform()):
+ systemsetup = get_bin_path("systemsetup")
+ if systemsetup is not None:
+            rcode, stdout, stderr = module.run_command([systemsetup, "-gettimezone"])
+ if rcode == 0 and stdout:
+ timezone = stdout.split(":", 1)[1].lstrip()
+ else:
+ module.warn("Could not run systemsetup. Assuming UTC")
+ else:
+ module.warn("Could not find systemsetup. Assuming UTC")
+
+ elif re.match("^(Free|Net|Open)BSD", platform.platform()):
+ if os.path.exists("/etc/timezone"):
+ timezone = get_file_content("/etc/timezone")
+ else:
+ module.warn("Could not find /etc/timezone. Assuming UTC")
+
+ elif platform.system() == "AIX":
+ aix_oslevel = int(platform.version() + platform.release())
+ if aix_oslevel >= 61:
+ if os.path.exists("/etc/environment"):
+ for line in get_file_content("/etc/environment", "").splitlines():
+ if line.startswith("TZ="):
+ timezone = line.split("=", 1)[1]
+ return timezone
+ else:
+ module.warn("Could not find /etc/environment. Assuming UTC")
+ else:
+ module.warn(
+ "Cannot determine timezone when AIX os level < 61. Assuming UTC"
+ )
+
+    else:
+        module.warn("Unsupported platform. Assuming UTC")
+
+ return timezone
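+# On a typical Linux host the timedatectl path above parses a line such as
+# "Time zone: America/New_York (EST, -0500)" and returns "America/New_York".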
+
+
+def delete_nfs_policy(module, blade):
+ """Delete NFS Export Policy, or Rule
+
+ If client is provided then delete the client rule if it exists.
+ """
+
+ changed = False
+ policy_delete = True
+ if module.params["client"]:
+ policy_delete = False
+ res = blade.get_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="client='" + module.params["client"] + "'",
+ )
+ if res.status_code == 200:
+ if res.total_item_count == 0:
+ pass
+ elif res.total_item_count == 1:
+ rule = list(res.items)[0]
+ if module.params["client"] == rule.client:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_nfs_export_policies_rules(names=[rule.name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule for client {0} in policy {1}. "
+ "Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ rules = list(res.items)
+ for cli in range(0, len(rules)):
+ if rules[cli].client == "*":
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_nfs_export_policies_rules(
+ names=[rules[cli].name]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule for client {0} in policy {1}. "
+ "Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ if policy_delete:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_nfs_export_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete export policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def rename_nfs_policy(module, blade):
+ """Rename NFS Export Policy"""
+
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_nfs_export_policies(
+ names=[module.params["name"]],
+ policy=NfsExportPolicy(name=module.params["rename"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename NFS export policy {0} to {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["rename"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_nfs_policy(module, blade):
+ """Update NFS Export Policy Rule"""
+
+ changed = False
+ if module.params["client"]:
+ current_policy_rule = blade.get_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="client='" + module.params["client"] + "'",
+ )
+ if (
+ current_policy_rule.status_code == 200
+ and current_policy_rule.total_item_count == 0
+ ):
+ rule = NfsExportPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ anonuid=module.params["anonuid"],
+ anongid=module.params["anongid"],
+ fileid_32bit=module.params["fileid_32bit"],
+ atime=module.params["atime"],
+ secure=module.params["secure"],
+ security=module.params["security"],
+ )
+ changed = True
+ if not module.check_mode:
+ if module.params["before_rule"]:
+ before_name = (
+ module.params["name"] + "." + str(module.params["before_rule"])
+ )
+ res = blade.post_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ before_rule_name=before_name,
+ )
+ else:
+ res = blade.post_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for client {0} "
+ "in export policy {1}. Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ rules = list(current_policy_rule.items)
+ cli_count = None
+ done = False
+ if module.params["client"] == "*":
+ for cli in range(0, len(rules)):
+ if rules[cli].client == "*":
+ cli_count = cli
+            if cli_count is None:
+ rule = NfsExportPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ anonuid=module.params["anonuid"],
+ anongid=module.params["anongid"],
+ fileid_32bit=module.params["fileid_32bit"],
+ atime=module.params["atime"],
+ secure=module.params["secure"],
+ security=module.params["security"],
+ )
+ done = True
+ changed = True
+ if not module.check_mode:
+ if module.params["before_rule"]:
+ res = blade.post_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ before_rule_name=(
+ module.params["name"]
+ + "."
+                                + str(module.params["before_rule"])
+ ),
+ )
+ else:
+ res = blade.post_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for "
+ "client {0} in export policy {1}. Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ if not done:
+ old_policy_rule = rules[0]
+ current_rule = {
+ "anongid": getattr(old_policy_rule, "anongid", None),
+ "anonuid": getattr(old_policy_rule, "anonuid", None),
+ "atime": old_policy_rule.atime,
+ "client": sorted(old_policy_rule.client),
+ "fileid_32bit": old_policy_rule.fileid_32bit,
+ "permission": sorted(old_policy_rule.permission),
+ "secure": old_policy_rule.secure,
+ "security": sorted(old_policy_rule.security),
+ }
+ if module.params["permission"]:
+ new_permission = sorted(module.params["permission"])
+ else:
+ new_permission = sorted(current_rule["permission"])
+ if module.params["client"]:
+ new_client = sorted(module.params["client"])
+ else:
+ new_client = sorted(current_rule["client"])
+ if module.params["security"]:
+ new_security = sorted(module.params["security"])
+ else:
+ new_security = sorted(current_rule["security"])
+ if module.params["anongid"]:
+ new_anongid = module.params["anongid"]
+ else:
+ new_anongid = current_rule["anongid"]
+ if module.params["anonuid"]:
+ new_anonuid = module.params["anonuid"]
+ else:
+ new_anonuid = current_rule["anonuid"]
+ if module.params["atime"] != current_rule["atime"]:
+ new_atime = module.params["atime"]
+ else:
+ new_atime = current_rule["atime"]
+ if module.params["secure"] != current_rule["secure"]:
+ new_secure = module.params["secure"]
+ else:
+ new_secure = current_rule["secure"]
+ if module.params["fileid_32bit"] != current_rule["fileid_32bit"]:
+ new_fileid_32bit = module.params["fileid_32bit"]
+ else:
+ new_fileid_32bit = current_rule["fileid_32bit"]
+ new_rule = {
+ "anongid": new_anongid,
+ "anonuid": new_anonuid,
+ "atime": new_atime,
+ "client": new_client,
+ "fileid_32bit": new_fileid_32bit,
+ "permission": new_permission,
+ "secure": new_secure,
+ "security": new_security,
+ }
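+ # Compare the normalised current and desired rule dictionaries; any
+ # difference means the existing rule must be patched in place.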
+ if current_rule != new_rule:
+ changed = True
+ if not module.check_mode:
+ rule = NfsExportPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ anonuid=module.params["anonuid"],
+ anongid=module.params["anongid"],
+ fileid_32bit=module.params["fileid_32bit"],
+ atime=module.params["atime"],
+ secure=module.params["secure"],
+ security=module.params["security"],
+ )
+ res = blade.patch_nfs_export_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update NFS export rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ if (
+ module.params["before_rule"]
+ and module.params["before_rule"] != old_policy_rule.index
+ ):
+ changed = True
+ if not module.check_mode:
+ before_name = (
+ module.params["name"]
+ + "."
+ + str(module.params["before_rule"])
+ )
+ res = blade.patch_nfs_export_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=NfsExportPolicyRule(),
+ before_rule_name=before_name,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to move NFS export rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ current_policy = list(
+ blade.get_nfs_export_policies(names=[module.params["name"]]).items
+ )[0]
+ if current_policy.enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_nfs_export_policies(
+ policy=NfsExportPolicy(enabled=module.params["enabled"]),
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change state of nfs export policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_nfs_policy(module, blade):
+ """Create NFS Export Policy"""
+ changed = True
+ if not module.check_mode:
+ res = blade.post_nfs_export_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create nfs export policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["enabled"]:
+ res = blade.patch_nfs_export_policies(
+ policy=NfsExportPolicy(enabled=False), names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ blade.delete_nfs_export_policies(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to create nfs export policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["client"]:
+ module.fail_json(msg="client is required to create a new rule")
+ else:
+ rule = NfsExportPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ anonuid=module.params["anonuid"],
+ anongid=module.params["anongid"],
+ fileid_32bit=module.params["fileid_32bit"],
+ atime=module.params["atime"],
+ secure=module.params["secure"],
+ security=module.params["security"],
+ )
+ res = blade.post_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rule for policy {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_os_policy(module, blade):
+ """Delete Object Store Access Policy, Rule, or User
+
+ If rule is provided, delete that rule if it exists.
+ If user is provided, remove the access grant from that user if granted.
+ If neither user nor rule is provided, delete the whole policy.
+ A policy cannot be deleted while users are attached, so all users are
+ detached first if the force_delete option is selected.
+ """
+
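+ # Illustrative call patterns (values hypothetical): rule="myrule" deletes
+ # just that rule, user="bob" revokes bob's grant, and neither set deletes
+ # the policy itself, detaching users first when force_delete=true.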
+ changed = False
+ policy_name = module.params["account"] + "/" + module.params["name"]
+ policy_delete = True
+ if module.params["rule"]:
+ policy_delete = False
+ res = blade.get_object_store_access_policies_rules(
+ policy_names=[policy_name], names=[module.params["rule"]]
+ )
+ if res.status_code == 200 and res.total_item_count != 0:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_object_store_access_policies_rules(
+ policy_names=[policy_name], names=[module.params["rule"]]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete users from policy {0}. Error: {1} - {2}".format(
+ policy_name, res.errors[0].context, res.errors[0].message
+ )
+ )
+
+ if module.params["user"]:
+ member_name = module.params["account"] + "/" + module.params["user"]
+ policy_delete = False
+ res = blade.get_object_store_access_policies_object_store_users(
+ policy_names=[policy_name], member_names=[member_name]
+ )
+ if res.status_code == 200 and res.total_item_count != 0:
+ changed = True
+ if not module.check_mode:
+ member_name = module.params["account"] + "/" + module.params["user"]
+ res = blade.delete_object_store_access_policies_object_store_users(
+ policy_names=[policy_name], member_names=[member_name]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete users from policy {0}. Error: {1} - {2}".format(
+ policy_name, res.errors[0].context, res.errors[0].message
+ )
+ )
+
+ if policy_delete:
+ if module.params["account"].lower() == "pure:policy":
+ module.fail_json(msg="System-Wide policies cannot be deleted.")
+ policy_users = list(
+ blade.get_object_store_access_policies_object_store_users(
+ policy_names=[policy_name]
+ ).items
+ )
+ if len(policy_users) == 0:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_object_store_access_policies(names=[policy_name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete policy {0}. Error: {1}".format(
+ policy_name, res.errors[0].message
+ )
+ )
+ else:
+ if module.params["force_delete"]:
+ changed = True
+ if not module.check_mode:
+ for user in range(0, len(policy_users)):
+ res = blade.delete_object_store_access_policies_object_store_users(
+ member_names=[policy_users[user].member.name],
+ policy_names=[policy_name],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete user {0} from policy {1}, "
+ "Error: {2}".format(
+ policy_users[user].member.name,
+ policy_name,
+ res.errors[0].message,
+ )
+ )
+ res = blade.delete_object_store_access_policies(names=[policy_name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete policy {0}. Error: {1}".format(
+ policy_name, res.errors[0].message
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Policy {0} cannot be deleted with connected users".format(
+ policy_name
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_os_policy(module, blade):
+ """Create Object Store Access Policy"""
+ changed = True
+ policy_name = module.params["account"] + "/" + module.params["name"]
+ if not module.check_mode:
+ res = blade.post_object_store_access_policies(names=[policy_name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create access policy {0}.".format(policy_name)
+ )
+ if module.params["rule"]:
+ if not module.params["actions"] or not module.params["object_resources"]:
+ module.fail_json(
+ msg="Parameters `actions` and `object_resources` "
+ "are required to create a new rule"
+ )
+ conditions = PolicyRuleObjectAccessCondition(
+ source_ips=module.params["source_ips"],
+ s3_delimiters=module.params["s3_delimiters"],
+ s3_prefixes=module.params["s3_prefixes"],
+ )
+ rule = PolicyRuleObjectAccessPost(
+ actions=module.params["actions"],
+ resources=module.params["object_resources"],
+ conditions=conditions,
+ )
+ res = blade.post_object_store_access_policies_rules(
+ policy_names=policy_name,
+ names=[module.params["rule"]],
+ enforce_action_restrictions=module.params["ignore_enforcement"],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule {0} to policy {1}. Error: {2}".format(
+ module.params["rule"], policy_name, res.errors[0].message
+ )
+ )
+ if module.params["user"]:
+ member_name = module.params["account"] + "/" + module.params["user"]
+ res = blade.post_object_store_access_policies_object_store_users(
+ member_names=[member_name], policy_names=[policy_name]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add users to policy {0}. Error: {1} - {2}".format(
+ policy_name, res.errors[0].context, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_os_policy(module, blade):
+ """Update Object Store Access Policy"""
+ changed = False
+ policy_name = module.params["account"] + "/" + module.params["name"]
+ if module.params["rule"]:
+ current_policy_rule = blade.get_object_store_access_policies_rules(
+ policy_names=[policy_name], names=[module.params["rule"]]
+ )
+ if current_policy_rule.status_code != 200:
+ # Rule does not exist yet, so create it
+ changed = True
+ if not module.check_mode:
+ conditions = PolicyRuleObjectAccessCondition(
+ source_ips=module.params["source_ips"],
+ s3_delimiters=module.params["s3_delimiters"],
+ s3_prefixes=module.params["s3_prefixes"],
+ )
+ rule = PolicyRuleObjectAccessPost(
+ actions=module.params["actions"],
+ resources=module.params["object_resources"],
+ conditions=conditions,
+ )
+ res = blade.post_object_store_access_policies_rules(
+ policy_names=policy_name,
+ names=[module.params["rule"]],
+ enforce_action_restrictions=module.params["ignore_enforcement"],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule {0} in policy {1}. Error: {2}".format(
+ module.params["rule"], policy_name, res.errors[0].message
+ )
+ )
+ else:
+ old_policy_rule = list(current_policy_rule.items)[0]
+ current_rule = {
+ "actions": old_policy_rule.actions,
+ "resources": old_policy_rule.resources,
+ "ips": getattr(old_policy_rule.conditions, "source_ips", None),
+ "prefixes": getattr(old_policy_rule.conditions, "s3_prefixes", None),
+ "delimiters": getattr(
+ old_policy_rule.conditions, "s3_delimiters", None
+ ),
+ }
+ if module.params["actions"]:
+ new_actions = sorted(module.params["actions"])
+ else:
+ new_actions = sorted(current_rule["actions"])
+ if module.params["object_resources"]:
+ new_resources = sorted(module.params["object_resources"])
+ else:
+ new_resources = sorted(current_rule["resources"])
+ if module.params["s3_prefixes"]:
+ new_prefixes = sorted(module.params["s3_prefixes"])
+ elif current_rule["prefixes"]:
+ new_prefixes = sorted(current_rule["prefixes"])
+ else:
+ new_prefixes = None
+ if module.params["s3_delimiters"]:
+ new_delimiters = sorted(module.params["s3_delimiters"])
+ elif current_rule["delimiters"]:
+ new_delimiters = sorted(current_rule["delimiters"])
+ else:
+ new_delimiters = None
+ if module.params["source_ips"]:
+ new_ips = sorted(module.params["source_ips"])
+ elif current_rule["ips"]:
+ new_ips = sorted(current_rule["source_ips"])
+ else:
+ new_ips = None
+ new_rule = {
+ "actions": new_actions,
+ "resources": new_resources,
+ "ips": new_ips,
+ "prefixes": new_prefixes,
+ "delimiters": new_delimiters,
+ }
+ if current_rule != new_rule:
+ changed = True
+ if not module.check_mode:
+ conditions = PolicyRuleObjectAccessCondition(
+ source_ips=new_rule["ips"],
+ s3_prefixes=new_rule["prefixes"],
+ s3_delimiters=new_rule["delimiters"],
+ )
+ rule = PolicyRuleObjectAccess(
+ actions=new_rule["actions"],
+ resources=new_rule["resources"],
+ conditions=conditions,
+ )
+ res = blade.patch_object_store_access_policies_rules(
+ policy_names=[policy_name],
+ names=[module.params["rule"]],
+ rule=rule,
+ enforce_action_restrictions=module.params["ignore_enforcement"],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update rule {0} in policy {1}. Error: {2}".format(
+ module.params["rule"], policy_name, res.errors[0].message
+ )
+ )
+ if module.params["user"]:
+ member_name = module.params["account"] + "/" + module.params["user"]
+ res = blade.get_object_store_access_policies_object_store_users(
+ policy_names=[policy_name], member_names=[member_name]
+ )
+ if res.status_code != 200 or (
+ res.status_code == 200 and res.total_item_count == 0
+ ):
+ changed = True
+ if not module.check_mode:
+ res = blade.post_object_store_access_policies_object_store_users(
+ member_names=[member_name], policy_names=[policy_name]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add user {0} to policy {1}. Error: {2}".format(
+ member_name, policy_name, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def copy_os_policy_rule(module, blade):
+ """Copy an existing policy rule to a new policy"""
+ changed = True
+ policy_name = module.params["account"] + "/" + module.params["name"]
+ if not module.params["target_rule"]:
+ module.params["target_rule"] = module.params["rule"]
+ if (
+ blade.get_object_store_access_policies_rules(
+ policy_names=[module.params["target"]], names=[module.params["target_rule"]]
+ ).status_code
+ == 200
+ ):
+ module.fail_json(
+ msg="Target rule {0} already exists in policy {1}".format(
+ module.params["target_rule"], policy_name
+ )
+ )
+ current_rule = list(
+ blade.get_object_store_access_policies_rules(
+ policy_names=[policy_name], names=[module.params["rule"]]
+ ).items
+ )[0]
+ if not module.check_mode:
+ conditions = PolicyRuleObjectAccessCondition(
+ source_ips=current_rule.conditions.source_ips,
+ s3_delimiters=current_rule.conditions.s3_delimiters,
+ s3_prefixes=current_rule.conditions.s3_prefixes,
+ )
+ rule = PolicyRuleObjectAccessPost(
+ actions=current_rule.actions,
+ resources=current_rule.resources,
+ conditions=conditions,
+ )
+ res = blade.post_object_store_access_policies_rules(
+ policy_names=module.params["target"],
+ names=[module.params["target_rule"]],
+ enforce_action_restrictions=module.params["ignore_enforcement"],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to copy rule {0} from policy {1} to policy {2}. "
+ "Error: {3}".format(
+ module.params["rule"],
+ policy_name,
+ module.params["target"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_policy(module, blade):
+ """Delete policy"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.policies.delete_policies(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete policy {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_snap_policy(module, blade):
+ """Delete REST 2 snapshot policy
+
+ If any rule parameters are provided, delete any rule that matches
+ all of the provided parameters.
+ If no rule parameters are provided, delete the entire policy.
+ """
+
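+ # A rule matches only if every populated parameter agrees after unit
+ # conversion; e.g. (hypothetical values) every=3600 and keep_for=86400
+ # seconds compare against Purity's 3600000 and 86400000 milliseconds.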
+ changed = False
+ rule_delete = False
+ if (
+ module.params["at"]
+ or module.params["every"]
+ or module.params["timezone"]
+ or module.params["keep_for"]
+ ):
+ rule_delete = True
+ if rule_delete:
+ current_rules = list(blade.get_policies(names=[module.params["name"]]).items)[
+ 0
+ ].rules
+ for rule in range(0, len(current_rules)):
+ current_rule = {
+ "at": current_rules[rule].at,
+ "every": current_rules[rule].every,
+ "keep_for": current_rules[rule].keep_for,
+ "time_zone": current_rules[rule].time_zone,
+ }
+ if not module.params["at"]:
+ delete_at = current_rules[rule].at
+ else:
+ delete_at = _convert_to_millisecs(module.params["at"])
+ if module.params["keep_for"]:
+ delete_keep_for = module.params["keep_for"]
+ else:
+ delete_keep_for = int(current_rules[rule].keep_for / 1000)
+ if module.params["every"]:
+ delete_every = module.params["every"]
+ else:
+ delete_every = int(current_rules[rule].every / 1000)
+ if not module.params["timezone"]:
+ delete_tz = current_rules[rule].time_zone
+ else:
+ delete_tz = module.params["timezone"]
+ delete_rule = {
+ "at": delete_at,
+ "every": delete_every * 1000,
+ "keep_for": delete_keep_for * 1000,
+ "time_zone": delete_tz,
+ }
+ if current_rule == delete_rule:
+ changed = True
+ attr = PolicyPatch(remove_rules=[delete_rule])
+ if not module.check_mode:
+ res = blade.patch_policies(
+ destroy_snapshots=module.params["destroy_snapshots"],
+ names=[module.params["name"]],
+ policy=attr,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete policy rule {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_snap_policy(module, blade):
+ """Create REST 2 snapshot policy"""
+ changed = True
+ if (
+ module.params["keep_for"]
+ and not module.params["every"]
+ or module.params["every"]
+ and not module.params["keep_for"]
+ ):
+ module.fail_json(msg="`keep_for` and `every` are required.")
+ if module.params["timezone"] and not module.params["at"]:
+ module.fail_json(msg="`timezone` requires `at` to be provided.")
+ if module.params["at"] and not module.params["every"]:
+ module.fail_json(msg="`at` requires `every` to be provided.")
+
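+ # `at` sets a time of day, so it is only meaningful when `every` is a
+ # whole number of days, i.e. a multiple of 86400 seconds.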
+ if not module.check_mode:
+ if module.params["at"] and module.params["every"]:
+ if not module.params["every"] % 86400 == 0:
+ module.fail_json(
+ msg="At time can only be set if every value is a multiple of 86400"
+ )
+ if not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in pytz.all_timezones_set:
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(
+ module.params["timezone"]
+ )
+ )
+ if not module.params["keep_for"]:
+ module.params["keep_for"] = 0
+ if not module.params["every"]:
+ module.params["every"] = 0
+ if module.params["keep_for"] < module.params["every"]:
+ module.fail_json(
+ msg="Retention period cannot be less than snapshot interval."
+ )
+ if module.params["at"] and not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in set(pytz.all_timezones_set):
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(module.params["timezone"])
+ )
+
+ if module.params["keep_for"]:
+ if not 300 <= module.params["keep_for"] <= 34560000:
+ module.fail_json(
+ msg="keep_for parameter is out of range (300 to 34560000)"
+ )
+ if not 300 <= module.params["every"] <= 34560000:
+ module.fail_json(
+ msg="every parameter is out of range (300 to 34560000)"
+ )
+ if module.params["at"]:
+ attr = Policy(
+ enabled=module.params["enabled"],
+ rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ at=_convert_to_millisecs(module.params["at"]),
+ time_zone=module.params["timezone"],
+ )
+ ],
+ )
+ else:
+ attr = Policy(
+ enabled=module.params["enabled"],
+ rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ )
+ ],
+ )
+ else:
+ attr = Policy(enabled=module.params["enabled"])
+ res = blade.post_policies(names=[module.params["name"]], policy=attr)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create snapshot policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_policy(module, blade):
+ """Create snapshot policy"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params["at"] and module.params["every"]:
+ if not module.params["every"] % 86400 == 0:
+ module.fail_json(
+ msg="At time can only be set if every value is a multiple of 86400"
+ )
+ if not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in pytz.all_timezones_set:
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(
+ module.params["timezone"]
+ )
+ )
+ if not module.params["keep_for"]:
+ module.params["keep_for"] = 0
+ if not module.params["every"]:
+ module.params["every"] = 0
+ if module.params["keep_for"] < module.params["every"]:
+ module.fail_json(
+ msg="Retention period cannot be less than snapshot interval."
+ )
+ if module.params["at"] and not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in set(pytz.all_timezones_set):
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(
+ module.params["timezone"]
+ )
+ )
+
+ if module.params["keep_for"]:
+ if not 300 <= module.params["keep_for"] <= 34560000:
+ module.fail_json(
+ msg="keep_for parameter is out of range (300 to 34560000)"
+ )
+ if not 300 <= module.params["every"] <= 34560000:
+ module.fail_json(
+ msg="every parameter is out of range (300 to 34560000)"
+ )
+ if module.params["at"]:
+ attr = Policy(
+ enabled=module.params["enabled"],
+ rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ at=_convert_to_millisecs(module.params["at"]),
+ time_zone=module.params["timezone"],
+ )
+ ],
+ )
+ else:
+ attr = Policy(
+ enabled=module.params["enabled"],
+ rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ )
+ ],
+ )
+ else:
+ attr = Policy(enabled=module.params["enabled"])
+ blade.policies.create_policies(names=[module.params["name"]], policy=attr)
+ except Exception:
+ module.fail_json(
+ msg="Failed to create policy {0}.".format(module.params["name"])
+ )
+ if module.params["filesystem"]:
+ try:
+ blade.file_systems.list_file_systems(names=module.params["filesystem"])
+ blade.policies.create_policy_filesystems(
+ policy_names=[module.params["name"]],
+ member_names=module.params["filesystem"],
+ )
+ except Exception:
+ blade.policies.delete_policies(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to connect filesystems to policy {0}, "
+ "or one of {1} doesn't exist.".format(
+ module.params["name"], module.params["filesystem"]
+ )
+ )
+ if module.params["replica_link"]:
+ for link in module.params["replica_link"]:
+ remote_array = (
+ blade.file_system_replica_links.list_file_system_replica_links(
+ local_file_system_names=[link]
+ )
+ )
+ try:
+ blade.policies.create_policy_file_system_replica_links(
+ policy_names=[module.params["name"]],
+ member_names=[link],
+ remote_names=[remote_array.items[0].remote.name],
+ )
+ except Exception:
+ blade.policies.delete_policies(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to connect filesystem replicsa link {0} to policy {1}. "
+ "Replica Link {0} does not exist.".format(
+ link, module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_snap_policy(module, blade):
+ """Update REST 2 snapshot policy
+
+ This function only adds new rules to the policy.
+
+ Due to the 'best fit' nature of Purity, existing rules are never modified
+ in place. To update an existing rule, delete the current rule and then
+ add the new rule.
+
+ Purity may recover the affected snapshots as long as the new rule is added
+ before the eradication delay (typically 24h) causes the snapshots to be
+ eradicated.
+ """
+
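+ # Example of the add-only flow (hypothetical values): changing keep_for
+ # from 86400 to 172800 adds a second rule; the old rule must then be
+ # removed via state=absent with the old parameters.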
+ changed = False
+ if (
+ module.params["keep_for"]
+ and not module.params["every"]
+ or module.params["every"]
+ and not module.params["keep_for"]
+ ):
+ module.fail_json(msg="`keep_for` and `every` are required.")
+ if module.params["timezone"] and not module.params["at"]:
+ module.fail_json(msg="`timezone` requires `at` to be provided.")
+ if module.params["at"] and not module.params["every"]:
+ module.fail_json(msg="`at` requires `every` to be provided.")
+ current_rules = list(blade.get_policies(names=[module.params["name"]]).items)[
+ 0
+ ].rules
+ create_new = True
+ for rule in range(0, len(current_rules)):
+ current_rule = {
+ "at": current_rules[rule].at,
+ "every": current_rules[rule].every,
+ "keep_for": current_rules[rule].keep_for,
+ "time_zone": current_rules[rule].time_zone,
+ }
+ if not module.params["at"]:
+ new_at = current_rules[rule].at
+ else:
+ new_at = _convert_to_millisecs(module.params["at"])
+ if module.params["keep_for"]:
+ new_keep_for = module.params["keep_for"]
+ else:
+ new_keep_for = int(current_rules[rule].keep_for / 1000)
+ if module.params["every"]:
+ new_every = module.params["every"]
+ else:
+ new_every = int(current_rules[rule].every / 1000)
+ if not module.params["timezone"]:
+ new_tz = current_rules[rule].time_zone
+ else:
+ new_tz = module.params["timezone"]
+ new_rule = {
+ "at": new_at,
+ "every": new_every * 1000,
+ "keep_for": new_keep_for * 1000,
+ "time_zone": new_tz,
+ }
+ if current_rule == new_rule:
+ create_new = False
+
+ if create_new:
+ changed = True
+ if not module.check_mode:
+ if module.params["at"] and module.params["every"]:
+ if not module.params["every"] % 86400 == 0:
+ module.fail_json(
+ msg="At time can only be set if every value is a multiple of 86400"
+ )
+ if not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in pytz.all_timezones_set:
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(
+ module.params["timezone"]
+ )
+ )
+ if not module.params["keep_for"]:
+ module.params["keep_for"] = 0
+ if not module.params["every"]:
+ module.params["every"] = 0
+ if module.params["keep_for"] < module.params["every"]:
+ module.fail_json(
+ msg="Retention period cannot be less than snapshot interval."
+ )
+ if module.params["at"] and not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in set(pytz.all_timezones_set):
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(
+ module.params["timezone"]
+ )
+ )
+
+ if module.params["keep_for"]:
+ if not 300 <= module.params["keep_for"] <= 34560000:
+ module.fail_json(
+ msg="keep_for parameter is out of range (300 to 34560000)"
+ )
+ if not 300 <= module.params["every"] <= 34560000:
+ module.fail_json(
+ msg="every parameter is out of range (300 to 34560000)"
+ )
+ if module.params["at"]:
+ attr = PolicyPatch(
+ enabled=module.params["enabled"],
+ add_rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ at=_convert_to_millisecs(module.params["at"]),
+ time_zone=module.params["timezone"],
+ )
+ ],
+ )
+ else:
+ attr = PolicyPatch(
+ enabled=module.params["enabled"],
+ add_rules=[
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ )
+ ],
+ )
+ else:
+ attr = PolicyPatch(enabled=module.params["enabled"])
+ res = blade.patch_policies(
+ names=[module.params["name"]],
+ policy=attr,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update snapshot policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_policy(module, blade, policy):
+ """Update snapshot policy"""
+ changed = False
+ if not policy.rules:
+ current_policy = {
+ "time_zone": None,
+ "every": 0,
+ "keep_for": 0,
+ "at": 0,
+ "enabled": policy.enabled,
+ }
+ else:
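+ # Purity reports keep_for/every in milliseconds; convert to whole
+ # seconds to match the module's parameters before comparing.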
+ if policy.rules[0].keep_for != 0:
+ policy.rules[0].keep_for = int(policy.rules[0].keep_for / 1000)
+ if policy.rules[0].every != 0:
+ policy.rules[0].every = int(policy.rules[0].every / 1000)
+
+ current_policy = {
+ "time_zone": policy.rules[0].time_zone,
+ "every": policy.rules[0].every,
+ "keep_for": policy.rules[0].keep_for,
+ "at": policy.rules[0].at,
+ "enabled": policy.enabled,
+ }
+ if not module.params["every"]:
+ every = 0
+ else:
+ every = module.params["every"]
+ if not module.params["keep_for"]:
+ keep_for = 0
+ else:
+ keep_for = module.params["keep_for"]
+ if module.params["at"]:
+ at_time = _convert_to_millisecs(module.params["at"])
+ else:
+ at_time = None
+ if not module.params["timezone"]:
+ timezone = _get_local_tz(module)
+ else:
+ timezone = module.params["timezone"]
+ if at_time:
+ new_policy = {
+ "time_zone": timezone,
+ "every": every,
+ "keep_for": keep_for,
+ "at": at_time,
+ "enabled": module.params["enabled"],
+ }
+ else:
+ new_policy = {
+ "time_zone": None,
+ "every": every,
+ "keep_for": keep_for,
+ "at": None,
+ "enabled": module.params["enabled"],
+ }
+ if (
+ new_policy["time_zone"]
+ and new_policy["time_zone"] not in pytz.all_timezones_set
+ ):
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(module.params["timezone"])
+ )
+
+ if current_policy != new_policy:
+ if not module.params["at"]:
+ module.params["at"] = current_policy["at"]
+ if not module.params["keep_for"]:
+ module.params["keep_for"] = current_policy["keep_for"]
+ if not module.params["every"]:
+ module.params["every"] = current_policy["every"]
+ if module.params["at"] and module.params["every"]:
+ if not module.params["every"] % 86400 == 0:
+ module.fail_json(
+ msg="At time can only be set if every value is a multiple of 86400"
+ )
+ if module.params["keep_for"] < module.params["every"]:
+ module.fail_json(
+ msg="Retention period cannot be less than snapshot interval."
+ )
+ if module.params["at"] and not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in set(pytz.all_timezones_set):
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(module.params["timezone"])
+ )
+
+ changed = True
+ if not module.check_mode:
+ try:
+ attr = PolicyPatch()
+ attr.enabled = module.params["enabled"]
+ if at_time:
+ attr.add_rules = [
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ at=at_time,
+ time_zone=timezone,
+ )
+ ]
+ else:
+ attr.add_rules = [
+ PolicyRule(
+ keep_for=module.params["keep_for"] * 1000,
+ every=module.params["every"] * 1000,
+ )
+ ]
+ attr.remove_rules = [
+ PolicyRule(
+ keep_for=current_policy["keep_for"] * 1000,
+ every=current_policy["every"] * 1000,
+ at=current_policy["at"],
+ time_zone=current_policy["time_zone"],
+ )
+ ]
+ blade.policies.update_policies(
+ names=[module.params["name"]], policy_patch=attr
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update policy {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type="str", default="present", choices=["absent", "present", "copy"]
+ ),
+ policy_type=dict(
+ type="str", default="snapshot", choices=["snapshot", "access", "nfs"]
+ ),
+ enabled=dict(type="bool", default=True),
+ timezone=dict(type="str"),
+ name=dict(type="str"),
+ at=dict(type="str"),
+ every=dict(type="int"),
+ keep_for=dict(type="int"),
+ filesystem=dict(type="list", elements="str"),
+ replica_link=dict(type="list", elements="str"),
+ account=dict(type="str"),
+ target=dict(type="str"),
+ target_rule=dict(type="str"),
+ rename=dict(type="str"),
+ rule=dict(type="str"),
+ user=dict(type="str"),
+ effect=dict(type="str", default="allow", choices=["allow"]),
+ actions=dict(
+ type="list",
+ elements="str",
+ choices=[
+ "s3:*",
+ "s3:AbortMultipartUpload",
+ "s3:CreateBucket",
+ "s3:DeleteBucket",
+ "s3:DeleteObject",
+ "s3:DeleteObjectVersion",
+ "s3:ExtendSafemodeRetentionPeriod",
+ "s3:GetBucketAcl",
+ "s3:GetBucketLocation",
+ "s3:GetBucketVersioning",
+ "s3:GetLifecycleConfiguration",
+ "s3:GetObject",
+ "s3:GetObjectAcl",
+ "s3:GetObjectVersion",
+ "s3:ListAllMyBuckets",
+ "s3:ListBucket",
+ "s3:ListBucketMultipartUploads",
+ "s3:ListBucketVersions",
+ "s3:ListMultipartUploadParts",
+ "s3:PutBucketVersioning",
+ "s3:PutLifecycleConfiguration",
+ "s3:PutObject",
+ ],
+ ),
+ object_resources=dict(type="list", elements="str"),
+ source_ips=dict(type="list", elements="str"),
+ s3_prefixes=dict(type="list", elements="str"),
+ s3_delimiters=dict(type="list", elements="str"),
+ ignore_enforcement=dict(type="bool", default=True),
+ force_delete=dict(type="bool", default=False),
+ access=dict(
+ type="str",
+ choices=["root-squash", "all-squash", "no-squash"],
+ default="root-squash",
+ ),
+ anonuid=dict(type="str"),
+ anongid=dict(type="str"),
+ atime=dict(type="bool", default=True),
+ client=dict(type="str", default="*"),
+ fileid_32bit=dict(type="bool", default=False),
+ permission=dict(type="str", choices=["rw", "ro"], default="ro"),
+ secure=dict(type="bool", default=False),
+ destroy_snapshots=dict(type="bool", default=False),
+ security=dict(
+ type="list",
+ elements="str",
+ choices=["sys", "krb5", "krb5i", "krb5p"],
+ default=["sys"],
+ ),
+ before_rule=dict(type="int"),
+ )
+ )
+
+ required_together = [["keep_for", "every"]]
+ required_if = [
+ ["policy_type", "access", ["account", "name"]],
+ ["policy_type", "nfs", ["name"]],
+ ]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURITYFB:
+ module.fail_json(msg="purity-fb sdk is required for this module")
+ if not HAS_PYTZ:
+ module.fail_json(msg="pytz is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
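+ # policy_type selects the API family: access and nfs policies require the
+ # REST 2 py-pure-client SDK, while snapshot policies use REST 2 where
+ # available and otherwise fall back to the REST 1 purity-fb SDK.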
+ if module.params["policy_type"] == "access":
+ if ACCESS_POLICY_API_VERSION not in versions:
+ module.fail_json(
+ msg=(
+ "Minimum FlashBlade REST version required: {0}".format(
+ ACCESS_POLICY_API_VERSION
+ )
+ )
+ )
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ blade = get_system(module)
+ try:
+ policy = list(
+ blade.get_object_store_access_policies(
+ names=[module.params["account"] + "/" + module.params["name"]]
+ ).items
+ )[0]
+ except AttributeError:
+ policy = None
+ if module.params["user"]:
+ member_name = module.params["account"] + "/" + module.params["user"]
+ res = blade.get_object_store_users(filter="name='" + member_name + "'")
+ if res.status_code != 200:
+ module.fail_json(
+ msg="User {0} does not exist in account {1}".format(
+ module.params["user"], module.params["account"]
+ )
+ )
+ if policy and state == "present":
+ update_os_policy(module, blade)
+ elif state == "present" and not policy:
+ create_os_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_os_policy(module, blade)
+ elif state == "copy" and module.params["target"] and module.params["rule"]:
+ if "/" not in module.params["target"]:
+ module.fail_json(
+ msg='Incorrect format for target policy. Must be "<account>/<name>"'
+ )
+ if (
+ blade.get_object_store_access_policies(
+ names=[module.params["target"]]
+ ).status_code
+ != 200
+ ):
+ module.fail_json(
+ msg="Target policy {0} does not exist".format(
+ module.params["target"]
+ )
+ )
+ copy_os_policy_rule(module, blade)
+ elif module.params["policy_type"] == "nfs":
+ if NFS_POLICY_API_VERSION not in versions:
+ module.fail_json(
+ msg=(
+ "Minimum FlashBlade REST version required: {0}".format(
+ NFS_POLICY_API_VERSION
+ )
+ )
+ )
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ blade = get_system(module)
+ try:
+ policy = list(
+ blade.get_nfs_export_policies(names=[module.params["name"]]).items
+ )[0]
+ except AttributeError:
+ policy = None
+ if module.params["rename"]:
+ try:
+ new_policy = list(
+ blade.get_nfs_export_policies(names=[module.params["rename"]]).items
+ )[0]
+ except AttributeError:
+ new_policy = None
+ if policy and state == "present" and not module.params["rename"]:
+ if module.params["before_rule"]:
+ res = blade.get_nfs_export_policies_rules(
+ policy_names=[module.params["name"]],
+ names=[
+ module.params["name"] + "." + str(module.params["before_rule"])
+ ],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Rule index {0} does not exist.".format(
+ module.params["before_rule"]
+ )
+ )
+ update_nfs_policy(module, blade)
+ elif (
+ state == "present" and module.params["rename"] and policy and not new_policy
+ ):
+ rename_nfs_policy(module, blade)
+ elif state == "present" and not policy and not module.params["rename"]:
+ create_nfs_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_nfs_policy(module, blade)
+ elif SNAPSHOT_POLICY_API_VERSION in versions:
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ blade = get_system(module)
+ try:
+ policy = list(blade.get_policies(names=[module.params["name"]]).items)[0]
+ except AttributeError:
+ policy = None
+ if not policy and state == "present":
+ create_snap_policy(module, blade)
+ elif policy and state == "present":
+ update_snap_policy(module, blade)
+ elif policy and state == "absent":
+ delete_snap_policy(module, blade)
+ else:
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+ try:
+ policy = blade.policies.list_policies(names=[module.params["name"]]).items[
+ 0
+ ]
+ except Exception:
+ policy = None
+
+ if policy and state == "present":
+ update_policy(module, blade, policy)
+ elif state == "present" and not policy:
+ create_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_policy(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py
new file mode 100644
index 000000000..ed9e39ac8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_proxy.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_proxy
+version_added: '1.0.0'
+author:
+ - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+short_description: Configure FlashBlade phonehome HTTPS proxy settings
+description:
+- Set or erase configuration for the phonehome proxy settings.
+options:
+ state:
+ description:
+ - Set or delete proxy configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ host:
+ description:
+ - The proxy host name.
+ type: str
+ port:
+ description:
+ - The proxy TCP/IP port number.
+ type: int
+ secure:
+ description:
+ - Use http or https as the proxy protocol.
+ - True uses https, false uses http.
+ default: true
+ type: bool
+ version_added: '1.11.0'
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing proxy settings
+ purestorage.flashblade.purefb_proxy:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Set proxy settings
+ purestorage.flashblade.purefb_proxy:
+ host: purestorage.com
+ port: 8080
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
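+
+# A hedged illustration of the secure option: the same proxy over plain
+# HTTP (host, port and api_token values are placeholders, as above)
+- name: Set proxy settings using http
+ purestorage.flashblade.purefb_proxy:
+ host: purestorage.com
+ port: 8080
+ secure: false
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592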
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Support
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+def delete_proxy(module, blade):
+ """Delete proxy settings"""
+ changed = False
+ current_proxy = blade.support.list_support().items[0].proxy
+ if current_proxy != "":
+ changed = True
+ if not module.check_mode:
+ try:
+ proxy_settings = Support(proxy="")
+ blade.support.update_support(support=proxy_settings)
+ except Exception:
+ module.fail_json(msg="Delete proxy settigs failed")
+ module.exit_json(changed=changed)
+
+
+def create_proxy(module, blade):
+ """Set proxy settings"""
+ changed = False
+ current_proxy = blade.support.list_support().items[0].proxy
+ if module.params["secure"]:
+ protocol = "https://"
+ else:
+ protocol = "http://"
+ # Build the desired proxy URL first so the changed flag (and check mode)
+ # reflect whether an update is actually needed
+ new_proxy = protocol + module.params["host"] + ":" + str(module.params["port"])
+ if new_proxy != current_proxy:
+ changed = True
+ if not module.check_mode:
+ try:
+ proxy_settings = Support(proxy=new_proxy)
+ blade.support.update_support(support=proxy_settings)
+ except Exception:
+ module.fail_json(msg="Set phone home proxy failed.")
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ secure=dict(type="bool", default=True),
+ host=dict(type="str"),
+ port=dict(type="int"),
+ )
+ )
+
+ required_together = [["host", "port"]]
+
+ module = AnsibleModule(
+ argument_spec, required_together=required_together, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+
+ if state == "absent":
+ delete_proxy(module, blade)
+ elif state == "present":
+ create_proxy(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py
new file mode 100644
index 000000000..c84ba1e41
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ra.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_ra
+version_added: '1.0.0'
+short_description: Enable or Disable Pure Storage FlashBlade Remote Assist
+description:
+- Enable or Disable Remote Assist for a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of remote assist
+ - When set to I(enable) the RA port can be exposed using the
+ I(debug) module.
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Enable Remote Assist port
+ purestorage.flashblade.purefb_ra:
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Disable Remote Assist port
+ purestorage.flashblade.purefb_ra:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Support
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
+def enable_ra(module, blade):
+ """Enable Remote Assist"""
+ changed = True
+ if not module.check_mode:
+ ra_settings = Support(remote_assist_active=True)
+ try:
+ blade.support.update_support(support=ra_settings)
+ except Exception:
+ module.fail_json(msg="Enabling Remote Assist failed")
+ module.exit_json(changed=changed)
+
+
+def disable_ra(module, blade):
+ """Disable Remote Assist"""
+ changed = True
+ if not module.check_mode:
+ ra_settings = Support(remote_assist_active=False)
+ try:
+ blade.support.update_support(support=ra_settings)
+ except Exception:
+ module.fail_json(msg="Disabling Remote Assist failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ # the SDK check must come before any call that needs the SDK
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if (
+ module.params["state"] == "present"
+ and not blade.support.list_support().items[0].remote_assist_active
+ ):
+ enable_ra(module, blade)
+ elif (
+ module.params["state"] == "absent"
+ and blade.support.list_support().items[0].remote_assist_active
+ ):
+ disable_ra(module, blade)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py
new file mode 100644
index 000000000..53c985d35
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_remote_cred.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_remote_cred
+version_added: '1.0.0'
+short_description: Create, modify and delete FlashBlade object store remote credentials
+description:
+- Create, modify and delete object store remote credentials
+- You must have a correctly configured remote array or target
+- This module is B(not) idempotent when updating existing remote credentials
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Define state of remote credential
+ default: present
+ choices: [ absent, present ]
+ type: str
+ name:
+ description:
+ - The name of the credential
+ required: true
+ type: str
+ access_key:
+ description:
+ - Access Key ID of the S3 target
+ type: str
+ secret:
+ description:
+ - Secret Access Key for the S3 or Azure target
+ type: str
+ target:
+ description:
+ - Define whether to initialize the S3 bucket
+ required: true
+ type: str
+
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create remote credential
+ purestorage.flashblade.purefb_remote_cred:
+ name: cred1
+ access_key: "3794fb12c6204e19195f"
+ secret: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+ target: target1
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete remote credential
+ purestorage.flashblade.purefb_remote_cred:
+ name: cred1
+ target: target1
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import ObjectStoreRemoteCredentials
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def get_connected(module, blade):
+ """Return connected device or None"""
+ connected_blades = blade.array_connections.list_array_connections()
+ for target in range(0, len(connected_blades.items)):
+ if (
+ connected_blades.items[target].remote.name == module.params["target"]
+ or connected_blades.items[target].management_address
+ == module.params["target"]
+ ) and connected_blades.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_blades.items[target].remote.name
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].name == module.params[
+ "target"
+ ] and connected_targets.items[target].status in [
+ "connected",
+ "connecting",
+ "partially_connected",
+ ]:
+ return connected_targets.items[target].name
+ return None
+
+
+def get_remote_cred(module, blade):
+ """Return Remote Credential or None"""
+ try:
+ res = (
+ blade.object_store_remote_credentials.list_object_store_remote_credentials(
+ names=[module.params["target"] + "/" + module.params["name"]]
+ )
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_credential(module, blade):
+ """Create remote credential"""
+ changed = True
+ if not module.check_mode:
+ remote_cred = module.params["target"] + "/" + module.params["name"]
+ remote_credentials = ObjectStoreRemoteCredentials(
+ access_key_id=module.params["access_key"],
+ secret_access_key=module.params["secret"],
+ )
+ try:
+ blade.object_store_remote_credentials.create_object_store_remote_credentials(
+ names=[remote_cred], remote_credentials=remote_credentials
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create remote credential {0}".format(remote_cred)
+ )
+ module.exit_json(changed=changed)
+
+
+def update_credential(module, blade):
+ """Update remote credential"""
+ changed = True
+ if not module.check_mode:
+ remote_cred = module.params["target"] + "/" + module.params["name"]
+ new_attr = ObjectStoreRemoteCredentials(
+ access_key_id=module.params["access_key"],
+ secret_access_key=module.params["secret"],
+ )
+ try:
+ blade.object_store_remote_credentials.update_object_store_remote_credentials(
+ names=[remote_cred], remote_credentials=new_attr
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update remote credential {0}".format(remote_cred)
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_credential(module, blade):
+ """Delete remote credential"""
+ changed = True
+ if not module.check_mode:
+ remote_cred = module.params["target"] + "/" + module.params["name"]
+ try:
+ blade.object_store_remote_credentials.delete_object_store_remote_credentials(
+ names=[remote_cred]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete remote credential {0}.".format(remote_cred)
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ name=dict(type="str", required=True),
+ access_key=dict(type="str", no_log=False),
+ secret=dict(type="str", no_log=True),
+ target=dict(type="str", required=True),
+ )
+ )
+
+ required_if = [["state", "present", ["access_key", "secret"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ target = get_connected(module, blade)
+
+ if not target:
+ module.fail_json(
+ msg="Selected target {0} is not connected.".format(module.params["target"])
+ )
+
+ remote_cred = get_remote_cred(module, blade)
+
+ if module.params["state"] == "present" and not remote_cred:
+ create_credential(module, blade)
+ elif module.params["state"] == "present":
+ update_credential(module, blade)
+ elif module.params["state"] == "absent" and remote_cred:
+ delete_credential(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
new file mode 100644
index 000000000..034731994
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
@@ -0,0 +1,314 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_s3acc
+version_added: '1.0.0'
+short_description: Create or delete FlashBlade Object Store accounts
+description:
+- Create or delete object store accounts on a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete object store account
+ default: present
+ choices: [ absent, present ]
+ type: str
+ name:
+ description:
+ - The name of object store account
+ type: str
+ required: true
+ quota:
+ description:
+ - The effective quota limit to be applied against the size of the account in bytes.
+ - If set to '' (empty string), the account is unlimited in size.
+ version_added: 1.11.0
+ type: str
+ hard_limit:
+ description:
+ - If set to true, the account size, as defined by I(quota), is used as a hard limit quota.
+ - If set to false, a hard limit quota will not be applied to the account, but soft quota alerts
+ will still be sent if the account has a value set for I(quota).
+ version_added: 1.11.0
+ type: bool
+ default: false
+ default_quota:
+ description:
+ - The value of this field will be used to configure the I(quota_limit) field of newly created buckets
+ associated with this object store account, if the bucket creation does not specify its own value.
+ - If set to '' (empty string), the bucket default is unlimited in size.
+ version_added: 1.11.0
+ type: str
+ default_hard_limit:
+ description:
+ - The value of this field will be used to configure the I(hard_limit) field of newly created buckets
+ associated with this object store account, if the bucket creation does not specify its own value.
+ version_added: 1.11.0
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create object store account foo (with no quotas)
+ purestorage.flashblade.purefb_s3acc:
+ name: foo
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create object store account foo (with quotas)
+ purestorage.flashblade.purefb_s3acc:
+ name: foo
+ quota: 20480000
+ hard_limit: true
+ default_quota: 1024000
+ default_hard_limit: false
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete object store account foo
+ purestorage.flashblade.purefb_s3acc:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flashblade import ObjectStoreAccountPatch, BucketDefaults
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.3"
+QUOTA_API_VERSION = "2.1"
+
+
+def get_s3acc(module, blade):
+ """Return Object Store Account or None"""
+ s3acc = None
+ accts = blade.object_store_accounts.list_object_store_accounts()
+ for acct in range(0, len(accts.items)):
+ if accts.items[acct].name == module.params["name"]:
+ s3acc = accts.items[acct]
+ return s3acc
+
+
+def update_s3acc(module):
+ """Update Object Store Account"""
+ changed = False
+ blade = get_system(module)
+ acc_settings = list(
+ blade.get_object_store_accounts(names=[module.params["name"]]).items
+ )[0]
+ current_account = {
+ "hard_limit": acc_settings.hard_limit_enabled,
+ "default_hard_limit": acc_settings.bucket_defaults.hard_limit_enabled,
+ "quota": str(acc_settings.quota_limit),
+ "default_quota": str(acc_settings.bucket_defaults.quota_limit),
+ }
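+    # The API reports an unset quota as None; normalize to an empty string so
+    # it compares cleanly with the module's string parameters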
+ if current_account["quota"] == "None":
+ current_account["quota"] = ""
+ if current_account["default_quota"] == "None":
+ current_account["default_quota"] = ""
+ if module.params["quota"] is None:
+ module.params["quota"] = current_account["quota"]
+ if module.params["default_quota"] is None:
+ module.params["default_quota"] = current_account["default_quota"]
+ new_account = {
+ "hard_limit": module.params["hard_limit"],
+ "default_hard_limit": module.params["default_hard_limit"],
+ "quota": module.params["quota"],
+ "default_quota": module.params["default_quota"],
+ }
+ if new_account != current_account:
+ changed = True
+ if not module.check_mode:
+ osa = ObjectStoreAccountPatch(
+ hard_limit_enabled=new_account["hard_limit"],
+ quota_limit=new_account["quota"],
+ bucket_defaults=BucketDefaults(
+ hard_limit_enabled=new_account["default_hard_limit"],
+ quota_limit=new_account["default_quota"],
+ ),
+ )
+ res = blade.patch_object_store_accounts(
+ object_store_account=osa, names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update account {0}. "
+ "Error: {1}".format(module.params["name"], res.errors[0].message)
+ )
+ module.exit_json(changed=changed)
+
+
+def create_s3acc(module, blade):
+ """Create Object Store Account"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.object_store_accounts.create_object_store_accounts(
+ names=[module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Account {0}: Creation failed".format(
+ module.params["name"]
+ )
+ )
+ if module.params["quota"] or module.params["default_quota"]:
+ blade2 = get_system(module)
+ if module.params["quota"] and not module.params["default_quota"]:
+ osa = ObjectStoreAccountPatch(
+ hard_limit_enabled=module.params["hard_limit"],
+ quota_limit=module.params["quota"],
+ )
+ if not module.params["quota"] and module.params["default_quota"]:
+ osa = ObjectStoreAccountPatch(
+ bucket_defaults=BucketDefaults(
+ hard_limit_enabled=module.params["default_hard_limit"],
+ quota_limit=module.params["default_quota"],
+ )
+ )
+ else:
+ osa = ObjectStoreAccountPatch(
+ hard_limit_enabled=module.params["hard_limit"],
+ quota_limit=module.params["quota"],
+ bucket_defaults=BucketDefaults(
+ hard_limit_enabled=module.params["default_hard_limit"],
+ quota_limit=module.params["default_quota"],
+ ),
+ )
+ res = blade2.patch_object_store_accounts(
+ object_store_account=osa, names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ blade.object_store_accounts.delete_object_store_accounts(
+ names=[module.params["name"]]
+ )
+ module.fail_json(
+ msg="Failed to set quotas correctly for account {0}. "
+ "Error: {1}".format(module.params["name"], res.errors[0].message)
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_s3acc(module, blade):
+ """Delete Object Store Account"""
+ changed = True
+ if not module.check_mode:
+ count = len(
+ blade.object_store_users.list_object_store_users(
+ filter="name='" + module.params["name"] + "/*'"
+ ).items
+ )
+ if count != 0:
+ module.fail_json(
+ msg="Remove all Users from Object Store Account {0} \
+ before deletion".format(
+ module.params["name"]
+ )
+ )
+ else:
+ try:
+ blade.object_store_accounts.delete_object_store_accounts(
+ names=[module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Account {0}: Deletion failed".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True, type="str"),
+ hard_limit=dict(type="bool", default=False),
+ default_hard_limit=dict(type="bool", default=False),
+ quota=dict(type="str"),
+ default_quota=dict(type="str"),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+
+ if module.params["quota"] or module.params["default_quota"]:
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for to set quotas")
+ if QUOTA_API_VERSION not in versions:
+ module.fail_json(
+ msg="Quotas require minimum FlashBlade REST version: {0}".format(
+ QUOTA_API_VERSION
+ )
+ )
+
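+    # Account names are converted to lowercase (with a warning) before lookup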
+    if any(element.isupper() for element in module.params["name"]):
+        module.warn("Changing account name to lowercase...")
+        module.params["name"] = module.params["name"].lower()
+
+ s3acc = get_s3acc(module, blade)
+
+ if state == "absent" and s3acc:
+ delete_s3acc(module, blade)
+ elif state == "present" and s3acc:
+ update_s3acc(module)
+ elif not s3acc and state == "present":
+ create_s3acc(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
new file mode 100644
index 000000000..55bc05c3f
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
@@ -0,0 +1,436 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_s3user
+version_added: '1.0.0'
+short_description: Create or delete FlashBlade Object Store account users
+description:
+- Create or delete object store account users on a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete object store account user
+ - Remove a specified access key for a user
+ default: present
+ choices: [ absent, present, remove_key ]
+ type: str
+ name:
+ description:
+ - The name of object store user
+ type: str
+ required: true
+ account:
+ description:
+ - The name of object store account associated with user
+ type: str
+ required: true
+ access_key:
+ description:
+ - Create secret access key.
+ - Key can be exposed using the I(debug) module
+ - If enabled this will override I(imported_key)
+ type: bool
+ default: false
+ remove_key:
+ description:
+ - Access key to be removed from user
+ type: str
+ version_added: "1.5.0"
+ imported_key:
+ description:
+ - Access key of imported credentials
+ type: str
+ version_added: "1.4.0"
+ imported_secret:
+ description:
+ - Access key secret for access key to import
+ type: str
+ version_added: "1.4.0"
+ policy:
+ description:
+ - User Access Policies to be assigned to user on creation
+ - To amend policies use the I(purestorage.flashblade.purefb_userpolicy) module
+ - If not specified, I(pure\:policy/full-access) will be added
+ type: list
+ elements: str
+ version_added: "1.6.0"
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create object store user (with access ID and key) foo in account bar
+ purestorage.flashblade.purefb_s3user:
+ name: foo
+ account: bar
+ access_key: true
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ register: result
+
+- debug:
+ msg: "S3 User: {{ result['s3user_info'] }}"
+
+- name: Create object store user (with access ID and key) foo in account bar with access policy (Purity 3.2 and higher)
+ purestorage.flashblade.purefb_s3user:
+ name: foo
+ account: bar
+ access_key: true
+ policy:
+ - pure:policy/safemode-configure
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Create object store user foo using imported key/secret in account bar
+ purestorage.flashblade.purefb_s3user:
+ name: foo
+ account: bar
+ imported_key: "PSABSSZRHPMEDKHMAAJPJBONPJGGDDAOFABDGLBJLHO"
+ imported_secret: "BAG61F63105e0d3669/e066+5C5DFBE2c127d395LBGG"
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Delete object store user foo in account bar
+ purestorage.flashblade.purefb_s3user:
+ name: foo
+ account: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
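+
+# Hedged example: I(state=remove_key) with I(remove_key) deletes a named access
+# key (key value reused from the import example above, for illustration only)
+- name: Remove access key from object store user foo in account bar
+  purestorage.flashblade.purefb_s3user:
+    name: foo
+    account: bar
+    state: remove_key
+    remove_key: "PSABSSZRHPMEDKHMAAJPJBONPJGGDDAOFABDGLBJLHO"
+    fb_url: 10.10.10.2
+    api_token: e31060a7-21fc-e277-6240-25983c6c4592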
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import ObjectStoreAccessKey, ObjectStoreAccessKeyPost
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.3"
+IMPORT_KEY_API_VERSION = "1.10"
+POLICY_API_VERSION = "2.0"
+
+
+def get_s3acc(module, blade):
+ """Return Object Store Account or None"""
+ s3acc = None
+ accts = blade.object_store_accounts.list_object_store_accounts()
+ for acct in range(0, len(accts.items)):
+ if accts.items[acct].name == module.params["account"]:
+ s3acc = accts.items[acct]
+ return s3acc
+
+
+def get_s3user(module, blade):
+ """Return Object Store Account or None"""
+ full_user = module.params["account"] + "/" + module.params["name"]
+ s3user = None
+ s3users = blade.object_store_users.list_object_store_users()
+ for user in range(0, len(s3users.items)):
+ if s3users.items[user].name == full_user:
+ s3user = s3users.items[user]
+ return s3user
+
+
+def update_s3user(module, blade):
+ """Update Object Store User"""
+ changed = False
+ exists = False
+ s3user_facts = {}
+ user = module.params["account"] + "/" + module.params["name"]
+ if module.params["access_key"] or module.params["imported_key"]:
+ key_count = 0
+        keys = blade.object_store_access_keys.list_object_store_access_keys()
+        versions = blade.api_version.list_versions().versions
+        for key in range(0, len(keys.items)):
+            if module.params["imported_key"] and IMPORT_KEY_API_VERSION in versions:
+                if keys.items[key].name == module.params["imported_key"]:
+                    module.warn("Imported key provided already belongs to a user")
+                    exists = True
+            if keys.items[key].user.name == user:
+                key_count += 1
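+        # A user may hold at most two access keys; refuse to create a third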
+ if not exists:
+ if key_count < 2:
+ changed = True
+ if not module.check_mode:
+ try:
+ if (
+ module.params["access_key"]
+ and module.params["imported_key"]
+ ):
+ module.warn("'access_key: true' overrides imported keys")
+ if module.params["access_key"]:
+ result = blade.object_store_access_keys.create_object_store_access_keys(
+ object_store_access_key=ObjectStoreAccessKey(
+ user={"name": user}
+ )
+ )
+ s3user_facts["fb_s3user"] = {
+ "user": user,
+ "access_key": result.items[0].secret_access_key,
+ "access_id": result.items[0].name,
+ }
+ else:
+ if IMPORT_KEY_API_VERSION in versions:
+ blade.object_store_access_keys.create_object_store_access_keys(
+ names=[module.params["imported_key"]],
+ object_store_access_key=ObjectStoreAccessKeyPost(
+ user={"name": user},
+ secret_access_key=module.params[
+ "imported_secret"
+ ],
+ ),
+ )
+ except Exception:
+ if module.params["imported_key"]:
+ module.fail_json(
+ msg="Object Store User {0}: Access Key import failed".format(
+ user
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Object Store User {0}: Access Key creation failed".format(
+ user
+ )
+ )
+ else:
+ module.warn(
+ "Object Store User {0}: Maximum Access Key count reached".format(
+ user
+ )
+ )
+ module.exit_json(changed=changed, s3user_info=s3user_facts)
+
+
+def create_s3user(module, blade):
+ """Create Object Store Account"""
+ s3user_facts = {}
+ changed = True
+ if not module.check_mode:
+ user = module.params["account"] + "/" + module.params["name"]
+ blade.object_store_users.create_object_store_users(names=[user])
+ if module.params["access_key"] and module.params["imported_key"]:
+ module.warn("'access_key: true' overrides imported keys")
+ if module.params["access_key"]:
+ try:
+ result = blade.object_store_access_keys.create_object_store_access_keys(
+ object_store_access_key=ObjectStoreAccessKey(user={"name": user})
+ )
+ s3user_facts["fb_s3user"] = {
+ "user": user,
+ "access_key": result.items[0].secret_access_key,
+ "access_id": result.items[0].name,
+ }
+ except Exception:
+ delete_s3user(module, blade, True)
+ module.fail_json(
+ msg="Object Store User {0}: Creation failed".format(user)
+ )
+ else:
+ if module.params["imported_key"]:
+ versions = blade.api_version.list_versions().versions
+ if IMPORT_KEY_API_VERSION in versions:
+ try:
+ blade.object_store_access_keys.create_object_store_access_keys(
+ names=[module.params["imported_key"]],
+ object_store_access_key=ObjectStoreAccessKeyPost(
+ user={"name": user},
+ secret_access_key=module.params["imported_secret"],
+ ),
+ )
+ except Exception:
+                        delete_s3user(module, blade, True)
+ module.fail_json(
+ msg="Object Store User {0}: Creation failed with imported access key".format(
+ user
+ )
+ )
+ if module.params["policy"]:
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if POLICY_API_VERSION in api_version:
+ policy_list = module.params["policy"]
+ for policy in range(0, len(policy_list)):
+ if (
+ blade.get_object_store_access_policies(
+ names=[policy_list[policy]]
+ ).status_code
+ != 200
+ ):
+ module.warn(
+ "Policy {0} is not valid. Ignoring...".format(
+ policy_list[policy]
+ )
+ )
+ policy_list.remove(policy_list[policy])
+ username = module.params["account"] + "/" + module.params["name"]
+ for policy in range(0, len(policy_list)):
+ if not (
+ blade.get_object_store_users_object_store_access_policies(
+ member_names=[username], policy_names=[policy_list[policy]]
+ ).items
+ ):
+ res = (
+ blade.post_object_store_access_policies_object_store_users(
+ member_names=[username],
+ policy_names=[policy_list[policy]],
+ )
+ )
+ if res.status_code != 200:
+ module.warn(
+ "Failed to add policy {0} to account user {1}. Skipping...".format(
+ policy_list[policy], username
+ )
+ )
+ if "pure:policy/full-access" not in policy_list:
+ # User Create adds the pure:policy/full-access policy by default
+ # If we are specifying a list then remove this default value
+ blade.delete_object_store_access_policies_object_store_users(
+ member_names=[username],
+ policy_names=["pure:policy/full-access"],
+ )
+ else:
+ module.warn(
+ "FlashBlade REST version not supported for user access policies. Skipping..."
+ )
+ module.exit_json(changed=changed, s3user_info=s3user_facts)
+
+
+def remove_key(module, blade):
+ """Remove Access Key from User"""
+ changed = False
+ if not module.check_mode:
+ try:
+ keys = blade.object_store_access_keys.list_object_store_access_keys()
+ for key in range(0, len(keys.items)):
+ if keys.items[key].name == module.params["remove_key"]:
+ blade.object_store_access_keys.delete_object_store_access_keys(
+ names=[module.params["remove_key"]]
+ )
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to correctly read or delete access keys")
+ module.exit_json(changed=changed)
+
+
+def delete_s3user(module, blade, internal=False):
+ """Delete Object Store Account"""
+ changed = True
+ if not module.check_mode:
+ user = module.params["account"] + "/" + module.params["name"]
+ try:
+ blade.object_store_users.delete_object_store_users(names=[user])
+ except Exception:
+ module.fail_json(
+ msg="Object Store Account {0}: Deletion failed".format(
+ module.params["name"]
+ )
+ )
+ if internal:
+ return
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True, type="str"),
+ account=dict(required=True, type="str"),
+ access_key=dict(default="false", type="bool"),
+ imported_key=dict(type="str", no_log=False),
+ remove_key=dict(type="str", no_log=False),
+ imported_secret=dict(type="str", no_log=True),
+ policy=dict(type="list", elements="str"),
+ state=dict(default="present", choices=["present", "absent", "remove_key"]),
+ )
+ )
+
+ required_together = [["imported_key", "imported_secret"]]
+ required_if = [["state", "remove_key", ["remove_key"]]]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+    if any(element.isupper() for element in module.params["account"]):
+        module.warn("Changing account name to lowercase...")
+        module.params["account"] = module.params["account"].lower()
+
+ s3acc = get_s3acc(module, blade)
+ if not s3acc:
+ module.fail_json(
+ msg="Object Store Account {0} does not exist".format(
+ module.params["account"]
+ )
+ )
+
+ s3user = get_s3user(module, blade)
+
+ if state == "absent" and s3user:
+ delete_s3user(module, blade)
+ elif state == "present" and s3user:
+ update_s3user(module, blade)
+ elif not s3user and state == "present":
+ create_s3user(module, blade)
+ elif state == "remove_key" and s3user:
+ remove_key(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py
new file mode 100644
index 000000000..379443669
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_smtp.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_smtp
+version_added: '1.0.0'
+short_description: Configure SMTP for Pure Storage FlashBlade
+description:
+- Configure SMTP for a Pure Storage FlashBlade.
+- Whilst there can be no relay host, a sender domain must be configured.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ host:
+ description:
+ - Relay server name
+ type: str
+ domain:
+ description:
+ - Domain name for alert messages
+ required: true
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Configure SMTP settings
+ purestorage.flashblade.purefb_smtp:
+ host: hostname
+ domain: xyz.com
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
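+
+# Hedged example: omitting I(host) clears any configured relay host while the
+# sender domain is retained (illustrative values)
+- name: Clear SMTP relay host
+  purestorage.flashblade.purefb_smtp:
+    domain: xyz.com
+    fb_url: 10.10.10.2
+    api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6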
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Smtp
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
+def set_smtp(module, blade):
+ """Configure SMTP settings"""
+ changed = False
+ current_smtp = blade.smtp.list_smtp().items[0]
+ if module.params["host"] and module.params["host"] != current_smtp.relay_host:
+ smtp_settings = Smtp(relay_host=module.params["host"])
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.smtp.update_smtp(smtp_settings=smtp_settings)
+ except Exception:
+ module.fail_json(msg="Configuring SMTP relay host failed")
+ elif current_smtp.relay_host and not module.params["host"]:
+ smtp_settings = Smtp(relay_host="")
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.smtp.update_smtp(smtp_settings=smtp_settings)
+ except Exception:
+ module.fail_json(msg="Configuring SMTP relay host failed")
+ if module.params["domain"] != current_smtp.sender_domain:
+ smtp_settings = Smtp(sender_domain=module.params["domain"])
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.smtp.update_smtp(smtp_settings=smtp_settings)
+ except Exception:
+ module.fail_json(msg="Configuring SMTP sender domain failed")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ host=dict(type="str"),
+ domain=dict(type="str", required=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
+ set_smtp(module, blade)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py
new file mode 100644
index 000000000..5df0455f8
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snap.py
@@ -0,0 +1,379 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_snap
+version_added: '1.0.0'
+short_description: Manage filesystem snapshots on Pure Storage FlashBlades
+description:
+- Create or delete filesystem snapshots on Pure Storage FlashBlades.
+- Restoring a filesystem from a snapshot is only supported using
+ the latest snapshot.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the source filesystem.
+ required: true
+ type: str
+ suffix:
+ description:
+ - Suffix of snapshot name.
+ type: str
+ state:
+ description:
+ - Define whether the filesystem snapshot should exist or not.
+ choices: [ absent, present, restore ]
+ default: present
+ type: str
+ targets:
+ description:
+ - Name of target to replicate snapshot to.
+    - This is only applicable when I(now) is B(true).
+ type: list
+ elements: str
+ version_added: "1.7.0"
+ now:
+ description:
+    - Whether to initiate a snapshot replication immediately
+ type: bool
+ default: false
+ version_added: "1.7.0"
+ eradicate:
+ description:
+ - Define whether to eradicate the snapshot on delete or leave in trash.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create snapshot foo.ansible
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ suffix: ansible
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Create immediate snapshot foo.ansible to connected FB bar
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ suffix: ansible
+ now: true
+ targets:
+ - bar
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Delete snapshot named foo.snap
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ suffix: snap
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+
+- name: Recover deleted snapshot foo.ansible
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ suffix: ansible
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: present
+
+- name: Restore filesystem foo (uses latest snapshot)
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: restore
+
+- name: Eradicate snapshot named foo.snap
+ purestorage.flashblade.purefb_snap:
+ name: foo
+ suffix: snap
+ eradicate: true
+ fb_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: absent
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+from datetime import datetime
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import FileSystemSnapshot, SnapshotSuffix, FileSystem, Reference
+except ImportError:
+ HAS_PURITY_FB = False
+
+SNAP_NOW_API = "1.10"
+
+
+def get_fs(module, blade):
+ """Return Filesystem or None"""
+ filesystem = []
+ filesystem.append(module.params["name"])
+ try:
+ res = blade.file_systems.list_file_systems(names=filesystem)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_latest_fssnapshot(module, blade):
+ """Get the name of the latest snpshot or None"""
+ try:
+ filt = "source='" + module.params["name"] + "'"
+ all_snaps = blade.file_system_snapshots.list_file_system_snapshots(filter=filt)
+ if not all_snaps.items[0].destroyed:
+ return all_snaps.items[0].name
+ else:
+ module.fail_json(
+ msg="Latest snapshot {0} is destroyed."
+ " Eradicate or recover this first.".format(all_snaps.items[0].name)
+ )
+ except Exception:
+ return None
+
+
+def get_fssnapshot(module, blade):
+ """Return Snapshot or None"""
+ try:
+ filt = (
+ "source='"
+ + module.params["name"]
+ + "' and suffix='"
+ + module.params["suffix"]
+ + "'"
+ )
+ res = blade.file_system_snapshots.list_file_system_snapshots(filter=filt)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_snapshot(module, blade):
+ """Create Snapshot"""
+ changed = False
+ source = []
+ source.append(module.params["name"])
+ try:
+ if module.params["now"]:
+ blade_exists = []
+ connected_blades = blade.array_connections.list_array_connections()
+ for target in range(0, len(module.params["targets"])):
+ blade_exists.append(False)
+ for blade in range(0, len(connected_blades)):
+ if (
+ target[target] == connected_blades.items[blade].name
+ and connected_blades.items[blade].status == "connected"
+ ):
+ blade_exists[target] = True
+ if not blade_exists:
+ module.fail_json(
+ msg="Not all selected targets are correctly connected blades"
+ )
+ changed = True
+ if not module.check_mode:
+ blade.file_system_snapshots.create_file_system_snapshots(
+ sources=source,
+ send=True,
+ targets=module.params["targets"],
+ suffix=SnapshotSuffix(module.params["suffix"]),
+ )
+ else:
+ changed = True
+ if not module.check_mode:
+ blade.file_system_snapshots.create_file_system_snapshots(
+ sources=source, suffix=SnapshotSuffix(module.params["suffix"])
+ )
+ except Exception:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def restore_snapshot(module, blade):
+ """Restore a filesystem back from the latest snapshot"""
+ changed = True
+ snapname = get_latest_fssnapshot(module, blade)
+ if snapname is not None:
+ if not module.check_mode:
+ fs_attr = FileSystem(
+ name=module.params["name"], source=Reference(name=snapname)
+ )
+ try:
+ blade.file_systems.create_file_systems(
+ overwrite=True,
+ discard_non_snapshotted_data=True,
+ file_system=fs_attr,
+ )
+ except Exception:
+ changed = False
+ else:
+ module.fail_json(
+ msg="Filesystem {0} has no snapshots to restore from.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def recover_snapshot(module, blade):
+ """Recover deleted Snapshot"""
+ changed = True
+ if not module.check_mode:
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ new_attr = FileSystemSnapshot(destroyed=False)
+ try:
+ blade.file_system_snapshots.update_file_system_snapshots(
+ name=snapname, attributes=new_attr
+ )
+ except Exception:
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def update_snapshot(module, blade):
+ """Update Snapshot"""
+ changed = False
+ module.exit_json(changed=changed)
+
+
+def delete_snapshot(module, blade):
+    """Delete Snapshot"""
+    changed = True
+    if not module.check_mode:
+        snapname = module.params["name"] + "." + module.params["suffix"]
+        new_attr = FileSystemSnapshot(destroyed=True)
+        try:
+            blade.file_system_snapshots.update_file_system_snapshots(
+                name=snapname, attributes=new_attr
+            )
+            if module.params["eradicate"]:
+                try:
+                    blade.file_system_snapshots.delete_file_system_snapshots(
+                        name=snapname
+                    )
+                except Exception:
+                    changed = False
+        except Exception:
+            changed = False
+    module.exit_json(changed=changed)
+
+
+def eradicate_snapshot(module, blade):
+    """Eradicate Snapshot"""
+    changed = True
+    if not module.check_mode:
+        snapname = module.params["name"] + "." + module.params["suffix"]
+        try:
+            blade.file_system_snapshots.delete_file_system_snapshots(name=snapname)
+        except Exception:
+            changed = False
+    module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ suffix=dict(type="str"),
+ now=dict(type="bool", default=False),
+ targets=dict(type="list", elements="str"),
+ eradicate=dict(default="false", type="bool"),
+ state=dict(default="present", choices=["present", "absent", "restore"]),
+ )
+ )
+
+ required_if = [["now", True, ["targets"]]]
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
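+    # Default suffix: "snap-" plus the seconds since the Unix epoch, with the
+    # decimal point removed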
+ if module.params["suffix"] is None:
+ suffix = "snap-" + str(
+ (datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds()
+ )
+ module.params["suffix"] = suffix.replace(".", "")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if SNAP_NOW_API not in versions and module.params["now"]:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version for immeadiate remote snapshots: {0}".format(
+ SNAP_NOW_API
+ )
+ )
+ filesystem = get_fs(module, blade)
+ snap = get_fssnapshot(module, blade)
+
+ if state == "present" and filesystem and not filesystem.destroyed and not snap:
+ create_snapshot(module, blade)
+ elif (
+ state == "present"
+ and filesystem
+ and not filesystem.destroyed
+ and snap
+ and not snap.destroyed
+ ):
+ update_snapshot(module, blade)
+ elif (
+ state == "present"
+ and filesystem
+ and not filesystem.destroyed
+ and snap
+ and snap.destroyed
+ ):
+ recover_snapshot(module, blade)
+ elif state == "present" and filesystem and filesystem.destroyed:
+ update_snapshot(module, blade)
+ elif state == "present" and not filesystem:
+ update_snapshot(module, blade)
+ elif state == "restore" and filesystem:
+ restore_snapshot(module, blade)
+ elif state == "absent" and snap and not snap.destroyed:
+ delete_snapshot(module, blade)
+ elif state == "absent" and snap and snap.destroyed:
+ eradicate_snapshot(module, blade)
+ elif state == "absent" and not snap:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py
new file mode 100644
index 000000000..8db09f7b6
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_agent.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_snmp_agent
+version_added: '1.0.0'
+short_description: Configure the FlashBlade SNMP Agent
+description:
+- Configure the management SNMP Agent on a Pure Storage FlashBlade.
+- This module is not idempotent and will always modify the
+ existing management SNMP agent due to hidden parameters that cannot
+ be compared to the play parameters.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ auth_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase of 8 - 32 characters.
+ auth_protocol:
+ type: str
+ description:
+      - SNMP v3 only. Hash algorithm to use.
+ choices: [ MD5, SHA ]
+ community:
+ type: str
+ description:
+ - SNMP v2c only. Manager community ID. Between 1 and 32 characters long.
+ user:
+ type: str
+ description:
+ - SNMP v3 only. User ID recognized by the specified SNMP agent.
+ Must be between 1 and 32 characters.
+ version:
+ type: str
+ description:
+ - Version of SNMP protocol to use for the agent.
+ choices: [ v2c, v3 ]
+ privacy_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase to encrypt SNMP messages.
+ Must be between 8 and 63 non-space ASCII characters.
+ privacy_protocol:
+ type: str
+ description:
+      - SNMP v3 only. Encryption protocol to use.
+ choices: [ AES, DES ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Update v2c SNMP agent
+ purestorage.flashblade.purefb_snmp_agent:
+ community: public
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Update v3 SNMP agent
+ purestorage.flashblade.purefb_snmp_agent:
+ version: v3
+ auth_protocol: MD5
+ auth_passphrase: password
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import SnmpAgent, SnmpV2c, SnmpV3
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def update_agent(module, blade):
+ """Update SNMP Agent"""
+ changed = False
+ try:
+ agent = blade.snmp_agents.list_snmp_agents()
+ except Exception:
+ module.fail_json(msg="Failed to get configuration for SNMP agent.")
+ current_attr = {
+ "community": agent.items[0].v2c.community,
+ "version": agent.items[0].version,
+ "auth_passphrase": agent.items[0].v3.auth_passphrase,
+ "auth_protocol": agent.items[0].v3.auth_protocol,
+ "privacy_passphrase": agent.items[0].v3.privacy_passphrase,
+ "privacy_protocol": agent.items[0].v3.privacy_protocol,
+ "user": agent.items[0].v3.user,
+ }
+ new_attr = {
+ "community": module.params["community"],
+ "version": module.params["version"],
+ "auth_passphrase": module.params["auth_passphrase"],
+ "auth_protocol": module.params["auth_protocol"],
+ "privacy_passphrase": module.params["privacy_passphrase"],
+ "privacy_protocol": module.params["privacy_protocol"],
+ "user": module.params["user"],
+ }
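+    # Passphrases are never returned by the API, so current_attr cannot be
+    # fully compared to the play parameters (see the module description above)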
+ if current_attr != new_attr:
+ changed = True
+ if not module.check_mode:
+ if new_attr["version"] == "v2c":
+ updated_v2c_attrs = SnmpV2c(community=new_attr["community"])
+ updated_v2c_agent = SnmpAgent(version="v2c", v2c=updated_v2c_attrs)
+ try:
+ blade.snmp_agents.update_snmp_agents(snmp_agent=updated_v2c_agent)
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to update v2c SNMP agent.")
+ else:
+ updated_v3_attrs = SnmpV3(
+ auth_protocol=new_attr["auth_protocol"],
+ auth_passphrase=new_attr["auth_passphrase"],
+ privacy_protocol=new_attr["privacy_protocol"],
+ privacy_passphrase=new_attr["privacy_passphrase"],
+ user=new_attr["user"],
+ )
+ updated_v3_agent = SnmpAgent(version="v3", v3=updated_v3_attrs)
+ try:
+ blade.snmp_agents.update_snmp_agents(snmp_agent=updated_v3_agent)
+ changed = True
+ except Exception:
+ module.fail_json(msg="Failed to update v3 SNMP agent.")
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ user=dict(type="str"),
+ auth_passphrase=dict(type="str", no_log=True),
+ auth_protocol=dict(type="str", choices=["MD5", "SHA"]),
+ privacy_passphrase=dict(type="str", no_log=True),
+ privacy_protocol=dict(type="str", choices=["AES", "DES"]),
+ version=dict(type="str", choices=["v2c", "v3"]),
+ community=dict(type="str"),
+ )
+ )
+
+ required_together = [
+ ["auth_passphrase", "auth_protocol"],
+ ["privacy_passphrase", "privacy_protocol"],
+ ]
+ required_if = [["version", "v2c", ["community"]], ["version", "v3", ["user"]]]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
+ if module.params["version"] == "v3":
+ if module.params["auth_passphrase"] and (
+ 8 > len(module.params["auth_passphrase"]) > 32
+ ):
+ module.fail_json(msg="auth_password must be between 8 and 32 characters")
+ if (
+ module.params["privacy_passphrase"]
+ and 8 > len(module.params["privacy_passphrase"]) > 63
+ ):
+ module.fail_json(msg="privacy_password must be between 8 and 63 characters")
+
+ update_agent(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py
new file mode 100644
index 000000000..66c2417dd
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_snmp_mgr.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_snmp_mgr
+version_added: '1.0.0'
+short_description: Configure FlashBlade SNMP Managers
+description:
+- Manage SNMP managers on a Pure Storage FlashBlade.
+- This module is not idempotent and will always modify an
+ existing SNMP manager due to hidden parameters that cannot
+ be compared to the play parameters.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of SNMP Manager
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete SNMP manager
+ type: str
+ default: present
+ choices: [ absent, present ]
+ auth_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase of 8 - 32 characters.
+ auth_protocol:
+ type: str
+ description:
+      - SNMP v3 only. Hash algorithm to use.
+ choices: [ MD5, SHA ]
+ community:
+ type: str
+ description:
+ - SNMP v2c only. Manager community ID. Between 1 and 32 characters long.
+ host:
+ type: str
+ description:
+ - IPv4 or IPv6 address or FQDN to send trap messages to.
+ user:
+ type: str
+ description:
+ - SNMP v3 only. User ID recognized by the specified SNMP manager.
+ Must be between 1 and 32 characters.
+ version:
+ type: str
+ description:
+ - Version of SNMP protocol to use for the manager.
+ choices: [ v2c, v3 ]
+ notification:
+ type: str
+ description:
+ - Action to perform on event.
+ default: trap
+ choices: [ inform, trap ]
+ privacy_passphrase:
+ type: str
+ description:
+ - SNMPv3 only. Passphrase to encrypt SNMP messages.
+ Must be between 8 and 63 non-space ASCII characters.
+ privacy_protocol:
+ type: str
+ description:
+      - SNMP v3 only. Encryption protocol to use.
+ choices: [ AES, DES ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing SNMP manager
+ purestorage.flashblade.purefb_snmp_mgr:
+ name: manager1
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create v2c SNMP manager
+ purestorage.flashblade.purefb_snmp_mgr:
+ name: manager1
+ community: public
+ host: 10.21.22.23
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create v3 SNMP manager
+ purestorage.flashblade.purefb_snmp_mgr:
+ name: manager2
+ version: v3
+ auth_protocol: MD5
+ auth_passphrase: password
+ host: 10.21.22.23
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Update existing SNMP manager
+ purestorage.flashblade.purefb_snmp_mgr:
+ name: manager1
+ community: private
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import SnmpManager, SnmpV2c, SnmpV3
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.9"
+
+
+def update_manager(module, blade):
+ """Update SNMP Manager"""
+ changed = False
+ try:
+ mgr = blade.snmp_managers.list_snmp_managers(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to get configuration for SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ current_attr = {
+ "community": mgr.items[0].v2c.community,
+ "notification": mgr.items[0].notification,
+ "host": mgr.items[0].host,
+ "version": mgr.items[0].version,
+ "auth_passphrase": mgr.items[0].v3.auth_passphrase,
+ "auth_protocol": mgr.items[0].v3.auth_protocol,
+ "privacy_passphrase": mgr.items[0].v3.privacy_passphrase,
+ "privacy_protocol": mgr.items[0].v3.privacy_protocol,
+ "user": mgr.items[0].v3.user,
+ }
+ new_attr = {
+ "community": module.params["community"],
+ "notification": module.params["notification"],
+ "host": module.params["host"],
+ "version": module.params["version"],
+ "auth_passphrase": module.params["auth_passphrase"],
+ "auth_protocol": module.params["auth_protocol"],
+ "privacy_passphrase": module.params["privacy_passphrase"],
+ "privacy_protocol": module.params["privacy_protocol"],
+ "user": module.params["user"],
+ }
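+    # As with the SNMP agent module, hidden passphrase values returned as None
+    # make this comparison non-idempotent (see the module description above)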
+ if current_attr != new_attr:
+ changed = True
+ if not module.check_mode:
+ if new_attr["version"] == "v2c":
+ updated_v2c_attrs = SnmpV2c(community=new_attr["community"])
+ updated_v2c_manager = SnmpManager(
+ host=new_attr["host"],
+ notification=new_attr["notification"],
+ version="v2c",
+ v2c=updated_v2c_attrs,
+ )
+ try:
+ blade.snmp_managers.update_snmp_managers(
+ names=[module.params["name"]], snmp_manager=updated_v2c_manager
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update v2c SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ updated_v3_attrs = SnmpV3(
+ auth_protocol=new_attr["auth_protocol"],
+ auth_passphrase=new_attr["auth_passphrase"],
+ privacy_protocol=new_attr["privacy_protocol"],
+ privacy_passphrase=new_attr["privacy_passphrase"],
+ user=new_attr["user"],
+ )
+ updated_v3_manager = SnmpManager(
+ host=new_attr["host"],
+ notification=new_attr["notification"],
+ version="v3",
+ v3=updated_v3_attrs,
+ )
+ try:
+ blade.snmp_managers.update_snmp_managers(
+ names=[module.params["name"]], snmp_manager=updated_v3_manager
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update v3 SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def delete_manager(module, blade):
+ """Delete SNMP Manager"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.snmp_managers.delete_snmp_managers(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Delete SNMP manager {0} failed".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_manager(module, blade):
+ """Create SNMP Manager"""
+ changed = True
+ if not module.check_mode:
+ if not module.params["version"]:
+ module.fail_json(msg="SNMP version required to create a new manager")
+ if module.params["version"] == "v2c":
+ v2_attrs = SnmpV2c(community=module.params["community"])
+ new_v2_manager = SnmpManager(
+ host=module.params["host"],
+ notification=module.params["notification"],
+ version="v2c",
+ v2c=v2_attrs,
+ )
+ try:
+ blade.snmp_managers.create_snmp_managers(
+ names=[module.params["name"]], snmp_manager=new_v2_manager
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create v2c SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ v3_attrs = SnmpV3(
+ auth_protocol=module.params["auth_protocol"],
+ auth_passphrase=module.params["auth_passphrase"],
+ privacy_protocol=module.params["privacy_protocol"],
+ privacy_passphrase=module.params["privacy_passphrase"],
+ user=module.params["user"],
+ )
+ new_v3_manager = SnmpManager(
+ host=module.params["host"],
+ notification=module.params["notification"],
+ version="v3",
+ v3=v3_attrs,
+ )
+ try:
+ blade.snmp_managers.create_snmp_managers(
+ names=[module.params["name"]], snmp_manager=new_v3_manager
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create v3 SNMP manager {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ host=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ user=dict(type="str"),
+ notification=dict(type="str", choices=["inform", "trap"], default="trap"),
+ auth_passphrase=dict(type="str", no_log=True),
+ auth_protocol=dict(type="str", choices=["MD5", "SHA"]),
+ privacy_passphrase=dict(type="str", no_log=True),
+ privacy_protocol=dict(type="str", choices=["AES", "DES"]),
+ version=dict(type="str", choices=["v2c", "v3"]),
+ community=dict(type="str"),
+ )
+ )
+
+ required_together = [
+ ["auth_passphrase", "auth_protocol"],
+ ["privacy_passphrase", "privacy_protocol"],
+ ]
+ required_if = [
+ ["version", "v2c", ["community", "host"]],
+ ["version", "v3", ["host", "user"]],
+ ]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_together=required_together,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb SDK is required for this module")
+
+ mgr_configured = False
+ mgrs = blade.snmp_managers.list_snmp_managers()
+ for mgr in range(0, len(mgrs.items)):
+ if mgrs.items[mgr].name == module.params["name"]:
+ mgr_configured = True
+ break
+ if module.params["version"] == "v3":
+ if module.params["auth_passphrase"] and (
+ 8 > len(module.params["auth_passphrase"]) > 32
+ ):
+ module.fail_json(msg="auth_password must be between 8 and 32 characters")
+ if (
+ module.params["privacy_passphrase"]
+ and 8 > len(module.params["privacy_passphrase"]) > 63
+ ):
+ module.fail_json(msg="privacy_password must be between 8 and 63 characters")
+ if state == "absent" and mgr_configured:
+ delete_manager(module, blade)
+ elif mgr_configured and state == "present":
+ update_manager(module, blade)
+ elif not mgr_configured and state == "present":
+ create_manager(module, blade)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py
new file mode 100644
index 000000000..7e3a35484
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_subnet.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_subnet
+version_added: "1.0.0"
+short_description: Manage network subnets in a Pure Storage FlashBlade
+description:
+ - This module manages network subnets on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Subnet Name.
+ required: true
+ type: str
+ state:
+ description:
+      - Create, delete or modify a subnet.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ lag:
+ description:
+      - Name of the Link Aggregation Group to use for the subnet.
+ default: uplink
+ type: str
+ version_added: "1.7.0"
+ gateway:
+ description:
+ - IPv4 or IPv6 address of subnet gateway.
+ required: false
+ type: str
+ mtu:
+ description:
+ - MTU size of the subnet. Range is 1280 to 9216.
+ required: false
+ default: 1500
+ type: int
+ prefix:
+ description:
+ - IPv4 or IPv6 address associated with the subnet.
+ - Supply the prefix length (CIDR) as well as the IP address.
+ - Required for subnet creation.
+ required: false
+ type: str
+ vlan:
+ description:
+ - VLAN ID of the subnet.
+ required: false
+ default: 0
+ type: int
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new network subnet named foo
+ purestorage.flashblade.purefb_subnet:
+ name: foo
+ prefix: "10.21.200.3/24"
+ gateway: 10.21.200.1
+ mtu: 9000
+ vlan: 2200
+ lag: bar
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Change configuration of existing subnet foo
+ purestorage.flashblade.purefb_subnet:
+ name: foo
+ state: present
+ prefix: "10.21.100.3/24"
+ gateway: 10.21.100.1
+ mtu: 1500
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete network subnet named foo
+ purestorage.flashblade.purefb_subnet:
+ name: foo
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Subnet, Reference
+except ImportError:
+ HAS_PURITY_FB = False
+
+try:
+ import netaddr
+
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MINIMUM_API_VERSION = "1.3"
+
+
+def get_subnet(module, blade):
+ """Return Subnet or None"""
+ subnet = []
+ subnet.append(module.params["name"])
+ try:
+ res = blade.subnets.list_subnets(names=subnet)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_subnet(module, blade):
+ """Create Subnet"""
+ changed = True
+ if not module.params["prefix"]:
+ module.fail_json(msg="prefix is required for subnet creation")
+ if not module.check_mode:
+ subnet = []
+ subnet.append(module.params["name"])
+ try:
+ if module.params["gateway"]:
+ blade.subnets.create_subnets(
+ names=subnet,
+ subnet=Subnet(
+ prefix=module.params["prefix"],
+ vlan=module.params["vlan"],
+ mtu=module.params["mtu"],
+ gateway=module.params["gateway"],
+ link_aggregation_group=Reference(name=module.params["lag"]),
+ ),
+ )
+ else:
+ blade.subnets.create_subnets(
+ names=subnet,
+ subnet=Subnet(
+ prefix=module.params["prefix"],
+ vlan=module.params["vlan"],
+ mtu=module.params["mtu"],
+ link_aggregation_group=Reference(name=module.params["lag"]),
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create subnet {0}. Confirm supplied parameters".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def modify_subnet(module, blade):
+ """Modify Subnet settings"""
+ changed = False
+ subnet = get_subnet(module, blade)
+ subnet_new = []
+ subnet_new.append(module.params["name"])
+ if module.params["prefix"]:
+ if module.params["prefix"] != subnet.prefix:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.subnets.update_subnets(
+ names=subnet_new, subnet=Subnet(prefix=module.params["prefix"])
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change subnet {0} prefix to {1}".format(
+ module.params["name"], module.params["prefix"]
+ )
+ )
+ if module.params["vlan"]:
+ if module.params["vlan"] != subnet.vlan:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.subnets.update_subnets(
+ names=subnet_new, subnet=Subnet(vlan=module.params["vlan"])
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change subnet {0} VLAN to {1}".format(
+ module.params["name"], module.params["vlan"]
+ )
+ )
+ if module.params["gateway"]:
+ if module.params["gateway"] != subnet.gateway:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.subnets.update_subnets(
+ names=subnet_new,
+ subnet=Subnet(gateway=module.params["gateway"]),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change subnet {0} gateway to {1}".format(
+ module.params["name"], module.params["gateway"]
+ )
+ )
+ if module.params["mtu"]:
+ if module.params["mtu"] != subnet.mtu:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.subnets.update_subnets(
+ names=subnet_new, subnet=Subnet(mtu=module.params["mtu"])
+ )
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Failed to change subnet {0} MTU to {1}".format(
+ module.params["name"], module.params["mtu"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_subnet(module, blade):
+ """Delete Subnet"""
+ changed = True
+ if not module.check_mode:
+ subnet = []
+ subnet.append(module.params["name"])
+ try:
+ blade.subnets.delete_subnets(names=subnet)
+ except Exception:
+ module.fail_json(
+ msg="Failed to delete subnet {0}".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=["present", "absent"]),
+ gateway=dict(),
+ lag=dict(type="str", default="uplink"),
+ mtu=dict(type="int", default=1500),
+ prefix=dict(),
+ vlan=dict(type="int", default=0),
+ )
+ )
+
+ required_if = [["state", "present", ["prefix"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ if not HAS_NETADDR:
+ module.fail_json(msg="netaddr module is required")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MINIMUM_API_VERSION not in api_version:
+ module.fail_json(msg="Upgrade Purity//FB to enable this module")
+ subnet = get_subnet(module, blade)
+ try:
+ blade.link_aggregation_groups.list_link_aggregation_groups(
+ names=[module.params["lag"]]
+ )
+ except Exception:
+ module.fail_json(msg="LAG {0} does not exist.".format(module.params["lag"]))
+
+ if state == "present":
+ if not (1280 <= module.params["mtu"] <= 9216):
+ module.fail_json(
+ msg="MTU {0} is out of range (1280 to 9216)".format(
+ module.params["mtu"]
+ )
+ )
+ if not (0 <= module.params["vlan"] <= 4094):
+ module.fail_json(
+ msg="VLAN ID {0} is out of range (0 to 4094)".format(
+ module.params["vlan"]
+ )
+ )
+ if module.params["gateway"]:
+ if netaddr.IPAddress(module.params["gateway"]) not in netaddr.IPNetwork(
+ module.params["prefix"]
+ ):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ subnets = blade.subnets.list_subnets()
+ nrange = netaddr.IPSet([module.params["prefix"]])
+ for sub in range(0, len(subnets.items)):
+ if (
+ subnets.items[sub].vlan == module.params["vlan"]
+ and subnets.items[sub].name != module.params["name"]
+ ):
+ module.fail_json(
+ msg="VLAN ID {0} is already in use.".format(module.params["vlan"])
+ )
+ if (
+ nrange & netaddr.IPSet([subnets.items[sub].prefix])
+ and subnets.items[sub].name != module.params["name"]
+ ):
+ module.fail_json(msg="Prefix CIDR overlaps with existing subnet.")
+
+ if state == "present" and not subnet:
+ create_subnet(module, blade)
+ elif state == "present" and subnet:
+ modify_subnet(module, blade)
+ elif state == "absent" and subnet:
+ delete_subnet(module, blade)
+ elif state == "absent" and not subnet:
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py
new file mode 100644
index 000000000..2a7406418
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_syslog.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_syslog
+version_added: '1.4.0'
+short_description: Configure Pure Storage FlashBlade syslog settings
+description:
+- Configure syslog configuration for Pure Storage FlashBlades.
+- Add or delete an individual syslog server from the existing
+  list of servers.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Unique identifier for the syslog server address
+ type: str
+ required: true
+ state:
+ description:
+ - Create or delete syslog servers configuration
+ default: present
+ type: str
+ choices: [ absent, present ]
+ protocol:
+ description:
+ - Protocol which server uses
+ type: str
+ choices: [ tcp, tls, udp ]
+ port:
+ description:
+    - Port at which the server is listening. If no port is specified,
+      the system will use 514.
+ type: str
+ address:
+ description:
+ - Syslog server address.
+      This field supports IPv4 addresses or FQDNs.
+      An invalid IP address will cause the module to fail.
+      No validation is performed for FQDNs.
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Delete existing syslog server entries
+ purestorage.flashblade.purefb_syslog:
+ name: syslog1
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Set array syslog servers
+ purestorage.flashblade.purefb_syslog:
+ state: present
+ name: syslog1
+ address: syslog1.com
+ protocol: udp
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
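+
+# Illustrative sketch only (server name, address and port are assumed values):
+# the port option is a string, so quote it when setting a non-default port.
+- name: Set array syslog server with a non-default port
+  purestorage.flashblade.purefb_syslog:
+    state: present
+    name: syslog2
+    address: syslog2.acme.com
+    protocol: tcp
+    port: "1514"
+    fb_url: 10.10.10.2
+    api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641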
+"""
+
+RETURN = r"""
+"""
+
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import SyslogServerPostOrPatch
+except ImportError:
+ HAS_PURITY_FB = False
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.10"
+
+
+def delete_syslog(module, blade):
+ """Delete Syslog Server"""
+ changed = False
+ try:
+ server = blade.syslog.list_syslog_servers(names=[module.params["name"]])
+ except Exception:
+ server = None
+
+ if server:
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.syslog.delete_syslog_servers(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to remove syslog server: {0}".format(
+ module.params["name"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def add_syslog(module, blade):
+ """Add Syslog Server"""
+ changed = False
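+    # Build the syslog server URI in the form protocol://address[:port]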
+ noport_address = module.params["protocol"] + "://" + module.params["address"]
+
+ if module.params["port"]:
+ full_address = noport_address + ":" + module.params["port"]
+ else:
+ full_address = noport_address
+
+    address_list = blade.syslog.list_syslog_servers()
+    exists = False
+
+    if address_list:
+        for address in range(0, len(address_list.items)):
+            if address_list.items[address].name == module.params["name"]:
+                exists = True
+                break
+    if not exists:
+        if len(address_list.items) == 3:
+            module.fail_json(
+                msg="Maximum number of syslog servers (3) already configured."
+            )
+        changed = True
+ if not module.check_mode:
+ try:
+ attr = SyslogServerPostOrPatch(uri=full_address)
+ blade.syslog.create_syslog_servers(
+ syslog=attr, names=[module.params["name"]]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to add syslog server {0} - {1}".format(
+ module.params["name"], full_address
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(type="str"),
+ protocol=dict(type="str", choices=["tcp", "tls", "udp"]),
+ port=dict(type="str"),
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ required_if = [["state", "present", ["address", "protocol"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ if module.params["state"] == "absent":
+ delete_syslog(module, blade)
+ else:
+ add_syslog(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py
new file mode 100644
index 000000000..ab37bfda3
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_target.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_target
+version_added: '1.0.0'
+short_description: Manage remote S3-capable targets for a FlashBlade
+description:
+- Manage remote S3-capable targets for a FlashBlade system
+- Use this for non-FlashBlade targets.
+- Use I(purestorage.flashblade.purefb_connect) for FlashBlade targets.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Create or delete remote target
+ default: present
+ type: str
+ choices: [ absent, present ]
+ name:
+ description:
+ - Name of S3-capable target (IP or FQDN)
+ type: str
+ required: true
+ address:
+ description:
+ - Address of S3-capable target (IP or FQDN)
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Create a connection to remote S3-capable target
+ purestorage.flashblade.purefb_target:
+ name: target_1
+ address: 10.10.10.20
+ fb_url: 10.10.10.2
+ api_token: T-89faa581-c668-483d-b77d-23c5d88ba35c
+- name: Delete connection to remote S3-capable system
+ purestorage.flashblade.purefb_target:
+ state: absent
+ name: target_1
+ fb_url: 10.10.10.2
+ api_token: T-89faa581-c668-483d-b77d-23c5d88ba35c
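+
+# Illustrative sketch only (the new address is an assumed value): running the
+# module again with a different address updates the existing target in place.
+- name: Update the address of an existing remote S3-capable target
+  purestorage.flashblade.purefb_target:
+    name: target_1
+    address: 10.10.10.30
+    fb_url: 10.10.10.2
+    api_token: T-89faa581-c668-483d-b77d-23c5d88ba35c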
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITYFB = True
+try:
+ from purity_fb import TargetPost, Target
+except ImportError:
+ HAS_PURITYFB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MINIMUM_API_VERSION = "1.9"
+
+
+def _check_replication_configured(module, blade):
+ interfaces = blade.network_interfaces.list_network_interfaces()
+ repl_ok = False
+ for link in range(0, len(interfaces.items)):
+ if "replication" in interfaces.items[link].services:
+ repl_ok = True
+ if not repl_ok:
+ module.fail_json(
+ msg="Replication network interface required to configure a target"
+ )
+
+
+def _check_connected(module, blade):
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].name == module.params["name"]:
+ return connected_targets.items[target]
+ return None
+
+
+def break_connection(module, blade):
+ """Break connection to remote target"""
+ changed = True
+ if not module.check_mode:
+ try:
+ blade.targets.delete_targets(names=[module.params["name"]])
+ except Exception:
+ module.fail_json(
+ msg="Failed to disconnect target {0}.".format(module.params["name"])
+ )
+ module.exit_json(changed=changed)
+
+
+def create_connection(module, blade):
+ """Create connection to remote target"""
+ changed = True
+ if not module.check_mode:
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if connected_targets.items[target].address == module.params["address"]:
+ module.fail_json(
+ msg="Target already exists with same connection address"
+ )
+ try:
+ target = TargetPost(address=module.params["address"])
+ blade.targets.create_targets(names=[module.params["name"]], target=target)
+ except Exception:
+ module.fail_json(
+ msg="Failed to connect to remote target {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_connection(module, blade, connection):
+ """Update target connection address"""
+ changed = False
+ connected_targets = blade.targets.list_targets()
+ for target in range(0, len(connected_targets.items)):
+ if (
+ connected_targets.items[target].address == module.params["address"]
+ and connected_targets.items[target].name != module.params["name"]
+ ):
+ module.fail_json(msg="Target already exists with same connection address")
+ if module.params["address"] != connection.address:
+ changed = True
+ if not module.check_mode:
+ new_address = Target(
+ name=module.params["name"], address=module.params["address"]
+ )
+ try:
+ blade.targets.update_targets(
+ names=[connection.name], target=new_address
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to change address for target {0}.".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str", required=True),
+ address=dict(type="str"),
+ )
+ )
+
+ required_if = [["state", "present", ["address"]]]
+
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ if not HAS_PURITYFB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ _check_replication_configured(module, blade)
+ target = _check_connected(module, blade)
+ if state == "present" and not target:
+ create_connection(module, blade)
+ elif state == "present" and target:
+ update_connection(module, blade, target)
+ elif state == "absent" and target:
+ break_connection(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py
new file mode 100644
index 000000000..21e83c002
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_timeout
+version_added: '1.6.0'
+short_description: Configure Pure Storage FlashBlade GUI idle timeout
+description:
+- Configure GUI idle timeout for Pure Storage FlashBlade.
+- This does not affect existing GUI sessions.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Set or disable the GUI idle timeout
+ default: present
+ type: str
+ choices: [ present, absent ]
+ timeout:
+ description:
+ - Minutes for idle timeout.
+ type: int
+ default: 30
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set GUI idle timeout to 25 minutes
+ purestorage.flashblade.purefb_timeout:
+ timeout: 25
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Disable idle timeout
+ purestorage.flashblade.purefb_timeout:
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def set_timeout(module, blade):
+ """Set GUI idle timeout"""
+ changed = True
+ if not module.check_mode:
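+        # The REST API expresses idle_timeout in milliseconds, so convert from minutes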
+ res = blade.patch_arrays(
+ flashblade.Array(idle_timeout=module.params["timeout"] * 60000)
+ )
+ if res.status_code != 200:
+ module.fail_json(msg="Failed to set GUI idle timeout")
+
+ module.exit_json(changed=changed)
+
+
+def disable_timeout(module, blade):
+ """Disable idle timeout"""
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_arrays(flashblade.Array(idle_timeout=0))
+ if res.status_code != 200:
+ module.fail_json(msg="Failed to disable GUI idle timeout")
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ timeout=dict(type="int", default=30),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ state = module.params["state"]
+    if not (5 <= module.params["timeout"] <= 180) and module.params["timeout"] != 0:
+ module.fail_json(msg="Timeout value must be between 5 and 180 minutes")
+    current_timeout = list(blade.get_arrays().items)[0].idle_timeout // 60000
+ if state == "present" and current_timeout != module.params["timeout"]:
+ set_timeout(module, blade)
+ elif state == "absent" and current_timeout != 0:
+ disable_timeout(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_tz.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_tz.py
new file mode 100644
index 000000000..9f6acc5d4
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_tz.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_tz
+version_added: '1.10.0'
+short_description: Configure Pure Storage FlashBlade timezone
+description:
+- Configure the timezone for a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ timezone:
+ description:
+ - If not provided, the module will attempt to get the current local timezone from the server
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set FlashBlade Timezone to America/Los_Angeles
+ purestorage.flashblade.purefb_tz:
+ timezone: "America/Los_Angeles"
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
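+
+# Illustrative sketch only: when I(timezone) is omitted the module attempts to
+# detect the timezone of the server running the task, falling back to UTC.
+- name: Set FlashBlade timezone to that of the server running the task
+  purestorage.flashblade.purefb_tz:
+    fb_url: 10.10.10.2
+    api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3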
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PYTZ = True
+try:
+ import pytz
+except ImportError:
+    HAS_PYTZ = False
+
+import os
+import re
+import platform
+
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def _findstr(text, match):
+    found = None
+    for line in text.splitlines():
+        if match in line:
+            found = line
+    return found
+
+
+def _get_local_tz(module, timezone="UTC"):
+ """
+    Attempt to get the local timezone of the server running the module and use that.
+    If the timezone cannot be determined, the default will be UTC.
+
+    Linux has been tested and other operating systems should be OK.
+    Failures cause UTC to be assumed.
+
+    Windows is not supported and will assume UTC.
+ """
+ if platform.system() == "Linux":
+ timedatectl = get_bin_path("timedatectl")
+ if timedatectl is not None:
+ rcode, stdout, stderr = module.run_command(timedatectl)
+ if rcode == 0 and stdout:
+ line = _findstr(stdout, "Time zone")
+ full_tz = line.split(":", 1)[1].rstrip()
+ timezone = full_tz.split()[0]
+ return timezone
+ else:
+ module.warn("Incorrect timedatectl output. Timezone will be set to UTC")
+ else:
+ if os.path.exists("/etc/timezone"):
+ timezone = get_file_content("/etc/timezone")
+ else:
+ module.warn("Could not find /etc/timezone. Assuming UTC")
+
+ elif platform.system() == "SunOS":
+ if os.path.exists("/etc/default/init"):
+ for line in get_file_content("/etc/default/init", "").splitlines():
+ if line.startswith("TZ="):
+ timezone = line.split("=", 1)[1]
+ return timezone
+ else:
+ module.warn("Could not find /etc/default/init. Assuming UTC")
+
+ elif re.match("^Darwin", platform.platform()):
+ systemsetup = get_bin_path("systemsetup")
+ if systemsetup is not None:
+            rcode, stdout, stderr = module.run_command([systemsetup, "-gettimezone"])
+ if rcode == 0 and stdout:
+ timezone = stdout.split(":", 1)[1].lstrip()
+ else:
+ module.warn("Could not run systemsetup. Assuming UTC")
+ else:
+ module.warn("Could not find systemsetup. Assuming UTC")
+
+ elif re.match("^(Free|Net|Open)BSD", platform.platform()):
+ if os.path.exists("/etc/timezone"):
+ timezone = get_file_content("/etc/timezone")
+ else:
+ module.warn("Could not find /etc/timezone. Assuming UTC")
+
+ elif platform.system() == "AIX":
+ aix_oslevel = int(platform.version() + platform.release())
+ if aix_oslevel >= 61:
+ if os.path.exists("/etc/environment"):
+ for line in get_file_content("/etc/environment", "").splitlines():
+ if line.startswith("TZ="):
+ timezone = line.split("=", 1)[1]
+ return timezone
+ else:
+ module.warn("Could not find /etc/environment. Assuming UTC")
+ else:
+ module.warn(
+ "Cannot determine timezone when AIX os level < 61. Assuming UTC"
+ )
+
+ else:
+ module.warn("Could not find /etc/timezone. Assuming UTC")
+
+ return timezone
+
+
+def set_timezone(module, blade):
+ """Set timezone"""
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_arrays(flashblade.Array(time_zone=module.params["timezone"]))
+ if res.status_code != 200:
+ module.fail_json(
+                msg="Failed to set timezone. Error: {0}".format(res.errors[0].message)
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ timezone=dict(type="str"),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ if not HAS_PYTZ:
+ module.fail_json(msg="pytz is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ if not module.params["timezone"]:
+ module.params["timezone"] = _get_local_tz(module)
+ if module.params["timezone"] not in pytz.all_timezones_set:
+ module.fail_json(
+ msg="Timezone {0} is not valid".format(module.params["timezone"])
+ )
+
+ current_tz = list(blade.get_arrays().items)[0].time_zone
+ if current_tz != module.params["timezone"]:
+ set_timezone(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py
new file mode 100644
index 000000000..ffa34be8e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_user.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_user
+version_added: '1.0.0'
+short_description: Modify FlashBlade user accounts
+description:
+- Modify a user on a Pure Storage FlashBlade.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the user account
+ type: str
+ password:
+ description:
+ - Password for the local user.
+ - Only applies to the local user 'pureuser'
+ type: str
+ old_password:
+ description:
+ - If changing an existing password, you must provide the old password for security
+ - Only applies to the local user 'pureuser'
+ type: str
+ public_key:
+ description:
+    - The API client's PEM formatted (Base64 encoded) RSA public key.
+    - Include the I(-----BEGIN PUBLIC KEY-----) and I(-----END PUBLIC KEY-----) lines
+ type: str
+ version_added: "1.8.0"
+ clear_lock:
+ description:
+ - Clear user lockout flag
+ type: bool
+ default: false
+ version_added: "1.8.0"
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Change password for local user (NOT IDEMPOTENT)
+ purestorage.flashblade.purefb_user:
+ name: pureuser
+ password: anewpassword
+ old_password: apassword
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Set public key for user
+ purestorage.flashblade.purefb_user:
+ name: fred
+ public_key: "{{lookup('file', 'public_pem_file') }}"
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+
+- name: Clear user lockout
+ purestorage.flashblade.purefb_user:
+ name: fred
+ clear_lock: true
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import Admin
+except ImportError:
+ HAS_PURITY_FB = False
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flashblade import AdminPatch
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "1.3"
+MIN_KEY_API_VERSION = "2.1"
+MIN_LOCK_API_VERSION = "2.3"
+
+
+def update_user(module, blade):
+ """Create or Update Local User Account"""
+ changed = False
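+    # Password changes are only supported for the local 'pureuser' account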
+ if module.params["password"] and module.params["name"].lower() == "pureuser":
+ if module.params["password"] != module.params["old_password"]:
+ changed = True
+ if not module.check_mode:
+ try:
+ new_admin = Admin()
+ new_admin.password = module.params["password"]
+ new_admin.old_password = module.params["old_password"]
+ blade.admins.update_admins(names=["pureuser"], admin=new_admin)
+ except Exception:
+ module.fail_json(
+ msg="Local User {0}: Password reset failed. "
+ "Check passwords. One of these is incorrect.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Local User Account {0}: Password change failed - "
+ "Old and new passwords are the same".format(module.params["name"])
+ )
+ if module.params["password"] and module.params["name"].lower() != "pureuser":
+ module.fail_json(msg="Changing password for remote accounts is not supported.")
+ api_version = blade.api_version.list_versions().versions
+ if MIN_KEY_API_VERSION in api_version:
+ bladev2 = get_system(module)
+ try:
+ user_data = list(bladev2.get_admins(names=[module.params["name"]]).items)[0]
+ except AttributeError:
+ module.fail_json(
+ msg="User {0} does not currently exist in the FlashBlade. "
+                "Please log in as this user before attempting to modify it.".format(
+ module.params["name"]
+ )
+ )
+ current_key = user_data.public_key
+ if module.params["public_key"] and current_key != module.params["public_key"]:
+ changed = True
+ if not module.check_mode:
+ my_admin = AdminPatch(public_key=module.params["public_key"])
+ res = bladev2.patch_admins(
+ names=[module.params["name"]], admin=my_admin
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change public_key for {0}.".format(
+ module.params["name"]
+ )
+ )
+ if MIN_LOCK_API_VERSION in api_version:
+ if user_data.locked and module.params["clear_lock"]:
+ changed = True
+ if not module.check_mode:
+ my_admin = AdminPatch(locked=False)
+ res = bladev2.patch_admins(
+ names=[module.params["name"]], admin=my_admin
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to unlock user {0}.".format(
+ module.params["name"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str"),
+ public_key=dict(type="str", no_log=True),
+ password=dict(type="str", no_log=True),
+ old_password=dict(type="str", no_log=True),
+ clear_lock=dict(type="bool", default=False),
+ )
+ )
+
+ required_together = [["password", "old_password"]]
+ module = AnsibleModule(
+ argument_spec, supports_check_mode=True, required_together=required_together
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+ if not HAS_PURESTORAGE and module.params["public_key"]:
+        module.fail_json(msg="py-pure-client sdk is required to set public keys")
+
+ blade = get_blade(module)
+ api_version = blade.api_version.list_versions().versions
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(msg="Purity//FB must be upgraded to support this module.")
+
+ update_user(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py
new file mode 100644
index 000000000..6e7dbe49d
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_userpolicy
+version_added: '1.6.0'
+short_description: Manage FlashBlade Object Store User Access Policies
+description:
+- Add or Remove FlashBlade Object Store Access Policies for Account User
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the Object Store User
+    - The user to whom the policy request is applied
+ type: str
+ account:
+ description:
+ - Name of the Object Store Account associated with the user
+ type: str
+ state:
+ description:
+ - Define whether the Access Policy should be added or deleted
+ - Option to list all available policies
+ default: present
+ choices: [ absent, present, show ]
+ type: str
+ policy:
+ description:
+    - Policies to be added to or deleted from the Object Store User
+    - Only valid policies can be used
+    - Use I(state=show) to see available policies
+ type: list
+ elements: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: List existing user access policies for a specific user
+ purestorage.flashblade.purefb_userpolicy:
+ state: show
+ account: foo
+ name: bar
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+ register: policy_list
+
+- name: List all available user access policies
+ purestorage.flashblade.purefb_userpolicy:
+ state: show
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+ register: policy_list
+
+- name: Add user access policies to account user foo/bar
+ purestorage.flashblade.purefb_userpolicy:
+ name: bar
+ account: foo
+ policy:
+ - pure:policy/bucket-create
+ - pure:policy/bucket-delete
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Delete user access policies to account user foo/bar
+ purestorage.flashblade.purefb_userpolicy:
+ name: bar
+ account: foo
+ policy:
+ - pure:policy/bucket-create
+ - pure:policy/bucket-delete
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
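+
+# Illustrative follow-up (assumes the I(register) variable from the tasks
+# above): the returned access policies can be inspected with a debug task.
+- name: Display the registered access policy list
+  ansible.builtin.debug:
+    var: policy_list.policy_list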
+"""
+
+RETURN = r"""
+policy_list:
+ description:
+ - Returns the list of access policies for a user
+    - If no user is specified, returns all available access policies
+ returned: always
+ type: list
+ elements: str
+ sample: ['pure:policy/object-list', 'pure:policy/bucket-list', 'pure:policy/object-read', 'pure:policy/bucket-delete', 'pure:policy/full-access']
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+
+
+def _check_valid_policy(blade, policy):
+ try:
+ return bool(blade.get_object_store_access_policies(names=[policy]))
+ except AttributeError:
+ return False
+
+
+def add_policy(module, blade):
+ """Add a single or list of policies to an account user"""
+ changed = False
+ user_policy_list = []
+ policy_list = module.params["policy"]
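+    # Validate every requested policy before applying any changes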
+ for policy in range(0, len(policy_list)):
+ if not _check_valid_policy(blade, policy_list[policy]):
+ module.fail_json(msg="Policy {0} is not valid.".format(policy_list[policy]))
+ username = module.params["account"] + "/" + module.params["name"]
+ for policy in range(0, len(policy_list)):
+ if not (
+ blade.get_object_store_users_object_store_access_policies(
+ member_names=[username], policy_names=[policy_list[policy]]
+ ).items
+ ):
+ if not module.check_mode:
+ changed = True
+ res = blade.post_object_store_access_policies_object_store_users(
+ member_names=[username], policy_names=[policy_list[policy]]
+ )
+ user_policies = list(
+ blade.get_object_store_access_policies_object_store_users(
+ member_names=[username]
+ ).items
+ )
+ for user_policy in range(0, len(user_policies)):
+ user_policy_list.append(user_policies[user_policy].policy.name)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add policy {0} to account user {1}. Error: {2}".format(
+ policy_list[policy], username, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed, policy_list=user_policy_list)
+
+
+def remove_policy(module, blade):
+ """Remove a single or list of policies to an account user"""
+ changed = False
+ user_policy_list = []
+ policy_list = module.params["policy"]
+ for policy in range(0, len(policy_list)):
+ if not _check_valid_policy(blade, policy):
+ module.fail_json(msg="Policy {0} is not valid.".format(policy))
+ username = module.params["account"] + "/" + module.params["name"]
+ for policy in range(0, len(policy_list)):
+ if (
+ blade.get_object_store_users_object_store_access_policies(
+ member_names=[username], policy_names=[policy_list[policy]]
+ ).total_item_count
+ == 1
+ ):
+ if not module.check_mode:
+ changed = True
+ res = blade.delete_object_store_access_policies_object_store_users(
+ member_names=[username], policy_names=[policy_list[policy]]
+ )
+ user_policies = list(
+ blade.get_object_store_access_policies_object_store_users(
+ member_names=[username]
+ ).items
+ )
+ for user_policy in range(0, len(user_policies)):
+ user_policy_list.append(user_policies[user_policy].policy.name)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to remove policy {0} from account user {1}. Error: {2}".format(
+ policy_list[policy], username, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed, policy_list=user_policy_list)
+
+
+def list_policy(module, blade):
+ """List Object Store User Access Policies"""
+ changed = True
+ policy_list = []
+ if not module.check_mode:
+ if module.params["account"] and module.params["name"]:
+ username = module.params["account"] + "/" + module.params["name"]
+ user_policies = list(
+ blade.get_object_store_access_policies_object_store_users(
+ member_names=[username]
+ ).items
+ )
+ for user_policy in range(0, len(user_policies)):
+ policy_list.append(user_policies[user_policy].policy.name)
+ else:
+ policies = blade.get_object_store_access_policies()
+            if policies.status_code != 200:
+                module.fail_json(msg="Failed to get Object Store User Access Policies")
+            p_list = list(policies.items)
+ for policy in range(0, len(p_list)):
+ policy_list.append(p_list[policy].name)
+ module.exit_json(changed=changed, policy_list=policy_list)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type="str", default="present", choices=["absent", "present", "show"]
+ ),
+ name=dict(type="str"),
+ account=dict(type="str"),
+ policy=dict(type="list", elements="str"),
+ )
+ )
+ required_if = [
+ ["state", "present", ["name", "account", "policy"]],
+ ["state", "absent", ["name", "account", "policy"]],
+ ]
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ state = module.params["state"]
+ if (
+ blade.get_object_store_users(
+ names=[module.params["account"] + "/" + module.params["name"]]
+ ).status_code
+ != 200
+ ):
+ module.fail_json(
+ msg="Account User {0}/{1} does not exist".format(
+ module.params["account"], module.params["name"]
+ )
+ )
+ if state == "show":
+ list_policy(module, blade)
+ elif state == "present":
+ add_policy(module, blade)
+ elif state == "absent":
+ remove_policy(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userquota.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userquota.py
new file mode 100644
index 000000000..74361f2ea
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userquota.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+
+DOCUMENTATION = """
+---
+module: purefb_userquota
+version_added: "1.7.0"
+short_description: Manage filesystem user quotas
+description:
+ - This module manages user quotas for filesystems on Pure Storage FlashBlade.
+author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Filesystem Name.
+ required: true
+ type: str
+ state:
+ description:
+      - Create, delete or modify a quota.
+ required: false
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ quota:
+ description:
+ - User quota in M, G, T or P units. This cannot be 0.
+ - This value will override the file system's default user quota.
+ type: str
+ uid:
+ description:
+ - The user id on which the quota is enforced.
+ - Cannot be combined with I(uname)
+ type: int
+ uname:
+ description:
+ - The user name on which the quota is enforced.
+ - Cannot be combined with I(uid)
+ type: str
+extends_documentation_fragment:
+ - purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = """
+- name: Create new user (using UID) quota for filesystem named foo
+ purestorage.flashblade.purefb_userquota:
+ name: foo
+ quota: 1T
+ uid: 1234
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Create new user (using username) quota for filesystem named foo
+ purestorage.flashblade.purefb_userquota:
+ name: foo
+ quota: 1T
+ uname: bar
+ state: present
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete user quota on filesystem foo for user by UID
+ purestorage.flashblade.purefb_userquota:
+ name: foo
+ uid: 1234
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Delete user quota on filesystem foo for user by username
+ purestorage.flashblade.purefb_userquota:
+ name: foo
+ uname: bar
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Update user quota on filesystem foo for user by username
+  purestorage.flashblade.purefb_userquota:
+    name: foo
+    quota: 20G
+    uname: bar
+    state: present
+    fb_url: 10.10.10.2
+    api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Update user quota on filesystem foo for user by UID
+  purestorage.flashblade.purefb_userquota:
+    name: foo
+    quota: 20G
+    uid: 1234
+    state: present
+    fb_url: 10.10.10.2
+    api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+"""
+
+RETURN = """
+"""
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import QuotasUser
+except ImportError:
+ HAS_PURITY_FB = False
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_blade,
+ purefb_argument_spec,
+)
+
+
+MIN_REQUIRED_API_VERSION = "1.6"
+
+
+def get_fs(module, blade):
+ """Return Filesystem or None"""
+ fsys = []
+ fsys.append(module.params["name"])
+ try:
+ res = blade.file_systems.list_file_systems(names=fsys)
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def get_quota(module, blade):
+ """Return Filesystem User Quota or None"""
+ fsys = []
+ fsys.append(module.params["name"])
+ try:
+ if module.params["uid"]:
+ res = blade.quotas_users.list_user_quotas(
+ file_system_names=fsys, filter="user.id=" + str(module.params["uid"])
+ )
+ else:
+ res = blade.quotas_users.list_user_quotas(
+ file_system_names=fsys,
+ filter="user.name='" + module.params["uname"] + "'",
+ )
+ return res.items[0]
+ except Exception:
+ return None
+
+
+def create_quota(module, blade):
+ """Create Filesystem User Quota"""
+ changed = True
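+    # human_to_bytes converts the human-readable quota (e.g. 1T, 20G) to bytes for the API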
+ quota = int(human_to_bytes(module.params["quota"]))
+ if not module.check_mode:
+ try:
+ if module.params["uid"]:
+ blade.quotas_users.create_user_quotas(
+ file_system_names=[module.params["name"]],
+ uids=[module.params["uid"]],
+ quota=QuotasUser(quota=quota),
+ )
+ else:
+ blade.quotas_users.create_user_quotas(
+ file_system_names=[module.params["name"]],
+ user_names=[module.params["uname"]],
+ quota=QuotasUser(quota=quota),
+ )
+ except Exception:
+ if module.params["uid"]:
+ module.fail_json(
+                    msg="Failed to create quota for UID {0} on filesystem {1}.".format(
+ module.params["uid"], module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+                    msg="Failed to create quota for username {0} on filesystem {1}.".format(
+ module.params["uname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_quota(module, blade):
+    """Update Filesystem User Quota"""
+ changed = False
+ current_quota = get_quota(module, blade)
+ quota = int(human_to_bytes(module.params["quota"]))
+ if current_quota.quota != quota:
+ changed = True
+ if not module.check_mode:
+ if module.params["uid"]:
+ try:
+ blade.quotas_users.update_user_quotas(
+ file_system_names=[module.params["name"]],
+ uids=[module.params["uid"]],
+ quota=QuotasUser(quota=quota),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to update quota for UID {0} on filesystem {1}.".format(
+ module.params["uid"], module.params["name"]
+ )
+ )
+ else:
+ try:
+ blade.quotas_users.update_user_quotas(
+ file_system_names=[module.params["name"]],
+ user_names=[module.params["uname"]],
+ quota=QuotasUser(quota=quota),
+ )
+ except Exception:
+ module.fail_json(
+                        msg="Failed to update quota for username {0} on filesystem {1}.".format(
+ module.params["uname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_quota(module, blade):
+ """Delete Filesystem User Quota"""
+ changed = True
+ if not module.check_mode:
+ try:
+ if module.params["uid"]:
+ blade.quotas_users.delete_user_quotas(
+ file_system_names=[module.params["name"]],
+ uids=[module.params["uid"]],
+ )
+ else:
+ blade.quotas_users.delete_user_quotas(
+ file_system_names=[module.params["name"]],
+ user_names=[module.params["uname"]],
+ )
+ except Exception:
+ if module.params["uid"]:
+ module.fail_json(
+ msg="Failed to delete quota for UID {0} on filesystem {1}.".format(
+ module.params["uid"], module.params["name"]
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to delete quota for username {0} on filesystem {1}.".format(
+ module.params["uname"], module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ uid=dict(type="int"),
+ uname=dict(type="str"),
+ state=dict(default="present", choices=["present", "absent"]),
+ quota=dict(type="str"),
+ )
+ )
+
+ mutually_exclusive = [["uid", "uname"]]
+ required_if = [["state", "present", ["quota"]]]
+ module = AnsibleModule(
+ argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ if not HAS_PURITY_FB:
+ module.fail_json(msg="purity_fb sdk is required for this module")
+
+ state = module.params["state"]
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(
+ msg="Minimum FlashBlade REST version required: {0}".format(
+ MIN_REQUIRED_API_VERSION
+ )
+ )
+ fsys = get_fs(module, blade)
+ if not fsys:
+ module.fail_json(
+ msg="Filesystem {0} does not exist.".format(module.params["name"])
+ )
+ quota = get_quota(module, blade)
+
+ if state == "present" and not quota:
+ create_quota(module, blade)
+ elif state == "present" and quota:
+ update_quota(module, blade)
+ elif state == "absent" and quota:
+ delete_quota(module, blade)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_virtualhost.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_virtualhost.py
new file mode 100644
index 000000000..7f4a2310e
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_virtualhost.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_virtualhost
+version_added: '1.6.0'
+short_description: Manage FlashBlade Object Store Virtual Hosts
+description:
+- Add or delete FlashBlade Object Store Virtual Hosts
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of the Object Store Virtual Host
+ - A hostname or domain by which the array can be addressed for virtual
+ hosted-style S3 requests.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the Object Store Virtual Host should be added or deleted
+ default: present
+ choices: [ absent, present ]
+ type: str
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Add Object Store Virtual Host
+ purestorage.flashblade.purefb_virtualhost:
+ name: "s3.acme.com"
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Delete Object Store Virtual Host
+ purestorage.flashblade.purefb_virtualhost:
+ name: "nohost.acme.com"
+ state: absent
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.0"
+MAX_HOST_COUNT = 10
+
+
+def delete_host(module, blade):
+ """Delete Object Store Virtual Host"""
+ changed = False
+ if module.params["name"] == "s3.amazonaws.com":
+ module.warn("s3.amazonaws.com is a reserved name and cannot be deleted")
+ else:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_object_store_virtual_hosts(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete Object Store Virtual Host {0}".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def add_host(module, blade):
+ """Add Object Store Virtual Host"""
+ changed = True
+ if not module.check_mode:
+ res = blade.post_object_store_virtual_hosts(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to add Object Store Virtual Host {0}".format(
+ module.params["name"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str", required=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ state = module.params["state"]
+
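+    # A 200 status code means the virtual host already exists on the array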
+ exists = bool(
+ blade.get_object_store_virtual_hosts(names=[module.params["name"]]).status_code
+ == 200
+ )
+
+    if exists and state == "absent":
+        delete_host(module, blade)
+    elif not exists and state == "present":
+        if len(list(blade.get_object_store_virtual_hosts().items)) < MAX_HOST_COUNT:
+            add_host(module, blade)
+        else:
+            module.warn("Maximum number of Object Store Virtual Hosts reached.")
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/requirements.txt b/ansible_collections/purestorage/flashblade/requirements.txt
new file mode 100644
index 000000000..9742ecbfa
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/requirements.txt
@@ -0,0 +1,5 @@
+netaddr
+datetime
+pytz
+purity-fb
+py-pure-client
diff --git a/ansible_collections/purestorage/flashblade/roles/.keep b/ansible_collections/purestorage/flashblade/roles/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/roles/.keep
diff --git a/ansible_collections/purestorage/flashblade/settings.json b/ansible_collections/purestorage/flashblade/settings.json
new file mode 100644
index 000000000..a6fe89373
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/settings.json
@@ -0,0 +1,8 @@
+{
+ "id": "FlashBlade-Collection",
+ "name": "Ansible Collection for FlashBlade",
+ "filter": "devops",
+ "image": "http://code.purestorage.com/images/32_fb_collection.png",
+ "featured": 1,
+ "priority": 3
+}
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt
new file mode 100644
index 000000000..771db46ec
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt
@@ -0,0 +1,2 @@
+plugins/modules/purefb_info.py validate-modules:return-syntax-error
+plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/fusion/.github/CONTRIBUTING.md b/ansible_collections/purestorage/fusion/.github/CONTRIBUTING.md
new file mode 100644
index 000000000..a9054d18c
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.github/CONTRIBUTING.md
@@ -0,0 +1,19 @@
+# WELCOME TO PURE STORAGE FUSION ANSIBLE COLLECTION GITHUB
+
+Hi! Nice to see you here!
+
+## QUESTIONS ?
+
+The GitHub issue tracker is not the best place for questions for various reasons, but the [mailing list](mailto:pure-ansible-team@purestorage.com) is a very helpful place for those things.
+
+## CONTRIBUTING ?
+
+By contributing you agree that these contributions are your own (or approved by your employer) and you grant a full, complete, irrevocable copyright license to all users and developers of the project, present and future, pursuant to the license of the project.
+
+## BUG TO REPORT ?
+
+You can report bugs or make enhancement requests at the [Ansible GitHub issue page](http://github.com/Pure-Storage-Ansible/Fusion-Collection/issues/new/choose) by filling out the issue template that will be presented.
+
+Also please make sure you are testing on the latest released version of Ansible or the development branch; see the [Installation Guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for details.
+
+Thanks!
diff --git a/ansible_collections/purestorage/fusion/.github/ISSUE_TEMPLATE/bug_report.md b/ansible_collections/purestorage/fusion/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 000000000..dd84ea782
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,38 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Browser [e.g. chrome, safari]
+ - Version [e.g. 22]
+
+**Smartphone (please complete the following information):**
+ - Device: [e.g. iPhone6]
+ - OS: [e.g. iOS8.1]
+ - Browser [e.g. stock browser, safari]
+ - Version [e.g. 22]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/ansible_collections/purestorage/fusion/.github/ISSUE_TEMPLATE/feature_request.md b/ansible_collections/purestorage/fusion/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 000000000..bbcbbe7d6
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/ansible_collections/purestorage/fusion/.github/pull_request_template.md b/ansible_collections/purestorage/fusion/.github/pull_request_template.md
new file mode 100644
index 000000000..27079cb18
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.github/pull_request_template.md
@@ -0,0 +1,25 @@
+##### SUMMARY
+<!--- Describe the change below, including rationale and design decisions -->
+
+<!--- HINT: Include "Fixes #nnn" if you are fixing an existing issue -->
+
+##### ISSUE TYPE
+<!--- Pick one below and delete the rest -->
+- Bugfix Pull Request
+- Docs Pull Request
+- Feature Pull Request
+- New Module Pull Request
+- New Role Pull Request
+
+##### COMPONENT NAME
+<!--- Write the short name of the module, plugin, task or feature below -->
+
+##### ADDITIONAL INFORMATION
+<!--- Include additional information to help people understand the change here -->
+<!--- A step-by-step reproduction of the problem is helpful if there is no related issue -->
+- All new PRs must include a changelog fragment (a sketch follows this template)
+- Details of naming convention and format can be found [here](https://docs.ansible.com/ansible/latest/community/development_process.html#creating-a-changelog-fragment)
+<!--- Paste verbatim command output below, e.g. before and after your change -->
+```paste below
+
+```
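The changelog-fragment requirement called out in the template is satisfied by a small YAML file under changelogs/fragments/. A minimal sketch, assuming the antsibull-changelog format these collections already use; the file name and entry text are hypothetical:

```yaml
# changelogs/fragments/999_fix_example.yaml -- hypothetical fragment name
bugfixes:
  - fusion_volume - one-line description of the fix (hypothetical entry)
```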
diff --git a/ansible_collections/purestorage/fusion/.github/workflows/ansible-lint.yaml b/ansible_collections/purestorage/fusion/.github/workflows/ansible-lint.yaml
new file mode 100644
index 000000000..0b2102184
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.github/workflows/ansible-lint.yaml
@@ -0,0 +1,10 @@
+name: Ansible Lint # feel free to pick your own name
+on: [push, pull_request]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: Run ansible-lint
+ uses: ansible-community/ansible-lint-action@main
diff --git a/ansible_collections/purestorage/fusion/.github/workflows/black.yaml b/ansible_collections/purestorage/fusion/.github/workflows/black.yaml
new file mode 100644
index 000000000..68061652a
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.github/workflows/black.yaml
@@ -0,0 +1,11 @@
+name: Black
+
+on: [push, pull_request]
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v3
+ - uses: psf/black@stable
diff --git a/ansible_collections/purestorage/fusion/.github/workflows/create-release.yaml b/ansible_collections/purestorage/fusion/.github/workflows/create-release.yaml
new file mode 100644
index 000000000..25725c15d
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.github/workflows/create-release.yaml
@@ -0,0 +1,117 @@
+name: Release Collection
+
+on: workflow_dispatch
+jobs:
+ create_github_release:
+ runs-on: ubuntu-latest
+ environment: fusion-env
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+
+ - name: Get version from galaxy.yml
+ run: |
+ RELEASE_VERSION=$(grep 'version:' galaxy.yml | awk '{print $2}')
+ echo "RELEASE_VERSION=$RELEASE_VERSION" >> $GITHUB_ENV
+
+ - name: Check if tag exists
+ env:
+ GITHUB_TOKEN: ${{ github.TOKEN }}
+ run: |
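+          # NOTE: 'gh api' exits non-zero when the tag does not exist, which is
+          # the success path here; the EXIT trap turns that failure into exit 0
+          # and is re-armed to exit 1 only when the tag is found.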
+ trap "exit 0" EXIT
+ response=$(gh api -H "Accept: application/vnd.github+json" /repos/${{ github.repository }}/git/ref/tags/$RELEASE_VERSION)
+ if [[ "$response" == *"$RELEASE_VERSION"* ]]; then
+ trap "exit 1" EXIT
+ echo "Error: Tag $RELEASE_VERSION already exists"
+ exit 1
+ fi
+
+ - name: Extract changelog
+ run: |
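+          # Emit only the CHANGELOG.rst section for $RELEASE_VERSION: print from
+          # the matching "v<version>" heading until the next version heading.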
+ awk -v version="$RELEASE_VERSION" '
+ BEGIN { targetVersionFound = 0; previousVersionFound = 0 }
+ {
+ if (match($0, "^v"version) > 0) {
+ targetVersionFound = 1;
+ } else if (targetVersionFound && match($0, "^v") > 0) {
+ previousVersionFound = 1;
+ }
+
+ if (targetVersionFound && !previousVersionFound) {
+ print $0;
+ }
+ }
+ ' CHANGELOG.rst > changelog
+
+ - name: Create a release
+ env:
+ GITHUB_TOKEN: ${{ github.TOKEN }}
+ run: gh release create "${{ env.RELEASE_VERSION }}" --title "Fusion Collection v${{ env.RELEASE_VERSION }}" --notes-file changelog
+
+ publish_collection_to_galaxy:
+ runs-on: ubuntu-latest
+ environment: fusion-env
+ needs: create_github_release
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+
+ - name: Get version from galaxy.yml
+ run: |
+ RELEASE_VERSION=$(grep 'version:' galaxy.yml | awk '{print $2}')
+ echo "RELEASE_VERSION=$RELEASE_VERSION" >> $GITHUB_ENV
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: "3.x"
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install ansible ansible-lint yamllint
+
+ - name: Build Ansible Collection
+ run: ansible-galaxy collection build
+
+ - name: Release to Ansible Galaxy
+ run: ansible-galaxy collection publish --api-key=${{ secrets.ANSIBLE_GALAXY_API_KEY }} ./purestorage-fusion-${{ env.RELEASE_VERSION }}.tar.gz
+
+ publish_collection_to_automation_hub:
+ runs-on: ubuntu-latest
+ environment: fusion-env
+ needs: create_github_release
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+
+ - name: Get version from galaxy.yml
+ run: |
+ RELEASE_VERSION=$(grep 'version:' galaxy.yml | awk '{print $2}')
+ echo "RELEASE_VERSION=$RELEASE_VERSION" >> $GITHUB_ENV
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: "3.x"
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install ansible ansible-lint yamllint
+
+ - name: Build Ansible Collection
+ run: ansible-galaxy collection build
+ - name: Create config for Automation Hub
+ run: |
+ cat << EOF > ansible.cfg
+ [galaxy]
+ server_list = rh_automation_hub
+ [galaxy_server.rh_automation_hub]
+ url=${{ secrets.RH_AUTOMATION_HUB_URL }}
+ auth_url=${{ secrets.RH_SSO_URL }}
+ token=${{ secrets.RH_AUTOMATION_HUB_TOKEN }}
+ EOF
+
+ - name: Release to Automation Hub
+ run: ansible-galaxy collection publish ./purestorage-fusion-${{ env.RELEASE_VERSION }}.tar.gz
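Once the two publish jobs above have run, the released collection is installable from Galaxy. A minimal sketch of a consumer-side requirements file; the pinned version is illustrative:

```yaml
# requirements.yml -- install with: ansible-galaxy collection install -r requirements.yml
collections:
  - name: purestorage.fusion
    version: 1.5.0  # illustrative pin
```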
diff --git a/ansible_collections/purestorage/fusion/.github/workflows/main.yml b/ansible_collections/purestorage/fusion/.github/workflows/main.yml
new file mode 100644
index 000000000..da0a69969
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.github/workflows/main.yml
@@ -0,0 +1,66 @@
+name: Pure Storage Ansible CI
+
+on:
+ pull_request:
+ push:
+ schedule:
+ - cron: '25 10 * * *'
+
+jobs:
+ build:
+ name: Build purefusion on Ansible ${{ matrix.ansible }} (Python ${{ matrix.python-version }})
+ runs-on: ubuntu-20.04
+ strategy:
+ matrix:
+ ansible:
+ - stable-2.11
+ - stable-2.12
+ - stable-2.13
+ - stable-2.14
+ - stable-2.15
+ - devel
+ python-version:
+ - 3.8
+ - 3.9
+ - "3.10"
+ - "3.11"
+ exclude:
+ - python-version: "3.11"
+ ansible: stable-2.11
+ - python-version: "3.11"
+ ansible: stable-2.12
+ - python-version: "3.11"
+ ansible: stable-2.13
+ - python-version: "3.10"
+ ansible: stable-2.11
+ - python-version: 3.8
+ ansible: stable-2.14
+ - python-version: 3.8
+ ansible: stable-2.15
+ - python-version: 3.8
+ ansible: devel
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v3
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v3
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+          python -m pip install --upgrade pip
+          python -m pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
+          python -m pip install purefusion pytest
+
+ - name: Run sanity tests
+ run: |
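+          # ansible-test expects the collection to live under
+          # ansible_collections/<namespace>/<name>, so mirror the checkout into
+          # that layout before running the sanity suite.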
+ pwd
+ mkdir -p ansible_collections/purestorage/fusion
+          rsync -av . ansible_collections/purestorage/fusion --exclude ansible_collections/purestorage/fusion
+ cd ansible_collections/purestorage/fusion
+ ansible-test sanity -v --color --python ${{ matrix.python-version }} --docker
+
+ - name: Run unit tests
+ run: python -m pytest --import-mode=append tests
diff --git a/ansible_collections/purestorage/fusion/.github/workflows/stale.yml b/ansible_collections/purestorage/fusion/.github/workflows/stale.yml
new file mode 100644
index 000000000..7bbc0505b
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.github/workflows/stale.yml
@@ -0,0 +1,19 @@
+name: Mark stale issues and pull requests
+
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+jobs:
+ stale:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/stale@v1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: 'Stale issue message'
+ stale-pr-message: 'Stale pull request message'
+ stale-issue-label: 'no-issue-activity'
+ stale-pr-label: 'no-pr-activity'
diff --git a/ansible_collections/purestorage/fusion/.gitignore b/ansible_collections/purestorage/fusion/.gitignore
new file mode 100644
index 000000000..4d7880b52
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.gitignore
@@ -0,0 +1,12 @@
+*.tar.gz
+.pylintrc
+tests/output/*
+changelogs/.plugin-cache.yaml
+**/__pycache__/
+
+# files produced by Visual Studio Code
+.vscode/
+.history/
+
+# files produced by JetBrains IDEs
+.idea
\ No newline at end of file
diff --git a/ansible_collections/purestorage/fusion/.yamllint b/ansible_collections/purestorage/fusion/.yamllint
new file mode 100644
index 000000000..6c19f43f7
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.yamllint
@@ -0,0 +1,7 @@
+extends: default
+
+rules:
+ document-start: disable
+ indentation: disable
+ line-length:
+ max: 200
diff --git a/ansible_collections/purestorage/fusion/CHANGELOG.rst b/ansible_collections/purestorage/fusion/CHANGELOG.rst
new file mode 100644
index 000000000..b4d9bd6ae
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/CHANGELOG.rst
@@ -0,0 +1,226 @@
+================================
+Purestorage.Fusion Release Notes
+================================
+
+.. contents:: Topics
+
+
+v1.5.0
+======
+
+Minor Changes
+-------------
+
+- FUSION_API_HOST && FUSION_HOST - changed logic; these variables now require a host name without a path
+- Fusion authentication - added the 'access_token' module parameter and the 'FUSION_ACCESS_TOKEN' environment variable as an alternative way of authenticating.
+- fusion - added private key password, which is used to decrypt private key files
+- fusion_info - `array` is None if missing in `volume`
+- fusion_info - `hardware_types` is None if missing in `storage_service`
+- fusion_info - `network_interface_groups` is None if missing in `iscsi_interfaces` in `storage_endpoint`
+- fusion_info - introduce 'availability_zones' subset option
+- fusion_info - introduce 'host_access_policies' subset option
+- fusion_info - introduce 'network_interfaces' subset option
+- fusion_info - introduce 'regions' subset option
+- fusion_info - rename 'appliances' in default dict to 'arrays' for consistency
+- fusion_info - rename 'hosts' dict to 'host_access_policies' for consistency
+- fusion_info - rename 'interfaces' dict to 'network_interfaces' for consistency
+- fusion_info - rename 'placements_groups' in default dict to 'placement_groups' for consistency
+- fusion_info - rename 'zones' dict to 'availability_zones' for consistency
+- fusion_info - rename hardware to hardware_types in response for consistency
+- fusion_info - rename storageclass to storage_classes in response for consistency
+- fusion_pp - duration parsing improved; combinations of time units (e.g. 5H5M) are now supported (see the sketch after this list)
+- fusion_ra - added the `api_client_key` argument, which can be used instead of the `user` and `principal` arguments
+- fusion_ra - added the `principal` argument, which is the ID of either an API client or a user and can be used instead of the `user` argument
+- fusion_se - add support for CBS Storage Endpoint
+
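For the fusion_pp duration change referenced above, a minimal task sketch; the parameter names come from the entries above, and all values are illustrative:

```yaml
- name: Create a protection policy using a combined duration
  purestorage.fusion.fusion_pp:
    name: daily_pp        # illustrative name
    local_rpo: 5H5M       # combined time units, per the improved parsing
    local_retention: 1D
    state: present
```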
+Deprecated Features
+-------------------
+
+- fusion_api_client - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0 (see the sketch after this list)
+- fusion_array - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_az - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_hap - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_hap - parameters `nqn`, `wwns`, `host_password`, `host_user`, `target_password` and `target_user` were deprecated
+- fusion_hw - FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_info - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_info - 'hosts' subset is deprecated in favor of 'host_access_policies' and will be removed in the version 2.0.0
+- fusion_info - 'interfaces' subset is deprecated in favor of 'network_interfaces' and will be removed in the version 2.0.0
+- fusion_info - 'zones' subset is deprecated in favor of 'availability_zones' and will be removed in the version 2.0.0
+- fusion_ni - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_nig - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_pg - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_pp - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_ra - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_region - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_sc - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_se - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_se - `endpoint_type` parameter is now deprecated and will be removed in version 2.0.0
+- fusion_ss - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_tenant - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_tn - FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_ts - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+- fusion_volume - 'app_id' and 'key_file' parameters are deprecated in favor of 'issuer_id' and 'private_key_file' parameters and will be removed in the version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+
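The recurring deprecations above all describe one migration: the 'issuer_id' and 'private_key_file' parameters (or the FUSION_ISSUER_ID and FUSION_HOST environment variables) replace the old 'app_id'/'key_file' pair. A minimal sketch of the new style, with illustrative values:

```yaml
- name: Gather Fusion information with the new authentication parameters
  purestorage.fusion.fusion_info:
    issuer_id: "{{ fusion_issuer_id }}"
    private_key_file: /etc/fusion/private.pem  # illustrative path
```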
+Bugfixes
+--------
+
+- fusion_info - fix runtime errors caused when listing `interfaces`, `arrays` and `snapshots` dicts
+- fusion_pg - freshly created placement group is now moved to correct array
+- fusion_pp - 'local_rpo' changed to accept same input as 'local_retention'
+- fusion_pp - updated retention description
+- fusion_ra - 'name' deprecated and aliased to 'role'
+
+v1.4.2
+======
+
+Minor Changes
+-------------
+
+- added Python package dependency checks in prerequisites.py
+- fusion_hap - added missing 'windows' personality type
+
+Bugfixes
+--------
+
+- fusion_array - correct required parameters
+- fusion_hap - display name now defaults to the value of name
+- fusion_hw - correct required parameters
+- fusion_pg - correct required parameters
+- fusion_pp - correct required parameters
+- fusion_sc - correct required parameters
+- fusion_ss - allow updating hardware types, correct required parameters
+- fusion_tn - fix attribute error
+- fusion_volume - protection policy can now be unset by using '' as name
+
+v1.4.1
+======
+
+v1.4.0
+======
+
+Major Changes
+-------------
+
+- Patching of resource properties was brought to parity with the underlying Python SDK
+- fusion_volume - fixed and reorganized, arguments changed
+
+Minor Changes
+-------------
+
+- errors_py - added opt-in global exception handler which produces simpler and cleaner messages on REST errors
+- removed dependency on Python `netaddr` package
+
+Deprecated Features
+-------------------
+
+- fusion_hw - hardware module is being removed as changing hardware type has never been supported by Pure Storage Fusion
+- fusion_info - nigs subset is deprecated in favor of network_interface_groups and will be removed in the version 1.7.0
+- fusion_info - placements subset is deprecated in favor of placement_groups and will be removed in the version 1.7.0
+- fusion_pg - placement_engine option is deprecated because the Fusion API no longer supports this parameter. It will be removed in the version 2.0.0
+- fusion_se - parameters 'addresses', 'gateway' and 'network_interface_groups' are deprecated in favor of 'iscsi' and will be removed in version 2.0.0
+- fusion_tn - tenant networks are being replaced by storage endpoints ```fusion_se``` and Network Interface Groups ```fusion_nig```
+
+Bugfixes
+--------
+
+- fusion_api_client - error messages now mostly handled by errors_py
+- fusion_hap - could not delete host access policy without the iqn option. Now only the name option is needed for deletion
+- fusion_hap - error messages now mostly handled by errors_py
+- fusion_hap - uppercase names were not supported. Now uppercase names are allowed
+- fusion_info - fixes typo in output 'appiiances' -> 'appliances'
+- fusion_info - network_interface_groups subset returned nothing. Now it collects the same information as nigs subset
+- fusion_info - placements subset returned nothing. Now it collects the same information as placement_groups subset
+- fusion_nig - add missing 'availability_zone' format param in error message
+- fusion_nig - error messages now mostly handled by errors_py
+- fusion_pg - create_pg always crashed at runtime. Now it executes and creates the placement group successfully
+- fusion_pg - error messages now mostly handled by errors_py
+- fusion_pp - error messages now mostly handled by errors_py
+- fusion_pp - fix call to parse_minutes where we were missing a required argument
+- fusion_sc - error messages now mostly handled by errors_py
+- fusion_se - add missing 'availability_zone' format param in error message
+- fusion_se - error messages now mostly handled by errors_py
+- fusion_se - fix call in get_nifg where provider_subnet was used instead of network_interface_group_name
+- fusion_ss - error messages now mostly handled by errors_py
+- fusion_tenant - error messages now mostly handled by errors_py
+- fusion_ts - add missing 'tenant' format param in error message
+- fusion_ts - error messages now mostly handled by errors_py
+- fusion_volume - error messages now mostly handled by errors_py
+
+v1.3.0
+======
+
+Bugfixes
+--------
+
+- fusion_pg - Add missing 'region' parameter
+- fusion_tn - Add missing 'region' parameter
+
+v1.2.0
+======
+
+Minor Changes
+-------------
+
+- fusion_info - Added API Client information
+
+Bugfixes
+--------
+
+- fusion_info - Fixed issue with storage endpoint dict formatting
+
+v1.1.1
+======
+
+v1.1.0
+======
+
+Minor Changes
+-------------
+
+- fusion_az - Add delete AZ option
+- fusion_az - Allow any region to be specified instead of limited to a known list
+- fusion_pp - Add delete PP option
+- fusion_sc - Add delete SC option
+- fusion_ss - Add delete SS option
+
+Bugfixes
+--------
+
+- Allow correct use of environment variables for App ID and private key file
+
+New Modules
+-----------
+
+- purestorage.fusion.fusion_region - Manage Regions in Pure Storage Fusion
+
+v1.0.3
+======
+
+v1.0.2
+======
+
+v1.0.1
+======
+
+v1.0.0
+======
+
+New Modules
+-----------
+
+- purestorage.fusion.fusion_api_client - Manage API clients in Pure Storage Fusion
+- purestorage.fusion.fusion_array - Manage arrays in Pure Storage Fusion
+- purestorage.fusion.fusion_az - Create Availability Zones in Pure Storage Fusion
+- purestorage.fusion.fusion_hap - Manage host access policies in Pure Storage Fusion
+- purestorage.fusion.fusion_hw - Create hardware types in Pure Storage Fusion
+- purestorage.fusion.fusion_info - Collect information from Pure Storage Fusion
+- purestorage.fusion.fusion_nig - Manage Network Interface Groups in Pure Storage Fusion
+- purestorage.fusion.fusion_pg - Manage placement groups in Pure Storage Fusion
+- purestorage.fusion.fusion_pp - Manage protection policies in Pure Storage Fusion
+- purestorage.fusion.fusion_ra - Manage role assignments in Pure Storage Fusion
+- purestorage.fusion.fusion_sc - Manage storage classes in Pure Storage Fusion
+- purestorage.fusion.fusion_ss - Manage storage services in Pure Storage Fusion
+- purestorage.fusion.fusion_tenant - Manage tenants in Pure Storage Fusion
+- purestorage.fusion.fusion_tn - Manage tenant networks in Pure Storage Fusion
+- purestorage.fusion.fusion_ts - Manage tenant spaces in Pure Storage Fusion
+- purestorage.fusion.fusion_volume - Manage volumes in Pure Storage Fusion
diff --git a/ansible_collections/purestorage/fusion/COPYING.GPLv3 b/ansible_collections/purestorage/fusion/COPYING.GPLv3
new file mode 100644
index 000000000..94a9ed024
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/COPYING.GPLv3
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/ansible_collections/purestorage/fusion/FILES.json b/ansible_collections/purestorage/fusion/FILES.json
new file mode 100644
index 000000000..b3f73b7e0
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/FILES.json
@@ -0,0 +1,1209 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b568495166ca2ef38576e62cc6f1eb2d1f4caa988b020112e14650d37510dd83",
+ "format": 1
+ },
+ {
+ "name": "changelogs/.plugin-cache.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "492bf617d0924a14708a862efd096e1a032e1a1243f25e2287e44a6e072e2f1a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1762f9f7012a7e135eacb3e2d2c35bdcb25570cdfc33da7a190fa36edc604aec",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a20772cc1cfbb1676994a74cc05e703aaa7df0759926614ea6832f7232ffc9f7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_region",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_region/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_region/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13360be1e9c683c45a0019ea3a9fd55fe1414964773a69110f0cf6e3cd4ca081",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_pg",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_pg/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_pg/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66add9c2d9f2c8a02d4cbd18b6a871266af0a364756416991e80a70c8eed76f5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_ni",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_ni/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_ni/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a14ec0c5ef2a4ce26fd806a960c7b4d2c55988028b2b406d298929e53547193d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_tenant",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_tenant/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_tenant/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0061bd98f9da02af742c85e7f9f1a1ca52363d3935c9e0fa105db13466922bc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_hap",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_hap/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_hap/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97ac38033a460071019a230efc40f3b63085528fecdc72f98b683c3b888ed04a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_se",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_se/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_se/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba77056fd00ca6a52382f4285a2452609cfda8b045cde1042963c1c58944e31d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_ts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_ts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_ts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1462a14b4dc7a7c0b0f76df30c2283338bc1b53838bd29537c00c7d0ea2a855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_pp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_pp/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_pp/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0839b0cf5da3154795a7e5a5357168b498b44fea9dd2c1b9729385b50784c68f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_sc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_sc/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_sc/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f100551a2f19101772101bc413be17b08baada7c7b92d4fbefccfa4f3637c04",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_ss",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_ss/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_ss/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b5be20d4cd09db5c3edbde5a6b723a93bb266834a8c1908525435f9ab813a7d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_az",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_az/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_az/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b594ff2d1199f94997c63b8ba704b0497ea86a8a7215437541358603dcc68aa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_nig",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_nig/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/fusion_nig/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec0586fc68e8ea986f752237f2f75b42a6a36724b71b01dbd401ca3d5fed0c2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/integration_config.template",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e55ed10c7dca1c84ffa8dd2e19087f630b8fd50fedbb96a167fb8e6231cbb6d2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_fusion_az.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75be72264bf7d95ddc73d72c4763b6e877a05feaab2f6d9b91a55448bb77af51",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mocks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mocks/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mocks/operation_mock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aaa5ad3b4a9bcd10a95947af5f06ec4153512927b56d94f4d442da6007d43c7b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mocks/module_mock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e72e4d7a51c8fcb35cfb7473f84e33de359a969140ebddc185dfce602966e75",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e74786237c54bd81d7b314e5871bd89bf1329f228087a0ecb17a64024b76396c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/module_utils/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/module_utils/test_prerequisites.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ea9eec2b76c9b4ff0c5e2891972bdf12091b59770bfee4462ddb0014afb33a9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/module_utils/test_networking.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01fa89c92da97f0e9f66527663bddff99a46e6ef07b5601f5be7ae129956bfa1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/module_utils/test_parsing.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58c8e7b81680984e3e606cc56210aa8afb93c020939f1d3d585b5cf7de61c513",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/module_utils/test_operations.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "656c85c8580a65fac1cb42e806513233596126d18a8f93465bc79af50872f3e2",
+ "format": 1
+ },
+ {
+ "name": "tests/functional",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_region.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d108a21480c4cb9c9d2810e809ea876173b3d43621f417c0957c77d21f33f76",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_ss.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "676bba88edd7f73957c605356d31f4bd61cd144d354280c373beb3689196d5cd",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b6a8f18e610fcd4f2aea719e5b51ff58ef6f6b06412afd98309255ccab7f8a4",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_ts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc844fc260f337525396d47da3e012fbb2f1f5188a96c3d1071515bdac879583",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_pg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4da9f7a334491933d40fe8d32fbae767393f84d744537a7d03a57d84a1693b38",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_nig.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f20b2ab1eed1bd182d68a00198537f960e9c7e844cfb8df3c99922e98e2365c1",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_se.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79d30463a37430a6a697778bb58fe2ced187672ec74ddae6b98f191069931b04",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_az.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6b7e24d40c1268b1ce0de3557210fbd89e97441dcd384522263f5982a5922b5",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05d60add74e73360eefd8679e808c2c5c5c774726a15c29d923dd077099e9537",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_hap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a8ffe64ef5a561e2eb102f58b20f62c99c8a79022be63976f6e8c19608178ab",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_hw.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e247eb15edad8ed402ee8e3bd6bb3add1426502f1ecf283882ac97d450e13753",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d73fe992181a129802dda79cf3efd3a5ff6473dc5c9c6b99b907af629e5c9279",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_sc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf1794f2f91b496adc54131be84e1ea81263ccf603cf648fecd0a52c7a1da467",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_ra.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "718b4be5026d83e2fe3fefe6838637efce66d9880c635a80603844266b3e926c",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_tenant.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5c6413737db89d121c98f7798b36bb736b367fb6c2ee1651645c742822f9b66",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6e339b28c2b3ed78108244bde3950424b3acc81a6a3b9c8cd7b32a53fbd5ba9",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_api_client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44f1df7dfe3c53b30ae7c2c2fd2873b651f2306bba67a26310a8c2d86c53f04e",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_array.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "910cd4c0859534f5de3b8cb743c9f549b93f25c0f18399158adff63b933a8110",
+ "format": 1
+ },
+ {
+ "name": "tests/functional/test_fusion_pp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3578756616fff28885b379a93221c3dfe8d083a9d685bd8b16878b7f9bf045c9",
+ "format": 1
+ },
+ {
+ "name": "tests/helpers.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74d50a53434e0ca164aa41ea5f272755e9b6ad0dc105d3eec53f62d3e188034c",
+ "format": 1
+ },
+ {
+ "name": "COPYING.GPLv3",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903",
+ "format": 1
+ },
+ {
+ "name": "playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/templates/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/create_availability_zone.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b16b7c3f57b7e505ebd105b26d5dfef45b0eb51021aef23b1e39e87386fefc28",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/setup_infrastructure.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5883177ccab7b449f9c08100d3941c533186457b06b2dcbd6f12ed1f2382240",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_storage_classes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e301ac27d5ba24bbb0e48757349f88fc3839b1fb3e2bf504526b4837a0d67f5",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_interfaces.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26dcfb2bf6fd1ac96ef63c3de9b6b8a36a6297c078b296bb32a96a102dc93804",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_placement_groups.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d6fef0868932ba3a8f17b95e95d7def5c01906d679519d3ad141021e780045a",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_hardware_types.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4cd99a9b6cec5a88f8d13a3359a8d5f3687465dd63a3d779ba43c27f4fd2b4ab",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_volumes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f130c119ae9ce955440ca3b29ed5a31653062d556bd835ce65c2ef9db8e12122",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_snapshots.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef36856102859719d6a3c3970685e7c9fde30b9ef071c1f720bb5a17823df004",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/setup_workloads.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20cdb263572e18b7b6e13dd1c457c4dd8883eed7e332681003a33eb576874fce",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/remove_array.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "062b8643519792fc53b2fe912ca16ebf1138912e6bbedea99a36ad1ee06cc0cd",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_arrays.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45c636e983761357f5087229ed5fe4d478b51d239b4620fef21cc635d931999a",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_storage_services.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be8249059cb0948829e01156336c85bf7c22764541e49019b47a42f9801b2d15",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/make_tenant_admin.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9fcfafc8e9d8f2a1d27e825ba62ff359ec29f3b34ec42e7610720b2fd5b88c94",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_api_clients.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ff79a24e584c6922e819976cdd49ceb86b8fbc683df3fac5514544e464bff68",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/create_tenant_space.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0254c52265517a8d98720efc6e76b163c4a59b575d672a3069c0aef25afd7b4",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_protection_policies.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0257e0d3199f931562d43ab7bd00caff26aed9ca7991ae181b02812ce8012e9",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_network_interface_groups.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ece2378f2b1e2f8085665f90da83f716a52e7a1d56f579c91480da003d2d6cc",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_all.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cacd3575668cd03ebc1d13fb98f321b3c74eeb80b5f3d5a9b53cc0979d5b424a",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_hosts.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "adfde2160651de7b7697a736b12da24c967981167f82d6b7fd058fa408e5c0ad",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/setup_protection_policies.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d4558b3e53bdb83cd1ebd98f9d91741811cfe2457905b7b9bb0ceb445d5546e",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_roles.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5fdba34ac2ad048deac5a3f906ae30b0b5b6c7621f6f4a16353ad1f53dd1a1e4",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_tenant_spaces.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "763263dbc9765ac7d270d18f4d5278dd3ce623673a04308c26750a6186d92dc0",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/create_array.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7ec429c847303f52c63c61aefa8c3714e9ee57a523ce751efa9dd6fec7f4da7",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_availability_zones.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "37271a1ab28648a6604bba7e95ec3a41ae36a7510ebd4d93cc39a493cc9c6048",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_users.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a2cef1205c097069e3f5968de1abcf7b468519bf0c9f8d9e0effa6c5839262d",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_tenants.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5bb3ab4560bf4b46fec826e1183de35b2832957fde2978693711c21cb1f7e19",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/setup_storage_service_class.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ccffa6de2c433cbd99d0a30ccc41b5f282b8b98f33b4eb0f41c683410560dfd",
+ "format": 1
+ },
+ {
+ "name": "playbooks/simple/list_storage_endpoints.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5cb78b90c813503944af88630d9616d9ca9af238c1461cf816cdc9ad64f14e34",
+ "format": 1
+ },
+ {
+ "name": "playbooks/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/vars/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_ts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91e740ffbf27ab279cc6fbd07b5a59e92de941a4e88f437040eae89c1b8f1f3b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_nig.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59cd2a72c5544ebf83ff6fe239f623ec03b4de84efb7cb08fdf4b4159544bc2c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_api_client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b92104038365e11b958a75a521a843c7b599e950e1d7815ff40a01a519dfff5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_ni.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5edf635cb12288af965649f3941bac2f1eea781c2e23793ac40988faedd34735",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_ss.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c278ef52dbd80a2c143b56ace8f31ebcca5ae76426bc7e38bea3e7e66a1a5742",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_pp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29b9019589464b7650892f84ebe112d03f38e03e8c804d6ce35401f85e31603f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_tn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b1f04a80642f68426a58f5146dedd45bda2c820d65c7c3892e3e2acff495f37",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_array.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0edaabce3e589722d0dd299f7f81d66811351e678046068fae179ad3f331fa4e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_az.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0e9ea0a969913323e917d5b7d631c38e33b3e55a3b641cf553c8ea01228f0a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_pg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c03eb5a59d749a954fe09d4c2a57ec36d30f9bdd51565c8e1e3d3e293d2bbc5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2b6a4837e1abc3efc2fa88707cfa80f618b800bccdad6bd5a5ac3f60ba77d14",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_tenant.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03823b7283e0de940ee3e95bf5645595e4330759ad7dd18f0411c319774ec173",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_se.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "355892de73b5d265e1e57e8ff31b3dd0775c04a191ded999131ebbfdbbcd7231",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de84aa2313d454b404ffc3791805b29cc8cbc49ad6daae83adfefa029ae2aecc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_ra.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a1bd14fe1038fbf09d4196143f4ac262ef7627ee550ea2efbaeceaa3e0a6176",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_sc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7302c71a969dbbc3fb636455ee16ef807a3e2c212d307c305b86504b2b42603c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_region.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6cb89588cca8681cfc12651ace18375eba88d2aaaacf2ece2d7652d9821fde9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_hw.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a5b7a795057365965db00615af3e349a9aba55f0d5d3e02ccf17ab6b77bf018",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fusion_hap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ffc4d041a552ac1d45ab51868428f9281829d2f345581eef8f379b1692e50a1a",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/getters.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5bf8895d303da94229d2492c46c621bd9fc45d046d22f1f804acb4d81d7b00d4",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/prerequisites.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "589f5ad7eed9dfe57263a9d3ec7dd6b179da0406aa2a6706ec056f3ab60af5cd",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/fusion.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35d1ef53acaac66c4aadb35d8e259e97a465094db322a27191bf9dd9de1068ed",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/errors.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa7c577ce38810b137980e87e6e5b87e95fb43e101d02652df7cbb434f630699",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/parsing.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efe7b474e24d7fa53dc134f7fd1e4062542a22d5ea9f8b16715ab8a353d1d953",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/operations.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83b10433689009c69ed3a926d2b3cbe70b047fef29fbd432d69f836b48375354",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/startup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42f7acffe52f3038f483f0698072de73307fa0664eac245901683a509b241a85",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/networking.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42ac372c6e7bf8cd23747ac6d9684a4f9b8f1378a4ada923f6d55bc8606e3df8",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/purestorage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e231135e8559211663aea10e5ccc9ffbc0504bf09ac218cc4efefcf883275165",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/pull_request_template.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "565ead1b588caaa10cd6f2ed1bb6c809eb2ad93bf75da3a198690cac778432d6",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/bug_report.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c8d64f29fb4536513653bf8c97da30f3340e2041b91c8952db1515d6b23a7b3",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/feature_request.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f48c52f209a971b8e7eae4120144d28fcf8ee38a7778a7b4d8cf1ab356617d2",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/ansible-lint.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c85688d98b71e3a6594530a362cd5d2cf83842ceaccd0e0fc76e233777c1cef",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/stale.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bdef4889afabcd627fc30711a0809c7468b8c9e64cbcebe1334f794a41e7bd9",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/black.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c62a1a4fcc1e00f3e8f295863e304db520124bfd3e9b0c2cccd6d78343b679c5",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/create-release.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43ea888cb2b22ddc86ea989f75967accaff0065cc43c39a0043ba6cf2f424378",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c8c2578e81d44e4a9611c57a59c6fbc7dd947ff149a169ea65f497484d6d4a4",
+ "format": 1
+ },
+ {
+ "name": ".github/CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed15d670a6a61e8159059927017e7ba7c8bcc94623884ced7beb39ef445c7b95",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a753d4c6dc5cdd493fd60f147cf68f644ec6f301b895fc249093914db1cf3ab1",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f829699b200db8a8282ce6f44d6ae28a2e3377e0e611b0d327db64b0cbba321",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f4f6972c0df5e698a758d74ab5778f3941fca83f509a00f9665b338c220d762",
+ "format": 1
+ },
+ {
+ "name": ".yamllint",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2970fa4875092f99825ac0da3c82d2413ce973087b9945e68fdfa7b3b1e2012e",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28eab01a890a0719cf1908791d9575a4d47014547796bb077f44702dbbc5632a",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/purestorage/fusion/MANIFEST.json b/ansible_collections/purestorage/fusion/MANIFEST.json
new file mode 100644
index 000000000..4fe3bc8b5
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/MANIFEST.json
@@ -0,0 +1,34 @@
+{
+ "collection_info": {
+ "namespace": "purestorage",
+ "name": "fusion",
+ "version": "1.5.0",
+ "authors": [
+ "Pure Storage Ansible Team <pure-ansible-team@purestorage.com>"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "purestorage",
+ "fusion",
+ "storage"
+ ],
+ "description": "Collection of modules to manage Pure Fusion",
+ "license": [
+ "GPL-3.0-or-later"
+ ],
+ "license_file": null,
+ "dependencies": {},
+ "repository": "https://github.com/Pure-Storage-Ansible/Fusion-Collection",
+ "documentation": "https://github.com/Pure-Storage-Ansible/Fusion-Collection",
+ "homepage": null,
+ "issues": "https://github.com/Pure-Storage-Ansible/Fusion-Collection/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e406206ea2f67e0a9846219a9d5d2813aef76437e1b05d12d341aded53cfd13",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/purestorage/fusion/README.md b/ansible_collections/purestorage/fusion/README.md
new file mode 100644
index 000000000..b2a36de10
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/README.md
@@ -0,0 +1,98 @@
+<a href="https://github.com/Pure-Storage-Ansible/Fusion-Collection/releases/latest"><img src="https://img.shields.io/github/v/tag/Pure-Storage-Ansible/Fusion-Collection?label=release">
+<a href="COPYING.GPLv3"><img src="https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg"></a>
+<img src="https://cla-assistant.io/readme/badge/Pure-Storage-Ansible/Fusion-Collection">
+<img src="https://github.com/Pure-Storage-Ansible/Fusion-Collection/workflows/Pure%20Storage%20Ansible%20CI/badge.svg">
+<a href="https://github.com/psf/black"><img src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
+
+# Pure Storage Fusion Collection
+
+The Pure Storage Fusion collection consists of the latest versions of the Fusion modules.
+
+## Requirements
+
+- ansible-core >= 2.11
+- Python >= 3.8
+- Authorized API Application ID for Pure Storage Pure1 and associated Private Key
+ - Refer to Pure Storage documentation on how to create these.
+- purefusion >= 1.0.4
+- time
+
+## Available Modules
+
+- fusion_api_client: Manage API clients in Pure Storage Fusion
+- fusion_array: Manage arrays in Pure Storage Fusion
+- fusion_az: Create Availability Zones in Pure Storage Fusion
+- fusion_hap: Manage host access policies in Pure Storage Fusion
+- fusion_hw: Create hardware types in Pure Storage Fusion
+- fusion_info: Collect information from Pure Fusion
+- fusion_ni: Manage Network Interfaces in Pure Storage Fusion
+- fusion_nig: Manage Network Interface Groups in Pure Storage Fusion
+- fusion_pg: Manage placement groups in Pure Storage Fusion
+- fusion_pp: Manage protection policies in Pure Storage Fusion
+- fusion_ra: Manage role assignments in Pure Storage Fusion
+- fusion_region: Manage regions in Pure Storage Fusion
+- fusion_sc: Manage storage classes in Pure Storage Fusion
+- fusion_se: Manage storage endpoints in Pure Storage Fusion
+- fusion_ss: Manage storage services in Pure Storage Fusion
+- fusion_tenant: Manage tenants in Pure Storage Fusion
+- fusion_tn: Manage tenant networks in Pure Storage Fusion
+- fusion_ts: Manage tenant spaces in Pure Storage Fusion
+- fusion_volume: Manage volumes in Pure Storage Fusion
+
+## Instructions
+
+Ansible must be installed ([install guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html)):
+```
+sudo pip install ansible
+```
+
+The Python PureFusion SDK (`purefusion`) must be installed:
+```
+sudo pip install purefusion
+```
+
+Install the Pure Storage Fusion collection on your Ansible management host ([Galaxy link](https://galaxy.ansible.com/purestorage/fusion)):
+```
+ansible-galaxy collection install purestorage.fusion
+```
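+
+To confirm the collection is installed and visible to Ansible, you can list it with standard `ansible-galaxy` usage:
+```
+ansible-galaxy collection list purestorage.fusion
+```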
+
+## Example Playbook
+```yaml
+- hosts: localhost
+ tasks:
+    - name: Collect information from Pure Storage Fusion
+ purestorage.fusion.fusion_info:
+ gather_subset: all
+ issuer_id: <Pure1 API Application ID>
+ private_key_file: <private key file name>
+```
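+
+The same credential parameters apply across the modules listed above. As a further illustration, a minimal sketch using `fusion_hap` (the `name` and `iqn` values are placeholders, and the parameter names are taken from this collection's changelog, so check the module documentation for the full option list):
+```yaml
+- hosts: localhost
+  tasks:
+    - name: Create a host access policy in Pure Storage Fusion
+      purestorage.fusion.fusion_hap:
+        name: example-host
+        iqn:
+          - iqn.2023-01.com.example:host1
+        issuer_id: <Pure1 API Application ID>
+        private_key_file: <private key file name>
+```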
+
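+The changelog also notes that the issuer ID can be supplied through the `FUSION_ISSUER_ID` environment variable instead of a task parameter, and that the API endpoint is read from `FUSION_HOST`, which expects a host name without a path. A sketch with placeholder values:
+```
+export FUSION_ISSUER_ID=<Pure1 API Application ID>
+export FUSION_HOST=<Fusion API host name>
+```
+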
+You can find more examples in our [example-playbooks](https://github.com/PureStorage-OpenConnect/ansible-playbook-examples/tree/master/fusion) repository.
+
+## Contributing to this collection
+
+Ongoing development efforts and contributions to this collection are tracked as issues in this repository.
+
+We welcome community contributions to this collection. If you find a problem, need an enhancement, or need a new module, please open an issue or create a PR against the [Pure Storage Fusion Ansible collection repository](https://github.com/Pure-Storage-Ansible/Fusion-Collection/issues).
+
+Code of Conduct
+---------------
+This collection follows the Ansible project's
+[Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html).
+Please read and familiarize yourself with this document.
+
+Releasing, Versioning and Deprecation
+-------------------------------------
+
+This collection follows [Semantic Versioning](https://semver.org). More details on versioning can be found [in the Ansible docs](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections.html#collection-versions).
+
+New minor and major releases as well as deprecations will follow new releases and deprecations of the Pure Storage Fusion product, its REST API and the corresponding Python SDK, which this project relies on.
+
+## License
+
+- [BSD-2-Clause](https://directory.fsf.org/wiki?title=License:FreeBSD)
+- [GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0.en.html)
+
+## Author
+
+This collection was created in 2022 by [Simon Dodsley](@sdodsley) for, and on behalf of, the [Pure Storage Ansible Team](pure-ansible-team@purestorage.com).
diff --git a/ansible_collections/purestorage/fusion/changelogs/.plugin-cache.yaml b/ansible_collections/purestorage/fusion/changelogs/.plugin-cache.yaml
new file mode 100644
index 000000000..23a38bf01
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/changelogs/.plugin-cache.yaml
@@ -0,0 +1,114 @@
+objects:
+ role: {}
+plugins:
+ become: {}
+ cache: {}
+ callback: {}
+ cliconf: {}
+ connection: {}
+ filter: {}
+ httpapi: {}
+ inventory: {}
+ lookup: {}
+ module:
+ fusion_api_client:
+ description: Manage API clients in Pure Storage Fusion
+ name: fusion_api_client
+ namespace: ''
+ version_added: 1.0.0
+ fusion_array:
+ description: Manage arrays in Pure Storage Fusion
+ name: fusion_array
+ namespace: ''
+ version_added: 1.0.0
+ fusion_az:
+ description: Create Availability Zones in Pure Storage Fusion
+ name: fusion_az
+ namespace: ''
+ version_added: 1.0.0
+ fusion_hap:
+ description: Manage host access policies in Pure Storage Fusion
+ name: fusion_hap
+ namespace: ''
+ version_added: 1.0.0
+ fusion_hw:
+ description: Create hardware types in Pure Storage Fusion
+ name: fusion_hw
+ namespace: ''
+ version_added: 1.0.0
+ fusion_info:
+ description: Collect information from Pure Fusion
+ name: fusion_info
+ namespace: ''
+ version_added: 1.0.0
+ fusion_ni:
+ description: Manage network interfaces in Pure Storage Fusion
+ name: fusion_ni
+ namespace: ''
+ version_added: 1.0.0
+ fusion_nig:
+ description: Manage Network Interface Groups in Pure Storage Fusion
+ name: fusion_nig
+ namespace: ''
+ version_added: 1.0.0
+ fusion_pg:
+ description: Manage placement groups in Pure Storage Fusion
+ name: fusion_pg
+ namespace: ''
+ version_added: 1.0.0
+ fusion_pp:
+ description: Manage protection policies in Pure Storage Fusion
+ name: fusion_pp
+ namespace: ''
+ version_added: 1.0.0
+ fusion_ra:
+ description: Manage role assignments in Pure Storage Fusion
+ name: fusion_ra
+ namespace: ''
+ version_added: 1.0.0
+ fusion_region:
+ description: Manage Regions in Pure Storage Fusion
+ name: fusion_region
+ namespace: ''
+ version_added: 1.1.0
+ fusion_sc:
+ description: Manage storage classes in Pure Storage Fusion
+ name: fusion_sc
+ namespace: ''
+ version_added: 1.0.0
+ fusion_se:
+ description: Manage storage endpoints in Pure Storage Fusion
+ name: fusion_se
+ namespace: ''
+ version_added: 1.0.0
+ fusion_ss:
+ description: Manage storage services in Pure Storage Fusion
+ name: fusion_ss
+ namespace: ''
+ version_added: 1.0.0
+ fusion_tenant:
+ description: Manage tenants in Pure Storage Fusion
+ name: fusion_tenant
+ namespace: ''
+ version_added: 1.0.0
+ fusion_tn:
+ description: Manage tenant networks in Pure Storage Fusion
+ name: fusion_tn
+ namespace: ''
+ version_added: 1.0.0
+ fusion_ts:
+ description: Manage tenant spaces in Pure Storage Fusion
+ name: fusion_ts
+ namespace: ''
+ version_added: 1.0.0
+ fusion_volume:
+ description: Manage volumes in Pure Storage Fusion
+ name: fusion_volume
+ namespace: ''
+ version_added: 1.0.0
+ netconf: {}
+ shell: {}
+ strategy: {}
+ test: {}
+ vars: {}
+version: 1.5.0
diff --git a/ansible_collections/purestorage/fusion/changelogs/changelog.yaml b/ansible_collections/purestorage/fusion/changelogs/changelog.yaml
new file mode 100644
index 000000000..82ef323c8
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/changelogs/changelog.yaml
@@ -0,0 +1,345 @@
+ancestor: null
+releases:
+ 1.0.0:
+ modules:
+ - description: Manage API clients in Pure Storage Fusion
+ name: fusion_api_client
+ namespace: ''
+ - description: Manage arrays in Pure Storage Fusion
+ name: fusion_array
+ namespace: ''
+ - description: Create Availability Zones in Pure Storage Fusion
+ name: fusion_az
+ namespace: ''
+ - description: Manage host access policies in Pure Storage Fusion
+ name: fusion_hap
+ namespace: ''
+ - description: Create hardware types in Pure Storage Fusion
+ name: fusion_hw
+ namespace: ''
+ - description: Collect information from Pure Fusion
+ name: fusion_info
+ namespace: ''
+ - description: Manage Network Interface Groups in Pure Storage Fusion
+ name: fusion_nig
+ namespace: ''
+ - description: Manage placement groups in Pure Storage Fusion
+ name: fusion_pg
+ namespace: ''
+ - description: Manage protection policies in Pure Storage Fusion
+ name: fusion_pp
+ namespace: ''
+ - description: Manage role assignments in Pure Storage Fusion
+ name: fusion_ra
+ namespace: ''
+ - description: Manage storage classes in Pure Storage Fusion
+ name: fusion_sc
+ namespace: ''
+ - description: Manage storage services in Pure Storage Fusion
+ name: fusion_ss
+ namespace: ''
+ - description: Manage tenants in Pure Storage Fusion
+ name: fusion_tenant
+ namespace: ''
+ - description: Manage tenant networks in Pure Storage Fusion
+ name: fusion_tn
+ namespace: ''
+ - description: Manage tenant spaces in Pure Storage Fusion
+ name: fusion_ts
+ namespace: ''
+ - description: Manage volumes in Pure Storage Fusion
+ name: fusion_volume
+ namespace: ''
+ release_date: '2022-05-25'
+ 1.0.1:
+ release_date: '2022-05-27'
+ 1.0.2:
+ release_date: '2022-06-13'
+ 1.0.3:
+ fragments:
+ - 27_review_fixes.yaml
+ release_date: '2022-06-27'
+ 1.1.0:
+ changes:
+ bugfixes:
+      - Allow correct use of environment variables for App ID and private key file
+ minor_changes:
+ - fusion_az - Add delete AZ option
+ - fusion_az - Allow any region to be specified instead of limited to a known
+ list
+ - fusion_pp - Add delete PP option
+ - fusion_sc - Add delete SC option
+ - fusion_ss - Add delete SS option
+ fragments:
+ - 27_review_fixes.yaml
+ - 28_any_region.yaml
+ - 29_use_env.yaml
+ - 31_add_delete_az.yaml
+ - 32_add_delete_pp.yaml
+ - 33_add_delete_sc.yaml
+ - 34_add_delete_ss.yaml
+ modules:
+ - description: Manage Regions in Pure Storage Fusion
+ name: fusion_region
+ namespace: ''
+ release_date: '2022-09-01'
+ 1.1.1:
+ release_date: '2022-09-23'
+ 1.2.0:
+ changes:
+ bugfixes:
+ - fusion_info - Fixed issue with storage endpoint dict formatting
+ minor_changes:
+ - fusion_info - Added API Client information
+ fragments:
+ - 39_fix_info.yaml
+ - 41_add_api_client.yaml
+ release_date: '2022-12-02'
+ 1.3.0:
+ changes:
+ bugfixes:
+ - fusion_pg - Add missing 'region' parameter
+ - fusion_tn - Add missing 'region' parameter
+ fragments:
+ - 44_fix_missing_regions.yaml
+ release_date: '2022-12-21'
+ 1.4.0:
+ changes:
+ bugfixes:
+ - fusion_api_client - error messages now mostly handled by errors_py
+ - fusion_hap - could not delete host access policy without iqn option. Now it
+ needs only name option for deletion
+ - fusion_hap - error messages now mostly handled by errors_py
+ - fusion_hap - uppercase names were not supported. Now uppercase names are allowed
+ - fusion_info - fixes typo in output 'appiiances' -> 'appliances'
+ - fusion_info - network_interface_groups subset returned nothing. Now it collects
+ the same information as nigs subset
+ - fusion_info - placements subset returned nothing. Now it collects the same
+ information as placement_groups subset
+ - fusion_nig - add missing 'availability_zone' format param in error message
+ - fusion_nig - error messages now mostly handled by errors_py
+ - fusion_pg - create_pg always broke runtime. Now it executes and creates placement
+ group successfully
+ - fusion_pg - error messages now mostly handled by errors_py
+ - fusion_pp - error messages now mostly handled by errors_py
+ - fusion_pp - fix call to parse_minutes where we were missing a required argument
+ - fusion_sc - error messages now mostly handled by errors_py
+ - fusion_se - add missing 'availability_zone' format param in error message
+ - fusion_se - error messages now mostly handled by errors_py
+ - fusion_se - fix call in get_nifg where provider_subnet was used instead of
+ network_interface_group_name
+ - fusion_ss - error messages now mostly handled by errors_py
+ - fusion_tenant - error messages now mostly handled by errors_py
+ - fusion_ts - add missing 'tenant' format param in error message
+ - fusion_ts - error messages now mostly handled by errors_py
+ - fusion_volume - error messages now mostly handled by errors_py
+ deprecated_features:
+ - fusion_hw - hardware module is being removed as changing hardware type has
+ never been supported by Pure Storage Fusion
+ - fusion_info - nigs subset is deprecated in favor of network_interface_groups
+ and will be removed in the version 1.7.0
+ - fusion_info - placements subset is deprecated in favor of placement_groups
+ and will be removed in the version 1.7.0
+      - fusion_pg - placement_engine option is deprecated because Fusion API no
+        longer supports this parameter. It will be removed in the version 2.0.0
+ - fusion_se - parameters 'addresses', 'gateway' and 'network_interface_groups'
+ are deprecated in favor of 'iscsi' and will be removed in version 2.0.0
+ - fusion_tn - tenant networks are being replaced by storage endpoints ```fusion_se```
+ and Network Interface Groups ```fusion_nig```
+ major_changes:
+ - Patching of resource properties was brought to parity with underlying Python
+ SDK
+ - fusion_volume - fixed and reorganized, arguments changed
+ minor_changes:
+ - errors_py - added opt-in global exception handler which produces simpler and
+ cleaner messages on REST errors
+ - removed dependency on Python `netaddr` package
+ fragments:
+ - 46_deprecate fusion_tn.yaml
+ - 47_fix_volumes.yaml
+ - 63_update_resource_patching.yaml
+ - 67_deprecate fusion_hw.yaml
+ - 68_update_error_handling_for_ts_volume.yaml
+ - 69_use_exc_handler_in_most_modules.yaml
+ - 70_fix_typo_appiances.yaml
+ - 71_fix_few_missing_format_params.yaml
+ - 72_fix_typo_getnifg.yaml
+ - 73_fix_missing_module_params.yaml
+ - 88_deprecate parameters in fusion_se.yaml
+ - PR46_network_interface_groups.yaml
+ - PR53_create_pg.yaml
+ - PR55_host_access_policy.yaml
+ - PR62_fix_placements.yaml
+ release_date: '2023-03-16'
+ 1.4.1:
+ release_date: '2023-03-17'
+ 1.4.2:
+ changes:
+ bugfixes:
+ - fusion_array - correct required parameters
+ - fusion_hap - display name has now default value set to the value of name
+ - fusion_hw - correct required parameters
+ - fusion_pg - correct required parameters
+ - fusion_pp - correct required parameters
+ - fusion_sc - correct required parameters
+ - fusion_ss - allow updating hardware types, correct required parameters
+ - fusion_tn - fix attribute error
+ - fusion_volume - protection policy can now be unset by using '' as name
+ minor_changes:
+ - added Python package dependency checks in prerequisites.py
+ - fusion_hap - added missing 'windows' personality type
+ fragments:
+ - 102_set_hap_display_name_default_value.yaml
+ - 103_correct_required_parameters_in_all_modules.yaml
+ - 108_add_missing_hap_personality.yaml
+ - 114_fix_fusion_tn_error.yaml
+ - 1483_allow_to_unset_protection_policy.yaml
+ - 1538_improve_missing_python_deps_error.yaml
+ release_date: '2023-04-11'
+ 1.5.0:
+ changes:
+ bugfixes:
+ - fusion_info - fix runtime errors caused when listing `interfaces`, `arrays`
+ and `snapshots` dicts
+ - fusion_pg - freshly created placement group is now moved to correct array
+ - fusion_pp - 'local_rpo' changed to accept same input as 'local_retention'
+ - fusion_pp - updated retention description
+ - fusion_ra - 'name' deprecated and aliased to 'role'
+ deprecated_features:
+ - fusion_api_client - 'app_id' and 'key_file' parameters are deprecated in favor
+ of 'issuer_id' and 'private_key_file' parameters and will be removed in the
+ version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated
+ in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version
+ 2.0.0
+ - fusion_array - 'app_id' and 'key_file' parameters are deprecated in favor
+ of 'issuer_id' and 'private_key_file' parameters and will be removed in the
+ version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated
+ in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version
+ 2.0.0
+ - fusion_az - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+ - fusion_hap - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+      - fusion_hap - parameters `nqn`, `wwns`, `host_password`, `host_user`, `target_password` and
+ `target_user` were deprecated
+ - fusion_hw - FUSION_APP_ID and FUSION_HOST env variables are deprecated in
+ favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version
+ 2.0.0
+ - fusion_info - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+ - fusion_info - 'hosts' subset is deprecated in favor of 'host_access_policies'
+ and will be removed in the version 2.0.0
+ - fusion_info - 'interfaces' subset is deprecated in favor of 'network_interfaces'
+ and will be removed in the version 2.0.0
+ - fusion_info - 'zones' subset is deprecated in favor of 'availability_zones'
+ and will be removed in the version 2.0.0
+ - fusion_ni - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+ - fusion_nig - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+ - fusion_pg - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+ - fusion_pp - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+ - fusion_ra - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+ - fusion_region - 'app_id' and 'key_file' parameters are deprecated in favor
+ of 'issuer_id' and 'private_key_file' parameters and will be removed in the
+ version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated
+ in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version
+ 2.0.0
+ - fusion_sc - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+ - fusion_se - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+ - fusion_se - `endpoint_type` parameter is now deprecated and will be removed
+ in version 2.0.0
+ - fusion_ss - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+ - fusion_tenant - 'app_id' and 'key_file' parameters are deprecated in favor
+ of 'issuer_id' and 'private_key_file' parameters and will be removed in the
+ version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated
+ in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version
+ 2.0.0
+ - fusion_tn - FUSION_APP_ID and FUSION_HOST env variables are deprecated in
+ favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version
+ 2.0.0
+ - fusion_ts - 'app_id' and 'key_file' parameters are deprecated in favor of
+ 'issuer_id' and 'private_key_file' parameters and will be removed in the version
+ 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated in favor
+ of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version 2.0.0
+ - fusion_volume - 'app_id' and 'key_file' parameters are deprecated in favor
+ of 'issuer_id' and 'private_key_file' parameters and will be removed in the
+ version 2.0.0, FUSION_APP_ID and FUSION_HOST env variables are deprecated
+ in favor of FUSION_ISSUER_ID and FUSION_HOST and will be removed in the version
+ 2.0.0
+ minor_changes:
+ - FUSION_API_HOST && FUSION_HOST - changed logic; these variables now require
+ the host name without a path
+ - Fusion authentication - add an 'access_token' module parameter and a 'FUSION_ACCESS_TOKEN'
+ environment variable as an alternative way of authentication.
+ - fusion - added a private key password parameter, which is used to decrypt
+ encrypted private key files
+ - fusion_info - `array` is None if missing in `volume`
+ - fusion_info - `hardware_types` is None if missing in `storage_service`
+ - fusion_info - `network_interface_groups` is None if missing in `iscsi_interfaces`
+ in `storage_endpoint`
+ - fusion_info - introduce 'availability_zones' subset option
+ - fusion_info - introduce 'host_access_policies' subset option
+ - fusion_info - introduce 'network_interfaces' subset option
+ - fusion_info - introduce 'regions' subset option
+ - fusion_info - rename 'appliances' in default dict to 'arrays' for consistency
+ - fusion_info - rename 'hosts' dict to 'host_access_policies' for consistency
+ - fusion_info - rename 'interfaces' dict to 'network_interfaces' for consistency
+ - fusion_info - rename 'placements_groups' in default dict to 'placement_groups'
+ for consistency
+ - fusion_info - rename 'zones' dict to 'availability_zones' for consistency
+ - fusion_info - rename hardware to hardware_types in response for consistency
+ - fusion_info - rename storageclass to storage_classes in response for consistency
+ - fusion_pp - duration parsing improved; supports combinations of time units
+ (e.g. 5H5M)
+ - fusion_ra - added `api_client_key` argument, which can be used instead of
+ the `user` and `principal` arguments
+ - fusion_ra - added `principal` argument, which is the ID of either an API
+ client or a user and can be used instead of the `user` argument
+ - fusion_se - add support for CBS Storage Endpoint
+ fragments:
+ - 101_improve_duration_parsing.yml
+ - 109_unify_storage_classes_key_in_fusion_info.yml
+ - 112_unify_hardware_types_key_in_fusion_info.yml
+ - 121_unify_parameters_and_env_variables_across_different_clients.yml
+ - 127_change_logic_of_fusion_api_host.yml
+ - 129_add_access_token.yml
+ - 130_add_private_key_password.yml
+ - 132_deprecate_unused_parameters_in_hap_module.yml
+ - 134_add_cbs_storage_endpoint_support.yml
+ - 135_add_principal_to_fusion_ra.yml
+ - 138_add_api_client_key_to_fusion_ra.yml
+ - 139_fix_bugs_in_fusion_info.yml
+ - 142_add_missing_none_fields.yml
+ - 143_unify_keys_in_fusion_info.yml
+ - 3289_functests_pp_pg_ra.yml
+ - 99_update_protection_policy_retention_description.yaml
+ release_date: '2023-05-31'
diff --git a/ansible_collections/purestorage/fusion/changelogs/config.yaml b/ansible_collections/purestorage/fusion/changelogs/config.yaml
new file mode 100644
index 000000000..0c1851805
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/changelogs/config.yaml
@@ -0,0 +1,32 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sanitize_changelog: true
+sections:
+ - - major_changes
+ - Major Changes
+ - - minor_changes
+ - Minor Changes
+ - - breaking_changes
+ - Breaking Changes / Porting Guide
+ - - deprecated_features
+ - Deprecated Features
+ - - removed_features
+ - Removed Features (previously deprecated)
+ - - security_fixes
+ - Security Fixes
+ - - bugfixes
+ - Bugfixes
+ - - known_issues
+ - Known Issues
+title: Purestorage.Fusion
+trivial_section_name: trivial
+use_fqcn: true
diff --git a/ansible_collections/purestorage/fusion/meta/runtime.yml b/ansible_collections/purestorage/fusion/meta/runtime.yml
new file mode 100644
index 000000000..1812440b2
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/meta/runtime.yml
@@ -0,0 +1,12 @@
+---
+requires_ansible: ">=2.11.0"
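+# modules listed under plugin_routing below are deprecated: Ansible emits
+# warning_text whenever they are used, and they are scheduled for removal
+# on the given removal_date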
+plugin_routing:
+ modules:
+ fusion_tn:
+ deprecation:
+ removal_date: "2023-07-26"
+ warning_text: "Use fusion_se + fusion_nig instead."
+ fusion_hw:
+ deprecation:
+ removal_date: "2023-08-09"
+ warning_text: "Don't use this module. This functionality isn't supported."
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/create_array.yml b/ansible_collections/purestorage/fusion/playbooks/simple/create_array.yml
new file mode 100644
index 000000000..b4109f7f9
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/create_array.yml
@@ -0,0 +1,17 @@
+---
+- name: Create array
+ hosts: localhost
+ tasks:
+
+ - name: Register array
+ purestorage.fusion.fusion_array:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ state: present # or absent
+ name: flasharray3
+ display_name: "flasharray3"
+ host_name: "flasharray3"
+ hardware_type: flash-array-x
+ appliance_id: 1187351-242133817-5976825671211737520
+ az: az1
+ region: region1
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/create_availability_zone.yml b/ansible_collections/purestorage/fusion/playbooks/simple/create_availability_zone.yml
new file mode 100644
index 000000000..37f4264c6
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/create_availability_zone.yml
@@ -0,0 +1,13 @@
+---
+- name: Create availability zone
+ hosts: localhost
+ tasks:
+
+ - name: Create new Availability Zone
+ purestorage.fusion.fusion_az:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ state: present # or absent
+ name: "az2"
+ display_name: "az2"
+ region: pure-us-west
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/create_tenant_space.yml b/ansible_collections/purestorage/fusion/playbooks/simple/create_tenant_space.yml
new file mode 100644
index 000000000..a71723c00
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/create_tenant_space.yml
@@ -0,0 +1,12 @@
+---
+- name: Create tenant space
+ hosts: localhost
+ tasks:
+
+ - name: Create new tenant space db_tenant_space for tenant_name
+ purestorage.fusion.fusion_ts:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ state: "present" # or absent
+ name: "db_tenant_space"
+ tenant: "tenant_name"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_all.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_all.yml
new file mode 100644
index 000000000..ff9d15cce
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_all.yml
@@ -0,0 +1,14 @@
+---
+- name: Print all Fusion resources
+ hosts: localhost
+ tasks:
+ - name: Collect all for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: all
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_api_clients.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_api_clients.yml
new file mode 100644
index 000000000..3e9c7ccb4
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_api_clients.yml
@@ -0,0 +1,14 @@
+---
+- name: Print API clients
+ hosts: localhost
+ tasks:
+ - name: Collect api_clients for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: api_clients
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_arrays.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_arrays.yml
new file mode 100644
index 000000000..4c8cad76c
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_arrays.yml
@@ -0,0 +1,14 @@
+---
+- name: Print arrays
+ hosts: localhost
+ tasks:
+ - name: Collect arrays for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: arrays
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_availability_zones.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_availability_zones.yml
new file mode 100644
index 000000000..3f4fc6ccf
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_availability_zones.yml
@@ -0,0 +1,14 @@
+---
+- name: Print availability zones
+ hosts: localhost
+ tasks:
+ - name: Collect Availability Zones for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: zones
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_hardware_types.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_hardware_types.yml
new file mode 100644
index 000000000..4c1e20db0
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_hardware_types.yml
@@ -0,0 +1,14 @@
+---
+- name: Print hardware types
+ hosts: localhost
+ tasks:
+ - name: Collect hardware_types for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: hardware_types
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_hosts.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_hosts.yml
new file mode 100644
index 000000000..1c60fdf7b
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_hosts.yml
@@ -0,0 +1,14 @@
+---
+- name: List hosts
+ hosts: localhost
+ tasks:
+ - name: Collect hosts for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: hosts
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_interfaces.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_interfaces.yml
new file mode 100644
index 000000000..edee38234
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_interfaces.yml
@@ -0,0 +1,14 @@
+---
+- name: Print interfaces
+ hosts: localhost
+ tasks:
+ - name: Collect interfaces for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: interfaces
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_network_interface_groups.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_network_interface_groups.yml
new file mode 100644
index 000000000..6b2cbc260
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_network_interface_groups.yml
@@ -0,0 +1,14 @@
+---
+- name: Print network interface groups
+ hosts: localhost
+ tasks:
+ - name: Collect network_interface_groups for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: network_interface_groups
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_placement_groups.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_placement_groups.yml
new file mode 100644
index 000000000..2b7da696d
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_placement_groups.yml
@@ -0,0 +1,14 @@
+---
+- name: Print placement groups
+ hosts: localhost
+ tasks:
+ - name: Collect placement_groups for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: placement_groups
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_protection_policies.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_protection_policies.yml
new file mode 100644
index 000000000..f9b8249ec
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_protection_policies.yml
@@ -0,0 +1,14 @@
+---
+- name: Print protection policies
+ hosts: localhost
+ tasks:
+ - name: Collect protection_policies for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: protection_policies
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_roles.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_roles.yml
new file mode 100644
index 000000000..c224f54ab
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_roles.yml
@@ -0,0 +1,14 @@
+---
+- name: Print roles
+ hosts: localhost
+ tasks:
+ - name: Collect roles for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: roles
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_snapshots.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_snapshots.yml
new file mode 100644
index 000000000..fda1c1665
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_snapshots.yml
@@ -0,0 +1,14 @@
+---
+- name: Print snapshots
+ hosts: localhost
+ tasks:
+ - name: Collect snapshots for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: snapshots
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_storage_classes.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_storage_classes.yml
new file mode 100644
index 000000000..1d361e329
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_storage_classes.yml
@@ -0,0 +1,14 @@
+---
+- name: Print storage classes
+ hosts: localhost
+ tasks:
+ - name: Collect storage_classes for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: storage_classes
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_storage_endpoints.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_storage_endpoints.yml
new file mode 100644
index 000000000..806cceab1
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_storage_endpoints.yml
@@ -0,0 +1,14 @@
+---
+- name: Print storage endpoints
+ hosts: localhost
+ tasks:
+ - name: Collect storage_endpoints for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: storage_endpoints
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_storage_services.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_storage_services.yml
new file mode 100644
index 000000000..2d4414867
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_storage_services.yml
@@ -0,0 +1,14 @@
+---
+- name: Print storage services
+ hosts: localhost
+ tasks:
+ - name: Collect storage_services for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: storage_services
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_tenant_spaces.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_tenant_spaces.yml
new file mode 100644
index 000000000..dd392c131
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_tenant_spaces.yml
@@ -0,0 +1,14 @@
+---
+- name: Print tenant spaces
+ hosts: localhost
+ tasks:
+ - name: Collect tenant_spaces for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: tenant_spaces
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_tenants.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_tenants.yml
new file mode 100644
index 000000000..f15c62848
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_tenants.yml
@@ -0,0 +1,14 @@
+---
+- name: Print tenants
+ hosts: localhost
+ tasks:
+ - name: Collect tenants for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: tenants
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_users.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_users.yml
new file mode 100644
index 000000000..e64852513
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_users.yml
@@ -0,0 +1,14 @@
+---
+- name: Print users
+ hosts: localhost
+ tasks:
+ - name: Collect users for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: users
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/list_volumes.yml b/ansible_collections/purestorage/fusion/playbooks/simple/list_volumes.yml
new file mode 100644
index 000000000..8c97e6a6c
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/list_volumes.yml
@@ -0,0 +1,14 @@
+---
+- name: Print volumes
+ hosts: localhost
+ tasks:
+ - name: Collect volumes for Pure Storage
+ purestorage.fusion.fusion_info:
+ gather_subset: volumes
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ register: fusion_info
+
+ - name: Print Fusion resources
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/make_tenant_admin.yml b/ansible_collections/purestorage/fusion/playbooks/simple/make_tenant_admin.yml
new file mode 100644
index 000000000..f55a30023
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/make_tenant_admin.yml
@@ -0,0 +1,12 @@
+---
+- name: Add tenant admin role
+ hosts: localhost
+ tasks:
+ - name: Add tenant-admin role to api-client
+ purestorage.fusion.fusion_ra:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ state: present
+ role: "tenant-admin"
+ scope: "organization" # "organization" "tenant_space"
+ user: "{{ ansible_env.FUSION_ISSUER_ID }}"
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/remove_array.yml b/ansible_collections/purestorage/fusion/playbooks/simple/remove_array.yml
new file mode 100644
index 000000000..b50031743
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/remove_array.yml
@@ -0,0 +1,17 @@
+---
+- name: Remove array
+ hosts: localhost
+ tasks:
+
+ - name: Remove array
+ purestorage.fusion.fusion_array:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ state: absent # or present
+ name: flasharray3
+ display_name: "flasharray3"
+ host_name: "flasharray3"
+ hardware_type: flash-array-x
+ appliance_id: 1187351-242133817-5976825671211737520
+ az: az1
+ region: region1
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/setup_infrastructure.yml b/ansible_collections/purestorage/fusion/playbooks/simple/setup_infrastructure.yml
new file mode 100644
index 000000000..baea6e59d
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/setup_infrastructure.yml
@@ -0,0 +1,64 @@
+---
+- name: Setup Fusion resources
+ hosts: localhost
+ tasks:
+
+ - name: Create new region
+ purestorage.fusion.fusion_region:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ state: present # or absent
+ name: region1
+ display_name: "region1"
+
+ - name: Create new Availability Zone
+ purestorage.fusion.fusion_az:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ state: present # or absent
+ name: az1
+ region: region1
+ display_name: "az1"
+
+ - name: Create new network interface group
+ purestorage.fusion.fusion_nig:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ state: present # or absent
+ name: "interface_group1"
+ availability_zone: "az1"
+ region: region1
+ mtu: 1500
+ gateway: 172.17.1.1
+ prefix: 172.17.1.0/24
+
+ - name: Create new Storage Endpoint
+ purestorage.fusion.fusion_se:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ state: present # or absent
+ region: region1
+ name: default
+ display_name: default
+ availability_zone: az1
+ endpoint_type: iscsi
+ iscsi:
+ - address: "172.17.1.2/24"
+ gateway: "172.17.1.1"
+ network_interface_groups: ["interface_group1"]
+ - address: "172.17.1.1/24"
+ gateway: "172.17.1.1"
+ network_interface_groups: ["interface_group1"]
+
+ - name: Register new array
+ purestorage.fusion.fusion_array:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ state: present # or absent
+ name: flasharray1
+ display_name: "flasharray1"
+ az: az1
+ hardware_type: flash-array-x
+ appliance_id: 1187351-242133817-5976825671211737520
+ region: region1
+ host_name: flasharray1
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/setup_protection_policies.yml b/ansible_collections/purestorage/fusion/playbooks/simple/setup_protection_policies.yml
new file mode 100644
index 000000000..f88c35e7a
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/setup_protection_policies.yml
@@ -0,0 +1,13 @@
+---
+- name: Create protection policy
+ hosts: localhost
+ tasks:
+ - name: Create new Protection Policy PP_name
+ purestorage.fusion.fusion_pp:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ name: "PP_name"
+ display_name: "PP_name"
+ local_rpo: "15" # in minutes
+ local_retention: "24h" # m(inutes), h(ours), d(ays), w(eeks) or y(ears)
+ state: present # or absent
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/setup_storage_service_class.yml b/ansible_collections/purestorage/fusion/playbooks/simple/setup_storage_service_class.yml
new file mode 100644
index 000000000..16326bc93
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/setup_storage_service_class.yml
@@ -0,0 +1,24 @@
+---
+- name: Create storage service & class
+ hosts: localhost
+ tasks:
+ - name: Create new storage service called storage_service_1
+ purestorage.fusion.fusion_ss:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ name: storage_service_1
+ display_name: "storage_service_1"
+ hardware_types: flash-array-c
+ state: present # or absent
+
+ - name: Create new storage class storage_class_1
+ purestorage.fusion.fusion_sc:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ name: storage_class_1
+ display_name: "storage_class_1"
+ storage_service: storage_service_1
+ size_limit: 100M # 100M 100G 100T 100P - Default 4PB
+ iops_limit: 10000 # Must be between 100 and 100000000.
+      bw_limit: 4194M # or a plain number of bytes/s, e.g. 524288000
+ state: present # or absent
diff --git a/ansible_collections/purestorage/fusion/playbooks/simple/setup_workloads.yml b/ansible_collections/purestorage/fusion/playbooks/simple/setup_workloads.yml
new file mode 100644
index 000000000..3a601514c
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/simple/setup_workloads.yml
@@ -0,0 +1,71 @@
+---
+- name: Setup workloads
+ hosts: localhost
+  # This playbook creates:
+
+ # *- Tenant space: db_tenant_space
+ # *- Placement Group: pg1
+ # *- AIX host access policy: customer_host_access
+ # *- volume: data_vol1
+ # *- volume: data_vol2
+
+  # requires:
+ # *- Storage class: db_high_performance
+ # *- Tenant: oracle_dbas
+ # *- Region: region1
+
+ tasks:
+ - name: Create new tenant space db_tenant_space for oracle_dbas
+ purestorage.fusion.fusion_ts:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ name: "db_tenant_space"
+ tenant: "oracle_dbas"
+ state: "present" # or absent
+
+ - name: Create new placement group named pg1
+ purestorage.fusion.fusion_pg:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ name: "pg1"
+ tenant: "oracle_dbas"
+ tenant_space: "db_tenant_space"
+ region: "region1"
+ availability_zone: "az1"
+ storage_service: "db_xl"
+ state: "present" # or absent
+
+ - name: Create new host access policy
+ purestorage.fusion.fusion_hap:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ name: "customer_host_access"
+ personality: "linux"
+ iqn: "iqn.1994-05.com.redhat:9dd57693efb"
+ state: "present" # or absent
+
+ - name: Create new volume data_vol1 in storage_class db_high_performance
+ purestorage.fusion.fusion_volume:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ name: "data_vol1"
+ storage_class: "db_high_performance"
+ size: "500G" # Volume size in M, G, T or P units.
+ tenant: "oracle_dbas"
+ tenant_space: "db_tenant_space"
+ placement_group: "pg1"
+ host_access_policies: "customer_host_access"
+ state: "present" # or absent
+
+ - name: Create new volume data_vol2 in storage_class db_high_performance
+ purestorage.fusion.fusion_volume:
+ issuer_id: "{{ ansible_env.FUSION_ISSUER_ID }}"
+ private_key_file: "{{ ansible_env.FUSION_PRIVATE_KEY_FILE }}"
+ name: "data_vol2"
+ storage_class: "db_high_performance"
+ size: "500G" # Volume size in M, G, T or P units.
+ tenant: "oracle_dbas"
+ tenant_space: "db_tenant_space"
+ placement_group: "pg1"
+ host_access_policies: "customer_host_access"
+ state: "present" # or absent
diff --git a/ansible_collections/purestorage/fusion/playbooks/tasks/.keep b/ansible_collections/purestorage/fusion/playbooks/tasks/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/tasks/.keep
diff --git a/ansible_collections/purestorage/fusion/playbooks/templates/.keep b/ansible_collections/purestorage/fusion/playbooks/templates/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/templates/.keep
diff --git a/ansible_collections/purestorage/fusion/playbooks/vars/.keep b/ansible_collections/purestorage/fusion/playbooks/vars/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/playbooks/vars/.keep
diff --git a/ansible_collections/purestorage/fusion/plugins/doc_fragments/purestorage.py b/ansible_collections/purestorage/fusion/plugins/doc_fragments/purestorage.py
new file mode 100644
index 000000000..a2f933161
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/doc_fragments/purestorage.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Simon Dodsley <simon@purestorage.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard Pure Storage documentation fragment
+ DOCUMENTATION = r"""
+options:
+ - See separate platform section for more details
+requirements:
+ - See separate platform section for more details
+notes:
+ - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade, Pure1, Fusion
+"""
+
+ # Documentation fragment for Fusion
+ FUSION = r"""
+options:
+ private_key_file:
+ aliases: [ key_file ]
+ description:
+ - Path to the private key file
+      - Defaults to the value of the FUSION_PRIVATE_KEY_FILE environment variable, if set.
+ type: str
+ private_key_password:
+ description:
+ - Password of the encrypted private key file
+ type: str
+ issuer_id:
+ aliases: [ app_id ]
+ description:
+ - Application ID from Pure1 Registration page
+      - e.g. pure1:apikey:dssf2331sd
+      - Defaults to the value of the FUSION_ISSUER_ID environment variable, if set
+ type: str
+ access_token:
+ description:
+ - Access token for Fusion Service
+      - Defaults to the value of the FUSION_ACCESS_TOKEN environment variable, if set
+ type: str
+notes:
+ - This module requires the I(purefusion) Python library
+ - You must set C(FUSION_ISSUER_ID) and C(FUSION_PRIVATE_KEY_FILE) environment variables
+ if I(issuer_id) and I(private_key_file) arguments are not passed to the module directly
+  - To authenticate with an access token, set the C(FUSION_ACCESS_TOKEN) environment variable
+    if the I(access_token) argument is not passed to the module directly
+requirements:
+ - python >= 3.8
+ - purefusion
+"""
diff --git a/ansible_collections/purestorage/fusion/plugins/inventory/__init__.py b/ansible_collections/purestorage/fusion/plugins/inventory/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/inventory/__init__.py
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/errors.py b/ansible_collections/purestorage/fusion/plugins/module_utils/errors.py
new file mode 100644
index 000000000..0edf364cf
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/errors.py
@@ -0,0 +1,291 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Jan Kodera (jkodera@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+try:
+ import fusion as purefusion
+ import urllib3
+except ImportError:
+ pass
+
+import sys
+import json
+import re
+import traceback as trace
+
+
+class OperationException(Exception):
+ """Raised if an asynchronous Operation fails."""
+
+ def __init__(self, op, http_error=None):
+ self._op = op
+ self._http_error = http_error
+
+ @property
+ def op(self):
+ return self._op
+
+ @property
+ def http_error(self):
+ return self._http_error
+
+
+def _get_verbosity(module):
+ # verbosity is a private member and Ansible does not really allow
+ # providing extra information only if the user wants it due to ideological
+ # reasons, so extract it as carefully as possible and assume non-verbose
+ # if something fails
+ try:
+ if module._verbosity is not None and isinstance(module._verbosity, int):
+ return module._verbosity
+ except Exception:
+ pass
+ return 0
+
+
+def _extract_rest_call_site(traceback):
+ # extracts first function in traceback that comes from 'fusion.api.*_api*',
+ # converts its name from something like 'get_volume' to 'Get volume' and returns
+ while traceback:
+ try:
+ frame = traceback.tb_frame
+ func_name = (
+ frame.f_code.co_name
+ ) # contains function name, e.g. 'get_volume'
+ mod_path = frame.f_globals[
+ "__name__"
+ ] # contains module path, e.g. 'fusion.api.volumes_api'
+ path_segments = mod_path.split(".")
+ if (
+ path_segments[0] == "fusion"
+ and path_segments[1] == "api"
+ and "_api" in path_segments[2]
+ ):
+ call_site = func_name.replace("_", " ").capitalize()
+ return call_site
+ except Exception:
+ pass
+ traceback = traceback.tb_next
+ return None
+
+
+class DetailsPrinter:
+ def __init__(self, target):
+ self._target = target
+ self._parenthesed = False
+
+ def append(self, what):
+ if not self._parenthesed:
+ self._target += " ("
+ self._parenthesed = True
+ else:
+ self._target += ", "
+
+ self._target += what
+
+ def finish(self):
+ if self._parenthesed:
+ self._target += ")"
+ return self._target
+
+
+def format_fusion_api_exception(exception, traceback=None):
+ """Formats `fusion.rest.ApiException` into a simple short form, suitable
+ for Ansible error output. Returns a (message: str, body: dict) tuple."""
+ message = None
+ code = None
+ resource_name = None
+ request_id = None
+ body = None
+ call_site = _extract_rest_call_site(traceback)
+ try:
+ body = json.loads(exception.body)
+ request_id = body.get("request_id", None)
+ error = body["error"]
+ message = error.get("message")
+ code = error.get("pure_code")
+ if not code:
+ code = exception.status
+ if not code:
+ code = error.get("http_code")
+ resource_name = error["details"]["name"]
+ except Exception:
+ pass
+
+ output = ""
+ if call_site:
+ output += "'{0}' failed".format(call_site)
+ else:
+ output += "request failed"
+
+ if message:
+ output += ", {0}".format(message.replace('"', "'"))
+
+ details = DetailsPrinter(output)
+ if resource_name:
+ details.append("resource: '{0}'".format(resource_name))
+ if code:
+ details.append("code: '{0}'".format(code))
+ if request_id:
+ details.append("request id: '{0}'".format(request_id))
+ output = details.finish()
+
+ return (output, body)
+
+
+def format_failed_fusion_operation_exception(exception):
+ """Formats failed `fusion.Operation` into a simple short form, suitable
+    for Ansible error output. Returns a `str`."""
+ op = exception.op
+ http_error = exception.http_error
+ if op.status != "Failed" and not http_error:
+ raise ValueError(
+ "BUG: can only format Operation exception with .status == Failed or http_error != None"
+ )
+
+ message = None
+ code = None
+ operation_name = None
+ operation_id = None
+
+ try:
+ if op.status == "Failed":
+ operation_id = op.id
+ error = op.error
+ message = error.message
+ code = error.pure_code
+ if not code:
+ code = error.http_code
+ operation_name = op.request_type
+    except Exception:
+ pass
+
+ output = ""
+ if operation_name:
+ # converts e.g. 'CreateVolume' to 'Create volume'
+ operation_name = re.sub("(.)([A-Z][a-z]+)", r"\1 \2", operation_name)
+ operation_name = re.sub(
+ "([a-z0-9])([A-Z])", r"\1 \2", operation_name
+ ).capitalize()
+ output += "{0}: ".format(operation_name)
+ output += "operation failed"
+
+ if message:
+ output += ", {0}".format(message.replace('"', "'"))
+
+ details = DetailsPrinter(output)
+ if code:
+ details.append("code: '{0}'".format(code))
+ if operation_id:
+ details.append("operation id: '{0}'".format(operation_id))
+ if http_error:
+ details.append("HTTP error: '{0}'".format(str(http_error).replace('"', "'")))
+
+ output = details.finish()
+
+ return output
+
+
+def format_http_exception(exception, traceback):
+ """Formats failed `urllib3.exceptions` exceptions into a simple short form,
+ suitable for Ansible error output. Returns a `str`."""
+ # urllib3 exceptions hide all details in a formatted message so all we
+ # can do is append the REST call that caused this
+ output = ""
+ call_site = _extract_rest_call_site(traceback)
+ if call_site:
+ output += "'{0}': ".format(call_site)
+ output += "HTTP request failed via "
+
+ inner = exception
+ while True:
+ try:
+ e = inner.reason
+ if e and isinstance(e, urllib3.exceptions.HTTPError):
+ inner = e
+ continue
+ break
+ except Exception:
+ break
+
+ if inner != exception:
+ output += "'{0}'/'{1}'".format(type(inner).__name__, type(exception).__name__)
+ else:
+ output += "'{0}'".format(type(exception).__name__)
+
+ output += " - {0}".format(str(exception).replace('"', "'"))
+
+ return output
+
+
+def _handle_api_exception(
+ module,
+ exception,
+ traceback,
+ verbosity,
+):
+ (error_message, body) = format_fusion_api_exception(exception, traceback)
+
+ if verbosity > 1:
+ module.fail_json(msg=error_message, call_details=body, traceback=str(traceback))
+ elif verbosity > 0:
+ module.fail_json(msg=error_message, call_details=body)
+ else:
+ module.fail_json(msg=error_message)
+
+
+def _handle_operation_exception(module, exception, traceback, verbosity):
+ op = exception.op
+
+ error_message = format_failed_fusion_operation_exception(exception)
+
+ if verbosity > 1:
+ module.fail_json(
+ msg=error_message, op_details=op.to_dict(), traceback=str(traceback)
+ )
+ elif verbosity > 0:
+ module.fail_json(msg=error_message, op_details=op.to_dict())
+ else:
+ module.fail_json(msg=error_message)
+
+
+def _handle_http_exception(module, exception, traceback, verbosity):
+ error_message = format_http_exception(exception, traceback)
+
+ if verbosity > 1:
+        # use the three-argument form so this also works on Python < 3.10
+        module.fail_json(
+            msg=error_message,
+            traceback=trace.format_exception(
+                type(exception), exception, exception.__traceback__
+            ),
+        )
+ else:
+ module.fail_json(msg=error_message)
+
+
+def _except_hook_callback(module, original_hook, type, value, traceback):
+ verbosity = _get_verbosity(module)
+ if type == purefusion.rest.ApiException:
+ _handle_api_exception(
+ module,
+ value,
+ traceback,
+ verbosity,
+ )
+ elif type == OperationException:
+ _handle_operation_exception(module, value, traceback, verbosity)
+ elif issubclass(type, urllib3.exceptions.HTTPError):
+ _handle_http_exception(module, value, traceback, verbosity)
+
+ # if we bubbled here the handlers were not able to process the exception
+ original_hook(type, value, traceback)
+
+
+def install_fusion_exception_hook(module):
+ """Installs a hook that catches `purefusion.rest.ApiException` and
+ `OperationException` and produces simpler and nicer error messages
+ for Ansible output."""
+ original_hook = sys.excepthook
+ sys.excepthook = lambda type, value, traceback: _except_hook_callback(
+ module, original_hook, type, value, traceback
+ )
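+
+
+# Illustrative usage (the module wiring shown is hypothetical): install the
+# hook right after constructing the module, so uncaught Fusion/HTTP exceptions
+# surface as readable module failures instead of raw tracebacks:
+#   module = AnsibleModule(argument_spec=fusion_argument_spec())
+#   install_fusion_exception_hook(module)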
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/fusion.py b/ansible_collections/purestorage/fusion/plugins/module_utils/fusion.py
new file mode 100644
index 000000000..74b5f0e91
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/fusion.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Simon Dodsley <simon@purestorage.com>,2021
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+try:
+ import fusion
+except ImportError:
+ pass
+
+from os import environ
+from urllib.parse import urljoin
+import platform
+
+TOKEN_EXCHANGE_URL = "https://api.pure1.purestorage.com/oauth2/1.0/token"
+VERSION = 1.0
+USER_AGENT_BASE = "Ansible"
+
+PARAM_ISSUER_ID = "issuer_id"
+PARAM_PRIVATE_KEY_FILE = "private_key_file"
+PARAM_PRIVATE_KEY_PASSWORD = "private_key_password"
+PARAM_ACCESS_TOKEN = "access_token"
+ENV_ISSUER_ID = "FUSION_ISSUER_ID"
+ENV_API_HOST = "FUSION_API_HOST"
+ENV_PRIVATE_KEY_FILE = "FUSION_PRIVATE_KEY_FILE"
+ENV_TOKEN_ENDPOINT = "FUSION_TOKEN_ENDPOINT"
+ENV_ACCESS_TOKEN = "FUSION_ACCESS_TOKEN"
+
+# will be deprecated in 2.0.0
+PARAM_APP_ID = "app_id" # replaced by PARAM_ISSUER_ID
+PARAM_KEY_FILE = "key_file" # replaced by PARAM_PRIVATE_KEY_FILE
+ENV_APP_ID = "FUSION_APP_ID" # replaced by ENV_ISSUER_ID
+ENV_HOST = "FUSION_HOST" # replaced by ENV_API_HOST
+DEP_VER = "2.0.0"
+BASE_PATH = "/api/1.1"
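+# FUSION_API_HOST (and the deprecated FUSION_HOST) must hold only the scheme
+# and host name; BASE_PATH is joined onto it in get_fusion() below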
+
+
+def _env_deprecation_warning(module, old_env, new_env, vers):
+ if old_env in environ:
+ if new_env in environ:
+ module.warn(
+ f"{old_env} env variable is ignored because {new_env} is specified."
+                f" {old_env} env variable is deprecated and will be removed in version {vers}."
+ f" Please use {new_env} env variable only."
+ )
+ else:
+ module.warn(
+                f"{old_env} env variable is deprecated and will be removed in version {vers}."
+ f" Please use {new_env} env variable instead."
+ )
+
+
+def _param_deprecation_warning(module, old_param, new_param, vers):
+ if old_param in module.params:
+ module.warn(
+            f"{old_param} parameter is deprecated and will be removed in version {vers}."
+ f" Please use {new_param} parameter instead."
+ f" Don't use both parameters simultaneously."
+ )
+
+
+def get_fusion(module):
+    """Return an authenticated fusion.ApiClient, or fail the module"""
+ # deprecation warnings
+ _param_deprecation_warning(module, PARAM_APP_ID, PARAM_ISSUER_ID, DEP_VER)
+ _param_deprecation_warning(module, PARAM_KEY_FILE, PARAM_PRIVATE_KEY_FILE, DEP_VER)
+ _env_deprecation_warning(module, ENV_APP_ID, ENV_ISSUER_ID, DEP_VER)
+ _env_deprecation_warning(module, ENV_HOST, ENV_API_HOST, DEP_VER)
+
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
+
+ issuer_id = module.params[PARAM_ISSUER_ID]
+ access_token = module.params[PARAM_ACCESS_TOKEN]
+ private_key_file = module.params[PARAM_PRIVATE_KEY_FILE]
+ private_key_password = module.params[PARAM_PRIVATE_KEY_PASSWORD]
+
+ if private_key_password is not None:
+ module.fail_on_missing_params([PARAM_PRIVATE_KEY_FILE])
+
+ config = fusion.Configuration()
+ if ENV_API_HOST in environ or ENV_HOST in environ:
+ host_url = environ.get(ENV_API_HOST, environ.get(ENV_HOST))
+ config.host = urljoin(host_url, BASE_PATH)
+ config.token_endpoint = environ.get(ENV_TOKEN_ENDPOINT, config.token_endpoint)
+
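+    # Credential precedence, as implemented below: an explicit access_token
+    # parameter wins, then explicit issuer_id/private_key_file parameters,
+    # then the FUSION_ACCESS_TOKEN env variable, and finally FUSION_ISSUER_ID
+    # (or the deprecated FUSION_APP_ID) with FUSION_PRIVATE_KEY_FILE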
+ if access_token is not None:
+ config.access_token = access_token
+ elif issuer_id is not None and private_key_file is not None:
+ config.issuer_id = issuer_id
+ config.private_key_file = private_key_file
+ if private_key_password is not None:
+ config.private_key_password = private_key_password
+ elif ENV_ACCESS_TOKEN in environ:
+ config.access_token = environ.get(ENV_ACCESS_TOKEN)
+ elif (
+ ENV_ISSUER_ID in environ or ENV_APP_ID in environ
+ ) and ENV_PRIVATE_KEY_FILE in environ:
+ config.issuer_id = environ.get(ENV_ISSUER_ID, environ.get(ENV_APP_ID))
+ config.private_key_file = environ.get(ENV_PRIVATE_KEY_FILE)
+ else:
+ module.fail_json(
+ msg=f"You must set either {ENV_ISSUER_ID} and {ENV_PRIVATE_KEY_FILE} or {ENV_ACCESS_TOKEN} environment variables. "
+ f"Or module arguments either {PARAM_ISSUER_ID} and {PARAM_PRIVATE_KEY_FILE} or {PARAM_ACCESS_TOKEN}"
+ )
+
+ try:
+ client = fusion.ApiClient(config)
+ client.set_default_header("User-Agent", user_agent)
+ api_instance = fusion.DefaultApi(client)
+ api_instance.get_version()
+ except Exception as err:
+ module.fail_json(msg="Fusion authentication failed: {0}".format(err))
+
+ return client
+
+
+def fusion_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return {
+ PARAM_ISSUER_ID: {
+ "no_log": True,
+ "aliases": [PARAM_APP_ID],
+ "deprecated_aliases": [
+ {
+ "name": PARAM_APP_ID,
+ "version": DEP_VER,
+ "collection_name": "purefusion.fusion",
+ }
+ ],
+ },
+ PARAM_PRIVATE_KEY_FILE: {
+ "no_log": False,
+ "aliases": [PARAM_KEY_FILE],
+ "deprecated_aliases": [
+ {
+ "name": PARAM_KEY_FILE,
+ "version": DEP_VER,
+ "collection_name": "purefusion.fusion",
+ }
+ ],
+ },
+ PARAM_PRIVATE_KEY_PASSWORD: {
+ "no_log": True,
+ },
+ PARAM_ACCESS_TOKEN: {
+ "no_log": True,
+ },
+ }
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/getters.py b/ansible_collections/purestorage/fusion/plugins/module_utils/getters.py
new file mode 100644
index 000000000..535de76ba
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/getters.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Daniel Turecek (dturecek@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
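+# Each getter below returns the requested resource, or None when the REST call
+# raises an ApiException, so callers can treat "absent" and "errored" lookups
+# uniformly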
+
+def get_array(module, fusion, array_name=None):
+ """Return Array or None"""
+ array_api_instance = purefusion.ArraysApi(fusion)
+ try:
+ if array_name is None:
+ array_name = module.params["array"]
+
+ return array_api_instance.get_array(
+ array_name=array_name,
+ availability_zone_name=module.params["availability_zone"],
+ region_name=module.params["region"],
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def get_az(module, fusion, availability_zone_name=None):
+ """Get Availability Zone or None"""
+ az_api_instance = purefusion.AvailabilityZonesApi(fusion)
+ try:
+ if availability_zone_name is None:
+ availability_zone_name = module.params["availability_zone"]
+
+ return az_api_instance.get_availability_zone(
+ region_name=module.params["region"],
+ availability_zone_name=availability_zone_name,
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def get_region(module, fusion, region_name=None):
+ """Get Region or None"""
+ region_api_instance = purefusion.RegionsApi(fusion)
+ try:
+ if region_name is None:
+ region_name = module.params["region"]
+
+ return region_api_instance.get_region(
+ region_name=region_name,
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def get_ss(module, fusion, storage_service_name=None):
+ """Return Storage Service or None"""
+ ss_api_instance = purefusion.StorageServicesApi(fusion)
+ try:
+ if storage_service_name is None:
+ storage_service_name = module.params["storage_service"]
+
+ return ss_api_instance.get_storage_service(
+ storage_service_name=storage_service_name
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def get_tenant(module, fusion, tenant_name=None):
+ """Return Tenant or None"""
+ api_instance = purefusion.TenantsApi(fusion)
+ try:
+ if tenant_name is None:
+ tenant_name = module.params["tenant"]
+
+ return api_instance.get_tenant(tenant_name=tenant_name)
+ except purefusion.rest.ApiException:
+ return None
+
+
+def get_ts(module, fusion, tenant_space_name=None):
+ """Tenant Space or None"""
+ ts_api_instance = purefusion.TenantSpacesApi(fusion)
+ try:
+ if tenant_space_name is None:
+ tenant_space_name = module.params["tenant_space"]
+
+ return ts_api_instance.get_tenant_space(
+ tenant_name=module.params["tenant"],
+ tenant_space_name=tenant_space_name,
+ )
+ except purefusion.rest.ApiException:
+ return None
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/networking.py b/ansible_collections/purestorage/fusion/plugins/module_utils/networking.py
new file mode 100644
index 000000000..a00d8200a
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/networking.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Jan Kodera (jkodera@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import ipaddress
+
+# while regexes are hard to maintain, they are used anyways for few reasons:
+# a) REST backend accepts fairly restricted input and we need to match that input instead of all
+# the esoteric extra forms various packages are usually capable of parsing (like dotted-decimal
+# subnet masks, octal octets, hexadecimal octets, zero-extended addresses etc.)
+# b) manually written parsing routines are usually complex to write, verify and think about
+import re
+
+# IPv4 octet regex part, matches only simple decimal 0-255 without leading zeroes
+_octet = (
+ "((?:[0-9])|" # matches 0-9
+ "(?:[1-9][0-9])|" # matches 10-99
+ "(?:1[0-9][0-9])|" # matches 100-199
+ "(?:2[0-4][0-9])|" # matches 200-249
+ "(?:25[0-5]))" # matches 250-255
+)
+
+# IPv4 subnet mask regex part, matches decimal 8-32
+_subnet_mask = (
+ "((?:[8-9])|" # matches 8-9
+ "(?:[1-2][0-9])|" # matches 10-29
+ "(?:3[0-2]))" # matches 30-32
+)
+
+# matches IPv4 addresses
+_addr_pattern = re.compile(r"^{octet}\.{octet}\.{octet}\.{octet}$".format(octet=_octet))
+# matches IPv4 networks in CIDR format, i.e. addresses in the form 'a.b.c.d/e'
+_cidr_pattern = re.compile(
+ r"^{octet}\.{octet}\.{octet}\.{octet}\/{0}$".format(_subnet_mask, octet=_octet)
+)
+
+
+def is_valid_network(addr):
+ """Returns True if `addr` is IPv4 address/submask in bit CIDR notation, False otherwise."""
+ match = re.match(_cidr_pattern, addr)
+ if match is None:
+ return False
+ for i in range(4):
+ if int(match.group(i + 1)) > 255:
+ return False
+ mask = int(match.group(5))
+ if mask < 8 or mask > 32:
+ return False
+ return True
+
+
+def is_valid_address(addr):
+ """Returns True if `addr` is a valid IPv4 address, False otherwise. Does not support
+ octal/hex notations."""
+ match = re.match(_addr_pattern, addr)
+ if match is None:
+ return False
+ for i in range(4):
+ if int(match.group(i + 1)) > 255:
+ return False
+ return True
+
+
+def is_address_in_network(addr, network):
+ """Returns True if `addr` and `network` are a valid IPv4 address and
+ IPv4 network respectively and if `addr` is in `network`, False otherwise."""
+ if not is_valid_address(addr) or not is_valid_network(network):
+ return False
+ parsed_addr = ipaddress.ip_address(addr)
+ parsed_net = ipaddress.ip_network(network)
+ return parsed_addr in parsed_net
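+
+
+# Illustrative examples (addresses are hypothetical):
+#   is_valid_address("172.17.1.5")                       -> True
+#   is_valid_network("172.17.1.0/24")                    -> True
+#   is_address_in_network("172.17.1.5", "172.17.1.0/24") -> True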
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/operations.py b/ansible_collections/purestorage/fusion/plugins/module_utils/operations.py
new file mode 100644
index 000000000..dc80aefe3
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/operations.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Jan Kodera (jkodera@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import time
+import math
+
+try:
+ import fusion as purefusion
+ from urllib3.exceptions import HTTPError
+except ImportError:
+ pass
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+
+
+def await_operation(fusion, operation, fail_playbook_if_operation_fails=True):
+ """
+ Waits for given operation to finish.
+ Throws an exception by default if the operation fails.
+ """
+ op_api = purefusion.OperationsApi(fusion)
+ operation_get = None
+ while True:
+ try:
+ operation_get = op_api.get_operation(operation.id)
+ if operation_get.status == "Succeeded":
+ return operation_get
+ if operation_get.status == "Failed":
+ if fail_playbook_if_operation_fails:
+ raise OperationException(operation_get)
+ return operation_get
+ except HTTPError as err:
+ raise OperationException(operation, http_error=err)
+ time.sleep(int(math.ceil(operation_get.retry_in / 1000)))
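+
+
+# Illustrative usage (API and variable names are hypothetical):
+#   op = purefusion.VolumesApi(fusion).create_volume(volume_body, ...)
+#   result = await_operation(fusion, op)
+#   # returns the finished operation, or raises OperationException on failure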
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/parsing.py b/ansible_collections/purestorage/fusion/plugins/module_utils/parsing.py
new file mode 100644
index 000000000..a2cd75245
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/parsing.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Jan Kodera (jkodera@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+import re
+
+__metaclass__ = type
+
+METRIC_SUFFIXES = ["K", "M", "G", "T", "P"]
+
+duration_pattern = re.compile(
+ r"^((?P<Y>[1-9]\d*)Y)?((?P<W>[1-9]\d*)W)?((?P<D>[1-9]\d*)D)?(((?P<H>[1-9]\d*)H)?((?P<M>[1-9]\d*)M)?)?$"
+)
+duration_transformation = {
+ "Y": 365 * 24 * 60,
+ "W": 7 * 24 * 60,
+ "D": 24 * 60,
+ "H": 60,
+ "M": 1,
+}
+
+
+def parse_number_with_metric_suffix(module, number, factor=1024):
+ """Given a human-readable string (e.g. 2G, 30M, 400),
+ return the resolved integer.
+ Will call `module.fail_json()` for invalid inputs.
+ """
+ try:
+ stripped_num = number.strip()
+ if stripped_num[-1].isdigit():
+ return int(stripped_num)
+ # has unit prefix
+ result = float(stripped_num[:-1])
+ suffix = stripped_num[-1].upper()
+ factor_count = METRIC_SUFFIXES.index(suffix) + 1
+ for _i in range(0, factor_count):
+ result = result * float(factor)
+ return int(result)
+ except Exception:
+ module.fail_json(
+ msg="'{0}' is not a valid number, use '400', '1K', '2M', ...".format(number)
+ )
+ return 0
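+
+
+# Illustrative values (the default factor is 1024):
+#
+#   parse_number_with_metric_suffix(module, "400")              -> 400
+#   parse_number_with_metric_suffix(module, "1K")               -> 1024
+#   parse_number_with_metric_suffix(module, "2M")               -> 2097152
+#   parse_number_with_metric_suffix(module, "1K", factor=1000)  -> 1000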
+
+
+def parse_duration(period):
+    """Parse a time period string (e.g. '2H', '1D5H', '300') into minutes.
+    A plain number is interpreted directly as minutes.
+    Raises ValueError for invalid input."""
+    if period.isdigit():
+        return int(period)
+
+ match = duration_pattern.match(period.upper())
+ if not match or period == "":
+ raise ValueError("Invalid format")
+
+ durations = {
+ "Y": int(match.group("Y")) if match.group("Y") else 0,
+ "W": int(match.group("W")) if match.group("W") else 0,
+ "D": int(match.group("D")) if match.group("D") else 0,
+ "H": int(match.group("H")) if match.group("H") else 0,
+ "M": int(match.group("M")) if match.group("M") else 0,
+ }
+ return sum(value * duration_transformation[key] for key, value in durations.items())
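+
+
+# Illustrative values (the result is in minutes):
+#
+#   parse_duration("90")   -> 90
+#   parse_duration("5H")   -> 300
+#   parse_duration("1D2H") -> 1560
+#   parse_duration("1Y")   -> 525600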
+
+
+def parse_minutes(module, period):
+ try:
+ return parse_duration(period)
+ except ValueError:
+ module.fail_json(
+ msg=(
+                "'{0}' is not a valid time period, use a combination of time units (Y,W,D,H,M), "
+                "e.g. 4W3D5H, 5D8H5M, 3D, 5W, 1Y5W..."
+ ).format(period)
+ )
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/prerequisites.py b/ansible_collections/purestorage/fusion/plugins/module_utils/prerequisites.py
new file mode 100644
index 000000000..a4edaf341
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/prerequisites.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Jan Kodera (jkodera@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import re
+import importlib
+import importlib.metadata
+
+# This file exists because Ansible currently cannot declare dependencies on Python modules.
+# see https://github.com/ansible/ansible/issues/62733 for more info about lack of req support
+
+#############################
+
+# 'module_name, package_name, version_requirements' triplets
+DEPENDENCIES = [
+ ("fusion", "purefusion", ">=1.0.11,<2.0"),
+ ("urllib3", "urllib3", None),
+]
+
+#############################
+
+
+def _parse_version(val):
+ """
+ Parse a package version.
+ Takes in either MAJOR.MINOR or MAJOR.MINOR.PATCH form. PATCH
+ can have additional suffixes, e.g. '-prerelease', 'a1', ...
+
+ :param val: a string representation of the package version
+ :returns: tuple of ints (MAJOR, MINOR, PATCH) or None if not parsed
+ """
+ # regexes for this were really ugly
+ try:
+ parts = val.split(".")
+ if len(parts) < 2 or len(parts) > 3:
+ return None
+ major = int(parts[0])
+ minor = int(parts[1])
+ if len(parts) > 2:
+ patch = re.match(r"^\d+", parts[2])
+ patch = int(patch.group(0))
+ else:
+ patch = None
+ return (major, minor, patch)
+ except Exception:
+ return None
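+
+
+# Illustrative values:
+#
+#   _parse_version("1.2")     -> (1, 2, None)
+#   _parse_version("1.2.3")   -> (1, 2, 3)
+#   _parse_version("1.2.3a1") -> (1, 2, 3)
+#   _parse_version("1")       -> None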
+
+
+# returns list of tuples [(COMPARATOR, (MAJOR, MINOR, PATCH)),...]
+def _parse_version_requirements(val):
+ """
+ Parse package requirements.
+
+ :param val: a string in the form ">=1.0.11,<2.0"
+ :returns: list of tuples in the form [(">=", (1, 0, 11)), ("<", (2, 0, None))] or None if not parsed
+ """
+ reqs = []
+ try:
+ parts = val.split(",")
+ for part in parts:
+ match = re.match(r"\s*(>=|<=|==|=|<|>|!=)\s*([^\s]+)", part)
+ op = match.group(1)
+ ver = match.group(2)
+ ver_tuple = _parse_version(ver)
+ if not ver_tuple:
+ raise ValueError("invalid version {0}".format(ver))
+ reqs.append((op, ver_tuple))
+ return reqs
+ except Exception as e:
+ raise ValueError("invalid version requirement '{0}' {1}".format(val, e))
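+
+
+# Illustrative value (matches the docstring above):
+#
+#   _parse_version_requirements(">=1.0.11,<2.0")
+#       -> [(">=", (1, 0, 11)), ("<", (2, 0, None))]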
+
+
+def _compare_version(op, ver, req):
+ """
+ Compare two versions.
+
+ :param op: a string, one of comparators ">=", "<=", "=", "==", ">" or "<"
+ :param ver: version tuple in _parse_version() return form
+ :param req: version tuple in _parse_version() return form
+ :returns: True if ver 'op' req; False otherwise
+ """
+
+ def _cmp(a, b):
+ return (a > b) - (a < b)
+
+ major = _cmp(ver[0], req[0])
+ minor = _cmp(ver[1], req[1])
+ patch = None
+ if req[2] is not None:
+ patch = _cmp(ver[2] or 0, req[2])
+    # the comparison cascades: major decides first, then minor, then patch
+    # (a missing patch in the requirement makes the patch comparison a no-op)
+    result = {
+        ">=": major > 0
+        or (major == 0 and (minor > 0 or (minor == 0 and (patch is None or patch >= 0)))),
+        "<=": major < 0
+        or (major == 0 and (minor < 0 or (minor == 0 and (patch is None or patch <= 0)))),
+        ">": major > 0
+        or (major == 0 and (minor > 0 or (minor == 0 and patch is not None and patch > 0))),
+        "<": major < 0
+        or (major == 0 and (minor < 0 or (minor == 0 and patch is not None and patch < 0))),
+        "=": major == 0 and minor == 0 and (patch is None or patch == 0),
+        "==": major == 0 and minor == 0 and (patch is None or patch == 0),
+        "!=": major != 0 or minor != 0 or (patch is not None and patch != 0),
+ }.get(op)
+ return result
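+
+
+# Illustrative values:
+#
+#   _compare_version(">=", (1, 0, 11), (1, 0, 11))     -> True
+#   _compare_version(">=", (1, 0, None), (1, 5, None)) -> False
+#   _compare_version("<", (1, 9, 9), (2, 0, None))     -> True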
+
+
+def _version_satisfied(version, requirements):
+ """
+ Checks whether version matches given version requirements.
+
+ :param version: a string, in input form to _parse_version()
+ :param requirements: as string, in input form to _parse_version_requirements()
+ :returns: True if 'version' matches 'requirements'; False otherwise
+ """
+
+ version = _parse_version(version)
+ requirements = _parse_version_requirements(requirements)
+ for req in requirements:
+ if not _compare_version(req[0], version, req[1]):
+ return False
+ return True
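+
+
+# Illustrative values:
+#
+#   _version_satisfied("1.0.11", ">=1.0.11,<2.0") -> True
+#   _version_satisfied("2.0.0", ">=1.0.11,<2.0")  -> False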
+
+
+# helper to work around the fact that Ansible is unable to manage Python dependencies
+def _check_import(ansible_module, module, package=None, version_requirements=None):
+ """
+ Tries to import a module and optionally validates its package version.
+ Calls AnsibleModule.fail_json() if not satisfied.
+
+ :param ansible_module: an AnsibleModule instance
+ :param module: a string with module name to try to import
+ :param package: a string, package to check version for; must be specified with 'version_requirements'
+ :param version_requirements: a string, version requirements for 'package'
+ """
+ try:
+        importlib.import_module(module)
+    except ImportError:
+        ansible_module.fail_json(
+            msg="Error: required Python module '{0}' is missing".format(module)
+ )
+
+ if package and version_requirements:
+ # silently ignore version checks and hope for the best if we can't fetch
+ # the package version since we can't know how the user installs packages
+ try:
+ version = importlib.metadata.version(package)
+ if version and not _version_satisfied(version, version_requirements):
+ ansible_module.fail_json(
+ msg="Error: Python package '{0}' version '{1}' does not satisfy requirements '{2}'".format(
+                        package, version, version_requirements
+ )
+ )
+ except Exception:
+            pass  # the package version could not be determined; skip the check
+
+
+def check_dependencies(ansible_module):
+ for module, package, version_requirements in DEPENDENCIES:
+ _check_import(ansible_module, module, package, version_requirements)
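+
+
+# Sketch: a new dependency is added by extending DEPENDENCIES above, e.g.
+# ("requests", "requests", ">=2.0,<3.0") (hypothetical entry); every module
+# that calls check_dependencies() will then validate it at startup.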
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/startup.py b/ansible_collections/purestorage/fusion/plugins/module_utils/startup.py
new file mode 100644
index 000000000..55d7f11a2
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/startup.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Jan Kodera (jkodera@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ install_fusion_exception_hook,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.prerequisites import (
+ check_dependencies,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ get_fusion,
+)
+
+
+def setup_fusion(module):
+ check_dependencies(module)
+ install_fusion_exception_hook(module)
+ return get_fusion(module)
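+
+
+# Illustrative module boilerplate (sketch):
+#
+#   module = AnsibleModule(argument_spec, supports_check_mode=True)
+#   fusion = setup_fusion(module)  # checks deps, installs the exception hook,
+#                                  # returns an authenticated Fusion client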
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_api_client.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_api_client.py
new file mode 100644
index 000000000..39860449d
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_api_client.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_api_client
+version_added: '1.0.0'
+short_description: Manage API clients in Pure Storage Fusion
+description:
+- Create or delete an API Client in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check mode).
+options:
+ name:
+ description:
+ - The name of the client.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the client should exist or not.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ public_key:
+ description:
+    - The API client's PEM formatted (Base64 encoded) RSA public key.
+    - Include the C(-----BEGIN PUBLIC KEY-----) and C(-----END PUBLIC KEY-----) lines.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new API client foo
+ purestorage.fusion.fusion_api_client:
+ name: "foo client"
+    public_key: "{{ lookup('file', 'public_pem_file') }}"
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
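+
+# Illustrative removal example; values are placeholders matching the example above
+- name: Delete API client foo
+  purestorage.fusion.fusion_api_client:
+    name: "foo client"
+    public_key: "{{ lookup('file', 'public_pem_file') }}"
+    state: absent
+    issuer_id: key_name
+    private_key_file: "az-admin-private-key.pem"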
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+
+
+def get_client_id(module, fusion):
+ """Get API Client ID, or None if not available"""
+ id_api_instance = purefusion.IdentityManagerApi(fusion)
+ try:
+ clients = id_api_instance.list_api_clients()
+ for client in clients:
+ if (
+ client.public_key == module.params["public_key"]
+ and client.display_name == module.params["name"]
+ ):
+ return client.id
+ return None
+ except purefusion.rest.ApiException:
+ return None
+
+
+def delete_client(module, fusion, client_id):
+ """Delete API Client"""
+ id_api_instance = purefusion.IdentityManagerApi(fusion)
+
+ changed = True
+ if not module.check_mode:
+ id_api_instance.delete_api_client(api_client_id=client_id)
+ module.exit_json(changed=changed)
+
+
+def create_client(module, fusion):
+ """Create API Client"""
+
+ id_api_instance = purefusion.IdentityManagerApi(fusion)
+
+ changed = True
+ if not module.check_mode:
+ client = purefusion.APIClientPost(
+ public_key=module.params["public_key"],
+ display_name=module.params["name"],
+ )
+ id_api_instance.create_api_client(client)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ public_key=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+ client_id = get_client_id(module, fusion)
+ if client_id is None and state == "present":
+ create_client(module, fusion)
+ elif client_id is not None and state == "absent":
+ delete_client(module, fusion, client_id)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_array.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_array.py
new file mode 100644
index 000000000..f7933eabe
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_array.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_array
+version_added: '1.0.0'
+short_description: Manage arrays in Pure Storage Fusion
+description:
+- Create or delete an array in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check mode).
+options:
+ name:
+ description:
+ - The name of the array.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the array should exist or not.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ display_name:
+ description:
+ - The human name of the array.
+ - If not provided, defaults to I(name).
+ type: str
+ region:
+ description:
+ - The region the AZ is in.
+ type: str
+ required: true
+ availability_zone:
+ aliases: [ az ]
+ description:
+ - The availability zone the array is located in.
+ type: str
+ required: true
+ hardware_type:
+ description:
+ - Hardware type to which the storage class applies.
+ choices: [ flash-array-x, flash-array-c, flash-array-x-optane, flash-array-xl ]
+ type: str
+ host_name:
+ description:
+ - Management IP address of the array, or FQDN.
+ type: str
+ appliance_id:
+ description:
+ - Appliance ID of the array.
+ type: str
+ maintenance_mode:
+ description:
+ - "Switch the array into maintenance mode or back.
+ Array in maintenance mode can have placement groups migrated out but not in.
+ Intended use cases are for example safe decommissioning or to prevent use
+ of an array that has not yet been fully configured."
+ type: bool
+ unavailable_mode:
+ description:
+ - "Switch the array into unavailable mode or back.
+ Fusion tries to exclude unavailable arrays from virtually any operation it
+ can. This is to prevent stalling operations in case of e.g. a networking
+ failure. As of the moment arrays have to be marked unavailable manually."
+ type: bool
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new array foo
+ purestorage.fusion.fusion_array:
+ name: foo
+ az: zone_1
+ region: region1
+ hardware_type: flash-array-x
+ host_name: foo_array
+ display_name: "foo array"
+ appliance_id: 1227571-198887878-35016350232000707
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
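+
+# Illustrative sketch; assumes array foo from the example above already exists
+- name: Put array foo into maintenance mode
+  purestorage.fusion.fusion_array:
+    name: foo
+    az: zone_1
+    region: region1
+    maintenance_mode: true
+    issuer_id: key_name
+    private_key_file: "az-admin-private-key.pem"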
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils import getters
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+
+
+def get_array(module, fusion):
+ """Return Array or None"""
+ return getters.get_array(module, fusion, array_name=module.params["name"])
+
+
+def create_array(module, fusion):
+ """Create Array"""
+
+ array_api_instance = purefusion.ArraysApi(fusion)
+
+ if not module.check_mode:
+ if not module.params["display_name"]:
+ display_name = module.params["name"]
+ else:
+ display_name = module.params["display_name"]
+ array = purefusion.ArrayPost(
+ hardware_type=module.params["hardware_type"],
+ display_name=display_name,
+ host_name=module.params["host_name"],
+ name=module.params["name"],
+ appliance_id=module.params["appliance_id"],
+ )
+ res = array_api_instance.create_array(
+ array,
+ availability_zone_name=module.params["availability_zone"],
+ region_name=module.params["region"],
+ )
+ await_operation(fusion, res)
+ return True
+
+
+def update_array(module, fusion):
+    """Update Array"""
+    array = get_array(module, fusion)
+    if array is None:
+        # in check mode the array may not exist yet because its creation was only simulated
+        return False
+    patches = []
+ if (
+ module.params["display_name"]
+ and module.params["display_name"] != array.display_name
+ ):
+ patch = purefusion.ArrayPatch(
+ display_name=purefusion.NullableString(module.params["display_name"]),
+ )
+ patches.append(patch)
+
+ if module.params["host_name"] and module.params["host_name"] != array.host_name:
+ patch = purefusion.ArrayPatch(
+ host_name=purefusion.NullableString(module.params["host_name"])
+ )
+ patches.append(patch)
+
+ if (
+ module.params["maintenance_mode"] is not None
+ and module.params["maintenance_mode"] != array.maintenance_mode
+ ):
+ patch = purefusion.ArrayPatch(
+ maintenance_mode=purefusion.NullableBoolean(
+ module.params["maintenance_mode"]
+ )
+ )
+ patches.append(patch)
+ if (
+ module.params["unavailable_mode"] is not None
+ and module.params["unavailable_mode"] != array.unavailable_mode
+ ):
+ patch = purefusion.ArrayPatch(
+ unavailable_mode=purefusion.NullableBoolean(
+ module.params["unavailable_mode"]
+ )
+ )
+ patches.append(patch)
+
+ if not module.check_mode:
+ array_api_instance = purefusion.ArraysApi(fusion)
+ for patch in patches:
+ op = array_api_instance.update_array(
+ patch,
+ availability_zone_name=module.params["availability_zone"],
+ region_name=module.params["region"],
+ array_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ changed = len(patches) != 0
+ return changed
+
+
+def delete_array(module, fusion):
+ """Delete Array - not currently available"""
+ array_api_instance = purefusion.ArraysApi(fusion)
+ if not module.check_mode:
+ res = array_api_instance.delete_array(
+ region_name=module.params["region"],
+ availability_zone_name=module.params["availability_zone"],
+ array_name=module.params["name"],
+ )
+ await_operation(fusion, res)
+ return True
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ availability_zone=dict(type="str", required=True, aliases=["az"]),
+ display_name=dict(type="str"),
+ region=dict(type="str", required=True),
+ appliance_id=dict(type="str"),
+ host_name=dict(type="str"),
+ hardware_type=dict(
+ type="str",
+ choices=[
+ "flash-array-x",
+ "flash-array-c",
+ "flash-array-x-optane",
+ "flash-array-xl",
+ ],
+ ),
+ maintenance_mode=dict(type="bool"),
+ unavailable_mode=dict(type="bool"),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+ array = get_array(module, fusion)
+
+ changed = False
+ if not array and state == "present":
+ module.fail_on_missing_params(["hardware_type", "host_name", "appliance_id"])
+        # update_array() also runs after creation to set properties which cannot
+        # be set at creation time and would otherwise keep their defaults
+        changed = create_array(module, fusion) | update_array(module, fusion)
+ elif array and state == "present":
+ changed = changed | update_array(module, fusion)
+ elif array and state == "absent":
+ changed = changed | delete_array(module, fusion)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_az.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_az.py
new file mode 100644
index 000000000..02647d397
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_az.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_az
+version_added: '1.0.0'
+short_description: Manage Availability Zones in Pure Storage Fusion
+description:
+- Manage an Availability Zone in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check mode).
+options:
+ name:
+ description:
+ - The name of the Availability Zone.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the Availability Zone should exist or not.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ display_name:
+ description:
+ - The human name of the Availability Zone.
+ - If not provided, defaults to I(name).
+ type: str
+ region:
+ description:
+ - Region within which the AZ is created.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new AZ foo
+ purestorage.fusion.fusion_az:
+ name: foo
+ display_name: "foo AZ"
+ region: region1
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete AZ foo
+ purestorage.fusion.fusion_az:
+ name: foo
+ state: absent
+ region: region1
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils import getters
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+
+
+def get_az(module, fusion):
+ """Get Availability Zone or None"""
+ return getters.get_az(module, fusion, availability_zone_name=module.params["name"])
+
+
+def delete_az(module, fusion):
+ """Delete Availability Zone"""
+
+ az_api_instance = purefusion.AvailabilityZonesApi(fusion)
+
+ changed = True
+ if not module.check_mode:
+ op = az_api_instance.delete_availability_zone(
+ region_name=module.params["region"],
+ availability_zone_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def create_az(module, fusion):
+ """Create Availability Zone"""
+
+ az_api_instance = purefusion.AvailabilityZonesApi(fusion)
+
+ changed = True
+ if not module.check_mode:
+ if not module.params["display_name"]:
+ display_name = module.params["name"]
+ else:
+ display_name = module.params["display_name"]
+
+ azone = purefusion.AvailabilityZonePost(
+ name=module.params["name"],
+ display_name=display_name,
+ )
+ op = az_api_instance.create_availability_zone(
+ azone, region_name=module.params["region"]
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ region=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+ azone = get_az(module, fusion)
+
+ if not azone and state == "present":
+ create_az(module, fusion)
+ elif azone and state == "absent":
+ delete_az(module, fusion)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_hap.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_hap.py
new file mode 100644
index 000000000..3f45ea2dd
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_hap.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_hap
+version_added: '1.0.0'
+short_description: Manage host access policies in Pure Storage Fusion
+description:
+- Create or delete host access policies in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check mode).
+- Setting passwords is not an idempotent action.
+- Only iSCSI transport is currently supported.
+- iSCSI CHAP is not yet supported.
+options:
+ name:
+ description:
+ - The name of the host access policy.
+ type: str
+ required: true
+ display_name:
+ description:
+ - The human name of the host access policy.
+ type: str
+ state:
+ description:
+ - Define whether the host access policy should exist or not.
+ - When removing host access policy all connected volumes must
+ have been previously disconnected.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ wwns:
+ type: list
+ elements: str
+ description:
+ - CURRENTLY NOT SUPPORTED.
+ - List of wwns for the host access policy.
+ iqn:
+ type: str
+ description:
+ - IQN for the host access policy.
+ nqn:
+ type: str
+ description:
+ - CURRENTLY NOT SUPPORTED.
+ - NQN for the host access policy.
+ personality:
+ type: str
+ description:
+ - Define which operating system the host is.
+ default: linux
+ choices: ['linux', 'windows', 'hpux', 'vms', 'aix', 'esxi', 'solaris', 'hitachi-vsp', 'oracle-vm-server']
+ target_user:
+ type: str
+ description:
+ - CURRENTLY NOT SUPPORTED.
+ - Sets the target user name for CHAP authentication.
+ - Required with I(target_password).
+ - To clear the username/password pair use C(clear) as the password.
+ target_password:
+ type: str
+ description:
+ - CURRENTLY NOT SUPPORTED.
+ - Sets the target password for CHAP authentication.
+ - Password length between 12 and 255 characters.
+ - To clear the username/password pair use C(clear) as the password.
+ host_user:
+ type: str
+ description:
+ - CURRENTLY NOT SUPPORTED.
+ - Sets the host user name for CHAP authentication.
+ - Required with I(host_password).
+ - To clear the username/password pair use C(clear) as the password.
+ host_password:
+ type: str
+ description:
+ - CURRENTLY NOT SUPPORTED.
+ - Sets the host password for CHAP authentication.
+ - Password length between 12 and 255 characters.
+ - To clear the username/password pair use C(clear) as the password.
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new AIX host access policy
+ purestorage.fusion.fusion_hap:
+ name: foo
+ personality: aix
+ iqn: "iqn.2005-03.com.RedHat:linux-host1"
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete host access policy
+ purestorage.fusion.fusion_hap:
+ name: foo
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+def _check_iqn(module, fusion):
+ hap_api_instance = purefusion.HostAccessPoliciesApi(fusion)
+ hosts = hap_api_instance.list_host_access_policies().items
+ for host in hosts:
+ if host.iqn == module.params["iqn"] and host.name != module.params["name"]:
+ module.fail_json(
+ msg="Supplied IQN {0} already used by host access policy {1}".format(
+ module.params["iqn"], host.name
+ )
+ )
+
+
+def get_host(module, fusion):
+ """Return host or None"""
+ hap_api_instance = purefusion.HostAccessPoliciesApi(fusion)
+ try:
+ return hap_api_instance.get_host_access_policy(
+ host_access_policy_name=module.params["name"]
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def create_hap(module, fusion):
+ """Create a new host access policy"""
+ hap_api_instance = purefusion.HostAccessPoliciesApi(fusion)
+ changed = True
+ if not module.check_mode:
+ display_name = module.params["display_name"] or module.params["name"]
+
+ op = hap_api_instance.create_host_access_policy(
+ purefusion.HostAccessPoliciesPost(
+ iqn=module.params["iqn"],
+ personality=module.params["personality"],
+ name=module.params["name"],
+ display_name=display_name,
+ )
+ )
+ await_operation(fusion, op)
+ module.exit_json(changed=changed)
+
+
+def delete_hap(module, fusion):
+ """Delete a Host Access Policy"""
+ hap_api_instance = purefusion.HostAccessPoliciesApi(fusion)
+ changed = True
+ if not module.check_mode:
+ op = hap_api_instance.delete_host_access_policy(
+ host_access_policy_name=module.params["name"]
+ )
+ await_operation(fusion, op)
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ nqn=dict(
+ type="str",
+ removed_in_version="2.0.0",
+ removed_from_collection="purestorage.fusion",
+ ),
+ iqn=dict(type="str"),
+ wwns=dict(
+ type="list",
+ elements="str",
+ removed_in_version="2.0.0",
+ removed_from_collection="purestorage.fusion",
+ ),
+ host_password=dict(
+ type="str",
+ removed_in_version="2.0.0",
+ removed_from_collection="purestorage.fusion",
+ no_log=True,
+ ),
+ host_user=dict(
+ type="str",
+ removed_in_version="2.0.0",
+ removed_from_collection="purestorage.fusion",
+ ),
+ target_password=dict(
+ type="str",
+ removed_in_version="2.0.0",
+ removed_from_collection="purestorage.fusion",
+ no_log=True,
+ ),
+ target_user=dict(
+ type="str",
+ removed_in_version="2.0.0",
+ removed_from_collection="purestorage.fusion",
+ ),
+ display_name=dict(type="str"),
+ personality=dict(
+ type="str",
+ default="linux",
+ choices=[
+ "linux",
+ "windows",
+ "hpux",
+ "vms",
+ "aix",
+ "esxi",
+ "solaris",
+ "hitachi-vsp",
+ "oracle-vm-server",
+ ],
+ ),
+ )
+ )
+
+ required_if = [["state", "present", ["personality", "iqn"]]]
+
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ )
+ fusion = setup_fusion(module)
+
+ if module.params["nqn"]:
+ module.warn(
+ "`nqn` parameter is deprecated and will be removed in version 2.0.0"
+ )
+ if module.params["wwns"]:
+ module.warn(
+ "`wwns` parameter is deprecated and will be removed in version 2.0.0"
+ )
+ if module.params["host_password"]:
+ module.warn(
+ "`host_password` parameter is deprecated and will be removed in version 2.0.0"
+ )
+ if module.params["host_user"]:
+ module.warn(
+ "`host_user` parameter is deprecated and will be removed in version 2.0.0"
+ )
+ if module.params["target_password"]:
+ module.warn(
+ "`target_password` parameter is deprecated and will be removed in version 2.0.0"
+ )
+ if module.params["target_user"]:
+ module.warn(
+ "`target_user` parameter is deprecated and will be removed in version 2.0.0"
+ )
+
+ hap_pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-_]{0,61}[a-zA-Z0-9])?$")
+ iqn_pattern = re.compile(
+ r"^iqn\.\d{4}-\d{2}((?<!-)\.(?!-)[a-zA-Z0-9\-]+){1,63}(?<!-)(?<!\.)(:(?!:)[^,\s'\"]+)?$"
+ )
+
+ if not hap_pattern.match(module.params["name"]):
+ module.fail_json(
+ msg="Host Access Policy {0} does not conform to naming convention".format(
+ module.params["name"]
+ )
+ )
+
+    if module.params["iqn"] is not None and not iqn_pattern.match(module.params["iqn"]):
+        module.fail_json(
+            msg="IQN {0} is not a valid iSCSI IQN".format(module.params["iqn"])
+        )
+
+ state = module.params["state"]
+ host = get_host(module, fusion)
+ _check_iqn(module, fusion)
+
+ if host is None and state == "present":
+ create_hap(module, fusion)
+ elif host is not None and state == "absent":
+ delete_hap(module, fusion)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_hw.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_hw.py
new file mode 100644
index 000000000..31d313e9d
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_hw.py
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_hw
+version_added: '1.0.0'
+deprecated:
+ removed_at_date: "2023-08-09"
+ why: Hardware type cannot be modified in Pure Storage Fusion
+  alternative: there is no alternative, as this functionality never worked
+short_description: Create hardware types in Pure Storage Fusion
+description:
+- Create a hardware type in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check mode).
+options:
+ name:
+ description:
+ - The name of the hardware type.
+ type: str
+ state:
+ description:
+ - Define whether the hardware type should exist or not.
+ - Currently there is no mechanism to delete a hardware type.
+ default: present
+ choices: [ present ]
+ type: str
+ display_name:
+ description:
+ - The human name of the hardware type.
+ - If not provided, defaults to I(name).
+ type: str
+ media_type:
+ description:
+    - The media type of the hardware type.
+ type: str
+ array_type:
+ description:
+ - The array type for the hardware type.
+ choices: [ FA//X, FA//C ]
+ type: str
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+# this module does nothing, thus no example is provided
+EXAMPLES = r"""
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str"),
+ display_name=dict(type="str"),
+ array_type=dict(type="str", choices=["FA//X", "FA//C"]),
+ media_type=dict(type="str"),
+ state=dict(type="str", default="present", choices=["present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_info.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_info.py
new file mode 100644
index 000000000..be019d3d2
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_info.py
@@ -0,0 +1,1130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com), Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_info
+version_added: '1.0.0'
+short_description: Collect information from Pure Fusion
+description:
+ - Collect information from a Pure Fusion environment.
+  - By default, the module will collect basic information including counts for arrays,
+    availability_zones, volumes and snapshots. Fleet capacity and data reduction rates
+    are also provided.
+ - Additional information can be collected based on the configured set of arguments.
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check mode).
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the information to be collected.
+ Possible values for this include all, minimum, roles, users, arrays, hardware_types,
+ volumes, host_access_policies, storage_classes, protection_policies, placement_groups,
+ network_interfaces, availability_zones, network_interface_groups, storage_endpoints,
+      snapshots, regions, storage_services, tenants, tenant_spaces and api_clients.
+ type: list
+ elements: str
+ required: false
+ default: minimum
+extends_documentation_fragment:
+ - purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Collect default set of information
+ purestorage.fusion.fusion_info:
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+ register: fusion_info
+
+- name: Show default information
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info']['default'] }}"
+
+- name: Collect all information
+ purestorage.fusion.fusion_info:
+ gather_subset:
+ - all
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Show all information
+ ansible.builtin.debug:
+ msg: "{{ fusion_info['fusion_info'] }}"
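+
+# Illustrative sketch: restricting collection to specific subsets
+- name: Collect only array and volume information
+  purestorage.fusion.fusion_info:
+    gather_subset:
+      - arrays
+      - volumes
+    issuer_id: key_name
+    private_key_file: "az-admin-private-key.pem"
+  register: fusion_info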
+"""
+
+RETURN = r"""
+fusion_info:
+ description: Returns the information collected from Fusion
+ returned: always
+ type: dict
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+import time
+import http
+
+
+def _convert_microseconds(micros):
+    # note: despite the name, the input is treated as milliseconds (1000 -> 1 second)
+    seconds = (micros / 1000) % 60
+ minutes = (micros / (1000 * 60)) % 60
+ hours = (micros / (1000 * 60 * 60)) % 24
+ return seconds, minutes, hours
+
+
+def _api_permission_denied_handler(name):
+    """Return a decorator that turns HTTP 403 (permission denied) errors into a warning and a None result"""
+
+ def inner(func):
+ def wrapper(module, fusion, *args, **kwargs):
+ try:
+ return func(module, fusion, *args, **kwargs)
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ module.warn(f"Cannot get [{name} dict], reason: Permission denied")
+ return None
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ return wrapper
+
+ return inner
+
+
+def generate_default_dict(module, fusion):
+ def warning_api_exception(name):
+ module.warn(f"Cannot get {name} in [default dict], reason: Permission denied")
+
+ def warning_argument_none(name, requirement):
+ module.warn(
+ f"Cannot get {name} in [default dict], reason: Required argument `{requirement}` not available."
+ )
+
+    # All values are independent of each other - if getting one value fails, we show a warning and continue.
+    # That is also why there are so many nested for loops repeating the same queries.
+ version = None
+ users_num = None
+ protection_policies_num = None
+ host_access_policies_num = None
+ hardware_types_num = None
+ storage_services = None
+ storage_services_num = None
+ tenants = None
+ tenants_num = None
+ regions = None
+ regions_num = None
+ roles = None
+ roles_num = None
+ storage_classes_num = None
+ role_assignments_num = None
+ tenant_spaces_num = None
+ volumes_num = None
+ placement_groups_num = None
+ snapshots_num = None
+ availability_zones_num = None
+ arrays_num = None
+ network_interfaces_num = None
+ network_interface_groups_num = None
+ storage_endpoints_num = None
+
+ try:
+ version = purefusion.DefaultApi(fusion).get_version().version
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("API version")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ users_num = len(purefusion.IdentityManagerApi(fusion).list_users())
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Users")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ protection_policies_num = len(
+ purefusion.ProtectionPoliciesApi(fusion).list_protection_policies().items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Protection Policies")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ host_access_policies_num = len(
+ purefusion.HostAccessPoliciesApi(fusion).list_host_access_policies().items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Host Access Policies")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ hardware_types_num = len(
+ purefusion.HardwareTypesApi(fusion).list_hardware_types().items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Hardware Types")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ storage_services = purefusion.StorageServicesApi(fusion).list_storage_services()
+ storage_services_num = len(storage_services.items)
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Storage Services")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ tenants = purefusion.TenantsApi(fusion).list_tenants()
+ tenants_num = len(tenants.items)
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Tenants")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ regions = purefusion.RegionsApi(fusion).list_regions()
+ regions_num = len(regions.items)
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Regions")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ roles = purefusion.RolesApi(fusion).list_roles()
+ roles_num = len(roles)
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Roles")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ if storage_services is not None:
+ try:
+ storage_class_api_instance = purefusion.StorageClassesApi(fusion)
+ storage_classes_num = sum(
+ len(
+ storage_class_api_instance.list_storage_classes(
+ storage_service_name=storage_service.name
+ ).items
+ )
+ for storage_service in storage_services.items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Storage Classes")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+ else:
+ warning_argument_none("Storage Classes", "storage_services")
+
+ if roles is not None:
+ try:
+ role_assign_api_instance = purefusion.RoleAssignmentsApi(fusion)
+ role_assignments_num = sum(
+ len(role_assign_api_instance.list_role_assignments(role_name=role.name))
+ for role in roles
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Role Assignments")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+ else:
+ warning_argument_none("Role Assignments", "roles")
+
+ if tenants is not None:
+ tenantspace_api_instance = purefusion.TenantSpacesApi(fusion)
+
+ try:
+ tenant_spaces_num = sum(
+ len(
+ tenantspace_api_instance.list_tenant_spaces(
+ tenant_name=tenant.name
+ ).items
+ )
+ for tenant in tenants.items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Tenant Spaces")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ vol_api_instance = purefusion.VolumesApi(fusion)
+ volumes_num = sum(
+ len(
+ vol_api_instance.list_volumes(
+ tenant_name=tenant.name,
+ tenant_space_name=tenant_space.name,
+ ).items
+ )
+ for tenant in tenants.items
+ for tenant_space in tenantspace_api_instance.list_tenant_spaces(
+ tenant_name=tenant.name
+ ).items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Volumes")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ plgrp_api_instance = purefusion.PlacementGroupsApi(fusion)
+ placement_groups_num = sum(
+ len(
+ plgrp_api_instance.list_placement_groups(
+ tenant_name=tenant.name,
+ tenant_space_name=tenant_space.name,
+ ).items
+ )
+ for tenant in tenants.items
+ for tenant_space in tenantspace_api_instance.list_tenant_spaces(
+ tenant_name=tenant.name
+ ).items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Placement Groups")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ snapshot_api_instance = purefusion.SnapshotsApi(fusion)
+ snapshots_num = sum(
+ len(
+ snapshot_api_instance.list_snapshots(
+ tenant_name=tenant.name,
+ tenant_space_name=tenant_space.name,
+ ).items
+ )
+ for tenant in tenants.items
+ for tenant_space in tenantspace_api_instance.list_tenant_spaces(
+ tenant_name=tenant.name
+ ).items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Snapshots")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+ else:
+ warning_argument_none("Tenant Spaces", "tenants")
+ warning_argument_none("Volumes", "tenants")
+ warning_argument_none("Placement Groups", "tenants")
+ warning_argument_none("Snapshots", "tenants")
+
+ if regions is not None:
+ az_api_instance = purefusion.AvailabilityZonesApi(fusion)
+
+ try:
+ availability_zones_num = sum(
+ len(
+ az_api_instance.list_availability_zones(
+ region_name=region.name
+ ).items
+ )
+ for region in regions.items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Availability Zones")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ arrays_api_instance = purefusion.ArraysApi(fusion)
+ arrays_num = sum(
+ len(
+ arrays_api_instance.list_arrays(
+ availability_zone_name=availability_zone.name,
+ region_name=region.name,
+ ).items
+ )
+ for region in regions.items
+ for availability_zone in az_api_instance.list_availability_zones(
+ region_name=region.name
+ ).items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Arrays")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ nig_api_instance = purefusion.NetworkInterfaceGroupsApi(fusion)
+ network_interface_groups_num = sum(
+ len(
+ nig_api_instance.list_network_interface_groups(
+ availability_zone_name=availability_zone.name,
+ region_name=region.name,
+ ).items
+ )
+ for region in regions.items
+ for availability_zone in az_api_instance.list_availability_zones(
+ region_name=region.name
+ ).items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Network Interface Groups")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ send_api_instance = purefusion.StorageEndpointsApi(fusion)
+ storage_endpoints_num = sum(
+ len(
+ send_api_instance.list_storage_endpoints(
+ availability_zone_name=availability_zone.name,
+ region_name=region.name,
+ ).items
+ )
+ for region in regions.items
+ for availability_zone in az_api_instance.list_availability_zones(
+ region_name=region.name
+ ).items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Storage Endpoints")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+
+ try:
+ nic_api_instance = purefusion.NetworkInterfacesApi(fusion)
+ network_interfaces_num = sum(
+ len(
+ nic_api_instance.list_network_interfaces(
+ availability_zone_name=availability_zone.name,
+ region_name=region.name,
+ array_name=array_detail.name,
+ ).items
+ )
+ for region in regions.items
+ for availability_zone in az_api_instance.list_availability_zones(
+ region_name=region.name
+ ).items
+ for array_detail in arrays_api_instance.list_arrays(
+ availability_zone_name=availability_zone.name,
+ region_name=region.name,
+ ).items
+ )
+ except purefusion.rest.ApiException as exc:
+ if exc.status == http.HTTPStatus.FORBIDDEN:
+ warning_api_exception("Network Interfaces")
+ else:
+ # other exceptions will be handled by our exception hook
+ raise exc
+ else:
+ warning_argument_none("Availability Zones", "regions")
+ warning_argument_none("Network Interfaces", "regions")
+ warning_argument_none("Network Interface Groups", "regions")
+ warning_argument_none("Storage Endpoints", "regions")
+ warning_argument_none("Arrays", "regions")
+
+ return {
+ "version": version,
+ "users": users_num,
+ "protection_policies": protection_policies_num,
+ "host_access_policies": host_access_policies_num,
+ "hardware_types": hardware_types_num,
+ "storage_services": storage_services_num,
+ "tenants": tenants_num,
+ "regions": regions_num,
+ "storage_classes": storage_classes_num,
+ "roles": roles_num,
+ "role_assignments": role_assignments_num,
+ "tenant_spaces": tenant_spaces_num,
+ "volumes": volumes_num,
+ "placement_groups": placement_groups_num,
+ "snapshots": snapshots_num,
+ "availability_zones": availability_zones_num,
+ "arrays": arrays_num,
+ "network_interfaces": network_interfaces_num,
+ "network_interface_groups": network_interface_groups_num,
+ "storage_endpoints": storage_endpoints_num,
+ }
+
+
+@_api_permission_denied_handler("network_interfaces")
+def generate_nics_dict(module, fusion):
+ nics_info = {}
+ nic_api_instance = purefusion.NetworkInterfacesApi(fusion)
+ arrays_api_instance = purefusion.ArraysApi(fusion)
+ az_api_instance = purefusion.AvailabilityZonesApi(fusion)
+ regions_api_instance = purefusion.RegionsApi(fusion)
+ regions = regions_api_instance.list_regions()
+ for region in regions.items:
+ azs = az_api_instance.list_availability_zones(region_name=region.name)
+ for az in azs.items:
+ array_details = arrays_api_instance.list_arrays(
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for array_detail in array_details.items:
+ array_name = az.name + "/" + array_detail.name
+ nics_info[array_name] = {}
+ nics = nic_api_instance.list_network_interfaces(
+ availability_zone_name=az.name,
+ region_name=region.name,
+ array_name=array_detail.name,
+ )
+
+ for nic in nics.items:
+ nics_info[array_name][nic.name] = {
+ "enabled": nic.enabled,
+ "display_name": nic.display_name,
+ "interface_type": nic.interface_type,
+ "services": nic.services,
+ "max_speed": nic.max_speed,
+ "vlan": nic.eth.vlan,
+ "address": nic.eth.address,
+ "mac_address": nic.eth.mac_address,
+ "gateway": nic.eth.gateway,
+ "mtu": nic.eth.mtu,
+ "network_interface_group": nic.network_interface_group.name,
+ "availability_zone": nic.availability_zone.name,
+ }
+ return nics_info
+
+
+@_api_permission_denied_handler("host_access_policies")
+def generate_hap_dict(module, fusion):
+ hap_info = {}
+ api_instance = purefusion.HostAccessPoliciesApi(fusion)
+ hosts = api_instance.list_host_access_policies()
+ for host in hosts.items:
+ name = host.name
+ hap_info[name] = {
+ "personality": host.personality,
+ "display_name": host.display_name,
+ "iqn": host.iqn,
+ }
+ return hap_info
+
+
+@_api_permission_denied_handler("arrays")
+def generate_array_dict(module, fusion):
+ array_info = {}
+ array_api_instance = purefusion.ArraysApi(fusion)
+ az_api_instance = purefusion.AvailabilityZonesApi(fusion)
+ regions_api_instance = purefusion.RegionsApi(fusion)
+ regions = regions_api_instance.list_regions()
+ for region in regions.items:
+ azs = az_api_instance.list_availability_zones(region_name=region.name)
+ for az in azs.items:
+ arrays = array_api_instance.list_arrays(
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for array in arrays.items:
+ array_name = array.name
+ array_space = array_api_instance.get_array_space(
+ availability_zone_name=az.name,
+ array_name=array_name,
+ region_name=region.name,
+ )
+ array_perf = array_api_instance.get_array_performance(
+ availability_zone_name=az.name,
+ array_name=array_name,
+ region_name=region.name,
+ )
+ array_info[array_name] = {
+ "region": region.name,
+ "availability_zone": az.name,
+ "host_name": array.host_name,
+ "maintenance_mode": array.maintenance_mode,
+ "unavailable_mode": array.unavailable_mode,
+ "display_name": array.display_name,
+ "hardware_type": array.hardware_type.name,
+ "appliance_id": array.appliance_id,
+ "apartment_id": getattr(array, "apartment_id", None),
+ "space": {
+ "total_physical_space": array_space.total_physical_space,
+ },
+ "performance": {
+ "read_bandwidth": array_perf.read_bandwidth,
+ "read_latency_us": array_perf.read_latency_us,
+ "reads_per_sec": array_perf.reads_per_sec,
+ "write_bandwidth": array_perf.write_bandwidth,
+ "write_latency_us": array_perf.write_latency_us,
+ "writes_per_sec": array_perf.writes_per_sec,
+ },
+ }
+ return array_info
+
+
+@_api_permission_denied_handler("placement_groups")
+def generate_pg_dict(module, fusion):
+ pg_info = {}
+ tenant_api_instance = purefusion.TenantsApi(fusion)
+ tenantspace_api_instance = purefusion.TenantSpacesApi(fusion)
+ pg_api_instance = purefusion.PlacementGroupsApi(fusion)
+ tenants = tenant_api_instance.list_tenants()
+ for tenant in tenants.items:
+ tenant_spaces = tenantspace_api_instance.list_tenant_spaces(
+ tenant_name=tenant.name
+ ).items
+ for tenant_space in tenant_spaces:
+ groups = pg_api_instance.list_placement_groups(
+ tenant_name=tenant.name,
+ tenant_space_name=tenant_space.name,
+ )
+ for group in groups.items:
+ group_name = tenant.name + "/" + tenant_space.name + "/" + group.name
+ pg_info[group_name] = {
+ "tenant": group.tenant.name,
+ "display_name": group.display_name,
+ "placement_engine": group.placement_engine,
+ "tenant_space": group.tenant_space.name,
+ "az": group.availability_zone.name,
+ "array": getattr(group.array, "name", None),
+ }
+ return pg_info
+
+
+@_api_permission_denied_handler("tenant_spaces")
+def generate_ts_dict(module, fusion):
+ ts_info = {}
+ tenant_api_instance = purefusion.TenantsApi(fusion)
+ tenantspace_api_instance = purefusion.TenantSpacesApi(fusion)
+ tenants = tenant_api_instance.list_tenants()
+ for tenant in tenants.items:
+ tenant_spaces = tenantspace_api_instance.list_tenant_spaces(
+ tenant_name=tenant.name
+ ).items
+ for tenant_space in tenant_spaces:
+ ts_name = tenant.name + "/" + tenant_space.name
+ ts_info[ts_name] = {
+ "tenant": tenant.name,
+ "display_name": tenant_space.display_name,
+ }
+ return ts_info
+
+
+@_api_permission_denied_handler("protection_policies")
+def generate_pp_dict(module, fusion):
+ pp_info = {}
+ api_instance = purefusion.ProtectionPoliciesApi(fusion)
+ policies = api_instance.list_protection_policies()
+ for policy in policies.items:
+ policy_name = policy.name
+ pp_info[policy_name] = {
+ "objectives": policy.objectives,
+ }
+ return pp_info
+
+
+@_api_permission_denied_handler("tenants")
+def generate_tenant_dict(module, fusion):
+ tenants_api_instance = purefusion.TenantsApi(fusion)
+ return {
+ tenant.name: {
+ "display_name": tenant.display_name,
+ }
+ for tenant in tenants_api_instance.list_tenants().items
+ }
+
+
+@_api_permission_denied_handler("regions")
+def generate_regions_dict(module, fusion):
+ regions_api_instance = purefusion.RegionsApi(fusion)
+ return {
+ region.name: {
+ "display_name": region.display_name,
+ }
+ for region in regions_api_instance.list_regions().items
+ }
+
+
+@_api_permission_denied_handler("availability_zones")
+def generate_zones_dict(module, fusion):
+ zones_info = {}
+ az_api_instance = purefusion.AvailabilityZonesApi(fusion)
+ regions_api_instance = purefusion.RegionsApi(fusion)
+ regions = regions_api_instance.list_regions()
+ for region in regions.items:
+ zones = az_api_instance.list_availability_zones(region_name=region.name)
+ for zone in zones.items:
+ az_name = zone.name
+ zones_info[az_name] = {
+ "display_name": zone.display_name,
+ "region": zone.region.name,
+ }
+ return zones_info
+
+
+@_api_permission_denied_handler("role_assignments")
+def generate_ras_dict(module, fusion):
+ ras_info = {}
+ ras_api_instance = purefusion.RoleAssignmentsApi(fusion)
+ role_api_instance = purefusion.RolesApi(fusion)
+ roles = role_api_instance.list_roles()
+ for role in roles:
+ ras = ras_api_instance.list_role_assignments(role_name=role.name)
+ for assignment in ras:
+ name = assignment.name
+ ras_info[name] = {
+ "display_name": assignment.display_name,
+ "role": assignment.role.name,
+ "scope": assignment.scope.name,
+ }
+ return ras_info
+
+
+@_api_permission_denied_handler("roles")
+def generate_roles_dict(module, fusion):
+ roles_info = {}
+ api_instance = purefusion.RolesApi(fusion)
+ roles = api_instance.list_roles()
+ for role in roles:
+ name = role.name
+ roles_info[name] = {
+ "display_name": role.display_name,
+ "scopes": role.assignable_scopes,
+ }
+ return roles_info
+
+
+@_api_permission_denied_handler("api_clients")
+def generate_api_client_dict(module, fusion):
+ client_info = {}
+ api_instance = purefusion.IdentityManagerApi(fusion)
+ clients = api_instance.list_api_clients()
+ for client in clients:
+ client_info[client.name] = {
+ "display_name": client.display_name,
+ "issuer": client.issuer,
+ "public_key": client.public_key,
+ "creator_id": client.creator_id,
+ "last_key_update": time.strftime(
+ "%a, %d %b %Y %H:%M:%S %Z",
+ time.localtime(client.last_key_update / 1000),
+ ),
+ "last_used": time.strftime(
+ "%a, %d %b %Y %H:%M:%S %Z",
+ time.localtime(client.last_used / 1000),
+ ),
+ }
+ return client_info
+
+
+@_api_permission_denied_handler("users")
+def generate_users_dict(module, fusion):
+ users_info = {}
+ api_instance = purefusion.IdentityManagerApi(fusion)
+ users = api_instance.list_users()
+ for user in users:
+ users_info[user.name] = {
+ "display_name": user.display_name,
+ "email": user.email,
+ "id": user.id,
+ }
+ return users_info
+
+
+@_api_permission_denied_handler("hardware_types")
+def generate_hardware_types_dict(module, fusion):
+ hardware_info = {}
+ api_instance = purefusion.HardwareTypesApi(fusion)
+ hw_types = api_instance.list_hardware_types()
+ for hw_type in hw_types.items:
+ hardware_info[hw_type.name] = {
+ "array_type": hw_type.array_type,
+ "display_name": hw_type.display_name,
+ "media_type": hw_type.media_type,
+ }
+ return hardware_info
+
+
+@_api_permission_denied_handler("storage_classes")
+def generate_sc_dict(module, fusion):
+ sc_info = {}
+ ss_api_instance = purefusion.StorageServicesApi(fusion)
+ sc_api_instance = purefusion.StorageClassesApi(fusion)
+ services = ss_api_instance.list_storage_services()
+ for service in services.items:
+ classes = sc_api_instance.list_storage_classes(
+ storage_service_name=service.name,
+ )
+ for s_class in classes.items:
+ sc_info[s_class.name] = {
+ "bandwidth_limit": getattr(s_class, "bandwidth_limit", None),
+ "iops_limit": getattr(s_class, "iops_limit", None),
+ "size_limit": getattr(s_class, "size_limit", None),
+ "display_name": s_class.display_name,
+ "storage_service": service.name,
+ }
+ return sc_info
+
+
+@_api_permission_denied_handler("storage_services")
+def generate_storserv_dict(module, fusion):
+ ss_dict = {}
+ ss_api_instance = purefusion.StorageServicesApi(fusion)
+ services = ss_api_instance.list_storage_services()
+ for service in services.items:
+ ss_dict[service.name] = {
+ "display_name": service.display_name,
+ "hardware_types": None,
+ }
+ # can be None if we don't have permission to see this
+ if service.hardware_types is not None:
+ ss_dict[service.name]["hardware_types"] = []
+ for hwtype in service.hardware_types:
+ ss_dict[service.name]["hardware_types"].append(hwtype.name)
+ return ss_dict
+
+
+@_api_permission_denied_handler("storage_endpoints")
+def generate_se_dict(module, fusion):
+ se_dict = {}
+ se_api_instance = purefusion.StorageEndpointsApi(fusion)
+ az_api_instance = purefusion.AvailabilityZonesApi(fusion)
+ regions_api_instance = purefusion.RegionsApi(fusion)
+ regions = regions_api_instance.list_regions()
+ for region in regions.items:
+ azs = az_api_instance.list_availability_zones(region_name=region.name)
+ for az in azs.items:
+ endpoints = se_api_instance.list_storage_endpoints(
+ region_name=region.name,
+ availability_zone_name=az.name,
+ )
+ for endpoint in endpoints.items:
+ name = region.name + "/" + az.name + "/" + endpoint.name
+ se_dict[name] = {
+ "display_name": endpoint.display_name,
+ "endpoint_type": endpoint.endpoint_type,
+ "iscsi_interfaces": [],
+ }
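+ # flatten the endpoint's iSCSI discovery interfaces; network_interface_groups can be None if hidden by permissions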
+ for iface in endpoint.iscsi.discovery_interfaces:
+ dct = {
+ "address": iface.address,
+ "gateway": iface.gateway,
+ "mtu": iface.mtu,
+ "network_interface_groups": None,
+ }
+ if iface.network_interface_groups is not None:
+ dct["network_interface_groups"] = [
+ nig.name for nig in iface.network_interface_groups
+ ]
+ se_dict[name]["iscsi_interfaces"].append(dct)
+ return se_dict
+
+
+@_api_permission_denied_handler("network_interface_groups")
+def generate_nigs_dict(module, fusion):
+ nigs_dict = {}
+ nig_api_instance = purefusion.NetworkInterfaceGroupsApi(fusion)
+ az_api_instance = purefusion.AvailabilityZonesApi(fusion)
+ regions_api_instance = purefusion.RegionsApi(fusion)
+ regions = regions_api_instance.list_regions()
+ for region in regions.items:
+ azs = az_api_instance.list_availability_zones(region_name=region.name)
+ for az in azs.items:
+ nigs = nig_api_instance.list_network_interface_groups(
+ region_name=region.name,
+ availability_zone_name=az.name,
+ )
+ for nig in nigs.items:
+ name = region.name + "/" + az.name + "/" + nig.name
+ nigs_dict[name] = {
+ "display_name": nig.display_name,
+ "gateway": nig.eth.gateway,
+ "prefix": nig.eth.prefix,
+ "mtu": nig.eth.mtu,
+ }
+ return nigs_dict
+
+
+@_api_permission_denied_handler("snapshots")
+def generate_snap_dicts(module, fusion):
+ snap_dict = {}
+ vsnap_dict = {}
+ tenant_api_instance = purefusion.TenantsApi(fusion)
+ tenantspace_api_instance = purefusion.TenantSpacesApi(fusion)
+ snap_api_instance = purefusion.SnapshotsApi(fusion)
+ vsnap_api_instance = purefusion.VolumeSnapshotsApi(fusion)
+ tenants = tenant_api_instance.list_tenants()
+ for tenant in tenants.items:
+ tenant_spaces = tenantspace_api_instance.list_tenant_spaces(
+ tenant_name=tenant.name
+ ).items
+ for tenant_space in tenant_spaces:
+ snaps = snap_api_instance.list_snapshots(
+ tenant_name=tenant.name,
+ tenant_space_name=tenant_space.name,
+ )
+ for snap in snaps.items:
+ snap_name = tenant.name + "/" + tenant_space.name + "/" + snap.name
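+ # _convert_microseconds returns (secs, mins, hours) for the snapshot's remaining lifetime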
+ secs, mins, hours = _convert_microseconds(snap.time_remaining)
+ snap_dict[snap_name] = {
+ "display_name": snap.display_name,
+ "protection_policy": snap.protection_policy,
+ "time_remaining": "{0} hours, {1} mins, {2} secs".format(
+ int(hours), int(mins), int(secs)
+ ),
+ "volume_snapshots_link": snap.volume_snapshots_link,
+ }
+ vsnaps = vsnap_api_instance.list_volume_snapshots(
+ tenant_name=tenant.name,
+ tenant_space_name=tenant_space.name,
+ snapshot_name=snap.name,
+ )
+ for vsnap in vsnaps.items:
+ vsnap_name = (
+ tenant.name
+ + "/"
+ + tenant_space.name
+ + "/"
+ + snap.name
+ + "/"
+ + vsnap.name
+ )
+ secs, mins, hours = _convert_microseconds(vsnap.time_remaining)
+ vsnap_dict[vsnap_name] = {
+ "size": vsnap.size,
+ "display_name": vsnap.display_name,
+ "protection_policy": vsnap.protection_policy,
+ "serial_number": vsnap.serial_number,
+ "created_at": time.strftime(
+ "%a, %d %b %Y %H:%M:%S %Z",
+ time.localtime(vsnap.created_at / 1000),
+ ),
+ "time_remaining": "{0} hours, {1} mins, {2} secs".format(
+ int(hours), int(mins), int(secs)
+ ),
+ "placement_group": vsnap.placement_group.name,
+ }
+ return snap_dict, vsnap_dict
+
+
+@_api_permission_denied_handler("volumes")
+def generate_volumes_dict(module, fusion):
+ volume_info = {}
+
+ tenant_api_instance = purefusion.TenantsApi(fusion)
+ vol_api_instance = purefusion.VolumesApi(fusion)
+ tenant_space_api_instance = purefusion.TenantSpacesApi(fusion)
+
+ tenants = tenant_api_instance.list_tenants()
+ for tenant in tenants.items:
+ tenant_spaces = tenant_space_api_instance.list_tenant_spaces(
+ tenant_name=tenant.name
+ ).items
+ for tenant_space in tenant_spaces:
+ volumes = vol_api_instance.list_volumes(
+ tenant_name=tenant.name,
+ tenant_space_name=tenant_space.name,
+ )
+ for volume in volumes.items:
+ vol_name = tenant.name + "/" + tenant_space.name + "/" + volume.name
+ volume_info[vol_name] = {
+ "tenant": tenant.name,
+ "tenant_space": tenant_space.name,
+ "name": volume.name,
+ "size": volume.size,
+ "display_name": volume.display_name,
+ "placement_group": volume.placement_group.name,
+ "source_volume_snapshot": getattr(
+ volume.source_volume_snapshot, "name", None
+ ),
+ "protection_policy": getattr(
+ volume.protection_policy, "name", None
+ ),
+ "storage_class": volume.storage_class.name,
+ "serial_number": volume.serial_number,
+ "target": {},
+ "array": getattr(volume.array, "name", None),
+ }
+
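+ # only the iSCSI target is populated by this module; the NVMe and FC entries are placeholders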
+ volume_info[vol_name]["target"] = {
+ "iscsi": {
+ "addresses": volume.target.iscsi.addresses,
+ "iqn": volume.target.iscsi.iqn,
+ },
+ "nvme": {
+ "addresses": None,
+ "nqn": None,
+ },
+ "fc": {
+ "addresses": None,
+ "wwns": None,
+ },
+ }
+ return volume_info
+
+
+def main():
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(gather_subset=dict(default="minimum", type="list", elements="str"))
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ # setup_fusion() handles all connection and auth errors (except HTTP 403, which the per-subset permission-denied decorator handles)
+ fusion = setup_fusion(module)
+
+ subset = [test.lower() for test in module.params["gather_subset"]]
+ valid_subsets = (
+ "all",
+ "minimum",
+ "roles",
+ "users",
+ "placements",
+ "arrays",
+ "hardware_types",
+ "volumes",
+ "hosts",
+ "storage_classes",
+ "protection_policies",
+ "placement_groups",
+ "interfaces",
+ "zones",
+ "nigs",
+ "storage_endpoints",
+ "snapshots",
+ "storage_services",
+ "tenants",
+ "tenant_spaces",
+ "network_interface_groups",
+ "api_clients",
+ "availability_zones",
+ "host_access_policies",
+ "network_interfaces",
+ "regions",
+ )
+ for option in subset:
+ if option not in valid_subsets:
+ module.fail_json(
+ msg=f"value gather_subset must be one or more of: {','.join(valid_subsets)}, got: {','.join(subset)}\nvalue {option} is not allowed"
+ )
+
+ info = {}
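+ # each generate_* helper returns None when the API denies access (see _api_permission_denied_handler)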
+
+ if "minimum" in subset or "all" in subset:
+ info["default"] = generate_default_dict(module, fusion)
+ if "hardware_types" in subset or "all" in subset:
+ info["hardware_types"] = generate_hardware_types_dict(module, fusion)
+ if "users" in subset or "all" in subset:
+ info["users"] = generate_users_dict(module, fusion)
+ if "regions" in subset or "all" in subset:
+ info["regions"] = generate_regions_dict(module, fusion)
+ if "availability_zones" in subset or "all" in subset or "zones" in subset:
+ info["availability_zones"] = generate_zones_dict(module, fusion)
+ if "zones" in subset:
+ module.warn(
+ "The 'zones' subset is deprecated and will be removed in the version 2.0.0\nUse 'availability_zones' subset instead."
+ )
+ if "roles" in subset or "all" in subset:
+ info["roles"] = generate_roles_dict(module, fusion)
+ info["role_assignments"] = generate_ras_dict(module, fusion)
+ if "storage_services" in subset or "all" in subset:
+ info["storage_services"] = generate_storserv_dict(module, fusion)
+ if "volumes" in subset or "all" in subset:
+ info["volumes"] = generate_volumes_dict(module, fusion)
+ if "protection_policies" in subset or "all" in subset:
+ info["protection_policies"] = generate_pp_dict(module, fusion)
+ if "placement_groups" in subset or "all" in subset or "placements" in subset:
+ info["placement_groups"] = generate_pg_dict(module, fusion)
+ if "placements" in subset:
+ module.warn(
+ "The 'placements' subset is deprecated and will be removed in the version 1.7.0"
+ )
+ if "storage_classes" in subset or "all" in subset:
+ info["storage_classes"] = generate_sc_dict(module, fusion)
+ if "network_interfaces" in subset or "all" in subset or "interfaces" in subset:
+ info["network_interfaces"] = generate_nics_dict(module, fusion)
+ if "interfaces" in subset:
+ module.warn(
+ "The 'interfaces' subset is deprecated and will be removed in the version 2.0.0\nUse 'network_interfaces' subset instead."
+ )
+ if "host_access_policies" in subset or "all" in subset or "hosts" in subset:
+ info["host_access_policies"] = generate_hap_dict(module, fusion)
+ if "hosts" in subset:
+ module.warn(
+ "The 'hosts' subset is deprecated and will be removed in the version 2.0.0\nUse 'host_access_policies' subset instead."
+ )
+ if "arrays" in subset or "all" in subset:
+ info["arrays"] = generate_array_dict(module, fusion)
+ if "tenants" in subset or "all" in subset:
+ info["tenants"] = generate_tenant_dict(module, fusion)
+ if "tenant_spaces" in subset or "all" in subset:
+ info["tenant_spaces"] = generate_ts_dict(module, fusion)
+ if "storage_endpoints" in subset or "all" in subset:
+ info["storage_endpoints"] = generate_se_dict(module, fusion)
+ if "api_clients" in subset or "all" in subset:
+ info["api_clients"] = generate_api_client_dict(module, fusion)
+ if "network_interface_groups" in subset or "all" in subset or "nigs" in subset:
+ info["network_interface_groups"] = generate_nigs_dict(module, fusion)
+ if "nigs" in subset:
+ module.warn(
+ "The 'nigs' subset is deprecated and will be removed in the version 1.7.0"
+ )
+ if "snapshots" in subset or "all" in subset:
+ snap_dicts = generate_snap_dicts(module, fusion)
+ if snap_dicts is not None:
+ info["snapshots"], info["volume_snapshots"] = snap_dicts
+ else:
+ info["snapshots"], info["volume_snapshots"] = None, None
+
+ module.exit_json(changed=False, fusion_info=info)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ni.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ni.py
new file mode 100644
index 000000000..6816ed841
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ni.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_ni
+version_added: '1.0.0'
+short_description: Manage network interfaces in Pure Storage Fusion
+description:
+- Update parameters of network interfaces in Pure Storage Fusion.
+notes:
+- Supports C(check_mode).
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the network interface.
+ type: str
+ required: true
+ display_name:
+ description:
+ - The human name of the network interface.
+ - If not provided, defaults to I(name).
+ type: str
+ region:
+ description:
+ - The name of the region the availability zone is in.
+ type: str
+ required: true
+ availability_zone:
+ aliases: [ az ]
+ description:
+ - The name of the availability zone for the network interface.
+ type: str
+ required: true
+ array:
+ description:
+ - The name of the array the network interface belongs to.
+ type: str
+ required: true
+ eth:
+ description:
+ - The IP address associated with the network interface.
+ - IP address must include a CIDR notation.
+ - Only IPv4 is supported at the moment.
+ - Required together with the I(network_interface_group) parameter.
+ type: str
+ enabled:
+ description:
+ - True if network interface is in use.
+ type: bool
+ network_interface_group:
+ description:
+ - The name of the network interface group this network interface belongs to.
+ type: str
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Patch network interface
+ purestorage.fusion.fusion_ni:
+ name: foo
+ region: us-west
+ availability_zone: bar
+ array: array0
+ eth: 10.21.200.124/24
+ enabled: true
+ network_interface_group: subnet-0
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
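+
+# A second illustrative example (values are assumed): disable an interface
+# without changing its addressing.
+- name: Disable network interface foo
+ purestorage.fusion.fusion_ni:
+ name: foo
+ region: us-west
+ availability_zone: bar
+ array: array0
+ enabled: false
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"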
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.getters import (
+ get_array,
+ get_az,
+ get_region,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.networking import (
+ is_valid_network,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+def get_ni(module, fusion):
+ """Get Network Interface or None"""
+ ni_api_instance = purefusion.NetworkInterfacesApi(fusion)
+ try:
+ return ni_api_instance.get_network_interface(
+ region_name=module.params["region"],
+ availability_zone_name=module.params["availability_zone"],
+ array_name=module.params["array"],
+ net_intf_name=module.params["name"],
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def update_ni(module, fusion, ni):
+ """Update Network Interface"""
+ ni_api_instance = purefusion.NetworkInterfacesApi(fusion)
+
+ patches = []
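+ # one patch object is built per changed attribute and applied sequentially below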
+ if (
+ module.params["display_name"]
+ and module.params["display_name"] != ni.display_name
+ ):
+ patch = purefusion.NetworkInterfacePatch(
+ display_name=purefusion.NullableString(module.params["display_name"]),
+ )
+ patches.append(patch)
+
+ if module.params["enabled"] is not None and module.params["enabled"] != ni.enabled:
+ patch = purefusion.NetworkInterfacePatch(
+ enabled=purefusion.NullableBoolean(module.params["enabled"]),
+ )
+ patches.append(patch)
+
+ if (
+ module.params["network_interface_group"]
+ and module.params["network_interface_group"] != ni.network_interface_group
+ ):
+ if module.params["eth"] and module.params["eth"] != ni.eth:
+ patch = purefusion.NetworkInterfacePatch(
+ eth=purefusion.NetworkInterfacePatchEth(
+ purefusion.NullableString(module.params["eth"])
+ ),
+ network_interface_group=purefusion.NullableString(
+ module.params["network_interface_group"]
+ ),
+ )
+ else:
+ patch = purefusion.NetworkInterfacePatch(
+ network_interface_group=purefusion.NullableString(
+ module.params["network_interface_group"]
+ ),
+ )
+ patches.append(patch)
+
+ if not module.check_mode:
+ for patch in patches:
+ op = ni_api_instance.update_network_interface(
+ patch,
+ region_name=module.params["region"],
+ availability_zone_name=module.params["availability_zone"],
+ array_name=module.params["array"],
+ net_intf_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ changed = len(patches) != 0
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ region=dict(type="str", required=True),
+ availability_zone=dict(type="str", required=True, aliases=["az"]),
+ array=dict(type="str", required=True),
+ eth=dict(type="str"),
+ enabled=dict(type="bool"),
+ network_interface_group=dict(type="str"),
+ )
+ )
+
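+ # per required_by below, supplying eth also requires network_interface_group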
+ required_by = {
+ "eth": "network_interface_group",
+ }
+
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ required_by=required_by,
+ )
+
+ fusion = setup_fusion(module)
+
+ if module.params["eth"] and not is_valid_network(module.params["eth"]):
+ module.fail_json(
+ msg="`eth` '{0}' is not a valid address in CIDR notation".format(
+ module.params["eth"]
+ )
+ )
+
+ if not get_region(module, fusion):
+ module.fail_json(
+ msg="Region {0} does not exist.".format(module.params["region"])
+ )
+
+ if not get_az(module, fusion):
+ module.fail_json(
+ msg="Availability Zone {0} does not exist.".format(
+ module.params["availability_zone"]
+ )
+ )
+
+ if not get_array(module, fusion):
+ module.fail_json(msg="Array {0} does not exist.".format(module.params["array"]))
+
+ ni = get_ni(module, fusion)
+ if not ni:
+ module.fail_json(
+ msg="Network Interface {0} does not exist".format(module.params["name"])
+ )
+
+ update_ni(module, fusion, ni)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_nig.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_nig.py
new file mode 100644
index 000000000..d6056fd5a
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_nig.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_nig
+version_added: '1.0.0'
+short_description: Manage Network Interface Groups in Pure Storage Fusion
+description:
+- Create, delete and modify network interface groups in Pure Storage Fusion.
+- Currently this only supports a single tenant subnet per tenant network.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check_mode).
+options:
+ name:
+ description:
+ - The name of the network interface group.
+ type: str
+ required: true
+ display_name:
+ description:
+ - The human name of the network interface group.
+ - If not provided, defaults to I(name).
+ type: str
+ state:
+ description:
+ - Define whether the network interface group should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ availability_zone:
+ aliases: [ az ]
+ description:
+ - The name of the availability zone for the network interface group.
+ type: str
+ required: true
+ region:
+ description:
+ - Region for the network interface group.
+ type: str
+ required: true
+ gateway:
+ description:
+ - "Address of the subnet gateway.
+ Currently must be a valid IPv4 address."
+ type: str
+ mtu:
+ description:
+ - MTU setting for the subnet.
+ default: 1500
+ type: int
+ group_type:
+ description:
+ - The type of network interface group.
+ type: str
+ default: eth
+ choices: [ eth ]
+ prefix:
+ description:
+ - "Network prefix in CIDR notation.
+ Required to create a new network interface group.
+ Currently only IPv4 addresses with subnet mask are supported."
+ type: str
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new network interface group foo in AZ bar
+ purestorage.fusion.fusion_nig:
+ name: foo
+ availability_zone: bar
+ region: region1
+ mtu: 9000
+ gateway: 10.21.200.1
+ prefix: 10.21.200.0/24
+ state: present
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete network interface group foo in AZ bar
+ purestorage.fusion.fusion_nig:
+ name: foo
+ availability_zone: bar
+ region: region1
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.networking import (
+ is_valid_address,
+ is_valid_network,
+ is_address_in_network,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+def get_nig(module, fusion):
+ """Check Network Interface Group"""
+ nig_api_instance = purefusion.NetworkInterfaceGroupsApi(fusion)
+ try:
+ return nig_api_instance.get_network_interface_group(
+ availability_zone_name=module.params["availability_zone"],
+ region_name=module.params["region"],
+ network_interface_group_name=module.params["name"],
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def create_nig(module, fusion):
+ """Create Network Interface Group"""
+
+ nig_api_instance = purefusion.NetworkInterfaceGroupsApi(fusion)
+
+ changed = False
+ if module.params["gateway"] and not is_address_in_network(
+ module.params["gateway"], module.params["prefix"]
+ ):
+ module.fail_json(msg="`gateway` must be an address in subnet `prefix`")
+
+ if not module.check_mode:
+ display_name = module.params["display_name"] or module.params["name"]
+ if module.params["group_type"] == "eth":
+ if module.params["gateway"]:
+ eth = purefusion.NetworkInterfaceGroupEthPost(
+ prefix=module.params["prefix"],
+ gateway=module.params["gateway"],
+ mtu=module.params["mtu"],
+ )
+ else:
+ eth = purefusion.NetworkInterfaceGroupEthPost(
+ prefix=module.params["prefix"],
+ mtu=module.params["mtu"],
+ )
+ nig = purefusion.NetworkInterfaceGroupPost(
+ group_type="eth",
+ eth=eth,
+ name=module.params["name"],
+ display_name=display_name,
+ )
+ op = nig_api_instance.create_network_interface_group(
+ nig,
+ availability_zone_name=module.params["availability_zone"],
+ region_name=module.params["region"],
+ )
+ await_operation(fusion, op)
+ changed = True
+ else:
+ # guard against a new group_type being added without an implementation here
+ module.warn(f"group_type={module.params['group_type']} is not implemented")
+
+ module.exit_json(changed=changed)
+
+
+def delete_nig(module, fusion):
+ """Delete Network Interface Group"""
+ changed = True
+ nig_api_instance = purefusion.NetworkInterfaceGroupsApi(fusion)
+ if not module.check_mode:
+ op = nig_api_instance.delete_network_interface_group(
+ availability_zone_name=module.params["availability_zone"],
+ region_name=module.params["region"],
+ network_interface_group_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+ module.exit_json(changed=changed)
+
+
+def update_nig(module, fusion, nig):
+ """Update Network Interface Group"""
+
+ nifg_api_instance = purefusion.NetworkInterfaceGroupsApi(fusion)
+ patches = []
+ if (
+ module.params["display_name"]
+ and module.params["display_name"] != nig.display_name
+ ):
+ patch = purefusion.NetworkInterfaceGroupPatch(
+ display_name=purefusion.NullableString(module.params["display_name"]),
+ )
+ patches.append(patch)
+
+ if not module.check_mode:
+ for patch in patches:
+ op = nifg_api_instance.update_network_interface_group(
+ patch,
+ availability_zone_name=module.params["availability_zone"],
+ region_name=module.params["region"],
+ network_interface_group_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ changed = len(patches) != 0
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ availability_zone=dict(type="str", required=True, aliases=["az"]),
+ region=dict(type="str", required=True),
+ prefix=dict(type="str"),
+ gateway=dict(type="str"),
+ mtu=dict(type="int", default=1500),
+ group_type=dict(type="str", default="eth", choices=["eth"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+ if module.params["prefix"] and not is_valid_network(module.params["prefix"]):
+ module.fail_json(
+ msg="`prefix` '{0}' is not a valid address in CIDR notation".format(
+ module.params["prefix"]
+ )
+ )
+ if module.params["gateway"] and not is_valid_address(module.params["gateway"]):
+ module.fail_json(
+ msg="`gateway` '{0}' is not a valid address".format(
+ module.params["gateway"]
+ )
+ )
+
+ nig = get_nig(module, fusion)
+
+ if state == "present" and not nig:
+ module.fail_on_missing_params(["prefix"])
+ create_nig(module, fusion)
+ elif state == "present" and nig:
+ update_nig(module, fusion, nig)
+ elif state == "absent" and nig:
+ delete_nig(module, fusion)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_pg.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_pg.py
new file mode 100644
index 000000000..57843d896
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_pg.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_pg
+version_added: '1.0.0'
+short_description: Manage placement groups in Pure Storage Fusion
+description:
+- Create, update or delete placement groups in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check_mode).
+options:
+ name:
+ description:
+ - The name of the placement group.
+ type: str
+ required: true
+ display_name:
+ description:
+ - The human name of the placement group.
+ - If not provided, defaults to I(name).
+ type: str
+ state:
+ description:
+ - Define whether the placement group should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ tenant:
+ description:
+ - The name of the tenant.
+ type: str
+ required: true
+ tenant_space:
+ description:
+ - The name of the tenant space.
+ type: str
+ required: true
+ region:
+ description:
+ - The name of the region the availability zone is in.
+ type: str
+ availability_zone:
+ aliases: [ az ]
+ description:
+ - The name of the availability zone the placement group is in.
+ type: str
+ storage_service:
+ description:
+ - The name of the storage service to create the placement group for.
+ type: str
+ array:
+ description:
+ - "Array to place the placement group to. Changing it (i.e. manual migration)
+ is an elevated operation."
+ type: str
+ placement_engine:
+ description:
+ - For workload placement recommendations from Pure1 Meta, use C(pure1meta).
+ - Please note that this might increase volume creation time.
+ type: str
+ choices: [ heuristics, pure1meta ]
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new placement group named foo
+ purestorage.fusion.fusion_pg:
+ name: foo
+ tenant: test
+ tenant_space: space_1
+ availability_zone: az1
+ region: region1
+ storage_service: storage_service_1
+ state: present
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete placement group foo
+ purestorage.fusion.fusion_pg:
+ name: foo
+ tenant: test
+ tenant_space: space_1
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+def get_pg(module, fusion):
+ """Return Placement Group or None"""
+ pg_api_instance = purefusion.PlacementGroupsApi(fusion)
+ try:
+ return pg_api_instance.get_placement_group(
+ tenant_name=module.params["tenant"],
+ tenant_space_name=module.params["tenant_space"],
+ placement_group_name=module.params["name"],
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def create_pg(module, fusion):
+ """Create Placement Group"""
+
+ pg_api_instance = purefusion.PlacementGroupsApi(fusion)
+
+ if not module.check_mode:
+ if not module.params["display_name"]:
+ display_name = module.params["name"]
+ else:
+ display_name = module.params["display_name"]
+ group = purefusion.PlacementGroupPost(
+ availability_zone=module.params["availability_zone"],
+ name=module.params["name"],
+ display_name=display_name,
+ region=module.params["region"],
+ storage_service=module.params["storage_service"],
+ )
+ op = pg_api_instance.create_placement_group(
+ group,
+ tenant_name=module.params["tenant"],
+ tenant_space_name=module.params["tenant_space"],
+ )
+ await_operation(fusion, op)
+
+ return True
+
+
+def update_display_name(module, fusion, patches, pg):
+ if not module.params["display_name"]:
+ return
+ if module.params["display_name"] == pg.display_name:
+ return
+ patch = purefusion.PlacementGroupPatch(
+ display_name=purefusion.NullableString(module.params["display_name"]),
+ )
+ patches.append(patch)
+
+
+def update_array(module, fusion, patches, pg):
+ if not module.params["array"]:
+ return
+ if not pg.array:
+ module.warn(
+ "cannot see placement group array, probably missing required permissions to change it"
+ )
+ return
+ if pg.array.name == module.params["array"]:
+ return
+
+ patch = purefusion.PlacementGroupPatch(
+ array=purefusion.NullableString(module.params["array"]),
+ )
+ patches.append(patch)
+
+
+def update_pg(module, fusion, pg):
+ """Update Placement Group"""
+
+ pg_api_instance = purefusion.PlacementGroupsApi(fusion)
+ patches = []
+
+ update_display_name(module, fusion, patches, pg)
+ update_array(module, fusion, patches, pg)
+
+ if not module.check_mode:
+ for patch in patches:
+ op = pg_api_instance.update_placement_group(
+ patch,
+ tenant_name=module.params["tenant"],
+ tenant_space_name=module.params["tenant_space"],
+ placement_group_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ changed = len(patches) != 0
+ return changed
+
+
+def delete_pg(module, fusion):
+ """Delete Placement Group"""
+ pg_api_instance = purefusion.PlacementGroupsApi(fusion)
+ if not module.check_mode:
+ op = pg_api_instance.delete_placement_group(
+ placement_group_name=module.params["name"],
+ tenant_name=module.params["tenant"],
+ tenant_space_name=module.params["tenant_space"],
+ )
+ await_operation(fusion, op)
+
+ return True
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ tenant=dict(type="str", required=True),
+ tenant_space=dict(type="str", required=True),
+ region=dict(type="str"),
+ availability_zone=dict(type="str", aliases=["az"]),
+ storage_service=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ array=dict(type="str"),
+ placement_engine=dict(
+ type="str",
+ choices=["heuristics", "pure1meta"],
+ removed_in_version="2.0.0",
+ removed_from_collection="purestorage.fusion",
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ fusion = setup_fusion(module)
+
+ if module.params["placement_engine"]:
+ module.warn("placement_engine parameter will be deprecated in version 2.0.0")
+
+ changed = False
+
+ state = module.params["state"]
+ pgroup = get_pg(module, fusion)
+
+ if state == "present" and not pgroup:
+ module.fail_on_missing_params(
+ ["region", "availability_zone", "storage_service"]
+ )
+ changed = create_pg(module, fusion) or changed
+ if module.params["array"]:
+ # changing placement requires additional update
+ pgroup = get_pg(module, fusion)
+ changed = update_pg(module, fusion, pgroup) or changed
+ elif state == "present" and pgroup:
+ changed = update_pg(module, fusion, pgroup) or changed
+ elif state == "absent" and pgroup:
+ changed = delete_pg(module, fusion) or changed
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_pp.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_pp.py
new file mode 100644
index 000000000..abce9195c
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_pp.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_pp
+version_added: '1.0.0'
+short_description: Manage protection policies in Pure Storage Fusion
+description:
+- Manage protection policies in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check_mode).
+options:
+ name:
+ description:
+ - The name of the protection policy.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the protection policy should exist or not.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ display_name:
+ description:
+ - The human name of the protection policy.
+ - If not provided, defaults to I(name).
+ type: str
+ local_rpo:
+ description:
+ - Recovery Point Objective for snapshots.
+ - Value should be specified in minutes.
+ - Minimum value is 10 minutes.
+ type: str
+ local_retention:
+ description:
+ - Retention Duration for periodic snapshots.
+ - Minimum value is 10 minutes.
+ - Value can be provided as m(inutes), h(ours),
+ d(ays), w(eeks), or y(ears).
+ - If no unit is provided, minutes are assumed.
+ type: str
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new protection policy foo
+ purestorage.fusion.fusion_pp:
+ name: foo
+ local_rpo: 10
+ local_retention: 4d
+ display_name: "foo pp"
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete protection policy foo
+ purestorage.fusion.fusion_pp:
+ name: foo
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.parsing import (
+ parse_minutes,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+def get_pp(module, fusion):
+ """Return Protection Policy or None"""
+ pp_api_instance = purefusion.ProtectionPoliciesApi(fusion)
+ try:
+ return pp_api_instance.get_protection_policy(
+ protection_policy_name=module.params["name"]
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def create_pp(module, fusion):
+ """Create Protection Policy"""
+
+ pp_api_instance = purefusion.ProtectionPoliciesApi(fusion)
+ local_rpo = parse_minutes(module, module.params["local_rpo"])
+ local_retention = parse_minutes(module, module.params["local_retention"])
+ if local_retention < 1:
+ module.fail_json(msg="Local Retention must be a minimum of 1 minutes")
+ if local_rpo < 10:
+ module.fail_json(msg="Local RPO must be a minimum of 10 minutes")
+ changed = True
+ if not module.check_mode:
+ if not module.params["display_name"]:
+ display_name = module.params["name"]
+ else:
+ display_name = module.params["display_name"]
+ op = pp_api_instance.create_protection_policy(
+ purefusion.ProtectionPolicyPost(
+ name=module.params["name"],
+ display_name=display_name,
+ objectives=[
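+ # objectives use ISO 8601 durations, e.g. "PT10M" means 10 minutes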
+ purefusion.RPO(type="RPO", rpo="PT" + str(local_rpo) + "M"),
+ purefusion.Retention(
+ type="Retention", after="PT" + str(local_retention) + "M"
+ ),
+ ],
+ )
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def delete_pp(module, fusion):
+ """Delete Protection Policy"""
+ pp_api_instance = purefusion.ProtectionPoliciesApi(fusion)
+ changed = True
+ if not module.check_mode:
+ op = pp_api_instance.delete_protection_policy(
+ protection_policy_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ local_rpo=dict(type="str"),
+ local_retention=dict(type="str"),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+ policy = get_pp(module, fusion)
+
+ if not policy and state == "present":
+ module.fail_on_missing_params(["local_rpo", "local_retention"])
+ create_pp(module, fusion)
+ elif policy and state == "absent":
+ delete_pp(module, fusion)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ra.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ra.py
new file mode 100644
index 000000000..7cfc7d866
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ra.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_ra
+version_added: '1.0.0'
+short_description: Manage role assignments in Pure Storage Fusion
+description:
+- Create or delete a role assignment in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check_mode).
+options:
+ role:
+ description:
+ - The name of the role to be assigned/unassigned.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the role assignment should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ user:
+ description:
+ - The username to assign the role to.
+ - Currently this only supports the Pure1 App ID.
+ - This should be provided in the same format as I(issuer_id).
+ type: str
+ principal:
+ description:
+ - The unique ID of the principal (User or API Client) to assign to the role.
+ type: str
+ api_client_key:
+ description:
+ - The key of API client to assign the role to.
+ type: str
+ scope:
+ description:
+ - The level to which the role is assigned.
+ choices: [ organization, tenant, tenant_space ]
+ default: organization
+ type: str
+ tenant:
+ description:
+ - The name of the tenant the user has the role applied to.
+ - Must be provided if I(scope) is set to either C(tenant) or C(tenant_space).
+ type: str
+ tenant_space:
+ description:
+ - The name of the tenant_space the user has the role applied to.
+ - Must be provided if I(scope) is set to C(tenant_space).
+ type: str
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Assign role foo to user in tenant bar
+ purestorage.fusion.fusion_ra:
+ role: foo
+ user: key_name
+ tenant: bar
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete role foo from user in tenant bar
+ purestorage.fusion.fusion_ra:
+ role: foo
+ user: key_name
+ tenant: bar
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+
+
+def get_principal(module, fusion):
+ if module.params["principal"]:
+ return module.params["principal"]
+ if module.params["user"]:
+ principal = user_to_principal(fusion, module.params["user"])
+ if not principal:
+ module.fail_json(
+ msg="User {0} does not exist".format(module.params["user"])
+ )
+ return principal
+ if module.params["api_client_key"]:
+ principal = apiclient_to_principal(fusion, module.params["api_client_key"])
+ if not principal:
+ module.fail_json(
+ msg="API Client with key {0} does not exist".format(
+ module.params["api_client_key"]
+ )
+ )
+ return principal
+
+
+def user_to_principal(fusion, user_id):
+ """Given a human readable Fusion user, such as a Pure 1 App ID
+ return the associated principal
+ """
+ id_api_instance = purefusion.IdentityManagerApi(fusion)
+ users = id_api_instance.list_users()
+ for user in users:
+ if user.name == user_id:
+ return user.id
+ return None
+
+
+def apiclient_to_principal(fusion, api_client_key):
+ """Given an API client key, such as "pure1:apikey:123xXxyYyzYzASDF" (also known as issuer_id),
+ return the associated principal
+ """
+ id_api_instance = purefusion.IdentityManagerApi(fusion)
+ api_clients = id_api_instance.list_users(name=api_client_key)
+ if len(api_clients) > 0:
+ return api_clients[0].id
+ return None
+
+
+def get_scope(params):
+ """Given a scope type and associated tenant
+ and tenant_space, return the scope_link
+ """
+ scope_link = None
+ if params["scope"] == "organization":
+ scope_link = "/"
+ elif params["scope"] == "tenant":
+ scope_link = "/tenants/" + params["tenant"]
+ elif params["scope"] == "tenant_space":
+ scope_link = (
+ "/tenants/" + params["tenant"] + "/tenant-spaces/" + params["tenant_space"]
+ )
+ return scope_link
+
+
+def get_ra(module, fusion):
+ """Return Role Assignment or None"""
+ ra_api_instance = purefusion.RoleAssignmentsApi(fusion)
+ try:
+ principal = get_principal(module, fusion)
+ assignments = ra_api_instance.list_role_assignments(
+ role_name=module.params["role"],
+ principal=principal,
+ )
+ for assign in assignments:
+ scope = get_scope(module.params)
+ if assign.scope.self_link == scope:
+ return assign
+ return None
+ except purefusion.rest.ApiException:
+ return None
+
+
+def create_ra(module, fusion):
+ """Create Role Assignment"""
+
+ ra_api_instance = purefusion.RoleAssignmentsApi(fusion)
+
+ changed = True
+ if not module.check_mode:
+ principal = get_principal(module, fusion)
+ scope = get_scope(module.params)
+ assignment = purefusion.RoleAssignmentPost(scope=scope, principal=principal)
+ op = ra_api_instance.create_role_assignment(
+ assignment, role_name=module.params["role"]
+ )
+ await_operation(fusion, op)
+ module.exit_json(changed=changed)
+
+
+def delete_ra(module, fusion):
+ """Delete Role Assignment"""
+ changed = True
+ ra_api_instance = purefusion.RoleAssignmentsApi(fusion)
+ if not module.check_mode:
+ ra_name = get_ra(module, fusion).name
+ op = ra_api_instance.delete_role_assignment(
+ role_name=module.params["role"], role_assignment_name=ra_name
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ api_client_key=dict(type="str", no_log=True),
+ principal=dict(type="str"),
+ role=dict(
+ type="str",
+ required=True,
+ deprecated_aliases=[
+ dict(
+ name="name",
+ date="2023-07-26",
+ collection_name="purefusion.fusion",
+ )
+ ],
+ ),
+ scope=dict(
+ type="str",
+ default="organization",
+ choices=["organization", "tenant", "tenant_space"],
+ ),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ tenant=dict(type="str"),
+ tenant_space=dict(type="str"),
+ user=dict(type="str"),
+ )
+ )
+
+ required_if = [
+ ["scope", "tenant", ["tenant"]],
+ ["scope", "tenant_space", ["tenant", "tenant_space"]],
+ ]
+ mutually_exclusive = [
+ ("user", "principal", "api_client_key"),
+ ]
+ required_one_of = [
+ ("user", "principal", "api_client_key"),
+ ]
+
+ module = AnsibleModule(
+ argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of,
+ )
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+ role_assignment = get_ra(module, fusion)
+
+ if not role_assignment and state == "present":
+ create_ra(module, fusion)
+ elif role_assignment and state == "absent":
+ delete_ra(module, fusion)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_region.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_region.py
new file mode 100644
index 000000000..fbcbff4b0
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_region.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_region
+version_added: '1.1.0'
+short_description: Manage Regions in Pure Storage Fusion
+description:
+- Manage regions in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check_mode).
+options:
+ name:
+ description:
+ - The name of the Region.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the Region should exist or not.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ display_name:
+ description:
+ - The human name of the Region.
+ - If not provided, defaults to I(name).
+ type: str
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new region foo
+ purestorage.fusion.fusion_region:
+ name: foo
+ display_name: "foo Region"
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Update region foo
+ purestorage.fusion.fusion_region:
+ name: foo
+ display_name: "new foo Region"
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete region foo
+ purestorage.fusion.fusion_region:
+ name: foo
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils import getters
+
+
+def get_region(module, fusion):
+ """Get Region or None"""
+ return getters.get_region(module, fusion, module.params["name"])
+
+
+def create_region(module, fusion):
+ """Create Region"""
+
+ reg_api_instance = purefusion.RegionsApi(fusion)
+
+ changed = True
+ if not module.check_mode:
+ if not module.params["display_name"]:
+ display_name = module.params["name"]
+ else:
+ display_name = module.params["display_name"]
+ region = purefusion.RegionPost(
+ name=module.params["name"],
+ display_name=display_name,
+ )
+ op = reg_api_instance.create_region(region)
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def delete_region(module, fusion):
+ """Delete Region"""
+
+ reg_api_instance = purefusion.RegionsApi(fusion)
+
+ changed = True
+ if not module.check_mode:
+ op = reg_api_instance.delete_region(region_name=module.params["name"])
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def update_region(module, fusion, region):
+ """Update Region settings"""
+ changed = False
+ reg_api_instance = purefusion.RegionsApi(fusion)
+
+ if (
+ module.params["display_name"]
+ and module.params["display_name"] != region.display_name
+ ):
+ changed = True
+ if not module.check_mode:
+ reg = purefusion.RegionPatch(
+ display_name=purefusion.NullableString(module.params["display_name"])
+ )
+ op = reg_api_instance.update_region(
+ reg,
+ region_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+ region = get_region(module, fusion)
+
+ if not region and state == "present":
+ create_region(module, fusion)
+ elif region and state == "present":
+ update_region(module, fusion, region)
+ elif region and state == "absent":
+ delete_region(module, fusion)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_sc.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_sc.py
new file mode 100644
index 000000000..2327b8d48
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_sc.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_sc
+version_added: '1.0.0'
+short_description: Manage storage classes in Pure Storage Fusion
+description:
+- Manage a storage class in Pure Storage Fusion.
+notes:
+- Supports C(check_mode).
+- It is not currently possible to update bw_limit or
+ iops_limit after a storage class has been created.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the storage class.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the storage class should exist or not.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ display_name:
+ description:
+ - The human name of the storage class.
+ - If not provided, defaults to I(name).
+ type: str
+ size_limit:
+ description:
+ - Volume size limit in M, G, T or P units.
+ - Must be between 1MB and 4PB.
+ - If not provided at creation, this will default to 4PB.
+ type: str
+ bw_limit:
+ description:
+ - The bandwidth limit in M or G units.
+ M will set MB/s.
+ G will set GB/s.
+ - Must be between 1MB/s and 512GB/s.
+ - If not provided at creation, this will default to 512GB/s.
+ type: str
+ iops_limit:
+ description:
+ - The IOPs limit - use value or K or M.
+ K will mean 1000.
+ M will mean 1000000.
+ - Must be between 100 and 100000000.
+ - If not provided at creation, this will default to 100000000.
+ type: str
+ storage_service:
+ description:
+ - Storage service to which the storage class belongs.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new storage class foo
+ purestorage.fusion.fusion_sc:
+ name: foo
+ size_limit: 100G
+ iops_limit: 100000
+ bw_limit: 25M
+ storage_service: service1
+ display_name: "test class"
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Update storage class (only display_name change is supported)
+ purestorage.fusion.fusion_sc:
+ name: foo
+ display_name: "main class"
+ storage_service: service1
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete storage class
+ purestorage.fusion.fusion_sc:
+ name: foo
+ storage_service: service1
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.parsing import (
+ parse_number_with_metric_suffix,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+def get_sc(module, fusion):
+ """Return Storage Class or None"""
+ sc_api_instance = purefusion.StorageClassesApi(fusion)
+ try:
+ return sc_api_instance.get_storage_class(
+ storage_class_name=module.params["name"],
+ storage_service_name=module.params["storage_service"],
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def create_sc(module, fusion):
+ """Create Storage Class"""
+
+ sc_api_instance = purefusion.StorageClassesApi(fusion)
+
+ if not module.params["size_limit"]:
+ module.params["size_limit"] = "4P"
+ if not module.params["iops_limit"]:
+ module.params["iops_limit"] = "100000000"
+ if not module.params["bw_limit"]:
+ module.params["bw_limit"] = "512G"
+ size_limit = parse_number_with_metric_suffix(module, module.params["size_limit"])
+ iops_limit = int(
+ parse_number_with_metric_suffix(
+ module, module.params["iops_limit"], factor=1000
+ )
+ )
+ bw_limit = parse_number_with_metric_suffix(module, module.params["bw_limit"])
+ if bw_limit < 1048576 or bw_limit > 549755813888: # 1MB/s to 512GB/s
+ module.fail_json(msg="Bandwidth limit is not within the required range")
+ if iops_limit < 100 or iops_limit > 100_000_000:
+ module.fail_json(msg="IOPs limit is not within the required range")
+ if size_limit < 1048576 or size_limit > 4503599627370496: # 1MB to 4PB
+ module.fail_json(msg="Size limit is not within the required range")
+
+ changed = True
+ if not module.check_mode:
+ if not module.params["display_name"]:
+ display_name = module.params["name"]
+ else:
+ display_name = module.params["display_name"]
+ s_class = purefusion.StorageClassPost(
+ name=module.params["name"],
+ size_limit=size_limit,
+ iops_limit=iops_limit,
+ bandwidth_limit=bw_limit,
+ display_name=display_name,
+ )
+ op = sc_api_instance.create_storage_class(
+ s_class, storage_service_name=module.params["storage_service"]
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def update_sc(module, fusion, s_class):
+ """Update Storage Class settings"""
+ changed = False
+ sc_api_instance = purefusion.StorageClassesApi(fusion)
+
+ if (
+ module.params["display_name"]
+ and module.params["display_name"] != s_class.display_name
+ ):
+ changed = True
+ if not module.check_mode:
+ sclass = purefusion.StorageClassPatch(
+ display_name=purefusion.NullableString(module.params["display_name"])
+ )
+ op = sc_api_instance.update_storage_class(
+ sclass,
+ storage_service_name=module.params["storage_service"],
+ storage_class_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def delete_sc(module, fusion):
+ """Delete Storage Class"""
+ sc_api_instance = purefusion.StorageClassesApi(fusion)
+ changed = True
+ if not module.check_mode:
+ op = sc_api_instance.delete_storage_class(
+ storage_class_name=module.params["name"],
+ storage_service_name=module.params["storage_service"],
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ iops_limit=dict(type="str"),
+ bw_limit=dict(type="str"),
+ size_limit=dict(type="str"),
+ storage_service=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+ s_class = get_sc(module, fusion)
+
+ if not s_class and state == "present":
+ create_sc(module, fusion)
+ elif s_class and state == "present":
+ update_sc(module, fusion, s_class)
+ elif s_class and state == "absent":
+ delete_sc(module, fusion)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_se.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_se.py
new file mode 100644
index 000000000..9eed4bea0
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_se.py
@@ -0,0 +1,507 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Simon Dodsley (simon@purestorage.com), Jan Kodera (jkodera@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_se
+version_added: '1.0.0'
+short_description: Manage storage endpoints in Pure Storage Fusion
+description:
+- Create or delete storage endpoints in Pure Storage Fusion.
+notes:
+- Supports C(check_mode).
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the storage endpoint.
+ type: str
+ required: true
+ display_name:
+ description:
+ - The human name of the storage endpoint.
+ - If not provided, defaults to I(name).
+ type: str
+ state:
+ description:
+ - Define whether the storage endpoint should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ region:
+ description:
+    - The name of the region the availability zone is in.
+ type: str
+ required: true
+ availability_zone:
+ aliases: [ az ]
+ description:
+ - The name of the availability zone for the storage endpoint.
+ type: str
+ required: true
+ endpoint_type:
+ description:
+ - "DEPRECATED: Will be removed in version 2.0.0"
+ - Type of the storage endpoint. Only iSCSI is available at the moment.
+ type: str
+ iscsi:
+ description:
+ - List of discovery interfaces.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - IP address to be used in the subnet of the storage endpoint.
+ - IP address must include a CIDR notation.
+ - Only IPv4 is supported at the moment.
+ type: str
+ gateway:
+ description:
+ - Address of the subnet gateway.
+ type: str
+ network_interface_groups:
+ description:
+ - List of network interface groups to assign to the address.
+ type: list
+ elements: str
+ cbs_azure_iscsi:
+ description:
+ - CBS Azure iSCSI
+ type: dict
+ suboptions:
+ storage_endpoint_collection_identity:
+ description:
+ - The Storage Endpoint Collection Identity which belongs to the Azure entities.
+ type: str
+ load_balancer:
+ description:
+ - The Load Balancer id which gives permissions to CBS array applications to modify the Load Balancer.
+ type: str
+ load_balancer_addresses:
+ description:
+ - The IPv4 addresses of the Load Balancer.
+ type: list
+ elements: str
+ network_interface_groups:
+ description:
+ - "DEPRECATED: Will be removed in version 2.0.0"
+ - List of network interface groups to assign to the storage endpoints.
+ type: list
+ elements: str
+ addresses:
+ description:
+ - "DEPRECATED: Will be removed in version 2.0.0"
+ - List of IP addresses to be used in the subnet of the storage endpoint.
+ - IP addresses must include a CIDR notation.
+ - Only IPv4 is supported at the moment.
+ type: list
+ elements: str
+ gateway:
+ description:
+ - "DEPRECATED: Will be removed in version 2.0.0"
+ - Address of the subnet gateway.
+ - Currently this must be provided.
+ type: str
+
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new storage endpoint foo in AZ bar
+ purestorage.fusion.fusion_se:
+ name: foo
+ availability_zone: bar
+ region: us-west
+ iscsi:
+ - address: 10.21.200.124/24
+ gateway: 10.21.200.1
+ network_interface_groups:
+ - subnet-0
+ - address: 10.21.200.36/24
+ gateway: 10.21.200.2
+ network_interface_groups:
+ - subnet-0
+ - subnet-1
+ state: present
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Create new CBS storage endpoint foo in AZ bar
+ purestorage.fusion.fusion_se:
+ name: foo
+ availability_zone: bar
+ region: us-west
+ cbs_azure_iscsi:
+ storage_endpoint_collection_identity: "/subscriptions/sub/resourcegroups/sec/providers/ms/userAssignedIdentities/secId"
+ load_balancer: "/subscriptions/sub/resourcegroups/sec/providers/ms/loadBalancers/sec-lb"
+ load_balancer_addresses:
+ - 10.21.200.1
+ - 10.21.200.2
+ state: present
+ app_id: key_name
+ key_file: "az-admin-private-key.pem"
+
+- name: Delete storage endpoint foo in AZ bar
+ purestorage.fusion.fusion_se:
+ name: foo
+ availability_zone: bar
+ region: us-west
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: (DEPRECATED) Create new storage endpoint foo in AZ bar
+ purestorage.fusion.fusion_se:
+ name: foo
+ availability_zone: bar
+ gateway: 10.21.200.1
+ region: us-west
+ addresses:
+ - 10.21.200.124/24
+ - 10.21.200.36/24
+ network_interface_groups:
+ - subnet-0
+ state: present
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.networking import (
+ is_valid_network,
+ is_valid_address,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+#######################################################################
+# DEPRECATED CODE SECTION STARTS
+
+
+def create_se_old(module, fusion):
+ """Create Storage Endpoint"""
+
+ se_api_instance = purefusion.StorageEndpointsApi(fusion)
+
+ changed = True
+
+ if not module.check_mode:
+ if not module.params["display_name"]:
+ display_name = module.params["name"]
+ else:
+ display_name = module.params["display_name"]
+ ifaces = []
+ for address in module.params["addresses"]:
+ if module.params["gateway"]:
+ iface = purefusion.StorageEndpointIscsiDiscoveryInterfacePost(
+ address=address,
+ gateway=module.params["gateway"],
+ network_interface_groups=module.params["network_interface_groups"],
+ )
+ else:
+ iface = purefusion.StorageEndpointIscsiDiscoveryInterfacePost(
+ address=address,
+ network_interface_groups=module.params["network_interface_groups"],
+ )
+ ifaces.append(iface)
+ op = purefusion.StorageEndpointPost(
+ endpoint_type="iscsi",
+ iscsi=purefusion.StorageEndpointIscsiPost(
+ discovery_interfaces=ifaces,
+ ),
+ name=module.params["name"],
+ display_name=display_name,
+ )
+ op = se_api_instance.create_storage_endpoint(
+ op,
+ region_name=module.params["region"],
+ availability_zone_name=module.params["availability_zone"],
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+# DEPRECATED CODE SECTION ENDS
+#######################################################################
+
+
+def get_se(module, fusion):
+ """Storage Endpoint or None"""
+ se_api_instance = purefusion.StorageEndpointsApi(fusion)
+ try:
+ return se_api_instance.get_storage_endpoint(
+ region_name=module.params["region"],
+ storage_endpoint_name=module.params["name"],
+ availability_zone_name=module.params["availability_zone"],
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def create_se(module, fusion):
+ """Create Storage Endpoint"""
+ se_api_instance = purefusion.StorageEndpointsApi(fusion)
+
+ if not module.check_mode:
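+        # endpoint_type is inferred from whichever of the mutually exclusive
+        # 'iscsi' / 'cbs_azure_iscsi' sub-options was supplied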
+ endpoint_type = None
+
+ iscsi = None
+ if module.params["iscsi"] is not None:
+ iscsi = purefusion.StorageEndpointIscsiPost(
+ discovery_interfaces=[
+ purefusion.StorageEndpointIscsiDiscoveryInterfacePost(**endpoint)
+ for endpoint in module.params["iscsi"]
+ ]
+ )
+ endpoint_type = "iscsi"
+
+ cbs_azure_iscsi = None
+ if module.params["cbs_azure_iscsi"] is not None:
+ cbs_azure_iscsi = purefusion.StorageEndpointCbsAzureIscsiPost(
+ storage_endpoint_collection_identity=module.params["cbs_azure_iscsi"][
+ "storage_endpoint_collection_identity"
+ ],
+ load_balancer=module.params["cbs_azure_iscsi"]["load_balancer"],
+ load_balancer_addresses=module.params["cbs_azure_iscsi"][
+ "load_balancer_addresses"
+ ],
+ )
+ endpoint_type = "cbs-azure-iscsi"
+
+ op = se_api_instance.create_storage_endpoint(
+ purefusion.StorageEndpointPost(
+ name=module.params["name"],
+ display_name=module.params["display_name"] or module.params["name"],
+ endpoint_type=endpoint_type,
+ iscsi=iscsi,
+ cbs_azure_iscsi=cbs_azure_iscsi,
+ ),
+ region_name=module.params["region"],
+ availability_zone_name=module.params["availability_zone"],
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=True)
+
+
+def delete_se(module, fusion):
+ """Delete Storage Endpoint"""
+ se_api_instance = purefusion.StorageEndpointsApi(fusion)
+ if not module.check_mode:
+ op = se_api_instance.delete_storage_endpoint(
+ region_name=module.params["region"],
+ availability_zone_name=module.params["availability_zone"],
+ storage_endpoint_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+ module.exit_json(changed=True)
+
+
+def update_se(module, fusion, se):
+ """Update Storage Endpoint"""
+
+ se_api_instance = purefusion.StorageEndpointsApi(fusion)
+ patches = []
+ if (
+ module.params["display_name"]
+ and module.params["display_name"] != se.display_name
+ ):
+ patch = purefusion.StorageEndpointPatch(
+ display_name=purefusion.NullableString(module.params["display_name"]),
+ )
+ patches.append(patch)
+
+ if not module.check_mode:
+ for patch in patches:
+ op = se_api_instance.update_storage_endpoint(
+ patch,
+ region_name=module.params["region"],
+ availability_zone_name=module.params["availability_zone"],
+ storage_endpoint_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ changed = len(patches) != 0
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ region=dict(type="str", required=True),
+ availability_zone=dict(type="str", required=True, aliases=["az"]),
+ iscsi=dict(
+ type="list",
+ elements="dict",
+ options=dict(
+ address=dict(type="str"),
+ gateway=dict(type="str"),
+ network_interface_groups=dict(type="list", elements="str"),
+ ),
+ ),
+ cbs_azure_iscsi=dict(
+ type="dict",
+ options=dict(
+ storage_endpoint_collection_identity=dict(type="str"),
+ load_balancer=dict(type="str"),
+ load_balancer_addresses=dict(type="list", elements="str"),
+ ),
+ ),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ # deprecated, will be removed in 2.0.0
+ endpoint_type=dict(
+ type="str",
+ removed_in_version="2.0.0",
+ removed_from_collection="purestorage.fusion",
+ ),
+ addresses=dict(
+ type="list",
+ elements="str",
+ removed_in_version="2.0.0",
+ removed_from_collection="purestorage.fusion",
+ ),
+ gateway=dict(
+ type="str",
+ removed_in_version="2.0.0",
+ removed_from_collection="purestorage.fusion",
+ ),
+ network_interface_groups=dict(
+ type="list",
+ elements="str",
+ removed_in_version="2.0.0",
+ removed_from_collection="purestorage.fusion",
+ ),
+ )
+ )
+
+ mutually_exclusive = [
+ ("iscsi", "cbs_azure_iscsi"),
+ # can not use both deprecated and new fields at the same time
+ ("iscsi", "cbs_azure_iscsi", "addresses"),
+ ("iscsi", "cbs_azure_iscsi", "gateway"),
+ ("iscsi", "cbs_azure_iscsi", "network_interface_groups"),
+ ]
+
+ module = AnsibleModule(
+ argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ )
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+
+ if module.params["endpoint_type"] is not None:
+ module.warn(
+ "'endpoint_type' parameter is deprecated and will be removed in the version 2.0"
+ )
+
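+    # collect any deprecated parameters the user actually supplied so that
+    # each one gets its own deprecation warning below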
+ deprecated_parameters = {"addresses", "gateway", "network_interface_groups"}
+ used_deprecated_parameters = [
+ key
+ for key in list(deprecated_parameters & module.params.keys())
+ if module.params[key] is not None
+ ]
+
+ if len(used_deprecated_parameters) > 0:
+ # user uses deprecated module interface
+ for param_name in used_deprecated_parameters:
+ module.warn(
+ f"'{param_name}' parameter is deprecated and will be removed in the version 2.0"
+ )
+
+ if module.params["addresses"]:
+ for address in module.params["addresses"]:
+ if not is_valid_network(address):
+ module.fail_json(
+ msg=f"'{address}' is not a valid address in CIDR notation"
+ )
+
+ sendp = get_se(module, fusion)
+
+ if state == "present" and not sendp:
+ module.fail_on_missing_params(["addresses"])
+ if not (module.params["addresses"]):
+ module.fail_json(
+ msg="At least one entry in 'addresses' is required to create new storage endpoint"
+ )
+ create_se_old(module, fusion)
+ elif state == "present" and sendp:
+ update_se(module, fusion, sendp)
+ elif state == "absent" and sendp:
+ delete_se(module, fusion)
+ else:
+ # user uses new module interface
+ if module.params["iscsi"] is not None:
+ for endpoint in module.params["iscsi"]:
+ address = endpoint["address"]
+ if not is_valid_network(address):
+ module.fail_json(
+ msg=f"'{address}' is not a valid address in CIDR notation"
+ )
+ gateway = endpoint["gateway"]
+ if not is_valid_address(gateway):
+ module.fail_json(
+ msg=f"'{gateway}' is not a valid IPv4 address notation"
+ )
+ if module.params["cbs_azure_iscsi"] is not None:
+ for address in module.params["cbs_azure_iscsi"]["load_balancer_addresses"]:
+ if not is_valid_address(address):
+ module.fail_json(
+ msg=f"'{address}' is not a valid IPv4 address notation"
+ )
+
+ sendp = get_se(module, fusion)
+
+ if state == "present" and not sendp:
+ if (
+ module.params["iscsi"] is None
+ and module.params["cbs_azure_iscsi"] is None
+ ):
+ module.fail_json(
+ msg="either 'iscsi' or `cbs_azure_iscsi` parameter is required when creating storage endpoint"
+ )
+ create_se(module, fusion)
+ elif state == "present" and sendp:
+ update_se(module, fusion, sendp)
+ elif state == "absent" and sendp:
+ delete_se(module, fusion)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ss.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ss.py
new file mode 100644
index 000000000..3fdbb07dd
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ss.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_ss
+version_added: '1.0.0'
+short_description: Manage storage services in Pure Storage Fusion
+description:
+- Manage a storage service in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check_mode).
+options:
+ name:
+ description:
+ - The name of the storage service.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the storage service should exist or not.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ display_name:
+ description:
+ - The human name of the storage service.
+ - If not provided, defaults to I(name).
+ type: str
+ hardware_types:
+ description:
+ - Hardware types to which the storage service applies.
+ type: list
+ elements: str
+ choices: [ flash-array-x, flash-array-c, flash-array-x-optane, flash-array-xl ]
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new storage service foo
+ purestorage.fusion.fusion_ss:
+ name: foo
+ hardware_types:
+ - flash-array-x
+ - flash-array-x-optane
+ display_name: "test class"
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Update storage service
+ purestorage.fusion.fusion_ss:
+ name: foo
+ display_name: "main class"
+ hardware_types:
+ - flash-array-c
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete storage service
+ purestorage.fusion.fusion_ss:
+ name: foo
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils import getters
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+def get_ss(module, fusion):
+ """Return Storage Service or None"""
+ return getters.get_ss(module, fusion, storage_service_name=module.params["name"])
+
+
+def create_ss(module, fusion):
+ """Create Storage Service"""
+
+ ss_api_instance = purefusion.StorageServicesApi(fusion)
+
+ changed = True
+ if not module.check_mode:
+ if not module.params["display_name"]:
+ display_name = module.params["name"]
+ else:
+ display_name = module.params["display_name"]
+ s_service = purefusion.StorageServicePost(
+ name=module.params["name"],
+ display_name=display_name,
+ hardware_types=module.params["hardware_types"],
+ )
+ op = ss_api_instance.create_storage_service(s_service)
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def delete_ss(module, fusion):
+ """Delete Storage Service"""
+
+ ss_api_instance = purefusion.StorageServicesApi(fusion)
+
+ changed = True
+ if not module.check_mode:
+ op = ss_api_instance.delete_storage_service(
+ storage_service_name=module.params["name"]
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def update_ss(module, fusion, ss):
+ """Update Storage Service"""
+
+ ss_api_instance = purefusion.StorageServicesApi(fusion)
+ patches = []
+ if (
+ module.params["display_name"]
+ and module.params["display_name"] != ss.display_name
+ ):
+ patch = purefusion.StorageServicePatch(
+ display_name=purefusion.NullableString(module.params["display_name"]),
+ )
+ patches.append(patch)
+
+ if not module.check_mode:
+ for patch in patches:
+ op = ss_api_instance.update_storage_service(
+ patch,
+ storage_service_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ changed = len(patches) != 0
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ hardware_types=dict(
+ type="list",
+ elements="str",
+ choices=[
+ "flash-array-x",
+ "flash-array-c",
+ "flash-array-x-optane",
+ "flash-array-xl",
+ ],
+ ),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+ s_service = get_ss(module, fusion)
+
+ if not s_service and state == "present":
+ module.fail_on_missing_params(["hardware_types"])
+ create_ss(module, fusion)
+ elif s_service and state == "present":
+ update_ss(module, fusion, s_service)
+ elif s_service and state == "absent":
+ delete_ss(module, fusion)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_tenant.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_tenant.py
new file mode 100644
index 000000000..96e890a6b
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_tenant.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_tenant
+version_added: '1.0.0'
+short_description: Manage tenants in Pure Storage Fusion
+description:
+- Create, delete or update a tenant in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check_mode).
+options:
+ name:
+ description:
+ - The name of the tenant.
+ type: str
+ required: true
+ state:
+ description:
+ - Define whether the tenant should exist or not.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ display_name:
+ description:
+ - The human name of the tenant.
+ - If not provided, defaults to I(name).
+ type: str
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new tenant foo
+ purestorage.fusion.fusion_tenant:
+ name: foo
+ display_name: "tenant foo"
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete tenant foo
+ purestorage.fusion.fusion_tenant:
+ name: foo
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils import getters
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+def get_tenant(module, fusion):
+ """Return Tenant or None"""
+ return getters.get_tenant(module, fusion, tenant_name=module.params["name"])
+
+
+def create_tenant(module, fusion):
+ """Create Tenant"""
+
+ api_instance = purefusion.TenantsApi(fusion)
+ changed = True
+ if not module.check_mode:
+ if not module.params["display_name"]:
+ display_name = module.params["name"]
+ else:
+ display_name = module.params["display_name"]
+ tenant = purefusion.TenantPost(
+ name=module.params["name"],
+ display_name=display_name,
+ )
+ op = api_instance.create_tenant(tenant)
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def update_tenant(module, fusion, tenant):
+ """Update Tenant settings"""
+ changed = False
+ api_instance = purefusion.TenantsApi(fusion)
+
+ if (
+ module.params["display_name"]
+ and module.params["display_name"] != tenant.display_name
+ ):
+ changed = True
+ if not module.check_mode:
+ new_tenant = purefusion.TenantPatch(
+ display_name=purefusion.NullableString(module.params["display_name"]),
+ )
+ op = api_instance.update_tenant(
+ new_tenant,
+ tenant_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def delete_tenant(module, fusion):
+ """Delete Tenant"""
+ changed = True
+ api_instance = purefusion.TenantsApi(fusion)
+ if not module.check_mode:
+ op = api_instance.delete_tenant(tenant_name=module.params["name"])
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+ tenant = get_tenant(module, fusion)
+
+ if not tenant and state == "present":
+ create_tenant(module, fusion)
+ elif tenant and state == "present":
+ update_tenant(module, fusion, tenant)
+ elif tenant and state == "absent":
+ delete_tenant(module, fusion)
+ else:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_tn.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_tn.py
new file mode 100644
index 000000000..717b1e46f
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_tn.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_tn
+version_added: '1.0.0'
+deprecated:
+ removed_at_date: "2023-07-26"
+ why: Tenant Networks were removed as a concept in Pure Storage Fusion
+ alternative: most of the functionality can be replicated using M(purestorage.fusion.fusion_se) and M(purestorage.fusion.fusion_nig)
+short_description: Manage tenant networks in Pure Storage Fusion
+description:
+- Create or delete tenant networks in Pure Storage Fusion.
+notes:
+- Supports C(check_mode).
+- Currently this only supports a single tenant subnet per tenant network.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - The name of the tenant network.
+ type: str
+ display_name:
+ description:
+ - The human name of the tenant network.
+ - If not provided, defaults to I(name).
+ type: str
+ state:
+ description:
+ - Define whether the tenant network should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ region:
+ description:
+    - The name of the region the availability zone is in.
+ type: str
+ availability_zone:
+ aliases: [ az ]
+ description:
+ - The name of the availability zone for the tenant network.
+ type: str
+ provider_subnets:
+ description:
+ - List of provider subnets to assign to the tenant networks subnet.
+ type: list
+ elements: str
+ addresses:
+ description:
+ - List of IP addresses to be used in the subnet of the tenant network.
+ - IP addresses must include a CIDR notation.
+ - IPv4 and IPv6 are fully supported.
+ type: list
+ elements: str
+ gateway:
+ description:
+ - Address of the subnet gateway.
+ - Currently this must be provided.
+ type: str
+ mtu:
+ description:
+ - MTU setting for the subnet.
+ default: 1500
+ type: int
+ prefix:
+ description:
+ - Network prefix in CIDR format.
+ - This will be deprecated soon.
+ type: str
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+# this module does nothing, thus no example is provided
+EXAMPLES = r"""
+"""
+
+RETURN = r"""
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str"),
+ region=dict(type="str"),
+ display_name=dict(type="str"),
+ availability_zone=dict(type="str", aliases=["az"]),
+ prefix=dict(type="str"),
+ gateway=dict(type="str"),
+ mtu=dict(type="int", default=1500),
+ provider_subnets=dict(type="list", elements="str"),
+ addresses=dict(type="list", elements="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ module.warn(
+ "This module is deprecated, doesn't work, and will be removed in the version 2.0."
+ " Please, use purestorage.fusion.fusion_se and purestorage.fusion.fusion_nig instead."
+ )
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ts.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ts.py
new file mode 100644
index 000000000..33fb0187a
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ts.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_ts
+version_added: '1.0.0'
+short_description: Manage tenant spaces in Pure Storage Fusion
+description:
+- Create, update or delete a tenant space in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check_mode).
+options:
+ name:
+ description:
+ - The name of the tenant space.
+ type: str
+ required: true
+ display_name:
+ description:
+ - The human name of the tenant space.
+ - If not provided, defaults to I(name).
+ type: str
+ state:
+ description:
+ - Define whether the tenant space should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ tenant:
+ description:
+ - The name of the tenant.
+ type: str
+ required: true
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new tenant space foo for tenant bar
+ purestorage.fusion.fusion_ts:
+ name: foo
+ tenant: bar
+ state: present
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete tenant space foo in tenant bar
+ purestorage.fusion.fusion_ts:
+ name: foo
+ tenant: bar
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils import getters
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+def get_ts(module, fusion):
+ """Tenant Space or None"""
+ return getters.get_ts(module, fusion, tenant_space_name=module.params["name"])
+
+
+def create_ts(module, fusion):
+ """Create Tenant Space"""
+
+ ts_api_instance = purefusion.TenantSpacesApi(fusion)
+
+ changed = True
+ if not module.check_mode:
+ if not module.params["display_name"]:
+ display_name = module.params["name"]
+ else:
+ display_name = module.params["display_name"]
+ tspace = purefusion.TenantSpacePost(
+ name=module.params["name"],
+ display_name=display_name,
+ )
+ op = ts_api_instance.create_tenant_space(
+ tspace,
+ tenant_name=module.params["tenant"],
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def update_ts(module, fusion, ts):
+ """Update Tenant Space"""
+
+ ts_api_instance = purefusion.TenantSpacesApi(fusion)
+ patches = []
+ if (
+ module.params["display_name"]
+ and module.params["display_name"] != ts.display_name
+ ):
+ patch = purefusion.TenantSpacePatch(
+ display_name=purefusion.NullableString(module.params["display_name"]),
+ )
+ patches.append(patch)
+
+ if not module.check_mode:
+ for patch in patches:
+ op = ts_api_instance.update_tenant_space(
+ patch,
+ tenant_name=module.params["tenant"],
+ tenant_space_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ changed = len(patches) != 0
+
+ module.exit_json(changed=changed)
+
+
+def delete_ts(module, fusion):
+ """Delete Tenant Space"""
+ changed = True
+ ts_api_instance = purefusion.TenantSpacesApi(fusion)
+ if not module.check_mode:
+ op = ts_api_instance.delete_tenant_space(
+ tenant_name=module.params["tenant"],
+ tenant_space_name=module.params["name"],
+ )
+ await_operation(fusion, op)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ tenant=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+ tspace = get_ts(module, fusion)
+
+ if state == "present" and not tspace:
+ create_ts(module, fusion)
+ elif state == "present" and tspace:
+ update_ts(module, fusion, tspace)
+ elif state == "absent" and tspace:
+ delete_ts(module, fusion)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_volume.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_volume.py
new file mode 100644
index 000000000..5b19064f5
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_volume.py
@@ -0,0 +1,450 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Simon Dodsley (simon@purestorage.com), Jan Kodera (jkodera@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: fusion_volume
+version_added: '1.0.0'
+short_description: Manage volumes in Pure Storage Fusion
+description:
+- Create, update or delete a volume in Pure Storage Fusion.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+notes:
+- Supports C(check_mode).
+options:
+ name:
+ description:
+ - The name of the volume.
+ type: str
+ required: true
+ display_name:
+ description:
+ - The human name of the volume.
+ - If not provided, defaults to I(name).
+ type: str
+ state:
+ description:
+ - Define whether the volume should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ tenant:
+ description:
+ - The name of the tenant.
+ type: str
+ required: true
+ tenant_space:
+ description:
+ - The name of the tenant space.
+ type: str
+ required: true
+ eradicate:
+ description:
+ - "Wipes the volume instead of a soft delete if true. Must be used with `state: absent`."
+ type: bool
+ default: false
+ size:
+ description:
+ - Volume size in M, G, T or P units.
+ type: str
+ storage_class:
+ description:
+ - The name of the storage class.
+ type: str
+ placement_group:
+ description:
+ - The name of the placement group.
+ type: str
+ protection_policy:
+ description:
+ - The name of the protection policy.
+ type: str
+ host_access_policies:
+ description:
+ - 'A list of host access policies to connect the volume to.
+      To clear, assign an empty list: host_access_policies: []'
+ type: list
+ elements: str
+ rename:
+ description:
+ - New name for volume.
+ type: str
+extends_documentation_fragment:
+- purestorage.fusion.purestorage.fusion
+"""
+
+EXAMPLES = r"""
+- name: Create new volume named foo in storage_class fred
+ purestorage.fusion.fusion_volume:
+ name: foo
+ storage_class: fred
+ size: 1T
+ tenant: test
+ tenant_space: space_1
+ state: present
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Extend the size of an existing volume named foo
+ purestorage.fusion.fusion_volume:
+ name: foo
+ size: 2T
+ tenant: test
+ tenant_space: space_1
+ state: present
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Delete volume named foo
+ purestorage.fusion.fusion_volume:
+ name: foo
+ tenant: test
+ tenant_space: space_1
+ state: absent
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+"""
+
+RETURN = r"""
+"""
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.parsing import (
+ parse_number_with_metric_suffix,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
+ setup_fusion,
+)
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+def get_volume(module, fusion):
+ """Return Volume or None"""
+ volume_api_instance = purefusion.VolumesApi(fusion)
+ try:
+ return volume_api_instance.get_volume(
+ tenant_name=module.params["tenant"],
+ tenant_space_name=module.params["tenant_space"],
+ volume_name=module.params["name"],
+ )
+ except purefusion.rest.ApiException:
+ return None
+
+
+def get_wanted_haps(module):
+ """Return set of host access policies to assign"""
+ if not module.params["host_access_policies"]:
+ return set()
+    # YAML parsing can leave stray spaces in comma-delimited lists, so strip() the names
+    return {hap.strip() for hap in module.params["host_access_policies"]}
+
+
+def extract_current_haps(volume):
+ """Return set of host access policies that volume currently has"""
+ if not volume.host_access_policies:
+ return set()
+    return {hap.name for hap in volume.host_access_policies}
+
+
+def create_volume(module, fusion):
+ """Create Volume"""
+
+ size = parse_number_with_metric_suffix(module, module.params["size"])
+
+ if not module.check_mode:
+ display_name = module.params["display_name"] or module.params["name"]
+ volume_api_instance = purefusion.VolumesApi(fusion)
+ volume = purefusion.VolumePost(
+ size=size,
+ storage_class=module.params["storage_class"],
+ placement_group=module.params["placement_group"],
+ name=module.params["name"],
+ display_name=display_name,
+ protection_policy=module.params["protection_policy"],
+ )
+ op = volume_api_instance.create_volume(
+ volume,
+ tenant_name=module.params["tenant"],
+ tenant_space_name=module.params["tenant_space"],
+ )
+ await_operation(fusion, op)
+
+ return True
+
+
+def update_host_access_policies(module, current, patches):
+ wanted = module.params
+ # 'wanted[...] is not None' to differentiate between empty list and no list
+ if wanted["host_access_policies"] is not None:
+ current_haps = extract_current_haps(current)
+ wanted_haps = get_wanted_haps(module)
+ if wanted_haps != current_haps:
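+            # the VolumePatch carries the HAP list as a single comma-separated string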
+ patch = purefusion.VolumePatch(
+ host_access_policies=purefusion.NullableString(",".join(wanted_haps))
+ )
+ patches.append(patch)
+
+
+def update_destroyed(module, current, patches):
+ wanted = module.params
+ destroyed = wanted["state"] != "present"
+ if destroyed != current.destroyed:
+ patch = purefusion.VolumePatch(destroyed=purefusion.NullableBoolean(destroyed))
+ patches.append(patch)
+ if destroyed and not module.params["eradicate"]:
+ module.warn(
+ (
+ "Volume '{0}' is being soft deleted to prevent data loss, "
+ "if you want to wipe it immediately to reclaim used space, add 'eradicate: true'"
+ ).format(current.name)
+ )
+
+
+def update_display_name(module, current, patches):
+ wanted = module.params
+ if wanted["display_name"] and wanted["display_name"] != current.display_name:
+ patch = purefusion.VolumePatch(
+ display_name=purefusion.NullableString(wanted["display_name"])
+ )
+ patches.append(patch)
+
+
+def update_storage_class(module, current, patches):
+ wanted = module.params
+ if (
+ wanted["storage_class"]
+ and wanted["storage_class"] != current.storage_class.name
+ ):
+ patch = purefusion.VolumePatch(
+ storage_class=purefusion.NullableString(wanted["storage_class"])
+ )
+ patches.append(patch)
+
+
+def update_placement_group(module, current, patches):
+ wanted = module.params
+ if (
+ wanted["placement_group"]
+ and wanted["placement_group"] != current.placement_group.name
+ ):
+ patch = purefusion.VolumePatch(
+ placement_group=purefusion.NullableString(wanted["placement_group"])
+ )
+ patches.append(patch)
+
+
+def update_size(module, current, patches):
+ wanted = module.params
+ if wanted["size"]:
+ wanted_size = parse_number_with_metric_suffix(module, wanted["size"])
+ if wanted_size != current.size:
+ patch = purefusion.VolumePatch(size=purefusion.NullableSize(wanted_size))
+ patches.append(patch)
+
+
+def update_protection_policy(module, current, patches):
+ wanted = module.params
+ current_policy = current.protection_policy.name if current.protection_policy else ""
+ if (
+ wanted["protection_policy"] is not None
+ and wanted["protection_policy"] != current_policy
+ ):
+ patch = purefusion.VolumePatch(
+ protection_policy=purefusion.NullableString(wanted["protection_policy"])
+ )
+ patches.append(patch)
+
+
+def apply_patches(module, fusion, patches):
+ volume_api_instance = purefusion.VolumesApi(fusion)
+ for patch in patches:
+ op = volume_api_instance.update_volume(
+ patch,
+ volume_name=module.params["name"],
+ tenant_name=module.params["tenant"],
+ tenant_space_name=module.params["tenant_space"],
+ )
+ await_operation(fusion, op)
+
+
+def update_volume(module, fusion):
+ """Update Volume size, placement group, protection policy, storage class, HAPs"""
+ current = get_volume(module, fusion)
+ patches = []
+
+ if not current:
+ # cannot update nonexistent volume
+    # Note for check mode: this code path still runs in check mode to catch
+    # any argument errors and to compute 'changed'. Argument checks are kept
+    # in validate_arguments() to cover the first part. The second part MAY
+    # diverge from a real run if create_volume() would have created the
+    # volume and update then ran on top of it; that does not matter, though,
+    # because create_volume() already sets 'changed' to true, so any
+    # 'changed' result from update_volume() cannot alter the outcome.
+ return False
+
+ # volumes with 'destroyed' flag are kinda special because we can't change
+ # most of their properties while in this state, so we need to set it last
+ # and unset it first if changed, respectively
+ if module.params["state"] == "present":
+ update_destroyed(module, current, patches)
+ update_size(module, current, patches)
+ update_protection_policy(module, current, patches)
+ update_display_name(module, current, patches)
+ update_storage_class(module, current, patches)
+ update_placement_group(module, current, patches)
+ update_host_access_policies(module, current, patches)
+ elif module.params["state"] == "absent" and not current.destroyed:
+ update_size(module, current, patches)
+ update_protection_policy(module, current, patches)
+ update_display_name(module, current, patches)
+ update_storage_class(module, current, patches)
+ update_placement_group(module, current, patches)
+ update_host_access_policies(module, current, patches)
+ update_destroyed(module, current, patches)
+
+ if not module.check_mode:
+ apply_patches(module, fusion, patches)
+
+ changed = len(patches) != 0
+ return changed
+
+
+def eradicate_volume(module, fusion):
+ """Eradicate Volume"""
+ current = get_volume(module, fusion)
+ if module.check_mode:
+ return current or module.params["state"] == "present"
+ if not current:
+ return False
+
+ # update_volume() should be called before eradicate=True and it should
+ # ensure the volume is destroyed and HAPs are unassigned
+ if not current.destroyed or current.host_access_policies:
+ module.fail_json(
+ msg="BUG: inconsistent state, eradicate_volume() cannot be called with current.destroyed=False or any host_access_policies"
+ )
+
+ volume_api_instance = purefusion.VolumesApi(fusion)
+ op = volume_api_instance.delete_volume(
+ volume_name=module.params["name"],
+ tenant_name=module.params["tenant"],
+ tenant_space_name=module.params["tenant_space"],
+ )
+ await_operation(fusion, op)
+
+ return True
+
+
+def validate_arguments(module, volume):
+ """Validates most argument conditions and possible unacceptable argument combinations"""
+ state = module.params["state"]
+
+ if state == "present" and not volume:
+ module.fail_on_missing_params(["placement_group", "storage_class", "size"])
+
+ if module.params["state"] == "absent" and (
+ module.params["host_access_policies"]
+ or (volume and volume.host_access_policies)
+ ):
+ module.fail_json(
+ msg=(
+ "Volume must have no host access policies when destroyed, either revert the delete "
+ "by setting 'state: present' or remove all HAPs by 'host_access_policies: []'"
+ )
+ )
+
+ if state == "present" and module.params["eradicate"]:
+ module.fail_json(
+ msg="'eradicate: true' cannot be used together with 'state: present'"
+ )
+
+ if module.params["size"]:
+ size = parse_number_with_metric_suffix(module, module.params["size"])
+ if size < 1048576 or size > 4503599627370496: # 1MB to 4PB
+ module.fail_json(
+ msg="Size is not within the required range, size must be between 1MB and 4PB"
+ )
+
+
+def main():
+ """Main code"""
+ argument_spec = fusion_argument_spec()
+ deprecated_hosts = dict(
+ name="hosts", date="2023-07-26", collection_name="purefusion.fusion"
+ )
+ argument_spec.update(
+ dict(
+ name=dict(type="str", required=True),
+ display_name=dict(type="str"),
+ rename=dict(
+ type="str",
+ removed_at_date="2023-07-26",
+ removed_from_collection="purestorage.fusion",
+ ),
+ tenant=dict(type="str", required=True),
+ tenant_space=dict(type="str", required=True),
+ placement_group=dict(type="str"),
+ storage_class=dict(type="str"),
+ protection_policy=dict(type="str"),
+ host_access_policies=dict(
+ type="list", elements="str", deprecated_aliases=[deprecated_hosts]
+ ),
+ eradicate=dict(type="bool", default=False),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ size=dict(type="str"),
+ )
+ )
+
+ required_by = {
+ "placement_group": "storage_class",
+ }
+
+ module = AnsibleModule(
+ argument_spec,
+ required_by=required_by,
+ supports_check_mode=True,
+ )
+ fusion = setup_fusion(module)
+
+ state = module.params["state"]
+
+ volume = get_volume(module, fusion)
+
+ validate_arguments(module, volume)
+
+ if state == "absent" and not volume:
+ module.exit_json(changed=False)
+
+ changed = False
+ if state == "present" and not volume:
+ changed = changed | create_volume(module, fusion)
+ # volume might exist even if soft-deleted, so we still have to update it
+ changed = changed | update_volume(module, fusion)
+ if module.params["eradicate"]:
+ changed = changed | eradicate_volume(module, fusion)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/fusion/tests/functional/README.md b/ansible_collections/purestorage/fusion/tests/functional/README.md
new file mode 100644
index 000000000..d7edc6609
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/README.md
@@ -0,0 +1,35 @@
+# Functional tests
+
+Functional tests aim to test each module as a whole.
+They make sure the module parses given parameters into correct API calls.
+
+Specific functions of modules should be tested in unit tests.
+
+## Running tests
+
+```bash
+pytest tests/functional
+```
+
+## Adding new tests
+
+Every tested module should have (at least) the following test cases:
+
+- test_module_fails_on_wrong_parameters
+- test_NAME_create_name
+- test_NAME_create_without_display_name
+- test_NAME_create_exception
+- test_NAME_create_op_fails
+- test_NAME_create_op_exception
+- test_NAME_update
+- test_NAME_update_exception
+- test_NAME_update_op_fails
+- test_NAME_update_op_exception
+- test_NAME_present_not_changed
+- test_NAME_absent_not_changed
+- test_NAME_delete
+- test_NAME_delete_exception
+- test_NAME_delete_op_fails
+- test_NAME_delete_op_exception
+
+See the existing tests (e.g. `test_fusion_region.py`) for inspiration; a minimal sketch follows below.
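+
+For orientation, here is a minimal sketch of one such test case. The module
+name `fusion_foo` and the API class `FoosApi` are hypothetical placeholders,
+not real collection names; real tests target an actual module and reuse the
+helpers from `tests/functional/utils.py`, as `test_fusion_api_client.py` does.
+
+```python
+from unittest.mock import MagicMock, patch
+
+import pytest
+from ansible.module_utils import basic
+
+# hypothetical module under test; substitute a real one, e.g. fusion_region
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_foo
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+    AnsibleExitJson,
+    exit_json,
+    fail_json,
+    set_module_args,
+)
+
+# route module exits through helpers that raise catchable exceptions,
+# and bypass real authentication, as the existing tests do globally
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+fusion_foo.setup_fusion = MagicMock()
+
+
+@patch("fusion.FoosApi")  # hypothetical API class
+def test_foo_create(m_api):
+    set_module_args({"name": "foo", "state": "present"})
+
+    # no resource exists yet, so the getter raises and a create is expected
+    api_obj = MagicMock()
+    api_obj.get_foo = MagicMock(side_effect=Exception)
+    m_api.return_value = api_obj
+
+    with pytest.raises(AnsibleExitJson) as exc:
+        fusion_foo.main()
+
+    assert exc.value.changed is True
+    api_obj.create_foo.assert_called_once()
+```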
diff --git a/ansible_collections/purestorage/fusion/tests/functional/__init__.py b/ansible_collections/purestorage/fusion/tests/functional/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/__init__.py
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_api_client.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_api_client.py
new file mode 100644
index 000000000..77f753656
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_api_client.py
@@ -0,0 +1,361 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from dataclasses import asdict, dataclass
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_api_client
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
+fusion_api_client.setup_fusion = MagicMock(
+ return_value=purefusion.api_client.ApiClient()
+)
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
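+# with these patches in place, an API call that slips past the per-test mocks
+# raises immediately, and module exits surface as AnsibleExitJson /
+# AnsibleFailJson exceptions that individual tests can assert on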
+
+
+@dataclass
+class FakeApiClient:
+ id: str
+ self_link: str
+ name: str
+ display_name: str
+ issuer: str
+ public_key: str
+ last_key_update: float
+ last_used: float
+ creator_id: str
+
+
+@pytest.fixture
+def current_clients():
+ return [
+ FakeApiClient(
+ "1",
+ "self_link_value",
+ "client1",
+ "client1",
+ "apikey:name:thisisnotreal",
+ "0123456789",
+ 12345,
+ 12345,
+ "1234",
+ ),
+ FakeApiClient(
+ "2",
+ "self_link_value",
+ "client2",
+ "client2",
+ "apikey:name:thisisnotreal",
+ "0123456789",
+ 12345,
+ 12345,
+ "1234",
+ ),
+ FakeApiClient(
+ "3",
+ "self_link_value",
+ "client3",
+ "client3",
+ "apikey:name:thisisnotreal",
+ "0123456789",
+ 12345,
+ 12345,
+ "1234",
+ ),
+ ]
+
+
+@patch("fusion.IdentityManagerApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+        # required parameter 'name' is missing
+ {
+ "state": "present",
+ "public_key": "0123456789",
+ },
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "client1",
+ "public_key": "0123456789",
+ "extra": "value",
+ },
+        # parameter 'state' has incorrect value
+ {
+ "state": "cool",
+ "name": "client1",
+ "public_key": "0123456789",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(m_im_api, module_args, current_clients):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_api_clients = MagicMock(return_value=current_clients)
+ api_obj.get_api_client = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_api_client = MagicMock()
+ api_obj.delete_api_client = MagicMock()
+ m_im_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_api_client.main()
+
+ # check api was not called at all
+ api_obj.list_api_clients.assert_not_called()
+ api_obj.get_api_client.assert_not_called()
+ api_obj.create_api_client.assert_not_called()
+ api_obj.delete_api_client.assert_not_called()
+
+
+@patch("fusion.IdentityManagerApi")
+def test_api_client_create(m_im_api, current_clients):
+ module_args = {
+ "state": "present",
+ "name": "new_client",
+ "public_key": "0123456789",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_api_clients = MagicMock(return_value=current_clients)
+ api_obj.get_api_client = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_api_client = MagicMock()
+ api_obj.delete_api_client = MagicMock()
+ m_im_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_api_client.main()
+
+ assert exc.value.changed is True
+
+ # check api was called correctly
+ api_obj.list_api_clients.assert_called_once_with()
+ api_obj.get_api_client.assert_not_called()
+ api_obj.create_api_client.assert_called_once_with(
+ purefusion.APIClientPost(
+ public_key=module_args["public_key"],
+ display_name=module_args["name"],
+ )
+ )
+ api_obj.delete_api_client.assert_not_called()
+
+
+@patch("fusion.IdentityManagerApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
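+# Both ApiException and HTTPError raised by the create call itself are expected
+# to propagate to the caller unchanged, hence exec_original == exec_catch.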
+def test_api_client_create_exception(
+ m_im_api, exec_original, exec_catch, current_clients
+):
+ module_args = {
+ "state": "present",
+ "name": "new_client",
+ "public_key": "0123456789",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_api_clients = MagicMock(return_value=current_clients)
+ api_obj.get_api_client = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_api_client = MagicMock(side_effect=exec_original)
+ api_obj.delete_api_client = MagicMock()
+ m_im_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_api_client.main()
+
+ # check api was called correctly
+ api_obj.list_api_clients.assert_called_once_with()
+ api_obj.get_api_client.assert_not_called()
+ api_obj.create_api_client.assert_called_once_with(
+ purefusion.APIClientPost(
+ public_key=module_args["public_key"],
+ display_name=module_args["name"],
+ )
+ )
+ api_obj.delete_api_client.assert_not_called()
+
+
+@patch("fusion.IdentityManagerApi")
+def test_api_client_present_not_changed(m_im_api, current_clients):
+ current_api_client = current_clients[0]
+ module_args = {
+ "state": "present",
+ "name": current_api_client.display_name,
+ "public_key": current_api_client.public_key,
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_api_clients = MagicMock(return_value=current_clients)
+ api_obj.get_api_client = MagicMock(
+ return_value=purefusion.APIClient(**asdict(current_api_client))
+ )
+ api_obj.create_api_client = MagicMock()
+ api_obj.delete_api_client = MagicMock()
+ m_im_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_api_client.main()
+
+ assert exc.value.changed is False
+
+ # check api was called correctly
+ api_obj.list_api_clients.assert_called_once_with()
+ api_obj.get_api_client.assert_not_called()
+ api_obj.create_api_client.assert_not_called()
+ api_obj.delete_api_client.assert_not_called()
+
+
+@patch("fusion.IdentityManagerApi")
+def test_api_client_absent_not_changed(m_im_api, current_clients):
+ module_args = {
+ "state": "absent",
+ "name": "non_existing_client",
+ "public_key": "0123456789",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_api_clients = MagicMock(return_value=current_clients)
+ api_obj.get_api_client = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_api_client = MagicMock()
+ api_obj.delete_api_client = MagicMock()
+ m_im_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_api_client.main()
+
+ assert exc.value.changed is False
+
+ # check api was called correctly
+ api_obj.list_api_clients.assert_called_once_with()
+ api_obj.get_api_client.assert_not_called()
+ api_obj.create_api_client.assert_not_called()
+ api_obj.delete_api_client.assert_not_called()
+
+
+@patch("fusion.IdentityManagerApi")
+def test_api_client_delete(m_im_api, current_clients):
+ current_api_client = current_clients[0]
+ module_args = {
+ "state": "absent",
+ "name": current_api_client.display_name,
+ "public_key": current_api_client.public_key,
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_api_clients = MagicMock(return_value=current_clients)
+ api_obj.get_api_client = MagicMock(
+ return_value=purefusion.APIClient(**asdict(current_api_client))
+ )
+ api_obj.create_api_client = MagicMock()
+ api_obj.delete_api_client = MagicMock()
+ m_im_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_api_client.main()
+
+ assert exc.value.changed is True
+
+ # check api was called correctly
+ api_obj.list_api_clients.assert_called_once_with()
+ api_obj.get_api_client.assert_not_called()
+ api_obj.create_api_client.assert_not_called()
+ api_obj.delete_api_client.assert_called_once_with(
+ api_client_id=current_api_client.id
+ )
+
+
+@patch("fusion.IdentityManagerApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_api_client_delete_exception(
+ m_im_api, exec_original, exec_catch, current_clients
+):
+ current_api_client = current_clients[0]
+ module_args = {
+ "state": "absent",
+ "name": current_api_client.display_name,
+ "public_key": current_api_client.public_key,
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_api_clients = MagicMock(return_value=current_clients)
+ api_obj.get_api_client = MagicMock(
+ return_value=purefusion.APIClient(**asdict(current_api_client))
+ )
+ api_obj.create_api_client = MagicMock()
+ api_obj.delete_api_client = MagicMock(side_effect=exec_original)
+ m_im_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_api_client.main()
+
+ # check api was called correctly
+ api_obj.list_api_clients.assert_called_once_with()
+ api_obj.get_api_client.assert_not_called()
+ api_obj.create_api_client.assert_not_called()
+ api_obj.delete_api_client.assert_called_once_with(
+ api_client_id=current_api_client.id
+ )
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_array.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_array.py
new file mode 100644
index 000000000..0343bb1dc
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_array.py
@@ -0,0 +1,1331 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, call, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_array
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ FailedOperationMock,
+ OperationMock,
+ SuccessfulOperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
+fusion_array.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
+@pytest.fixture
+def module_args():
+ return {
+ "state": "present",
+ "name": "array1",
+ "display_name": "Array 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "appliance_id": "23984573498573",
+ "host_name": "array_1",
+ "hardware_type": "flash-array-x",
+ "maintenance_mode": False,
+ "unavailable_mode": False,
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+
+
+@pytest.fixture
+def current_array(module_args):
+ return {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"],
+ "display_name": module_args["display_name"],
+ "region": module_args["region"],
+ "availability_zone": module_args["availability_zone"],
+ "appliance_id": module_args["appliance_id"],
+ "apartment_id": "76586785687",
+ "host_name": module_args["host_name"],
+ "hardware_type": module_args["hardware_type"],
+ "maintenance_mode": module_args["maintenance_mode"],
+ "unavailable_mode": module_args["unavailable_mode"],
+ }
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # required parameter 'name' is missing
+ {
+ "state": "present",
+ "display_name": "Array 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "appliance_id": "23984573498573",
+ "host_name": "array_1",
+ "hardware_type": "flash-array-x",
+ "maintenance_mode": False,
+ "unavailable_mode": False,
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # required parameter 'region' is missing
+ {
+ "state": "present",
+ "name": "array1",
+ "display_name": "Array 1",
+ "availability_zone": "az1",
+ "appliance_id": "23984573498573",
+ "host_name": "array_1",
+ "hardware_type": "flash-array-x",
+ "maintenance_mode": False,
+ "unavailable_mode": False,
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # required parameter 'availability_zone' is missing
+ {
+ "state": "present",
+ "name": "array1",
+ "display_name": "Array 1",
+ "region": "region1",
+ "appliance_id": "23984573498573",
+ "host_name": "array_1",
+ "hardware_type": "flash-array-x",
+ "maintenance_mode": False,
+ "unavailable_mode": False,
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "array1",
+ "display_name": "Array 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "appliance_id": "23984573498573",
+ "host_name": "array_1",
+ "hardware_type": "flash-array-x",
+ "maintenance_mode": False,
+ "unavailable_mode": False,
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ "extra": "value",
+ },
+ # parameter 'state' has incorrect value
+ {
+ "state": "cool",
+ "name": "array1",
+ "display_name": "Array 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "appliance_id": "23984573498573",
+ "host_name": "array_1",
+ "hardware_type": "flash-array-x",
+ "maintenance_mode": False,
+ "unavailable_mode": False,
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # parameter 'hardware_type' has incorrect value
+ {
+ "state": "present",
+ "name": "array1",
+ "display_name": "Array 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "appliance_id": "23984573498573",
+ "host_name": "array_1",
+ "hardware_type": "hdd-array-x",
+ "maintenance_mode": False,
+ "unavailable_mode": False,
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # parameter 'maintenance_mode' has incorrect value
+ {
+ "state": "present",
+ "name": "array1",
+ "display_name": "Array 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "appliance_id": "23984573498573",
+ "host_name": "array_1",
+ "hardware_type": "flash-array-x",
+ "maintenance_mode": "string",
+ "unavailable_mode": False,
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # parameter 'unavailable_mode' has incorrect value
+ {
+ "state": "present",
+ "name": "array1",
+ "display_name": "Array 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "appliance_id": "23984573498573",
+ "host_name": "array_1",
+ "hardware_type": "flash-array-x",
+ "maintenance_mode": False,
+ "unavailable_mode": "string",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(m_array_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
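+ # operation ids 1/2/3 tag the create/update/delete operations, so later
+ # get_operation assertions can tell which operation the module polled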
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_array_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_array.main()
+
+ # check api was not called at all
+ api_obj.get_array.assert_not_called()
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_not_called()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # required parameter 'hardware_type' for creating resource is missing
+ {
+ "state": "present",
+ "name": "array1",
+ "display_name": "Array 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "appliance_id": "23984573498573",
+ "host_name": "array_1",
+ "maintenance_mode": False,
+ "unavailable_mode": False,
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # required parameter 'host_name' for creating resource is missing
+ {
+ "state": "present",
+ "name": "array1",
+ "display_name": "Array 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "appliance_id": "23984573498573",
+ "hardware_type": "flash-array-x",
+ "maintenance_mode": False,
+ "unavailable_mode": False,
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # required parameter 'appliance_id' for creating resource is missing
+ {
+ "state": "present",
+ "name": "array1",
+ "display_name": "Array 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "host_name": "array_1",
+ "hardware_type": "flash-array-x",
+ "maintenance_mode": False,
+ "unavailable_mode": False,
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ ],
+)
+def test_array_create_fails_on_wrong_parameters(m_array_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_array_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_array.main()
+
+ # check that only get_array was called
+ api_obj.get_array.assert_called_once_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_not_called()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+@pytest.mark.parametrize(
+ "hw_type",
+ [
+ "flash-array-x",
+ "flash-array-c",
+ "flash-array-x-optane",
+ "flash-array-xl",
+ ],
+)
+@pytest.mark.parametrize("main_m", [True, False])
+@pytest.mark.parametrize("unav_m", [True, False])
+def test_array_create(m_array_api, m_op_api, hw_type, main_m, unav_m, module_args):
+ module_args["hardware_type"] = hw_type
+ module_args["maintenance_mode"] = main_m
+ module_args["unavailable_mode"] = unav_m
+ created_array = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"],
+ "display_name": module_args["display_name"],
+ "region": module_args["region"],
+ "availability_zone": module_args["availability_zone"],
+ "appliance_id": module_args["appliance_id"],
+ "apartment_id": "76586785687",
+ "host_name": module_args["host_name"],
+ "hardware_type": module_args["hardware_type"],
+ "maintenance_mode": not module_args[
+ "maintenance_mode"
+ ], # so we can test patching
+ "unavailable_mode": not module_args[
+ "unavailable_mode"
+ ], # so we can test patching
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
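+ # first get_array call raises (array absent), the second returns the created
+ # array, mimicking a create followed by a read-back of the new resource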
+ api_obj.get_array = MagicMock(
+ side_effect=[purefusion.rest.ApiException, purefusion.Array(**created_array)]
+ )
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_array.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_called_once_with(
+ purefusion.ArrayPost(
+ hardware_type=module_args["hardware_type"],
+ display_name=module_args["display_name"],
+ host_name=module_args["host_name"],
+ name=module_args["name"],
+ appliance_id=module_args["appliance_id"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_array.assert_has_calls(
+ [
+ call(
+ purefusion.ArrayPatch(
+ maintenance_mode=purefusion.NullableBoolean(
+ module_args["maintenance_mode"]
+ )
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ array_name=module_args["name"],
+ ),
+ call(
+ purefusion.ArrayPatch(
+ unavailable_mode=purefusion.NullableBoolean(
+ module_args["unavailable_mode"]
+ )
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ array_name=module_args["name"],
+ ),
+ ],
+ any_order=True,
+ )
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_has_calls(
+ [
+ call(1),
+ call(2),
+ call(2),
+ ]
+ )
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+def test_array_create_without_display_name(m_array_api, m_op_api, module_args):
+ del module_args["display_name"]
+ created_array = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"],
+ "display_name": module_args["name"],
+ "region": module_args["region"],
+ "availability_zone": module_args["availability_zone"],
+ "appliance_id": module_args["appliance_id"],
+ "apartment_id": "76586785687",
+ "host_name": module_args["host_name"],
+ "hardware_type": module_args["hardware_type"],
+ "maintenance_mode": not module_args["maintenance_mode"],
+ "unavailable_mode": not module_args["unavailable_mode"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(
+ side_effect=[purefusion.rest.ApiException, purefusion.Array(**created_array)]
+ )
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_array.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_called_once_with(
+ purefusion.ArrayPost(
+ hardware_type=module_args["hardware_type"],
+ display_name=module_args["name"],
+ host_name=module_args["host_name"],
+ name=module_args["name"],
+ appliance_id=module_args["appliance_id"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_array.assert_has_calls(
+ [
+ call(
+ purefusion.ArrayPatch(
+ maintenance_mode=purefusion.NullableBoolean(
+ module_args["maintenance_mode"]
+ )
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ array_name=module_args["name"],
+ ),
+ call(
+ purefusion.ArrayPatch(
+ unavailable_mode=purefusion.NullableBoolean(
+ module_args["unavailable_mode"]
+ )
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ array_name=module_args["name"],
+ ),
+ ],
+ any_order=True,
+ )
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_has_calls(
+ [
+ call(1),
+ call(2),
+ call(2),
+ ]
+ )
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_array_create_exception(
+ m_array_api, m_op_api, exec_original, exec_catch, module_args
+):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_array = MagicMock(side_effect=exec_original)
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_once_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_called_once_with(
+ purefusion.ArrayPost(
+ hardware_type=module_args["hardware_type"],
+ display_name=module_args["display_name"],
+ host_name=module_args["host_name"],
+ name=module_args["name"],
+ appliance_id=module_args["appliance_id"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_array.assert_not_called()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_array_create_second_exception(
+ m_array_api, m_op_api, exec_original, exec_catch, module_args
+):
+ created_array = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"],
+ "display_name": module_args["name"],
+ "region": module_args["region"],
+ "availability_zone": module_args["availability_zone"],
+ "appliance_id": module_args["appliance_id"],
+ "apartment_id": "76586785687",
+ "host_name": module_args["host_name"],
+ "hardware_type": module_args["hardware_type"],
+ "maintenance_mode": not module_args["maintenance_mode"],
+ "unavailable_mode": not module_args["unavailable_mode"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(
+ side_effect=[purefusion.rest.ApiException, purefusion.Array(**created_array)]
+ )
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(side_effect=exec_original)
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_called_once_with(
+ purefusion.ArrayPost(
+ hardware_type=module_args["hardware_type"],
+ display_name=module_args["display_name"],
+ host_name=module_args["host_name"],
+ name=module_args["name"],
+ appliance_id=module_args["appliance_id"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_array.assert_called_once()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+def test_array_create_op_fails(m_array_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_once_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_called_once_with(
+ purefusion.ArrayPost(
+ hardware_type=module_args["hardware_type"],
+ display_name=module_args["display_name"],
+ host_name=module_args["host_name"],
+ name=module_args["name"],
+ appliance_id=module_args["appliance_id"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_array.assert_not_called()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+def test_array_create_second_op_fails(m_array_api, m_op_api, module_args):
+ created_array = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"],
+ "display_name": module_args["name"],
+ "region": module_args["region"],
+ "availability_zone": module_args["availability_zone"],
+ "appliance_id": module_args["appliance_id"],
+ "apartment_id": "76586785687",
+ "host_name": module_args["host_name"],
+ "hardware_type": module_args["hardware_type"],
+ "maintenance_mode": not module_args["maintenance_mode"],
+ "unavailable_mode": not module_args["unavailable_mode"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(
+ side_effect=[purefusion.rest.ApiException, purefusion.Array(**created_array)]
+ )
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(
+ side_effect=[SuccessfulOperationMock, FailedOperationMock]
+ )
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_called_once_with(
+ purefusion.ArrayPost(
+ hardware_type=module_args["hardware_type"],
+ display_name=module_args["display_name"],
+ host_name=module_args["host_name"],
+ name=module_args["name"],
+ appliance_id=module_args["appliance_id"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_array.assert_called_once()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_has_calls(
+ [
+ call(1),
+ call(2),
+ ]
+ )
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
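+# Unlike failures from the API call itself, an HTTPError raised while polling
+# the operation is expected to surface wrapped in OperationException.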
+def test_array_create_op_exception(
+ m_array_api, m_op_api, exec_original, exec_catch, module_args
+):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_once_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_called_once_with(
+ purefusion.ArrayPost(
+ hardware_type=module_args["hardware_type"],
+ display_name=module_args["display_name"],
+ host_name=module_args["host_name"],
+ name=module_args["name"],
+ appliance_id=module_args["appliance_id"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_array.assert_not_called()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_array_create_second_op_exception(
+ m_array_api, m_op_api, exec_original, exec_catch, module_args
+):
+ created_array = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"],
+ "display_name": module_args["name"],
+ "region": module_args["region"],
+ "availability_zone": module_args["availability_zone"],
+ "appliance_id": module_args["appliance_id"],
+ "apartment_id": "76586785687",
+ "host_name": module_args["host_name"],
+ "hardware_type": module_args["hardware_type"],
+ "maintenance_mode": not module_args["maintenance_mode"],
+ "unavailable_mode": not module_args["unavailable_mode"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(
+ side_effect=[purefusion.rest.ApiException, purefusion.Array(**created_array)]
+ )
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(
+ side_effect=[SuccessfulOperationMock, exec_original]
+ )
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_called_once_with(
+ purefusion.ArrayPost(
+ hardware_type=module_args["hardware_type"],
+ display_name=module_args["display_name"],
+ host_name=module_args["host_name"],
+ name=module_args["name"],
+ appliance_id=module_args["appliance_id"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_array.assert_called_once()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_has_calls(
+ [
+ call(1),
+ call(2),
+ ]
+ )
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+def test_array_update(m_array_api, m_op_api, module_args, current_array):
+ current_array["display_name"] = None
+ current_array["host_name"] = "something"
+ current_array["maintenance_mode"] = not current_array["maintenance_mode"]
+ current_array["unavailable_mode"] = not current_array["unavailable_mode"]
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(return_value=purefusion.Array(**current_array))
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_array.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_has_calls(
+ [
+ call(
+ purefusion.ArrayPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ array_name=module_args["name"],
+ ),
+ call(
+ purefusion.ArrayPatch(
+ host_name=purefusion.NullableString(module_args["host_name"])
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ array_name=module_args["name"],
+ ),
+ call(
+ purefusion.ArrayPatch(
+ unavailable_mode=purefusion.NullableBoolean(
+ module_args["unavailable_mode"]
+ )
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ array_name=module_args["name"],
+ ),
+ call(
+ purefusion.ArrayPatch(
+ maintenance_mode=purefusion.NullableBoolean(
+ module_args["maintenance_mode"]
+ )
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ array_name=module_args["name"],
+ ),
+ ],
+ any_order=True,
+ )
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_has_calls(
+ [
+ call(2),
+ call(2),
+ call(2),
+ call(2),
+ ]
+ )
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_array_update_exception(
+ m_array_api, m_op_api, exec_original, exec_catch, module_args, current_array
+):
+ current_array["display_name"] = None
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(return_value=purefusion.Array(**current_array))
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(side_effect=exec_original)
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_called_once()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+def test_array_update_op_fails(m_array_api, m_op_api, module_args, current_array):
+ current_array["display_name"] = None
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(return_value=purefusion.Array(**current_array))
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_called_once()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_array_update_op_exception(
+ m_array_api, m_op_api, exec_original, exec_catch, module_args, current_array
+):
+ current_array["display_name"] = None
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(return_value=purefusion.Array(**current_array))
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_called_once()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+def test_array_present_not_changed(m_array_api, m_op_api, module_args, current_array):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(return_value=purefusion.Array(**current_array))
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_array.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_not_called()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+def test_array_absent_not_changed(m_array_api, m_op_api, module_args):
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_array.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_not_called()
+ api_obj.delete_array.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+def test_array_delete(m_array_api, m_op_api, module_args, current_array):
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(return_value=purefusion.Array(**current_array))
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_array.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_once_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_not_called()
+ api_obj.delete_array.assert_called_once_with(
+ region_name=module_args["region"],
+ availability_zone_name=module_args["availability_zone"],
+ array_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_array_delete_exception(
+ m_array_api, m_op_api, exec_original, exec_catch, module_args, current_array
+):
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(return_value=purefusion.Array(**current_array))
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(side_effect=exec_original)
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_once_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_not_called()
+ api_obj.delete_array.assert_called_once_with(
+ region_name=module_args["region"],
+ availability_zone_name=module_args["availability_zone"],
+ array_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+def test_array_delete_op_fails(m_array_api, m_op_api, module_args, current_array):
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(return_value=purefusion.Array(**current_array))
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_once_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_not_called()
+ api_obj.delete_array.assert_called_once_with(
+ region_name=module_args["region"],
+ availability_zone_name=module_args["availability_zone"],
+ array_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ArraysApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_array_delete_op_exception(
+ m_array_api, m_op_api, exec_original, exec_catch, module_args, current_array
+):
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_array = MagicMock(return_value=purefusion.Array(**current_array))
+ api_obj.create_array = MagicMock(return_value=OperationMock(1))
+ api_obj.update_array = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_array = MagicMock(return_value=OperationMock(3))
+ m_array_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_array.main()
+
+ # check api was called correctly
+ api_obj.get_array.assert_called_once_with(
+ array_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_array.assert_not_called()
+ api_obj.update_array.assert_not_called()
+ api_obj.delete_array.assert_called_once_with(
+ region_name=module_args["region"],
+ availability_zone_name=module_args["availability_zone"],
+ array_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_az.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_az.py
new file mode 100644
index 000000000..c49f958a2
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_az.py
@@ -0,0 +1,717 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_az
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ FailedOperationMock,
+ OperationMock,
+ SuccessfulOperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
+fusion_az.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # required parameter 'name' is missing
+ {
+ "state": "present",
+ "region": "region1",
+ "display_name": "Availability Zone 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # required parameter 'region' is missing
+ {
+ "state": "present",
+ "name": "az1",
+ "display_name": "Availability Zone 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "az1",
+ "region": "region1",
+ "display_name": "Availability Zone 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ "extra": "value",
+ },
+ # parameter 'state' has incorrect value
+ {
+ "state": "cool",
+ "name": "az1",
+ "region": "region1",
+ "display_name": "Availability Zone 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(m_az_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_az.main()
+
+ # check api was not called at all
+ api_obj.get_availability_zone.assert_not_called()
+ api_obj.create_availability_zone.assert_not_called()
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+def test_az_create(m_az_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "az1",
+ "region": "region1",
+ "display_name": "Availability Zone 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_az.main()
+
+ assert exc.value.changed
+
+ api_obj.get_availability_zone.assert_called_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_called_once_with(
+ purefusion.AvailabilityZonePost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ ),
+ region_name=module_args["region"],
+ )
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+def test_az_create_without_display_name(m_az_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "az1",
+ "region": "region1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_az.main()
+
+ assert exc.value.changed
+
+ api_obj.get_availability_zone.assert_called_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_called_once_with(
+ purefusion.AvailabilityZonePost(
+ name=module_args["name"],
+ display_name=module_args["name"],
+ ),
+ region_name=module_args["region"],
+ )
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_az_create_exception(m_az_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "az1",
+ "region": "region1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_availability_zone = MagicMock(side_effect=exec_original)
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_az.main()
+
+ api_obj.get_availability_zone.assert_called_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_called_once_with(
+ purefusion.AvailabilityZonePost(
+ name=module_args["name"],
+ display_name=module_args["name"],
+ ),
+ region_name=module_args["region"],
+ )
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+def test_az_create_op_fails(m_az_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "az1",
+ "region": "region1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_az.main()
+
+ api_obj.get_availability_zone.assert_called_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_called_once_with(
+ purefusion.AvailabilityZonePost(
+ name=module_args["name"],
+ display_name=module_args["name"],
+ ),
+ region_name=module_args["region"],
+ )
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_az_create_op_exception(m_az_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "az1",
+ "region": "region1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_az.main()
+
+ api_obj.get_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_called_once_with(
+ purefusion.AvailabilityZonePost(
+ name=module_args["name"],
+ display_name=module_args["name"],
+ ),
+ region_name=module_args["region"],
+ )
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+def test_az_update(m_az_api, m_op_api):
+ # NOTE: Availability Zone has no PATCH method, so even though display_name differs, no update action is expected
+ module_args = {
+ "state": "present",
+ "name": "az1",
+ "region": "region1",
+ "display_name": "Availability Zone 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_az = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "region": module_args["region"], # region must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(
+ return_value=purefusion.AvailabilityZone(**current_az)
+ )
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_az.main()
+
+ assert not exc.value.changed
+
+ api_obj.get_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_not_called()
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+def test_az_present_not_changed(m_az_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "az1",
+ "region": "region1",
+ "display_name": "Availability Zone 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_az = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "region": module_args["region"], # region must match
+ "display_name": module_args["display_name"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(
+ return_value=purefusion.AvailabilityZone(**current_az)
+ )
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_az.main()
+
+ assert not exc.value.changed
+
+ api_obj.get_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_not_called()
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+def test_az_absent_not_changed(m_az_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "az1",
+ "region": "region1",
+ "display_name": "Availability Zone 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_az.main()
+
+ assert not exc.value.changed
+
+ api_obj.get_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_not_called()
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+def test_az_delete(m_az_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "az1",
+ "region": "region1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_az = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "region": module_args["region"], # region must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(
+ return_value=purefusion.AvailabilityZone(**current_az)
+ )
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_az.main()
+
+ assert exc.value.changed
+
+ api_obj.get_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_not_called()
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_az_delete_exception(m_az_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "az1",
+ "region": "region1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_az = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "region": module_args["region"], # region must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(
+ return_value=purefusion.AvailabilityZone(**current_az)
+ )
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(side_effect=exec_original)
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_az.main()
+
+ api_obj.get_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_not_called()
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+def test_az_delete_op_fails(m_az_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "az1",
+ "region": "region1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_az = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "region": module_args["region"], # region must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(
+ return_value=purefusion.AvailabilityZone(**current_az)
+ )
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_az.main()
+
+ api_obj.get_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_not_called()
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.AvailabilityZonesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_az_delete_op_exception(m_az_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "az1",
+ "region": "region1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_az = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "region": module_args["region"], # region must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_availability_zone = MagicMock(
+ return_value=purefusion.AvailabilityZone(**current_az)
+ )
+ api_obj.create_availability_zone = MagicMock(return_value=OperationMock(1))
+ api_obj.update_availability_zone = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_availability_zone = MagicMock(return_value=OperationMock(3))
+ m_az_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_az.main()
+
+ api_obj.get_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ api_obj.create_availability_zone.assert_not_called()
+ api_obj.update_availability_zone.assert_not_called()
+ api_obj.delete_availability_zone.assert_called_once_with(
+ availability_zone_name=module_args["name"],
+ region_name=module_args["region"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_hap.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_hap.py
new file mode 100644
index 000000000..6491c71da
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_hap.py
@@ -0,0 +1,889 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_hap
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ FailedOperationMock,
+ OperationMock,
+ SuccessfulOperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
+fusion_hap.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
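+# NOTE (illustrative): exit_json, fail_json and set_module_args come from
+# tests/functional/utils and follow the standard Ansible unit-test pattern.
+# A minimal sketch of that pattern (assumed here, not the actual implementation):
+#
+#     def set_module_args(args):
+#         """Prepare module arguments as if they were passed in by Ansible."""
+#         basic._ANSIBLE_ARGS = to_bytes(json.dumps({"ANSIBLE_MODULE_ARGS": args}))
+#
+#     def exit_json(*args, **kwargs):
+#         """Replace AnsibleModule.exit_json; raises so tests can catch the result."""
+#         kwargs.setdefault("changed", False)
+#         raise AnsibleExitJson(**kwargs)
+#
+#     def fail_json(*args, **kwargs):
+#         """Replace AnsibleModule.fail_json; raises so tests can catch failures."""
+#         raise AnsibleFailJson(**kwargs)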
+
+@pytest.fixture
+def module_args():
+ """Module arguments to create new HAP."""
+ return {
+ "state": "present",
+ "name": "hap_new",
+ "display_name": "Host Access Policy New",
+ "iqn": "iqn.2023-05.com.purestorage:420qp2c0699",
+ "personality": "aix",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+
+
+@pytest.fixture
+def current_hap_list():
+ return fusion.HostAccessPolicyList(
+ count=3,
+ more_items_remaining=False,
+ items=[
+ fusion.HostAccessPolicy(
+ id="1",
+ self_link="self_link_value",
+ name="hap1",
+ display_name="Host Access Policy 1",
+ iqn="iqn.2023-05.com.purestorage:420qp2c0261",
+ personality="aix",
+ ),
+ fusion.HostAccessPolicy(
+ id="2",
+ self_link="self_link_value",
+ name="hap2",
+ display_name="Host Access Policy 2",
+ iqn="iqn.2023-05.com.purestorage:420qp2c0262",
+ personality="windows",
+ ),
+ fusion.HostAccessPolicy(
+ id="3",
+ self_link="self_link_value",
+ name="hap3",
+ display_name="Host Access Policy 3",
+ iqn="iqn.2023-05.com.purestorage:420qp2c0263",
+ personality="solaris",
+ ),
+ ],
+ )
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # required parameter 'name' is missing
+ {
+ "state": "present",
+ "display_name": "Host Access Policy 1",
+ "iqn": "iqn.2023-05.com.purestorage:420qp2c0261",
+ "personality": "aix",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ # 'state' is 'present' but 'iqn' is not provided
+ {
+ "state": "present",
+ "name": "hap1",
+ "display_name": "Host Access Policy 1",
+ "personality": "aix",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "hap1",
+ "display_name": "Host Access Policy 1",
+ "iqn": "iqn.2023-05.com.purestorage:420qp2c0261",
+ "personality": "aix",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ "extra": "value",
+ },
+ # parameter 'state' has an invalid value
+ {
+ "state": "cool",
+ "name": "hap1",
+ "display_name": "Host Access Policy 1",
+ "iqn": "iqn.2023-05.com.purestorage:420qp2c0261",
+ "personality": "aix",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ # parameter 'personality' has an invalid value
+ {
+ "state": "present",
+ "name": "hap1",
+ "display_name": "Host Access Policy 1",
+ "iqn": "iqn.2023-05.com.purestorage:420qp2c0261",
+ "personality": "cool",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(
+ m_hap_api, m_op_api, module_args, current_hap_list
+):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_hap_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_hap.main()
+
+ # check api was not called at all
+ api_obj.list_host_access_policies.assert_not_called()
+ api_obj.get_host_access_policy.assert_not_called()
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
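+# NOTE (illustrative): the failures above imply an argument_spec roughly like the
+# following sketch, inferred from the test cases rather than taken from the module:
+#
+#     argument_spec = dict(
+#         name=dict(type="str", required=True),
+#         display_name=dict(type="str"),
+#         iqn=dict(type="str"),  # required when state is 'present'
+#         personality=dict(type="str", choices=["linux", "windows", "solaris", "aix", ...]),
+#         state=dict(type="str", default="present", choices=["present", "absent"]),
+#     )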
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+@pytest.mark.parametrize(
+ "name",
+ [
+ "",
+ "space space",
+ "toolongname_toolongname_toolongname_toolongname_toolongname_toolongname",
+ "end_with_underscore_",
+ "_start_with_underscore",
+ ],
+)
+def test_hap_fail_on_invalid_name(
+ m_hap_api, m_op_api, module_args, current_hap_list, name
+):
+ module_args["name"] = name
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(
+ side_effect=purefusion.rest.ApiException
+ )
+ api_obj.get_host_access_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_hap_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_hap.main()
+
+ # check api was not called at all
+ api_obj.list_host_access_policies.assert_not_called()
+ api_obj.get_host_access_policy.assert_not_called()
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
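+# NOTE (illustrative): a name rule consistent with the rejections above (assumed;
+# the module's actual check may differ) is 1-64 characters, alphanumeric plus
+# '-'/'_', starting and ending with an alphanumeric character and containing no
+# whitespace, e.g. the regex:
+#
+#     ^[a-zA-Z0-9]([a-zA-Z0-9_-]{0,62}[a-zA-Z0-9])?$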
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+@pytest.mark.parametrize(
+ "iqn",
+ [
+ "qn.2023-05.com.purestorage:420qp2c0261",
+ "iqn2023-05.com.purestorage:420qp2c0261",
+ "iqn.202305.com.purestorage:420qp2c0261",
+ "iqn.2023-05com.purestorage:420qp2c0261",
+ "iqn.2023-05.com.purestorage:",
+ "iqn.2023-05..purestorage:420qp2c0261",
+ ".2023-05.com.purestorage:420qp2c0261",
+ "2023-05.com.purestorage:420qp2c0261",
+ ],
+)
+def test_hap_fail_on_invalid_iqn(
+ m_hap_api, m_op_api, module_args, current_hap_list, iqn
+):
+ module_args["iqn"] = iqn
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(
+ side_effect=purefusion.rest.ApiException
+ )
+ api_obj.get_host_access_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_hap_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_hap.main()
+
+ # check api was not called at all
+ api_obj.list_host_access_policies.assert_not_called()
+ api_obj.get_host_access_policy.assert_not_called()
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
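+# NOTE (illustrative): the rejected IQNs above follow from the RFC 3720 naming
+# format "iqn.YYYY-MM.reversed.domain:identifier"; a plausible check (assumed,
+# not necessarily the module's exact regex) is:
+#
+#     ^iqn\.\d{4}-\d{2}(\.[a-z0-9-]+)+:.+$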
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+def test_hap_create(m_hap_api, m_op_api, module_args, current_hap_list):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_hap.main()
+
+ assert exc.value.changed is True
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_called_once_with(
+ purefusion.HostAccessPoliciesPost(
+ iqn=module_args["iqn"],
+ personality=module_args["personality"],
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ )
+ )
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+def test_hap_create_without_display_name(
+ m_hap_api, m_op_api, module_args, current_hap_list
+):
+ del module_args["display_name"]
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_hap.main()
+
+ assert exc.value.changed is True
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_called_once_with(
+ purefusion.HostAccessPoliciesPost(
+ iqn=module_args["iqn"],
+ personality=module_args["personality"],
+ name=module_args["name"],
+ display_name=module_args["name"],
+ )
+ )
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+def test_hap_create_iqn_exists(m_hap_api, m_op_api, module_args, current_hap_list):
+ module_args["iqn"] = current_hap_list.items[0].iqn
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_hap.main()
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_hap_create_exception(
+ m_hap_api, m_op_api, exec_original, exec_catch, module_args, current_hap_list
+):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_host_access_policy = MagicMock(side_effect=exec_original)
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_hap.main()
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_called_once_with(
+ purefusion.HostAccessPoliciesPost(
+ iqn=module_args["iqn"],
+ personality=module_args["personality"],
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ )
+ )
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+def test_hap_create_op_fails(m_hap_api, m_op_api, module_args, current_hap_list):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_hap.main()
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_called_once_with(
+ purefusion.HostAccessPoliciesPost(
+ iqn=module_args["iqn"],
+ personality=module_args["personality"],
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ )
+ )
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_hap_create_op_exception(
+ m_hap_api, m_op_api, exec_original, exec_catch, module_args, current_hap_list
+):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_hap.main()
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_called_once_with(
+ purefusion.HostAccessPoliciesPost(
+ iqn=module_args["iqn"],
+ personality=module_args["personality"],
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ )
+ )
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_hap_list_exception(
+ m_hap_api, m_op_api, exec_original, exec_catch, module_args, current_hap_list
+):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(side_effect=exec_original)
+ api_obj.get_host_access_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_hap.main()
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+def test_hap_update(m_hap_api, m_op_api, module_args, current_hap_list):
+ # NOTE: Host Access Policy has no PATCH method, so even though the requested personality differs, no update action is expected
+ current_hap = current_hap_list.items[0]
+ module_args["name"] = current_hap.name
+ module_args["display_name"] = "New Display Name"
+ module_args["iqn"] = current_hap.iqn
+ module_args["personality"] = (
+ "windows" if current_hap.personality != "windows" else "linux"
+ )
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(return_value=current_hap)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_hap.main()
+
+ assert exc.value.changed is False
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+def test_hap_present_not_changed(m_hap_api, m_op_api, module_args, current_hap_list):
+ current_hap = current_hap_list.items[0]
+ module_args["name"] = current_hap.name
+ module_args["display_name"] = current_hap.display_name
+ module_args["iqn"] = current_hap.iqn
+ module_args["personality"] = current_hap.personality
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(return_value=current_hap)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_hap.main()
+
+ assert exc.value.changed is False
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+def test_hap_absent_not_changed(m_hap_api, m_op_api, module_args, current_hap_list):
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_hap.main()
+
+ assert exc.value.changed is False
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+def test_hap_delete(m_hap_api, m_op_api, module_args, current_hap_list):
+ current_hap = current_hap_list.items[0]
+ module_args["state"] = "absent"
+ module_args["name"] = current_hap.name
+ module_args["display_name"] = current_hap.display_name
+ module_args["iqn"] = current_hap.iqn
+ module_args["personality"] = current_hap.personality
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(return_value=current_hap)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_hap.main()
+
+ assert exc.value.changed is True
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_hap_delete_exception(
+ m_hap_api, m_op_api, exec_original, exec_catch, module_args, current_hap_list
+):
+ current_hap = current_hap_list.items[0]
+ module_args["state"] = "absent"
+ module_args["name"] = current_hap.name
+ module_args["display_name"] = current_hap.display_name
+ module_args["iqn"] = current_hap.iqn
+ module_args["personality"] = current_hap.personality
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(return_value=current_hap)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(side_effect=exec_original)
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_hap.main()
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+def test_hap_delete_op_fails(m_hap_api, m_op_api, module_args, current_hap_list):
+ current_hap = current_hap_list.items[0]
+ module_args["state"] = "absent"
+ module_args["name"] = current_hap.name
+ module_args["display_name"] = current_hap.display_name
+ module_args["iqn"] = current_hap.iqn
+ module_args["personality"] = current_hap.personality
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(return_value=current_hap)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_hap.main()
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.HostAccessPoliciesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_hap_delete_op_exception(
+ m_hap_api, m_op_api, exec_original, exec_catch, module_args, current_hap_list
+):
+ current_hap = current_hap_list.items[0]
+ module_args["state"] = "absent"
+ module_args["name"] = current_hap.name
+ module_args["display_name"] = current_hap.display_name
+ module_args["iqn"] = current_hap.iqn
+ module_args["personality"] = current_hap.personality
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.list_host_access_policies = MagicMock(return_value=current_hap_list)
+ api_obj.get_host_access_policy = MagicMock(return_value=current_hap)
+ api_obj.create_host_access_policy = MagicMock(return_value=OperationMock(1))
+ api_obj.update_host_access_policy = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_host_access_policy = MagicMock(return_value=OperationMock(3))
+ m_hap_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_hap.main()
+
+ # check api was called correctly
+ api_obj.list_host_access_policies.assert_called_once_with()
+ api_obj.get_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ api_obj.create_host_access_policy.assert_not_called()
+ api_obj.update_host_access_policy.assert_not_called()
+ api_obj.delete_host_access_policy.assert_called_once_with(
+ host_access_policy_name=module_args["name"]
+ )
+ op_obj.get_operation.assert_called_once_with(3)
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_hw.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_hw.py
new file mode 100644
index 000000000..3ad109b64
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_hw.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_hw
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+
+# GLOBAL MOCKS
+fusion_hw.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "hw1",
+ "display_name": "Hardware Type 1",
+ "array_type": "FA//X",
+ "media_type": "random",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ "extra": "value",
+ },
+ # parameter 'state' has an invalid value
+ {
+ "state": "cool",
+ "name": "hw1",
+ "display_name": "Hardware Type 1",
+ "array_type": "FA//X",
+ "media_type": "random",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ # parameter 'state' is 'absent', which is not supported for hardware types
+ {
+ "state": "absent",
+ "name": "hw1",
+ "display_name": "Hardware Type 1",
+ "array_type": "FA//X",
+ "media_type": "random",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ # parameter 'array_type' has an invalid value
+ {
+ "state": "present",
+ "name": "hw1",
+ "display_name": "Hardware Type 1",
+ "array_type": "wrong",
+ "media_type": "random",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(module_args):
+ set_module_args(module_args)
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_hw.main()
+
+
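+# NOTE (inferred from these tests): fusion_hw only validates its input and never
+# calls the Fusion API; hardware types are read-only, so every accepted parameter
+# combination exits with changed=False and state 'absent' is rejected outright.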
+@pytest.mark.parametrize("state", [None, "present"])
+@pytest.mark.parametrize("name", [None, "hw_type_name1"])
+@pytest.mark.parametrize("display_name", [None, "Super weird Display Name 12 3"])
+@pytest.mark.parametrize("array_type", [None, "FA//X", "FA//C"])
+@pytest.mark.parametrize("media_type", [None, "random"])
+def test_hw_does_not_call_api(state, name, display_name, array_type, media_type):
+ module_args = {
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ if state is not None:
+ module_args["state"] = state
+ if name is not None:
+ module_args["name"] = name
+ if display_name is not None:
+ module_args["display_name"] = display_name
+ if array_type is not None:
+ module_args["array_type"] = array_type
+ if media_type is not None:
+ module_args["media_type"] = media_type
+ set_module_args(module_args)
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_hw.main()
+
+ assert exc.value.changed is False
+
+ # NOTE: no API-call assertions are needed here; the global call_api mock raises on any real API call, which would fail this test
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_info.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_info.py
new file mode 100644
index 000000000..784b550cd
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_info.py
@@ -0,0 +1,2383 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import os
+from itertools import combinations
+from unittest.mock import MagicMock, call, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_info
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from ansible_collections.purestorage.fusion.tests.helpers import (
+ ApiExceptionsMockGenerator,
+)
+from urllib3.exceptions import HTTPError
+import time
+
+# GLOBAL MOCKS
+fusion_info.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+VALID_SUBSETS = {
+ "all",
+ "minimum",
+ "roles",
+ "users",
+ "arrays",
+ "hardware_types",
+ "volumes",
+ "host_access_policies",
+ "storage_classes",
+ "protection_policies",
+ "placement_groups",
+ "network_interfaces",
+ "availability_zones",
+ "storage_endpoints",
+ "snapshots",
+ "storage_services",
+ "tenants",
+ "tenant_spaces",
+ "network_interface_groups",
+ "api_clients",
+ "regions",
+}
+
+EXPECTED_KEYS = {
+ "all": {
+ "default",
+ "hardware_types",
+ "users",
+ "availability_zones",
+ "roles",
+ "role_assignments",
+ "storage_services",
+ "volumes",
+ "protection_policies",
+ "placement_groups",
+ "storage_classes",
+ "network_interfaces",
+ "host_access_policies",
+ "tenants",
+ "tenant_spaces",
+ "storage_endpoints",
+ "api_clients",
+ "network_interface_groups",
+ "volume_snapshots",
+ "snapshots",
+ "arrays",
+ "regions",
+ },
+ "minimum": {"default"},
+ "arrays": {"arrays"},
+ "hardware_types": {"hardware_types"},
+ "users": {"users"},
+ "availability_zones": {"availability_zones"},
+ "roles": {"roles", "role_assignments"},
+ "storage_services": {"storage_services"},
+ "volumes": {"volumes"},
+ "protection_policies": {"protection_policies"},
+ "placement_groups": {"placement_groups"},
+ "storage_classes": {"storage_classes"},
+ "network_interfaces": {"network_interfaces"},
+ "host_access_policies": {"host_access_policies"},
+ "tenants": {"tenants"},
+ "tenant_spaces": {"tenant_spaces"},
+ "storage_endpoints": {"storage_endpoints"},
+ "api_clients": {"api_clients"},
+ "network_interface_groups": {"network_interface_groups"},
+ "snapshots": {"snapshots", "volume_snapshots"},
+ "regions": {"regions"},
+}
+
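+# NOTE (illustrative): these fixtures back tests that drive fusion_info with a
+# 'gather_subset' list (parameter names assumed from this file's conventions) and
+# check that the returned facts contain exactly the keys mapped above, e.g.:
+#
+#     set_module_args({
+#         "gather_subset": ["roles"],
+#         "app_id": "ABCD1234",
+#         "key_file": "private-key.pem",
+#     })
+#     with pytest.raises(AnsibleExitJson) as exc:
+#         fusion_info.main()
+#     # expected fact keys: EXPECTED_KEYS["roles"] == {"roles", "role_assignments"}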
+RESP_VERSION = purefusion.Version(version=1)
+RESP_AS = purefusion.Space(
+ resource=purefusion.ResourceReference(
+ id="333",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ total_physical_space=1,
+ unique_space=1,
+ snapshot_space=1,
+)
+RESP_AP = purefusion.Performance(
+ resource=purefusion.ResourceReference(
+ id="222",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ reads_per_sec=12345,
+ read_latency_us=1000,
+ read_bandwidth=5000000,
+ writes_per_sec=12611,
+ write_latency_us=2000,
+ write_bandwidth=4000000,
+)
+RESP_LU = [
+ purefusion.User(
+ id="390",
+ name="username1",
+ self_link="self_link_value",
+ display_name="User's Name 1",
+ email="user1@email.com",
+ ),
+ purefusion.User(
+ id="391",
+ name="username2",
+ self_link="self_link_value",
+ display_name="User's Name 2",
+ email="user2@email.com",
+ ),
+]
+RESP_PP = purefusion.ProtectionPolicyList(
+ count=2,
+ more_items_remaining=False,
+ items=[
+ purefusion.ProtectionPolicy(
+ id="098",
+ name="pp1",
+ self_link="self_link_value",
+ display_name="Protection Policy 1",
+ objectives=[],
+ ),
+ purefusion.ProtectionPolicy(
+ id="099",
+ name="pp2",
+ self_link="self_link_value",
+ display_name="Protection Policy 2",
+ objectives=[],
+ ),
+ ],
+)
+RESP_HAP = purefusion.HostAccessPolicyList(
+ count=2,
+ more_items_remaining=False,
+ items=[
+ purefusion.HostAccessPolicy(
+ id="900",
+ name="hap1",
+ self_link="self_link_value",
+ display_name="Host Access Policy 1",
+ iqn="iqn.2023-05.com.purestorage:420qp2c0261",
+ personality="aix",
+ ),
+ purefusion.HostAccessPolicy(
+ id="901",
+ name="hap2",
+ self_link="self_link_value",
+ display_name="Host Access Policy 2",
+ iqn="iqn.2023-05.com.purestorage:420qp2c0262",
+ personality="linux",
+ ),
+ ],
+)
+RESP_HT = purefusion.HardwareTypeList(
+ count=2,
+ more_items_remaining=False,
+ items=[
+ purefusion.HardwareType(
+ id="500",
+ name="ht1",
+ self_link="self_link_value",
+ display_name="Hardware Type 1",
+ array_type="FA//X",
+ media_type="whatever",
+ ),
+ purefusion.HardwareType(
+ id="501",
+ name="ht2",
+ self_link="self_link_value",
+ display_name="Hardware Type 2",
+ array_type="FA//C",
+ media_type="whatever",
+ ),
+ ],
+)
+RESP_SS = purefusion.StorageServiceList(
+ count=2,
+ more_items_remaining=False,
+ items=[
+ purefusion.StorageService(
+ id="502",
+ name="ss1",
+ self_link="self_link_value",
+ display_name="Storage Service 1",
+ hardware_types=[
+ purefusion.HardwareTypeRef(
+ id="910",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ purefusion.HardwareTypeRef(
+ id="911",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ ],
+ ),
+ purefusion.StorageService(
+ id="503",
+ name="ss2",
+ self_link="self_link_value",
+ display_name="Storage Service 3",
+ hardware_types=[
+ purefusion.HardwareTypeRef(
+ id="912",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ )
+ ],
+ ),
+ ],
+)
+RESP_TENANTS = purefusion.TenantList(
+ count=2,
+ more_items_remaining=False,
+ items=[
+ purefusion.Tenant(
+ id="504",
+ name="t1",
+ self_link="self_link_value",
+ display_name="Tenant 1",
+ tenant_spaces_link="ts_link",
+ ),
+ purefusion.Tenant(
+ id="505",
+ name="t2",
+ self_link="self_link_value",
+ display_name="Tenant 2",
+ tenant_spaces_link="ts_link",
+ ),
+ ],
+)
+RESP_REGIONS = purefusion.RegionList(
+ count=2,
+ more_items_remaining=False,
+ items=[
+ purefusion.Region(
+ id="506",
+ name="region1",
+ self_link="self_link_value",
+ display_name="Region 1",
+ ),
+ purefusion.Region(
+ id="507",
+ name="region2",
+ self_link="self_link_value",
+ display_name="Region 2",
+ ),
+ ],
+)
+RESP_ROLES = [
+ purefusion.Role(
+ id="902",
+ name="role1",
+ self_link="self_link_value",
+ display_name="Role 1",
+ description="nice description",
+ assignable_scopes=["scope1", "scope2"],
+ ),
+ purefusion.Role(
+ id="903",
+ name="role2",
+ self_link="self_link_value",
+ display_name="Role 2",
+ description="not so nice description",
+ assignable_scopes=["scope3", "scope2"],
+ ),
+]
+RESP_SC = purefusion.StorageClassList(
+ count=2,
+ more_items_remaining=False,
+ items=[
+ purefusion.StorageClass(
+ id="508",
+ name="sc1",
+ self_link="self_link_value",
+ display_name="Storage Class 1",
+ storage_service=purefusion.StorageServiceRef(
+ id="509",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ size_limit=12345678,
+ iops_limit=10000,
+ bandwidth_limit=2000000,
+ ),
+ purefusion.StorageClass(
+ id="510",
+ name="sc2",
+ self_link="self_link_value",
+ display_name="Storage Class 2",
+ storage_service=purefusion.StorageServiceRef(
+ id="511",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ size_limit=12345679,
+ iops_limit=10001,
+ bandwidth_limit=2000001,
+ ),
+ ],
+)
+RESP_RA = [
+ purefusion.RoleAssignment(
+ id="904",
+ name="ra1",
+ self_link="self_link_value",
+ display_name="Role Assignment 1",
+ role=purefusion.RoleRef(
+ id="512",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ scope=purefusion.ResourceReference(
+ id="513",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ principal="user1",
+ ),
+ purefusion.RoleAssignment(
+ id="905",
+ name="ra2",
+ self_link="self_link_value",
+ display_name="Role Assignment 2",
+ role=purefusion.RoleRef(
+ id="513",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ scope=purefusion.ResourceReference(
+ id="514",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ principal="user2",
+ ),
+]
+RESP_TS = purefusion.TenantSpaceList(
+ count=2,
+ more_items_remaining=False,
+ items=[
+ purefusion.TenantSpace(
+ id="515",
+ name="ts1",
+ self_link="self_link_value",
+ display_name="Tenant Space 1",
+ tenant=RESP_TENANTS.items[0].name,
+ volumes_link="link_value1",
+ snapshots_link="link_value2",
+ placement_groups_link="link_value3",
+ ),
+ purefusion.TenantSpace(
+ id="516",
+ name="ts2",
+ self_link="self_link_value",
+ display_name="Tenant Space 2",
+ tenant=RESP_TENANTS.items[1].name,
+ volumes_link="link_value4",
+ snapshots_link="link_value5",
+ placement_groups_link="link_value6",
+ ),
+ ],
+)
+RESP_VOLUMES = purefusion.VolumeList(
+ count=1,
+ more_items_remaining=False,
+ items=[
+ purefusion.Volume(
+ id="517",
+ name="volume1",
+ self_link="self_link_value",
+ display_name="Volume 1",
+ size=4000000,
+ tenant=purefusion.TenantRef(
+ id="518",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="519",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ storage_class=purefusion.StorageClassRef(
+ id="520",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ protection_policy=purefusion.ProtectionPolicyRef(
+ id="521",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ placement_group=purefusion.PlacementGroupRef(
+ id="522",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ array=purefusion.ArrayRef(
+ id="905",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ created_at=485743825,
+ source_volume_snapshot=purefusion.VolumeSnapshotRef(
+ id="523",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ host_access_policies=[
+ purefusion.HostAccessPolicyRef(
+ id="524",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ )
+ ],
+ serial_number="123923482034",
+ target=purefusion.Target(
+ iscsi=purefusion.Iscsi(
+ iqn="iqn.2023-05.com.purestorage:420qp2c0222",
+ addresses=["125.1.2.4"],
+ )
+ ),
+ time_remaining=1000000,
+ destroyed=False,
+ )
+ ],
+)
+RESP_PG = purefusion.PlacementGroupList(
+ count=1,
+ more_items_remaining=False,
+ items=[
+ purefusion.PlacementGroup(
+ id="525",
+ name="pg1",
+ self_link="self_link_value",
+ display_name="Placement Group 1",
+ tenant=purefusion.TenantRef(
+ id="526",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="527",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ placement_engine=purefusion.PlacementEngine(),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="528",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ protocols=purefusion.Target(
+ iscsi=purefusion.Iscsi(
+ iqn="iqn.2023-05.com.purestorage:420qp2c0211",
+ addresses=["125.1.2.5"],
+ )
+ ),
+ storage_service=purefusion.StorageServiceRef(
+ id="529",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ array=purefusion.ArrayRef(
+ id="530",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ )
+ ],
+)
+RESP_SNAPSHOTS = purefusion.SnapshotList(
+ count=1,
+ more_items_remaining=False,
+ items=[
+ purefusion.Snapshot(
+ id="531",
+ name="snapshot1",
+ self_link="self_link_value",
+ display_name="Snapshot 1",
+ tenant=purefusion.TenantRef(
+ id="531",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="532",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ volume_snapshots_link="link_to_volume_snapshot",
+ protection_policy=purefusion.ProtectionPolicyRef(
+ id="533",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ time_remaining=23432,
+ destroyed=False,
+ )
+ ],
+)
+RESP_AZ = purefusion.AvailabilityZoneList(
+ count=3,
+ more_items_remaining=False,
+ items=[
+ purefusion.AvailabilityZone(
+ id="534",
+ name="az1",
+ self_link="self_link_value",
+ display_name="Availability Zone 1",
+ region=purefusion.RegionRef(
+ id="535",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ ),
+ purefusion.AvailabilityZone(
+ id="536",
+ name="az2",
+ self_link="self_link_value",
+ display_name="Availability Zone 2",
+ region=purefusion.RegionRef(
+ id="537",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ ),
+ purefusion.AvailabilityZone(
+ id="537",
+ name="az3",
+ self_link="self_link_value",
+ display_name="Availability Zone 3",
+ region=purefusion.RegionRef(
+ id="538",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ ),
+ ],
+)
+RESP_NIG = purefusion.NetworkInterfaceGroupList(
+ count=1,
+ more_items_remaining=False,
+ items=[
+ purefusion.NetworkInterfaceGroup(
+ id="538",
+ name="nig1",
+ self_link="self_link_value",
+ display_name="Network Interface Group 1",
+ region=purefusion.RegionRef(
+ id="539",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="540",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEth(
+ prefix="10.21.200.0/24", gateway="10.21.200.1", vlan=None, mtu=1600
+ ),
+ )
+ ],
+)
+RESP_SE = purefusion.StorageEndpointList(
+ count=1,
+ more_items_remaining=False,
+ items=[
+ purefusion.StorageEndpoint(
+ id="541",
+ name="se1",
+ self_link="self_link_value",
+ display_name="Storage Endpoint 1",
+ region=purefusion.RegionRef(
+ id="542",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="543",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ endpoint_type="iscsi",
+ iscsi=purefusion.StorageEndpointIscsi(
+ discovery_interfaces=[
+ purefusion.StorageEndpointIscsiDiscoveryInterface(
+ address="10.21.200.5/24",
+ gateway="10.21.200.0",
+ mtu=2000,
+ network_interface_groups=[
+ purefusion.NetworkInterfaceGroupRef(
+ id="544",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ ],
+ ),
+ purefusion.StorageEndpointIscsiDiscoveryInterface(
+ address="10.21.200.6/24",
+ gateway="10.21.200.0",
+ mtu=2100,
+ network_interface_groups=[],
+ ),
+ ]
+ ),
+ )
+ ],
+)
+RESP_NI = purefusion.NetworkInterfaceList(
+ count=1,
+ more_items_remaining=False,
+ items=[
+ purefusion.NetworkInterface(
+ id="545",
+ name="ni1",
+ self_link="self_link_value",
+ display_name="Network Interface 1",
+ region=purefusion.RegionRef(
+ id="546",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="547",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ array=purefusion.ArrayRef(
+ id="548",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ interface_type="eth",
+ eth=purefusion.NetworkInterfaceEth(
+ address="10.21.200.6/24",
+ gateway="10.21.200.0",
+ mac_address="E3-18-55-D8-8C-F4",
+ mtu=1233,
+ vlan=2,
+ ),
+ services=["a", "b"],
+ enabled=True,
+ network_interface_group=purefusion.NetworkInterfaceGroupRef(
+ id="906",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ max_speed=3224232,
+ )
+ ],
+)
+RESP_ARRAYS = purefusion.ArrayList(
+ count=1,
+ more_items_remaining=False,
+ items=[
+ purefusion.Array(
+ id="549",
+ name="array1",
+ self_link="self_link_value",
+ display_name="Array 1",
+ apartment_id="234214351",
+ hardware_type=purefusion.HardwareTypeRef(
+ id="550",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ region=purefusion.RegionRef(
+ id="551",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="552",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ appliance_id="2734298849",
+ host_name="super_host",
+ maintenance_mode=False,
+ unavailable_mode=False,
+ )
+ ],
+)
+RESP_AC = [
+ purefusion.APIClient(
+ id="553",
+ name="client1",
+ self_link="self_link_value",
+ display_name="API Client 1",
+ issuer="apikey:name:thisisnotreal",
+ public_key="0123456789",
+ last_key_update=1684421184201,
+ last_used=1684421290201,
+ creator_id="1234",
+ ),
+ purefusion.APIClient(
+ id="554",
+ name="client2",
+ self_link="self_link_value",
+ display_name="API Client 2",
+ issuer="apikey:name:thisissuperreal",
+ public_key="0987654321",
+ last_key_update=1684421184201,
+ last_used=1684421290201,
+ creator_id="4321",
+ ),
+]
+RESP_VS = purefusion.VolumeSnapshotList(
+ count=1,
+ more_items_remaining=False,
+ items=[
+ purefusion.VolumeSnapshot(
+ id="555",
+ name="vs1",
+ self_link="self_link_value",
+ display_name="Volume Snapshot 1",
+ serial_number="235235235345",
+ volume_serial_number="544236456346345",
+ created_at=1684421184201,
+ consistency_id="666666",
+ destroyed=False,
+ time_remaining=1684421290201,
+ size=19264036,
+ tenant=purefusion.TenantRef(
+ id="556",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="557",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ snapshot=purefusion.VolumeSnapshotRef(
+ id="558",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ volume=purefusion.VolumeRef(
+ id="559",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ protection_policy=purefusion.ProtectionPolicyRef(
+ id="560",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ placement_group=purefusion.PlacementGroupRef(
+ id="561",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ )
+ ],
+)
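+# NOTE: the RESP_* objects above are static stand-ins for Fusion API list
+# responses; the tests below return them from MagicMock-backed API clients and
+# assert that fusion_info reshapes them into the expected 'fusion_info' dict.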
+
+
+@patch.dict(os.environ, {"TZ": "UTC"})
+@patch("fusion.DefaultApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.ProtectionPoliciesApi")
+@patch("fusion.HostAccessPoliciesApi")
+@patch("fusion.HardwareTypesApi")
+@patch("fusion.StorageServicesApi")
+@patch("fusion.TenantsApi")
+@patch("fusion.RegionsApi")
+@patch("fusion.RolesApi")
+@patch("fusion.StorageClassesApi")
+@patch("fusion.RoleAssignmentsApi")
+@patch("fusion.TenantSpacesApi")
+@patch("fusion.VolumesApi")
+@patch("fusion.VolumeSnapshotsApi")
+@patch("fusion.PlacementGroupsApi")
+@patch("fusion.SnapshotsApi")
+@patch("fusion.AvailabilityZonesApi")
+@patch("fusion.ArraysApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+@patch("fusion.StorageEndpointsApi")
+@patch("fusion.NetworkInterfacesApi")
+@pytest.mark.parametrize(
+    # all two-option combinations + all single options
+ "gather_subset",
+ [
+ *combinations(
+ VALID_SUBSETS,
+ 2,
+ ),
+ *[[option] for option in VALID_SUBSETS],
+ ],
+)
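+# For illustration (assuming, e.g., "arrays" and "roles" were in VALID_SUBSETS),
+# the parametrization above would yield the pair ("arrays", "roles") as well as
+# the single-option cases ["arrays"] and ["roles"].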
+def test_info_gather_subset(
+ # API mocks
+ m_ni_api,
+ m_se_api,
+ m_nig_api,
+ m_array_api,
+ m_az_api,
+ m_snapshot_api,
+ m_pg_api,
+ m_vs_api,
+ m_volume_api,
+ m_ts_api,
+ m_ra_api,
+ m_sc_api,
+ m_role_api,
+ m_region_api,
+ m_tenant_api,
+ m_ss_api,
+ m_hw_api,
+ m_hap_api,
+ m_pp_api,
+ m_im_api,
+ m_default_api,
+ # test parameters
+ gather_subset,
+):
+ """
+    Test that the fusion_info module accepts every single 'gather_subset' option
+    as well as every two-option combination of 'gather_subset' options.
+ """
+    # NOTE: a single MagicMock instance backs all API clients to keep the test
+    # simple; this does not affect the logic under test
+ api_obj = MagicMock()
+ api_obj.get_version = MagicMock(return_value=RESP_VERSION)
+ api_obj.get_array_space = MagicMock(return_value=RESP_AS)
+ api_obj.get_array_performance = MagicMock(return_value=RESP_AP)
+ api_obj.list_users = MagicMock(return_value=RESP_LU)
+ api_obj.list_protection_policies = MagicMock(return_value=RESP_PP)
+ api_obj.list_host_access_policies = MagicMock(return_value=RESP_HAP)
+ api_obj.list_hardware_types = MagicMock(return_value=RESP_HT)
+ api_obj.list_storage_services = MagicMock(return_value=RESP_SS)
+ api_obj.list_tenants = MagicMock(return_value=RESP_TENANTS)
+ api_obj.list_regions = MagicMock(return_value=RESP_REGIONS)
+ api_obj.list_roles = MagicMock(return_value=RESP_ROLES)
+ api_obj.list_storage_classes = MagicMock(return_value=RESP_SC)
+ api_obj.list_role_assignments = MagicMock(return_value=RESP_RA)
+ api_obj.list_tenant_spaces = MagicMock(return_value=RESP_TS)
+ api_obj.list_volumes = MagicMock(return_value=RESP_VOLUMES)
+ api_obj.list_placement_groups = MagicMock(return_value=RESP_PG)
+ api_obj.list_snapshots = MagicMock(return_value=RESP_SNAPSHOTS)
+ api_obj.list_availability_zones = MagicMock(return_value=RESP_AZ)
+ api_obj.list_network_interface_groups = MagicMock(return_value=RESP_NIG)
+ api_obj.list_storage_endpoints = MagicMock(return_value=RESP_SE)
+ api_obj.list_network_interfaces = MagicMock(return_value=RESP_NI)
+ api_obj.list_arrays = MagicMock(return_value=RESP_ARRAYS)
+ api_obj.list_api_clients = MagicMock(return_value=RESP_AC)
+ api_obj.list_volume_snapshots = MagicMock(return_value=RESP_VS)
+ m_ni_api.return_value = api_obj
+ m_se_api.return_value = api_obj
+ m_nig_api.return_value = api_obj
+ m_array_api.return_value = api_obj
+ m_az_api.return_value = api_obj
+ m_snapshot_api.return_value = api_obj
+ m_pg_api.return_value = api_obj
+ m_vs_api.return_value = api_obj
+ m_volume_api.return_value = api_obj
+ m_ts_api.return_value = api_obj
+ m_ra_api.return_value = api_obj
+ m_sc_api.return_value = api_obj
+ m_role_api.return_value = api_obj
+ m_region_api.return_value = api_obj
+ m_tenant_api.return_value = api_obj
+ m_ss_api.return_value = api_obj
+ m_hw_api.return_value = api_obj
+ m_hap_api.return_value = api_obj
+ m_pp_api.return_value = api_obj
+ m_im_api.return_value = api_obj
+ m_default_api.return_value = api_obj
+
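+    # re-read the patched TZ=UTC environment so the human-readable timestamps
+    # asserted below are rendered deterministically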
+ time.tzset()
+
+ set_module_args(
+ {
+ "gather_subset": gather_subset,
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ )
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_info.main()
+
+ assert exc.value.changed is False
+
+    expected_keys = set()
+ for option in gather_subset:
+ expected_keys = {*expected_keys, *EXPECTED_KEYS[option]}
+
+ assert exc.value.fusion_info.keys() == expected_keys
+
+ if "hardware_types" in gather_subset or "all" in gather_subset:
+ api_obj.list_hardware_types.assert_called_with()
+ assert "hardware_types" in exc.value.fusion_info
+ assert exc.value.fusion_info["hardware_types"] == {
+ hw_type.name: {
+ "array_type": hw_type.array_type,
+ "display_name": hw_type.display_name,
+ "media_type": hw_type.media_type,
+ }
+ for hw_type in RESP_HT.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_hardware_types.assert_called_with()
+ assert "default" in exc.value.fusion_info
+ assert "hardware_types" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["hardware_types"] == len(RESP_HT.items)
+ else:
+ api_obj.list_hardware_types.assert_not_called()
+
+ if "users" in gather_subset or "all" in gather_subset:
+ api_obj.list_users.assert_called_with()
+ assert "users" in exc.value.fusion_info
+ assert exc.value.fusion_info["users"] == {
+ user.name: {
+ "display_name": user.display_name,
+ "email": user.email,
+ "id": user.id,
+ }
+ for user in RESP_LU
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_users.assert_called_with()
+ assert "default" in exc.value.fusion_info
+ assert "users" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["users"] == len(RESP_LU)
+ else:
+ api_obj.list_users.assert_not_called()
+
+ if "availability_zones" in gather_subset or "all" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ api_obj.list_availability_zones.assert_has_calls(
+ [call(region_name=region.name) for region in RESP_REGIONS.items],
+ any_order=True,
+ )
+ assert "availability_zones" in exc.value.fusion_info
+ assert exc.value.fusion_info["availability_zones"] == {
+ zone.name: {
+ "display_name": zone.display_name,
+ "region": zone.region.name,
+ }
+ for zone in RESP_AZ.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ api_obj.list_availability_zones.assert_has_calls(
+ [call(region_name=region.name) for region in RESP_REGIONS.items],
+ any_order=True,
+ )
+ assert "default" in exc.value.fusion_info
+ assert "availability_zones" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["availability_zones"] == len(
+ RESP_REGIONS.items
+ ) * len(RESP_AZ.items)
+
+ if "roles" in gather_subset or "all" in gather_subset:
+ api_obj.list_roles.assert_called_with()
+ api_obj.list_role_assignments.assert_has_calls(
+ [call(role_name=role.name) for role in RESP_ROLES],
+ any_order=True,
+ )
+ assert "roles" in exc.value.fusion_info
+ assert "role_assignments" in exc.value.fusion_info
+ assert exc.value.fusion_info["roles"] == {
+ role.name: {
+ "display_name": role.display_name,
+ "scopes": role.assignable_scopes,
+ }
+ for role in RESP_ROLES
+ }
+ assert exc.value.fusion_info["role_assignments"] == {
+ ra.name: {
+ "display_name": ra.display_name,
+ "role": ra.role.name,
+ "scope": ra.scope.name,
+ }
+ for ra in RESP_RA
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_roles.assert_called_with()
+ api_obj.list_role_assignments.assert_has_calls(
+ [call(role_name=role.name) for role in RESP_ROLES],
+ any_order=True,
+ )
+ assert "default" in exc.value.fusion_info
+ assert "roles" in exc.value.fusion_info["default"]
+ assert "role_assignments" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["roles"] == len(RESP_ROLES)
+ assert exc.value.fusion_info["default"]["role_assignments"] == len(
+ RESP_ROLES
+ ) * len(RESP_RA)
+ else:
+ api_obj.list_roles.assert_not_called()
+ api_obj.list_role_assignments.assert_not_called()
+
+ if "storage_services" in gather_subset or "all" in gather_subset:
+ api_obj.list_storage_services.assert_called_with()
+ assert "storage_services" in exc.value.fusion_info
+ assert exc.value.fusion_info["storage_services"] == {
+ service.name: {
+ "display_name": service.display_name,
+ "hardware_types": [hwtype.name for hwtype in service.hardware_types],
+ }
+ for service in RESP_SS.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_storage_services.assert_called_with()
+ assert "default" in exc.value.fusion_info
+ assert "storage_services" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["storage_services"] == len(
+ RESP_SS.items
+ )
+
+ if "volumes" in gather_subset or "all" in gather_subset:
+ api_obj.list_tenants.assert_called_with()
+ api_obj.list_tenant_spaces.assert_has_calls(
+ [call(tenant_name=tenant.name) for tenant in RESP_TENANTS.items],
+ any_order=True,
+ )
+ api_obj.list_volumes.assert_has_calls(
+ [
+ call(
+ tenant_name=tenant.name,
+ tenant_space_name=ts.name,
+ )
+ for ts in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ ],
+ any_order=True,
+ )
+ assert "volumes" in exc.value.fusion_info
+ assert exc.value.fusion_info["volumes"] == {
+ tenant.name
+ + "/"
+ + tenant_space.name
+ + "/"
+ + volume.name: {
+ "tenant": tenant.name,
+ "tenant_space": tenant_space.name,
+ "name": volume.name,
+ "size": volume.size,
+ "display_name": volume.display_name,
+ "placement_group": volume.placement_group.name,
+ "source_volume_snapshot": getattr(
+ volume.source_volume_snapshot, "name", None
+ ),
+ "protection_policy": getattr(volume.protection_policy, "name", None),
+ "storage_class": volume.storage_class.name,
+ "serial_number": volume.serial_number,
+ "target": {
+ "iscsi": {
+ "addresses": volume.target.iscsi.addresses,
+ "iqn": volume.target.iscsi.iqn,
+ },
+ "nvme": {
+ "addresses": None,
+ "nqn": None,
+ },
+ "fc": {
+ "addresses": None,
+ "wwns": None,
+ },
+ },
+ "array": volume.array.name,
+ }
+ for volume in RESP_VOLUMES.items
+ for tenant_space in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_tenants.assert_called_with()
+ api_obj.list_tenant_spaces.assert_has_calls(
+ [call(tenant_name=tenant.name) for tenant in RESP_TENANTS.items],
+ any_order=True,
+ )
+ api_obj.list_volumes.assert_has_calls(
+ [
+ call(
+ tenant_name=tenant.name,
+ tenant_space_name=ts.name,
+ )
+ for ts in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ ],
+ any_order=True,
+ )
+ assert "default" in exc.value.fusion_info
+ assert "volumes" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["volumes"] == len(
+ RESP_TENANTS.items
+ ) * len(RESP_TS.items) * len(RESP_VOLUMES.items)
+ else:
+ api_obj.list_volumes.assert_not_called()
+
+ if "protection_policies" in gather_subset or "all" in gather_subset:
+ api_obj.list_protection_policies.assert_called_with()
+ assert "protection_policies" in exc.value.fusion_info
+ assert exc.value.fusion_info["protection_policies"] == {
+ policy.name: {
+ "objectives": policy.objectives,
+ }
+ for policy in RESP_PP.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_protection_policies.assert_called_with()
+ assert "default" in exc.value.fusion_info
+ assert "protection_policies" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["protection_policies"] == len(
+ RESP_PP.items
+ )
+ else:
+ api_obj.list_protection_policies.assert_not_called()
+
+ if "storage_classes" in gather_subset or "all" in gather_subset:
+ api_obj.list_storage_services.assert_called_with()
+ api_obj.list_storage_classes.assert_has_calls(
+ [call(storage_service_name=service.name) for service in RESP_SS.items],
+ any_order=True,
+ )
+ assert "storage_classes" in exc.value.fusion_info
+ assert exc.value.fusion_info["storage_classes"] == {
+ s_class.name: {
+ "bandwidth_limit": getattr(s_class, "bandwidth_limit", None),
+ "iops_limit": getattr(s_class, "iops_limit", None),
+ "size_limit": getattr(s_class, "size_limit", None),
+ "display_name": s_class.display_name,
+ "storage_service": service.name,
+ }
+ for s_class in RESP_SC.items
+ for service in RESP_SS.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_storage_services.assert_called_with()
+ api_obj.list_storage_classes.assert_has_calls(
+ [call(storage_service_name=service.name) for service in RESP_SS.items],
+ any_order=True,
+ )
+ assert "default" in exc.value.fusion_info
+ assert "storage_classes" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["storage_classes"] == len(
+ RESP_SC.items
+ ) * len(RESP_SS.items)
+ else:
+ api_obj.list_storage_classes.assert_not_called()
+
+ if "network_interfaces" in gather_subset or "all" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ api_obj.list_availability_zones.assert_has_calls(
+ [call(region_name=region.name) for region in RESP_REGIONS.items],
+ any_order=True,
+ )
+ api_obj.list_arrays.assert_has_calls(
+ [
+ call(
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ ],
+ any_order=True,
+ )
+ api_obj.list_network_interfaces.assert_has_calls(
+ [
+ call(
+ array_name=array.name,
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ for array in RESP_ARRAYS.items
+ ],
+ any_order=True,
+ )
+ assert "network_interfaces" in exc.value.fusion_info
+ assert exc.value.fusion_info["network_interfaces"] == {
+ az.name
+ + "/"
+ + array.name: {
+ nic.name: {
+ "enabled": nic.enabled,
+ "display_name": nic.display_name,
+ "interface_type": nic.interface_type,
+ "services": nic.services,
+ "max_speed": nic.max_speed,
+ "vlan": nic.eth.vlan,
+ "address": nic.eth.address,
+ "mac_address": nic.eth.mac_address,
+ "gateway": nic.eth.gateway,
+ "mtu": nic.eth.mtu,
+ "network_interface_group": nic.network_interface_group.name,
+ "availability_zone": nic.availability_zone.name,
+ }
+ for nic in RESP_NI.items
+ }
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ for array in RESP_ARRAYS.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ api_obj.list_availability_zones.assert_has_calls(
+ [call(region_name=region.name) for region in RESP_REGIONS.items],
+ any_order=True,
+ )
+ api_obj.list_arrays.assert_has_calls(
+ [
+ call(
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ ],
+ any_order=True,
+ )
+ api_obj.list_network_interfaces.assert_has_calls(
+ [
+ call(
+ array_name=array.name,
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ for array in RESP_ARRAYS.items
+ ],
+ any_order=True,
+ )
+ assert "default" in exc.value.fusion_info
+ assert "network_interfaces" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["network_interfaces"] == len(
+ RESP_REGIONS.items
+ ) * len(RESP_AZ.items) * len(RESP_ARRAYS.items) * len(RESP_NI.items)
+ else:
+ api_obj.list_network_interfaces.assert_not_called()
+
+ if "host_access_policies" in gather_subset or "all" in gather_subset:
+ api_obj.list_host_access_policies.assert_called_with()
+ assert "host_access_policies" in exc.value.fusion_info
+ assert exc.value.fusion_info["host_access_policies"] == {
+ host.name: {
+ "personality": host.personality,
+ "display_name": host.display_name,
+ "iqn": host.iqn,
+ }
+ for host in RESP_HAP.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_host_access_policies.assert_called_with()
+ assert "default" in exc.value.fusion_info
+ assert "host_access_policies" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["host_access_policies"] == len(
+ RESP_HAP.items
+ )
+ else:
+ api_obj.list_host_access_policies.assert_not_called()
+
+ if "arrays" in gather_subset or "all" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ api_obj.list_availability_zones.assert_has_calls(
+ [call(region_name=region.name) for region in RESP_REGIONS.items],
+ any_order=True,
+ )
+ api_obj.list_arrays.assert_has_calls(
+ [
+ call(
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ ],
+ any_order=True,
+ )
+ api_obj.get_array_space.assert_has_calls(
+ [
+ call(
+ array_name=array.name,
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ for array in RESP_ARRAYS.items
+ ],
+ any_order=True,
+ )
+ api_obj.get_array_performance.assert_has_calls(
+ [
+ call(
+ array_name=array.name,
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ for array in RESP_ARRAYS.items
+ ],
+ any_order=True,
+ )
+ assert "arrays" in exc.value.fusion_info
+ assert exc.value.fusion_info["arrays"] == {
+ array.name: {
+ "region": region.name,
+ "availability_zone": az.name,
+ "host_name": array.host_name,
+ "maintenance_mode": array.maintenance_mode,
+ "unavailable_mode": array.unavailable_mode,
+ "display_name": array.display_name,
+ "hardware_type": array.hardware_type.name,
+ "appliance_id": array.appliance_id,
+ "apartment_id": getattr(array, "apartment_id", None),
+ "space": {
+ "total_physical_space": RESP_AS.total_physical_space,
+ },
+ "performance": {
+ "read_bandwidth": RESP_AP.read_bandwidth,
+ "read_latency_us": RESP_AP.read_latency_us,
+ "reads_per_sec": RESP_AP.reads_per_sec,
+ "write_bandwidth": RESP_AP.write_bandwidth,
+ "write_latency_us": RESP_AP.write_latency_us,
+ "writes_per_sec": RESP_AP.writes_per_sec,
+ },
+ }
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ for array in RESP_ARRAYS.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ api_obj.list_availability_zones.assert_has_calls(
+ [call(region_name=region.name) for region in RESP_REGIONS.items],
+ any_order=True,
+ )
+ api_obj.list_arrays.assert_has_calls(
+ [
+ call(
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ ],
+ any_order=True,
+ )
+ api_obj.get_array_space.assert_not_called()
+ api_obj.get_array_performance.assert_not_called()
+ assert "default" in exc.value.fusion_info
+ assert "arrays" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["arrays"] == len(
+ RESP_REGIONS.items
+ ) * len(RESP_AZ.items) * len(RESP_ARRAYS.items)
+ else:
+ api_obj.get_array_space.assert_not_called()
+ api_obj.get_array_performance.assert_not_called()
+
+ if "tenants" in gather_subset or "all" in gather_subset:
+ api_obj.list_tenants.assert_called_with()
+ assert "tenants" in exc.value.fusion_info
+ assert exc.value.fusion_info["tenants"] == {
+ tenant.name: {
+ "display_name": tenant.display_name,
+ }
+ for tenant in RESP_TENANTS.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_tenants.assert_called_with()
+ assert "default" in exc.value.fusion_info
+ assert "tenants" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["tenants"] == len(RESP_TENANTS.items)
+
+ if "tenant_spaces" in gather_subset or "all" in gather_subset:
+ api_obj.list_tenants.assert_called_with()
+ api_obj.list_tenant_spaces.assert_has_calls(
+ [call(tenant_name=tenant.name) for tenant in RESP_TENANTS.items],
+ any_order=True,
+ )
+ assert "tenant_spaces" in exc.value.fusion_info
+ assert exc.value.fusion_info["tenant_spaces"] == {
+ tenant.name
+ + "/"
+ + ts.name: {
+ "tenant": tenant.name,
+ "display_name": ts.display_name,
+ }
+ for tenant in RESP_TENANTS.items
+ for ts in RESP_TS.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_tenants.assert_called_with()
+ api_obj.list_tenant_spaces.assert_has_calls(
+ [call(tenant_name=tenant.name) for tenant in RESP_TENANTS.items],
+ any_order=True,
+ )
+ assert "default" in exc.value.fusion_info
+ assert "tenant_spaces" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["tenant_spaces"] == len(
+ RESP_TENANTS.items
+ ) * len(RESP_TS.items)
+
+ if "storage_endpoints" in gather_subset or "all" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ api_obj.list_availability_zones.assert_has_calls(
+ [call(region_name=region.name) for region in RESP_REGIONS.items],
+ any_order=True,
+ )
+ api_obj.list_storage_endpoints.assert_has_calls(
+ [
+ call(
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ ],
+ any_order=True,
+ )
+ assert "storage_endpoints" in exc.value.fusion_info
+ assert exc.value.fusion_info["storage_endpoints"] == {
+ region.name
+ + "/"
+ + az.name
+ + "/"
+ + endpoint.name: {
+ "display_name": endpoint.display_name,
+ "endpoint_type": endpoint.endpoint_type,
+ "iscsi_interfaces": [
+ {
+ "address": iface.address,
+ "gateway": iface.gateway,
+ "mtu": iface.mtu,
+ "network_interface_groups": [
+ nig.name for nig in iface.network_interface_groups
+ ],
+ }
+ for iface in endpoint.iscsi.discovery_interfaces
+ ],
+ }
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ for endpoint in RESP_SE.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ api_obj.list_availability_zones.assert_has_calls(
+ [call(region_name=region.name) for region in RESP_REGIONS.items],
+ any_order=True,
+ )
+ api_obj.list_storage_endpoints.assert_has_calls(
+ [
+ call(
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ ],
+ any_order=True,
+ )
+ assert "default" in exc.value.fusion_info
+ assert "storage_endpoints" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["storage_endpoints"] == len(
+ RESP_REGIONS.items
+ ) * len(RESP_AZ.items) * len(RESP_SE.items)
+ else:
+ api_obj.list_storage_endpoints.assert_not_called()
+
+ if "api_clients" in gather_subset or "all" in gather_subset:
+ api_obj.list_api_clients.assert_called_with()
+ assert "api_clients" in exc.value.fusion_info
+ assert exc.value.fusion_info["api_clients"] == {
+ client.name: {
+ "display_name": client.display_name,
+ "issuer": client.issuer,
+ "public_key": client.public_key,
+ "creator_id": client.creator_id,
+ "last_key_update": "Thu, 18 May 2023 14:46:24 UTC",
+ "last_used": "Thu, 18 May 2023 14:48:10 UTC",
+ }
+ for client in RESP_AC
+ }
+ elif "minimum" in gather_subset:
+ # api_clients is not in default dict
+ api_obj.list_api_clients.assert_not_called()
+ assert "default" in exc.value.fusion_info
+ assert "api_clients" not in exc.value.fusion_info["default"]
+ else:
+ api_obj.list_api_clients.assert_not_called()
+
+ if "snapshots" in gather_subset or "all" in gather_subset:
+ api_obj.list_tenants.assert_called_with()
+ api_obj.list_tenant_spaces.assert_has_calls(
+ [call(tenant_name=tenant.name) for tenant in RESP_TENANTS.items],
+ any_order=True,
+ )
+ api_obj.list_snapshots.assert_has_calls(
+ [
+ call(
+ tenant_name=tenant.name,
+ tenant_space_name=ts.name,
+ )
+ for ts in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ ],
+ any_order=True,
+ )
+ api_obj.list_volume_snapshots.assert_has_calls(
+ [
+ call(
+ tenant_name=tenant.name,
+ tenant_space_name=ts.name,
+ snapshot_name=snap.name,
+ )
+ for snap in RESP_SNAPSHOTS.items
+ for ts in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ ],
+ any_order=True,
+ )
+ assert "snapshots" in exc.value.fusion_info
+ assert "volume_snapshots" in exc.value.fusion_info
+ assert exc.value.fusion_info["snapshots"] == {
+ tenant.name
+ + "/"
+ + ts.name
+ + "/"
+ + snap.name: {
+ "display_name": snap.display_name,
+ "protection_policy": snap.protection_policy,
+ "time_remaining": "0 hours, 0 mins, 23 secs",
+ "volume_snapshots_link": snap.volume_snapshots_link,
+ }
+ for snap in RESP_SNAPSHOTS.items
+ for ts in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ }
+
+ assert exc.value.fusion_info["volume_snapshots"] == {
+ tenant.name
+ + "/"
+ + ts.name
+ + "/"
+ + snap.name
+ + "/"
+ + vsnap.name: {
+ "size": vsnap.size,
+ "display_name": vsnap.display_name,
+ "protection_policy": vsnap.protection_policy,
+ "serial_number": vsnap.serial_number,
+ "created_at": "Thu, 18 May 2023 14:46:24 UTC",
+ "time_remaining": "14 hours, 48 mins, 10 secs",
+ "placement_group": vsnap.placement_group.name,
+ }
+ for vsnap in RESP_VS.items
+ for snap in RESP_SNAPSHOTS.items
+ for ts in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_tenants.assert_called_with()
+ api_obj.list_tenant_spaces.assert_has_calls(
+ [call(tenant_name=tenant.name) for tenant in RESP_TENANTS.items],
+ any_order=True,
+ )
+ api_obj.list_snapshots.assert_has_calls(
+ [
+ call(
+ tenant_name=tenant.name,
+ tenant_space_name=ts.name,
+ )
+ for ts in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ ],
+ any_order=True,
+ )
+ api_obj.list_volume_snapshots.assert_not_called()
+ assert "default" in exc.value.fusion_info
+ assert "snapshots" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["snapshots"] == len(
+ RESP_TENANTS.items
+ ) * len(RESP_TS.items) * len(RESP_SNAPSHOTS.items)
+ else:
+ api_obj.list_snapshots.assert_not_called()
+ api_obj.list_volume_snapshots.assert_not_called()
+
+ if "network_interface_groups" in gather_subset or "all" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ api_obj.list_availability_zones.assert_has_calls(
+ [call(region_name=region.name) for region in RESP_REGIONS.items],
+ any_order=True,
+ )
+ api_obj.list_network_interface_groups.assert_has_calls(
+ [
+ call(
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ ],
+ any_order=True,
+ )
+ assert "network_interface_groups" in exc.value.fusion_info
+ assert exc.value.fusion_info["network_interface_groups"] == {
+ region.name
+ + "/"
+ + az.name
+ + "/"
+ + nig.name: {
+ "display_name": nig.display_name,
+ "gateway": nig.eth.gateway,
+ "prefix": nig.eth.prefix,
+ "mtu": nig.eth.mtu,
+ }
+ for nig in RESP_NIG.items
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ api_obj.list_availability_zones.assert_has_calls(
+ [call(region_name=region.name) for region in RESP_REGIONS.items],
+ any_order=True,
+ )
+ api_obj.list_network_interface_groups.assert_has_calls(
+ [
+ call(
+ availability_zone_name=az.name,
+ region_name=region.name,
+ )
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ ],
+ any_order=True,
+ )
+ assert "default" in exc.value.fusion_info
+ assert "network_interface_groups" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["network_interface_groups"] == len(
+ RESP_NIG.items
+ ) * len(RESP_REGIONS.items) * len(RESP_AZ.items)
+ else:
+ api_obj.list_network_interface_groups.assert_not_called()
+
+ if "placement_groups" in gather_subset or "all" in gather_subset:
+ api_obj.list_tenants.assert_called_with()
+ api_obj.list_tenant_spaces.assert_has_calls(
+ [call(tenant_name=tenant.name) for tenant in RESP_TENANTS.items],
+ any_order=True,
+ )
+        api_obj.list_placement_groups.assert_has_calls(
+ [
+ call(
+ tenant_name=tenant.name,
+ tenant_space_name=ts.name,
+ )
+ for ts in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ ],
+ any_order=True,
+ )
+ assert "placement_groups" in exc.value.fusion_info
+ assert exc.value.fusion_info["placement_groups"] == {
+ tenant.name
+ + "/"
+ + ts.name
+ + "/"
+ + group.name: {
+ "tenant": group.tenant.name,
+ "display_name": group.display_name,
+ "placement_engine": group.placement_engine,
+ "tenant_space": group.tenant_space.name,
+ "az": group.availability_zone.name,
+ "array": getattr(group.array, "name", None),
+ }
+ for group in RESP_PG.items
+ for ts in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_tenants.assert_called_with()
+ api_obj.list_tenant_spaces.assert_has_calls(
+ [call(tenant_name=tenant.name) for tenant in RESP_TENANTS.items],
+ any_order=True,
+ )
+        api_obj.list_placement_groups.assert_has_calls(
+ [
+ call(
+ tenant_name=tenant.name,
+ tenant_space_name=ts.name,
+ )
+ for ts in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ ],
+ any_order=True,
+ )
+ assert "default" in exc.value.fusion_info
+ assert "placement_groups" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["placement_groups"] == len(
+ RESP_PG.items
+ ) * len(RESP_TENANTS.items) * len(RESP_TS.items)
+ else:
+ api_obj.list_placement_groups.assert_not_called()
+
+ if "regions" in gather_subset or "all" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ assert "regions" in exc.value.fusion_info
+ assert exc.value.fusion_info["regions"] == {
+ region.name: {
+ "display_name": region.display_name,
+ }
+ for region in RESP_REGIONS.items
+ }
+ elif "minimum" in gather_subset:
+ api_obj.list_regions.assert_called_with()
+ assert "default" in exc.value.fusion_info
+ assert "regions" in exc.value.fusion_info["default"]
+ assert exc.value.fusion_info["default"]["regions"] == len(RESP_REGIONS.items)
+
+
+@patch("fusion.DefaultApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.ProtectionPoliciesApi")
+@patch("fusion.HostAccessPoliciesApi")
+@patch("fusion.HardwareTypesApi")
+@patch("fusion.StorageServicesApi")
+@patch("fusion.TenantsApi")
+@patch("fusion.RegionsApi")
+@patch("fusion.RolesApi")
+@patch("fusion.StorageClassesApi")
+@patch("fusion.RoleAssignmentsApi")
+@patch("fusion.TenantSpacesApi")
+@patch("fusion.VolumesApi")
+@patch("fusion.VolumeSnapshotsApi")
+@patch("fusion.PlacementGroupsApi")
+@patch("fusion.SnapshotsApi")
+@patch("fusion.AvailabilityZonesApi")
+@patch("fusion.ArraysApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+@patch("fusion.StorageEndpointsApi")
+@patch("fusion.NetworkInterfacesApi")
+@pytest.mark.parametrize("subset", VALID_SUBSETS)
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
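+        # ApiException and HTTPError are expected to propagate unchanged, while
+        # a permission-denied ApiException is handled gracefully and the module
+        # still exits via AnsibleExitJson (with the affected fields set to None)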
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ (ApiExceptionsMockGenerator.create_permission_denied(), AnsibleExitJson),
+ ],
+)
+def test_info_exception(
+ # API mocks
+ m_ni_api,
+ m_se_api,
+ m_nig_api,
+ m_array_api,
+ m_az_api,
+ m_snapshot_api,
+ m_pg_api,
+ m_vs_api,
+ m_volume_api,
+ m_ts_api,
+ m_ra_api,
+ m_sc_api,
+ m_role_api,
+ m_region_api,
+ m_tenant_api,
+ m_ss_api,
+ m_hw_api,
+ m_hap_api,
+ m_pp_api,
+ m_im_api,
+ m_default_api,
+ # test parameter
+ subset,
+ # exceptions
+ exec_original,
+ exec_catch,
+):
+ """
+    Test that fusion_info propagates unexpected exceptions and exits gracefully
+    on permission-denied errors.
+ """
+    # NOTE: a single MagicMock instance backs all API clients to keep the test
+    # simple; this does not affect the logic under test
+ api_obj = MagicMock()
+ api_obj.get_version = MagicMock(
+ return_value=RESP_VERSION, side_effect=exec_original
+ )
+ api_obj.get_array_space = MagicMock(return_value=RESP_AS, side_effect=exec_original)
+ api_obj.get_array_performance = MagicMock(
+ return_value=RESP_AP, side_effect=exec_original
+ )
+ api_obj.list_users = MagicMock(return_value=RESP_LU, side_effect=exec_original)
+ api_obj.list_protection_policies = MagicMock(
+ return_value=RESP_PP, side_effect=exec_original
+ )
+ api_obj.list_host_access_policies = MagicMock(
+ return_value=RESP_HAP, side_effect=exec_original
+ )
+ api_obj.list_hardware_types = MagicMock(
+ return_value=RESP_HT, side_effect=exec_original
+ )
+ api_obj.list_storage_services = MagicMock(
+ return_value=RESP_SS, side_effect=exec_original
+ )
+ api_obj.list_tenants = MagicMock(
+ return_value=RESP_TENANTS, side_effect=exec_original
+ )
+ api_obj.list_regions = MagicMock(
+ return_value=RESP_REGIONS, side_effect=exec_original
+ )
+ api_obj.list_roles = MagicMock(return_value=RESP_ROLES, side_effect=exec_original)
+ api_obj.list_storage_classes = MagicMock(
+ return_value=RESP_SC, side_effect=exec_original
+ )
+ api_obj.list_role_assignments = MagicMock(
+ return_value=RESP_RA, side_effect=exec_original
+ )
+ api_obj.list_tenant_spaces = MagicMock(
+ return_value=RESP_TS, side_effect=exec_original
+ )
+ api_obj.list_volumes = MagicMock(
+ return_value=RESP_VOLUMES, side_effect=exec_original
+ )
+ api_obj.list_placement_groups = MagicMock(
+ return_value=RESP_PG, side_effect=exec_original
+ )
+ api_obj.list_snapshots = MagicMock(
+ return_value=RESP_SNAPSHOTS, side_effect=exec_original
+ )
+ api_obj.list_availability_zones = MagicMock(
+ return_value=RESP_AZ, side_effect=exec_original
+ )
+ api_obj.list_network_interface_groups = MagicMock(
+ return_value=RESP_NIG, side_effect=exec_original
+ )
+ api_obj.list_storage_endpoints = MagicMock(
+ return_value=RESP_SE, side_effect=exec_original
+ )
+ api_obj.list_network_interfaces = MagicMock(
+ return_value=RESP_NI, side_effect=exec_original
+ )
+ api_obj.list_arrays = MagicMock(return_value=RESP_ARRAYS, side_effect=exec_original)
+ api_obj.list_api_clients = MagicMock(
+ return_value=RESP_AC, side_effect=exec_original
+ )
+ api_obj.list_volume_snapshots = MagicMock(
+ return_value=RESP_VS, side_effect=exec_original
+ )
+ m_ni_api.return_value = api_obj
+ m_se_api.return_value = api_obj
+ m_nig_api.return_value = api_obj
+ m_array_api.return_value = api_obj
+ m_az_api.return_value = api_obj
+ m_snapshot_api.return_value = api_obj
+ m_pg_api.return_value = api_obj
+ m_vs_api.return_value = api_obj
+ m_volume_api.return_value = api_obj
+ m_ts_api.return_value = api_obj
+ m_ra_api.return_value = api_obj
+ m_sc_api.return_value = api_obj
+ m_role_api.return_value = api_obj
+ m_region_api.return_value = api_obj
+ m_tenant_api.return_value = api_obj
+ m_ss_api.return_value = api_obj
+ m_hw_api.return_value = api_obj
+ m_hap_api.return_value = api_obj
+ m_pp_api.return_value = api_obj
+ m_im_api.return_value = api_obj
+ m_default_api.return_value = api_obj
+
+ set_module_args(
+ {
+ "gather_subset": [subset],
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ )
+
+ with pytest.raises(exec_catch) as exc:
+ fusion_info.main()
+
+ # in case of permission denied error, check correct output
+ if exec_catch == AnsibleExitJson:
+ assert exc.value.changed is False
+
+ expected_keys = EXPECTED_KEYS[subset]
+ assert exc.value.fusion_info.keys() == expected_keys
+ for key in expected_keys:
+ if key == "default":
+ for k in exc.value.fusion_info[key]:
+ assert exc.value.fusion_info[key][k] is None
+ else:
+ assert exc.value.fusion_info[key] is None
+
+
+@patch("fusion.StorageServicesApi")
+def test_info_hidden_fields_storage_services(m_ss_api):
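+    """
+    Test that storage services are reported correctly even when their
+    'hardware_types' field is hidden (None) due to insufficient permissions.
+    """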
+ set_module_args(
+ {
+ "gather_subset": ["storage_services"],
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ )
+
+ response = purefusion.StorageServiceList(
+ count=1,
+ more_items_remaining=False,
+ items=[
+ purefusion.StorageService(
+ id="503",
+ name="ss2",
+ self_link="self_link_value",
+ display_name="Storage Service 3",
+ hardware_types=None, # can be None if not enough permissions
+ ),
+ ],
+ )
+
+ api_obj = MagicMock()
+ api_obj.list_storage_services = MagicMock(return_value=response)
+ m_ss_api.return_value = api_obj
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_info.main()
+
+ expected = {
+ "storage_services": {
+ service.name: {
+ "display_name": service.display_name,
+ "hardware_types": None,
+ }
+ for service in response.items
+ },
+ }
+ assert exc.value.fusion_info == expected
+
+
+@patch("fusion.RegionsApi")
+@patch("fusion.AvailabilityZonesApi")
+@patch("fusion.StorageEndpointsApi")
+def test_info_hidden_fields_storage_endpoints(m_se_api, m_az_api, m_region_api):
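+    """
+    Test that storage endpoints are reported correctly even when their
+    'network_interface_groups' field is hidden (None) due to insufficient
+    permissions.
+    """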
+ set_module_args(
+ {
+ "gather_subset": ["storage_endpoints"],
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ )
+
+ response = purefusion.StorageEndpointList(
+ count=1,
+ more_items_remaining=False,
+ items=[
+ purefusion.StorageEndpoint(
+ id="541",
+ name="se1",
+ self_link="self_link_value",
+ display_name="Storage Endpoint 1",
+ region=purefusion.RegionRef(
+ id="542",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="543",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ endpoint_type="iscsi",
+ iscsi=purefusion.StorageEndpointIscsi(
+ discovery_interfaces=[
+ purefusion.StorageEndpointIscsiDiscoveryInterface(
+ address="10.21.200.5/24",
+ gateway="10.21.200.0",
+ mtu=2000,
+ network_interface_groups=None,
+ ),
+ ]
+ ),
+ )
+ ],
+ )
+
+ api_obj = MagicMock()
+ api_obj.list_regions = MagicMock(return_value=RESP_REGIONS)
+ api_obj.list_availability_zones = MagicMock(return_value=RESP_AZ)
+ api_obj.list_storage_endpoints = MagicMock(return_value=response)
+    m_se_api.return_value = api_obj
+ m_az_api.return_value = api_obj
+ m_region_api.return_value = api_obj
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_info.main()
+
+ expected = {
+ "storage_endpoints": {
+ region.name
+ + "/"
+ + az.name
+ + "/"
+ + endpoint.name: {
+ "display_name": endpoint.display_name,
+ "endpoint_type": endpoint.endpoint_type,
+ "iscsi_interfaces": [
+ {
+ "address": iface.address,
+ "gateway": iface.gateway,
+ "mtu": iface.mtu,
+ "network_interface_groups": None,
+ }
+ for iface in endpoint.iscsi.discovery_interfaces
+ ],
+ }
+ for region in RESP_REGIONS.items
+ for az in RESP_AZ.items
+ for endpoint in response.items
+ },
+ }
+ assert exc.value.fusion_info == expected
+
+
+@patch("fusion.TenantsApi")
+@patch("fusion.TenantSpacesApi")
+@patch("fusion.VolumesApi")
+def test_info_hidden_fields_volumes(m_volume_api, m_ts_api, m_tenant_api):
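+    """
+    Test that volumes are reported correctly even when their 'array' field is
+    hidden (None) due to insufficient permissions.
+    """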
+ set_module_args(
+ {
+ "gather_subset": ["volumes"],
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ )
+
+ response = purefusion.VolumeList(
+ count=1,
+ more_items_remaining=False,
+ items=[
+ purefusion.Volume(
+ id="517",
+ name="volume1",
+ self_link="self_link_value",
+ display_name="Volume 1",
+ size=4000000,
+ tenant=purefusion.TenantRef(
+ id="518",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="519",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ storage_class=purefusion.StorageClassRef(
+ id="520",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ protection_policy=purefusion.ProtectionPolicyRef(
+ id="521",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ placement_group=purefusion.PlacementGroupRef(
+ id="522",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ array=None,
+ created_at=485743825,
+ source_volume_snapshot=purefusion.VolumeSnapshotRef(
+ id="523",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ ),
+ host_access_policies=[
+ purefusion.HostAccessPolicyRef(
+ id="524",
+ name="res_ref_name",
+ kind="kind_value",
+ self_link="self_link_value",
+ )
+ ],
+ serial_number="123923482034",
+ target=purefusion.Target(
+ iscsi=purefusion.Iscsi(
+ iqn="iqn.2023-05.com.purestorage:420qp2c0222",
+ addresses=["125.1.2.4"],
+ )
+ ),
+ time_remaining=1000000,
+ destroyed=False,
+ )
+ ],
+ )
+
+ api_obj = MagicMock()
+ api_obj.list_tenants = MagicMock(return_value=RESP_TENANTS)
+ api_obj.list_tenant_spaces = MagicMock(return_value=RESP_TS)
+ api_obj.list_volumes = MagicMock(return_value=response)
+    m_volume_api.return_value = api_obj
+    m_ts_api.return_value = api_obj
+    m_tenant_api.return_value = api_obj
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_info.main()
+
+ expected = {
+ "volumes": {
+ tenant.name
+ + "/"
+ + tenant_space.name
+ + "/"
+ + volume.name: {
+ "tenant": tenant.name,
+ "tenant_space": tenant_space.name,
+ "name": volume.name,
+ "size": volume.size,
+ "display_name": volume.display_name,
+ "placement_group": volume.placement_group.name,
+ "source_volume_snapshot": getattr(
+ volume.source_volume_snapshot, "name", None
+ ),
+ "protection_policy": getattr(volume.protection_policy, "name", None),
+ "storage_class": volume.storage_class.name,
+ "serial_number": volume.serial_number,
+ "target": {
+ "iscsi": {
+ "addresses": volume.target.iscsi.addresses,
+ "iqn": volume.target.iscsi.iqn,
+ },
+ "nvme": {
+ "addresses": None,
+ "nqn": None,
+ },
+ "fc": {
+ "addresses": None,
+ "wwns": None,
+ },
+ },
+ "array": None,
+ }
+ for volume in response.items
+ for tenant_space in RESP_TS.items
+ for tenant in RESP_TENANTS.items
+ }
+ }
+ assert exc.value.fusion_info == expected
+
+
+@patch("fusion.DefaultApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.ProtectionPoliciesApi")
+@patch("fusion.HostAccessPoliciesApi")
+@patch("fusion.HardwareTypesApi")
+@patch("fusion.StorageServicesApi")
+@patch("fusion.TenantsApi")
+@patch("fusion.RegionsApi")
+@patch("fusion.RolesApi")
+@patch("fusion.StorageClassesApi")
+@patch("fusion.RoleAssignmentsApi")
+@patch("fusion.TenantSpacesApi")
+@patch("fusion.VolumesApi")
+@patch("fusion.VolumeSnapshotsApi")
+@patch("fusion.PlacementGroupsApi")
+@patch("fusion.SnapshotsApi")
+@patch("fusion.AvailabilityZonesApi")
+@patch("fusion.ArraysApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+@patch("fusion.StorageEndpointsApi")
+@patch("fusion.NetworkInterfacesApi")
+def test_info_permission_denied_minimum(
+ m_ni_api,
+ m_se_api,
+ m_nig_api,
+ m_array_api,
+ m_az_api,
+ m_snapshot_api,
+ m_pg_api,
+ m_vs_api,
+ m_volume_api,
+ m_ts_api,
+ m_ra_api,
+ m_sc_api,
+ m_role_api,
+ m_region_api,
+ m_tenant_api,
+ m_ss_api,
+ m_hw_api,
+ m_hap_api,
+ m_pp_api,
+ m_im_api,
+ m_default_api,
+):
+ """
+    Test that the 'default' dict is populated correctly when every API call
+    raises a permission-denied error.
+ """
+    err = ApiExceptionsMockGenerator.create_permission_denied()
+
+    api_obj = MagicMock()
+    api_obj.get_version = MagicMock(return_value=RESP_VERSION, side_effect=err)
+    api_obj.get_array_space = MagicMock(return_value=RESP_AS, side_effect=err)
+    api_obj.get_array_performance = MagicMock(return_value=RESP_AP, side_effect=err)
+    api_obj.list_users = MagicMock(return_value=RESP_LU, side_effect=err)
+    api_obj.list_protection_policies = MagicMock(return_value=RESP_PP, side_effect=err)
+    api_obj.list_host_access_policies = MagicMock(
+        return_value=RESP_HAP, side_effect=err
+    )
+    api_obj.list_hardware_types = MagicMock(return_value=RESP_HT, side_effect=err)
+    api_obj.list_storage_services = MagicMock(return_value=RESP_SS, side_effect=err)
+    api_obj.list_tenants = MagicMock(return_value=RESP_TENANTS, side_effect=err)
+    api_obj.list_regions = MagicMock(return_value=RESP_REGIONS, side_effect=err)
+    api_obj.list_roles = MagicMock(return_value=RESP_ROLES, side_effect=err)
+    api_obj.list_storage_classes = MagicMock(return_value=RESP_SC, side_effect=err)
+    api_obj.list_role_assignments = MagicMock(return_value=RESP_RA, side_effect=err)
+    api_obj.list_tenant_spaces = MagicMock(return_value=RESP_TS, side_effect=err)
+    api_obj.list_volumes = MagicMock(return_value=RESP_VOLUMES, side_effect=err)
+    api_obj.list_placement_groups = MagicMock(return_value=RESP_PG, side_effect=err)
+    api_obj.list_snapshots = MagicMock(return_value=RESP_SNAPSHOTS, side_effect=err)
+    api_obj.list_availability_zones = MagicMock(return_value=RESP_AZ, side_effect=err)
+    api_obj.list_network_interface_groups = MagicMock(
+        return_value=RESP_NIG, side_effect=err
+    )
+    api_obj.list_storage_endpoints = MagicMock(return_value=RESP_SE, side_effect=err)
+    api_obj.list_network_interfaces = MagicMock(return_value=RESP_NI, side_effect=err)
+    api_obj.list_arrays = MagicMock(return_value=RESP_ARRAYS, side_effect=err)
+    api_obj.list_api_clients = MagicMock(return_value=RESP_AC, side_effect=err)
+    api_obj.list_volume_snapshots = MagicMock(return_value=RESP_VS, side_effect=err)
+ m_ni_api.return_value = api_obj
+ m_se_api.return_value = api_obj
+ m_nig_api.return_value = api_obj
+ m_array_api.return_value = api_obj
+ m_az_api.return_value = api_obj
+ m_snapshot_api.return_value = api_obj
+ m_pg_api.return_value = api_obj
+ m_vs_api.return_value = api_obj
+ m_volume_api.return_value = api_obj
+ m_ts_api.return_value = api_obj
+ m_ra_api.return_value = api_obj
+ m_sc_api.return_value = api_obj
+ m_role_api.return_value = api_obj
+ m_region_api.return_value = api_obj
+ m_tenant_api.return_value = api_obj
+ m_ss_api.return_value = api_obj
+ m_hw_api.return_value = api_obj
+ m_hap_api.return_value = api_obj
+ m_pp_api.return_value = api_obj
+ m_im_api.return_value = api_obj
+ m_default_api.return_value = api_obj
+
+ set_module_args(
+ {
+ "gather_subset": ["minimum"],
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ )
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_info.main()
+
+ assert exc.value.changed is False
+ assert "default" in exc.value.fusion_info
+ assert {
+ "version": None,
+ "users": None,
+ "protection_policies": None,
+ "host_access_policies": None,
+ "hardware_types": None,
+ "storage_services": None,
+ "tenants": None,
+ "regions": None,
+ "storage_classes": None,
+ "roles": None,
+ "role_assignments": None,
+ "tenant_spaces": None,
+ "volumes": None,
+ "placement_groups": None,
+ "snapshots": None,
+ "availability_zones": None,
+ "arrays": None,
+ "network_interfaces": None,
+ "network_interface_groups": None,
+ "storage_endpoints": None,
+ } == exc.value.fusion_info["default"]
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_nig.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_nig.py
new file mode 100644
index 000000000..3a7b7ca5c
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_nig.py
@@ -0,0 +1,1239 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_nig
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ FailedOperationMock,
+ OperationMock,
+ SuccessfulOperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
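+# setup_fusion is stubbed out so the module never contacts a real Fusion
+# endpoint, and ApiClient.call_api raises so that any API call not covered by
+# the per-test mocks fails loudly instead of silently succeeding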
+fusion_nig.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+        # required parameter 'name' is missing
+ {
+ "state": "present",
+ "display_name": "Network Interface Group 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "prefix": "10.21.200.0/24",
+ "gateway": "10.21.200.1",
+ "mtu": 1300,
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+        # required parameter 'availability_zone' is missing
+ {
+ "state": "present",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "region": "region1",
+ "prefix": "10.21.200.0/24",
+ "gateway": "10.21.200.1",
+ "mtu": 1300,
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+        # required parameter 'region' is missing
+ {
+ "state": "present",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "availability_zone": "az1",
+ "prefix": "10.21.200.0/24",
+ "gateway": "10.21.200.1",
+ "mtu": 1300,
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ "extra": "value",
+ },
+ # parameter 'state' has an incorrect value
+ {
+ "state": "cool",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "prefix": "10.21.200.0/24",
+ "gateway": "10.21.200.1",
+ "mtu": 1300,
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ # parameter 'group_type' has an incorrect value
+ {
+ "state": "present",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "prefix": "10.21.200.0/24",
+ "gateway": "10.21.200.1",
+ "mtu": 1300,
+ "group_type": "supergroup",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(m_nig_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(
+ side_effect=purefusion.rest.ApiException
+ )
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_nig_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_nig.main()
+
+ # check api was not called at all
+ api_obj.get_network_interface_group.assert_not_called()
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+def test_nig_create_fails_on_missing_prefix(m_nig_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "gateway": "10.21.200.1",
+ "mtu": 1300,
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(
+ side_effect=purefusion.rest.ApiException
+ )
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_nig_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_nig.main()
+
+ # check the group was looked up but nothing was created, updated, or deleted
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+def test_nig_create(m_nig_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "prefix": "10.21.200.0/24",
+ "gateway": "10.21.200.1",
+ "mtu": 1300,
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
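+ # each mutating call returns a distinct operation id so the final poll
+ # assertion can verify which operation the module awaited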
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(
+ side_effect=purefusion.rest.ApiException
+ )
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_nig.main()
+
+ assert exc.value.changed is True
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_called_once_with(
+ purefusion.NetworkInterfaceGroupPost(
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEthPost(
+ prefix=module_args["prefix"],
+ gateway=module_args["gateway"],
+ mtu=module_args["mtu"],
+ ),
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+def test_nig_create_without_display_name(m_nig_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "nig1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "prefix": "10.21.200.0/24",
+ "gateway": "10.21.200.1",
+ "mtu": 1300,
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(
+ side_effect=purefusion.rest.ApiException
+ )
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_nig.main()
+
+ assert exc.value.changed is True
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_called_once_with(
+ purefusion.NetworkInterfaceGroupPost(
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEthPost(
+ prefix=module_args["prefix"],
+ gateway=module_args["gateway"],
+ mtu=module_args["mtu"],
+ ),
+ name=module_args["name"],
+ display_name=module_args["name"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+def test_nig_create_without_gateway(m_nig_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "prefix": "10.21.200.0/24",
+ "mtu": 1300,
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(
+ side_effect=purefusion.rest.ApiException
+ )
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_nig.main()
+
+ assert exc.value.changed is True
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_called_once_with(
+ purefusion.NetworkInterfaceGroupPost(
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEthPost(
+ prefix=module_args["prefix"],
+ mtu=module_args["mtu"],
+ ),
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_nig_create_exception(m_nig_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "prefix": "10.21.200.0/24",
+ "mtu": 1300,
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(
+ side_effect=purefusion.rest.ApiException
+ )
+ api_obj.create_network_interface_group = MagicMock(side_effect=exec_original)
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_nig.main()
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_called_once_with(
+ purefusion.NetworkInterfaceGroupPost(
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEthPost(
+ prefix=module_args["prefix"],
+ mtu=module_args["mtu"],
+ ),
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+def test_nig_create_op_fails(m_nig_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "prefix": "10.21.200.0/24",
+ "mtu": 1300,
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(
+ side_effect=purefusion.rest.ApiException
+ )
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_nig.main()
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_called_once_with(
+ purefusion.NetworkInterfaceGroupPost(
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEthPost(
+ prefix=module_args["prefix"],
+ mtu=module_args["mtu"],
+ ),
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
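+ # an HTTPError raised while polling the operation is expected to surface
+ # as an OperationException, unlike an error from the create call itself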
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_nig_create_op_exception(m_nig_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "prefix": "10.21.200.0/24",
+ "mtu": 1300,
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(
+ side_effect=purefusion.rest.ApiException
+ )
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_nig.main()
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_called_once_with(
+ purefusion.NetworkInterfaceGroupPost(
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEthPost(
+ prefix=module_args["prefix"],
+ mtu=module_args["mtu"],
+ ),
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ )
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+def test_nig_update(m_nig_api, m_op_api):
+ current_nig = purefusion.NetworkInterfaceGroup(
+ id="1",
+ self_link="self_link_value",
+ name="nig1",
+ display_name=None,
+ region="region1",
+ availability_zone="az1",
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEth(
+ prefix="str",
+ gateway="str",
+ vlan=3,
+ mtu=1300,
+ ),
+ )
+ module_args = {
+ "state": "present",
+ "name": current_nig.name, # must match
+ "display_name": "New Name", # should be updated
+ "availability_zone": current_nig.availability_zone, # must match
+ "region": current_nig.region, # must match
+ "prefix": "12.19.150.0/23", # should not be updated
+ "mtu": current_nig.eth.mtu + 100, # should not be updated
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(return_value=current_nig)
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_nig.main()
+
+ assert exc.value.changed is True
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_called_once_with(
+ purefusion.NetworkInterfaceGroupPatch(
+ display_name=purefusion.NullableString(module_args["display_name"]),
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_nig_update_exception(m_nig_api, m_op_api, exec_original, exec_catch):
+ current_nig = purefusion.NetworkInterfaceGroup(
+ id="1",
+ self_link="self_link_value",
+ name="nig1",
+ display_name=None,
+ region="region1",
+ availability_zone="az1",
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEth(
+ prefix="str",
+ gateway="str",
+ vlan=3,
+ mtu=1300,
+ ),
+ )
+ module_args = {
+ "state": "present",
+ "name": current_nig.name, # must match
+ "display_name": "New Name", # should be updated
+ "availability_zone": current_nig.availability_zone, # must match
+ "region": current_nig.region, # must match
+ "prefix": "12.19.150.0/23", # should not be updated
+ "mtu": current_nig.eth.mtu + 100, # should not be updated
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(return_value=current_nig)
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(side_effect=exec_original)
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_nig.main()
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_called_once_with(
+ purefusion.NetworkInterfaceGroupPatch(
+ display_name=purefusion.NullableString(module_args["display_name"]),
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+def test_nig_update_op_fails(m_nig_api, m_op_api):
+ current_nig = purefusion.NetworkInterfaceGroup(
+ id="1",
+ self_link="self_link_value",
+ name="nig1",
+ display_name=None,
+ region="region1",
+ availability_zone="az1",
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEth(
+ prefix="str",
+ gateway="str",
+ vlan=3,
+ mtu=1300,
+ ),
+ )
+ module_args = {
+ "state": "present",
+ "name": current_nig.name, # must match
+ "display_name": "New Name", # should be updated
+ "availability_zone": current_nig.availability_zone, # must match
+ "region": current_nig.region, # must match
+ "prefix": "12.19.150.0/23", # should not be updated
+ "mtu": current_nig.eth.mtu + 100, # should not be updated
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(return_value=current_nig)
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_nig.main()
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_called_once_with(
+ purefusion.NetworkInterfaceGroupPatch(
+ display_name=purefusion.NullableString(module_args["display_name"]),
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_nig_update_op_exception(m_nig_api, m_op_api, exec_original, exec_catch):
+ current_nig = purefusion.NetworkInterfaceGroup(
+ id="1",
+ self_link="self_link_value",
+ name="nig1",
+ display_name=None,
+ region="region1",
+ availability_zone="az1",
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEth(
+ prefix="str",
+ gateway="str",
+ vlan=3,
+ mtu=1300,
+ ),
+ )
+ module_args = {
+ "state": "present",
+ "name": current_nig.name, # must match
+ "display_name": "New Name", # should be updated
+ "availability_zone": current_nig.availability_zone, # must match
+ "region": current_nig.region, # must match
+ "prefix": "12.19.150.0/23", # should not be updated
+ "mtu": current_nig.eth.mtu + 100, # should not be updated
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(return_value=current_nig)
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_nig.main()
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_called_once_with(
+ purefusion.NetworkInterfaceGroupPatch(
+ display_name=purefusion.NullableString(module_args["display_name"]),
+ ),
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+def test_nig_present_not_changed(m_nig_api, m_op_api):
+ current_nig = purefusion.NetworkInterfaceGroup(
+ id="1",
+ self_link="self_link_value",
+ name="nig1",
+ display_name="Display Name",
+ region="region1",
+ availability_zone="az1",
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEth(
+ prefix="str",
+ gateway="str",
+ vlan=3,
+ mtu=1300,
+ ),
+ )
+ module_args = {
+ "state": "present",
+ "name": current_nig.name, # must match
+ "display_name": current_nig.display_name, # should not be updated
+ "availability_zone": current_nig.availability_zone, # must match
+ "region": current_nig.region, # must match
+ "prefix": "12.19.150.0/23", # should not be updated
+ "mtu": current_nig.eth.mtu + 100, # should not be updated
+ "group_type": "eth",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(return_value=current_nig)
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_nig.main()
+
+ assert exc.value.changed is False
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+def test_nig_absent_not_changed(m_nig_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "nig1",
+ "display_name": "Network Interface Group 1",
+ "availability_zone": "az1",
+ "region": "region1",
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(
+ side_effect=purefusion.rest.ApiException
+ )
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_nig.main()
+
+ assert exc.value.changed is False
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+def test_nig_delete(m_nig_api, m_op_api):
+ current_nig = purefusion.NetworkInterfaceGroup(
+ id="1",
+ self_link="self_link_value",
+ name="nig1",
+ display_name="Display Name",
+ region="region1",
+ availability_zone="az1",
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEth(
+ prefix="str",
+ gateway="str",
+ vlan=3,
+ mtu=1300,
+ ),
+ )
+ module_args = {
+ "state": "absent",
+ "name": current_nig.name, # must match
+ "availability_zone": current_nig.availability_zone, # must match
+ "region": current_nig.region, # must match
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(return_value=current_nig)
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_nig.main()
+
+ assert exc.value.changed is True
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_nig_delete_exception(m_nig_api, m_op_api, exec_original, exec_catch):
+ current_nig = purefusion.NetworkInterfaceGroup(
+ id="1",
+ self_link="self_link_value",
+ name="nig1",
+ display_name="Display Name",
+ region="region1",
+ availability_zone="az1",
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEth(
+ prefix="str",
+ gateway="str",
+ vlan=3,
+ mtu=1300,
+ ),
+ )
+ module_args = {
+ "state": "absent",
+ "name": current_nig.name, # must match
+ "availability_zone": current_nig.availability_zone, # must match
+ "region": current_nig.region, # must match
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(return_value=current_nig)
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(side_effect=exec_original)
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_nig.main()
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+def test_nig_delete_op_fails(m_nig_api, m_op_api):
+ current_nig = purefusion.NetworkInterfaceGroup(
+ id="1",
+ self_link="self_link_value",
+ name="nig1",
+ display_name="Display Name",
+ region="region1",
+ availability_zone="az1",
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEth(
+ prefix="str",
+ gateway="str",
+ vlan=3,
+ mtu=1300,
+ ),
+ )
+ module_args = {
+ "state": "absent",
+ "name": current_nig.name, # must match
+ "availability_zone": current_nig.availability_zone, # must match
+ "region": current_nig.region, # must match
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(return_value=current_nig)
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_nig.main()
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.NetworkInterfaceGroupsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_nig_delete_op_exception(m_nig_api, m_op_api, exec_original, exec_catch):
+ current_nig = purefusion.NetworkInterfaceGroup(
+ id="1",
+ self_link="self_link_value",
+ name="nig1",
+ display_name="Display Name",
+ region="region1",
+ availability_zone="az1",
+ group_type="eth",
+ eth=purefusion.NetworkInterfaceGroupEth(
+ prefix="str",
+ gateway="str",
+ vlan=3,
+ mtu=1300,
+ ),
+ )
+ module_args = {
+ "state": "absent",
+ "name": current_nig.name, # must match
+ "availability_zone": current_nig.availability_zone, # must match
+ "region": current_nig.region, # must match
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_network_interface_group = MagicMock(return_value=current_nig)
+ api_obj.create_network_interface_group = MagicMock(return_value=OperationMock(1))
+ api_obj.update_network_interface_group = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_network_interface_group = MagicMock(return_value=OperationMock(3))
+ m_nig_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_nig.main()
+
+ # check api was called correctly
+ api_obj.get_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ api_obj.create_network_interface_group.assert_not_called()
+ api_obj.update_network_interface_group.assert_not_called()
+ api_obj.delete_network_interface_group.assert_called_once_with(
+ availability_zone_name=module_args["availability_zone"],
+ region_name=module_args["region"],
+ network_interface_group_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pg.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pg.py
new file mode 100644
index 000000000..2f0601e12
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pg.py
@@ -0,0 +1,1595 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023 Pure Storage, Inc.
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch, call
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_pg
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ OperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+ side_effects_with_exceptions,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
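+# same strategy as in test_fusion_nig: stub credentials and make any unmocked API call fail loudly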
+fusion_pg.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
+@pytest.fixture
+def module_args_present():
+ return {
+ "name": "placement_group1",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+
+
+@pytest.fixture
+def module_args_absent():
+ return {
+ "name": "placement_group1",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "state": "absent",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+@pytest.mark.parametrize(
+ ("module_args", "get_not_called"),
+ [
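+ # get_not_called=True means the module must fail before any lookup;
+ # with False, a lookup may happen before the failure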
+ # 'name' is missing
+ (
+ {
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ True,
+ ),
+ # 'tenant' is missing
+ (
+ {
+ "name": "placement_group1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ True,
+ ),
+ # 'tenant_space' is missing
+ (
+ {
+ "name": "placement_group1",
+ "tenant": "tenant1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ True,
+ ),
+ # 'region' is missing
+ (
+ {
+ "name": "placement_group1",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ False,
+ ),
+ # 'availability_zone' is missing
+ (
+ {
+ "name": "placement_group1",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "storage_service": "storage_service1",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ False,
+ ),
+ # 'storage_service' is missing
+ (
+ {
+ "name": "placement_group1",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ False,
+ ),
+ # 'state' is invalid
+ (
+ {
+ "name": "placement_group1",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "state": "past",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ False,
+ ),
+ ],
+)
+def test_module_args_wrong(pg_api_init, op_api_init, module_args, get_not_called):
+ set_module_args(module_args)
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(side_effect=purefusion.rest.ApiException)
+ pg_mock.create_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.update_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=purefusion.rest.ApiException)
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleFailJson):
+ fusion_pg.main()
+
+ if get_not_called:
+ pg_mock.get_placement_group.assert_not_called()
+ elif pg_mock.get_placement_group.called:
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_not_called()
+ pg_mock.update_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_not_called()
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+def test_pg_create_ok(pg_api_init, op_api_init, module_args_present):
+ module_args = module_args_present
+ module_args["display_name"] = "some_display_name"
+ set_module_args(module_args)
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(side_effect=purefusion.rest.ApiException)
+ pg_mock.create_placement_group = MagicMock(return_value=OperationMock("op1"))
+ pg_mock.update_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(return_value=OperationMock("op1", success=True))
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pg.main()
+ assert excinfo.value.changed
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_called_with(
+ purefusion.PlacementGroupPost(
+ name="placement_group1",
+ display_name="some_display_name",
+ availability_zone="availability_zone1",
+ region="region1",
+ storage_service="storage_service1",
+ ),
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ )
+ pg_mock.update_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_not_called()
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+def test_pg_create_without_display_name_ok(
+ pg_api_init, op_api_init, module_args_present
+):
+ module_args = module_args_present
+ set_module_args(module_args)
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(side_effect=purefusion.rest.ApiException)
+ pg_mock.create_placement_group = MagicMock(return_value=OperationMock("op1"))
+ pg_mock.update_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(return_value=OperationMock("op1", success=True))
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pg.main()
+ assert excinfo.value.changed
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_called_with(
+ purefusion.PlacementGroupPost(
+ name="placement_group1",
+ display_name="placement_group1",
+ availability_zone="availability_zone1",
+ region="region1",
+ storage_service="storage_service1",
+ ),
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ )
+ pg_mock.update_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_not_called()
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+@pytest.mark.parametrize(
+ ("raised_exception", "expected_exception"),
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_pg_create_exception(
+ pg_api_init, op_api_init, raised_exception, expected_exception, module_args_present
+):
+ set_module_args(module_args_present)
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(side_effect=purefusion.rest.ApiException)
+ pg_mock.create_placement_group = MagicMock(side_effect=raised_exception)
+ pg_mock.update_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(expected_exception):
+ fusion_pg.main()
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_called_with(
+ purefusion.PlacementGroupPost(
+ name="placement_group1",
+ display_name="placement_group1",
+ availability_zone="availability_zone1",
+ region="region1",
+ storage_service="storage_service1",
+ ),
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ )
+ pg_mock.delete_placement_group.assert_not_called()
+ pg_mock.update_placement_group.assert_not_called()
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+def test_pg_create_op_fails(pg_api_init, op_api_init, module_args_present):
+ module_args = module_args_present
+ set_module_args(module_args)
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(side_effect=purefusion.rest.ApiException)
+ pg_mock.create_placement_group = MagicMock(return_value=OperationMock(id="op1"))
+ pg_mock.update_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(return_value=OperationMock("op1", success=False))
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(OperationException):
+ fusion_pg.main()
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_called_with(
+ purefusion.PlacementGroupPost(
+ name="placement_group1",
+ display_name="placement_group1",
+ availability_zone="availability_zone1",
+ region="region1",
+ storage_service="storage_service1",
+ ),
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ )
+ pg_mock.update_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_not_called()
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+def test_pg_create_triggers_update_ok(pg_api_init, op_api_init):
+ module_args = {
+ "name": "placement_group1",
+ "display_name": "some_display_name",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "array": "array2",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
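+ # the first lookup raises (group absent), the second returns the group created in the meantime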
+ get_placement_group_effects = [
+ purefusion.rest.ApiException(),
+ purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="some_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id", name="array1", kind="Array", self_link="some_self_link"
+ ),
+ ),
+ ]
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(
+ side_effect=side_effects_with_exceptions(get_placement_group_effects)
+ )
+ pg_mock.create_placement_group = MagicMock(return_value=OperationMock("op1"))
+ pg_mock.update_placement_group = MagicMock(return_value=OperationMock("op2"))
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(return_value=OperationMock("op1", success=True))
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pg.main()
+ assert excinfo.value.changed
+
+ pg_mock.get_placement_group.assert_has_calls(
+ [
+ call(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ ),
+ call(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ ),
+ ],
+ any_order=True,
+ )
+ pg_mock.create_placement_group.assert_called_with(
+ purefusion.PlacementGroupPost(
+ name="placement_group1",
+ display_name="some_display_name",
+ availability_zone="availability_zone1",
+ region="region1",
+ storage_service="storage_service1",
+ ),
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ )
+ pg_mock.update_placement_group.assert_called_with(
+ purefusion.PlacementGroupPatch(array=purefusion.NullableString(value="array2")),
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.delete_placement_group.assert_not_called()
+ op_mock.get_operation.assert_has_calls([call("op1"), call("op2")], any_order=True)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+@pytest.mark.parametrize(
+ ("raised_exception", "expected_exception"),
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_pg_create_triggers_update_exception(
+ pg_api_init, op_api_init, raised_exception, expected_exception
+):
+ module_args = {
+ "name": "placement_group1",
+ "display_name": "some_display_name",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "array": "array2",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ get_placement_group_effects = [
+ purefusion.rest.ApiException(),
+ purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="some_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id", name="array1", kind="Array", self_link="some_self_link"
+ ),
+ ),
+ ]
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(
+ side_effect=side_effects_with_exceptions(get_placement_group_effects)
+ )
+ pg_mock.create_placement_group = MagicMock(return_value=OperationMock("op1"))
+ pg_mock.update_placement_group = MagicMock(side_effect=raised_exception)
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(return_value=OperationMock("op1", success=True))
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(expected_exception):
+ fusion_pg.main()
+
+ pg_mock.get_placement_group.assert_has_calls(
+ [
+ call(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ ),
+ call(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ ),
+ ],
+ any_order=True,
+ )
+ pg_mock.create_placement_group.assert_called_with(
+ purefusion.PlacementGroupPost(
+ name="placement_group1",
+ display_name="some_display_name",
+ availability_zone="availability_zone1",
+ region="region1",
+ storage_service="storage_service1",
+ ),
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ )
+ pg_mock.update_placement_group.assert_called_with(
+ purefusion.PlacementGroupPatch(array=purefusion.NullableString(value="array2")),
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.delete_placement_group.assert_not_called()
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+def test_pg_create_triggers_update_op_fails(pg_api_init, op_api_init):
+ module_args = {
+ "name": "placement_group1",
+ "display_name": "some_display_name",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "array": "array2",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ get_placement_group_effects = [
+ purefusion.rest.ApiException(),
+ purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="some_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id", name="array1", kind="Array", self_link="some_self_link"
+ ),
+ ),
+ ]
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(
+ side_effect=side_effects_with_exceptions(get_placement_group_effects)
+ )
+ pg_mock.create_placement_group = MagicMock(return_value=OperationMock("op1"))
+ pg_mock.update_placement_group = MagicMock(return_value=OperationMock("op2"))
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(
+ side_effect=[
+ OperationMock("op1", success=True),
+ OperationMock("op2", success=False),
+ ]
+ )
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(OperationException):
+ fusion_pg.main()
+
+ pg_mock.get_placement_group.assert_has_calls(
+ [
+ call(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ ),
+ call(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ ),
+ ],
+ any_order=True,
+ )
+ pg_mock.create_placement_group.assert_called_with(
+ purefusion.PlacementGroupPost(
+ name="placement_group1",
+ display_name="some_display_name",
+ availability_zone="availability_zone1",
+ region="region1",
+ storage_service="storage_service1",
+ ),
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ )
+ pg_mock.update_placement_group.assert_called_with(
+ purefusion.PlacementGroupPatch(array=purefusion.NullableString(value="array2")),
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.delete_placement_group.assert_not_called()
+ op_mock.get_operation.assert_has_calls([call("op1"), call("op2")])
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+@pytest.mark.parametrize(
+ "test_case",
+ [
+ # patch 'display_name`
+ {
+ "current_state": purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="placement_group1_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id",
+ name="array1",
+ kind="Array",
+ self_link="some_self_link",
+ ),
+ ),
+ "module_args": {
+ "name": "placement_group1",
+ "display_name": "different_display_name",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ "patches": [
+ purefusion.PlacementGroupPatch(
+ display_name=purefusion.NullableString(
+ value="different_display_name"
+ ),
+ ),
+ ],
+ },
+ # patch 'array`
+ {
+ "current_state": purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="placement_group1",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id",
+ name="array1",
+ kind="Array",
+ self_link="some_self_link",
+ ),
+ ),
+ "module_args": {
+ "name": "placement_group1",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "array": "array2",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ "patches": [
+ purefusion.PlacementGroupPatch(
+ array=purefusion.NullableString(value="array2"),
+ ),
+ ],
+ },
+ # patch all
+ {
+ "current_state": purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="placement_group1_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id",
+ name="array1",
+ kind="Array",
+ self_link="some_self_link",
+ ),
+ ),
+ "module_args": {
+ "name": "placement_group1",
+ "display_name": "different_display_name",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "array": "array2",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ "patches": [
+ purefusion.PlacementGroupPatch(
+ display_name=purefusion.NullableString(
+ value="different_display_name"
+ ),
+ ),
+ purefusion.PlacementGroupPatch(
+ array=purefusion.NullableString(value="array2"),
+ ),
+ ],
+ },
+ ],
+)
+def test_pg_update_ok(pg_api_init, op_api_init, test_case):
+ module_args = test_case["module_args"]
+ set_module_args(module_args)
+
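+ # One update operation per expected patch; operation ids follow patch order.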
+ get_operation_calls = [
+ call("op{0}".format(i)) for i in range(len(test_case["patches"]))
+ ]
+ update_placement_group_return_vals = [
+ OperationMock(id="op{0}".format(i)) for i in range(len(test_case["patches"]))
+ ]
+ update_placement_group_calls = [
+ call(
+ p,
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ for p in test_case["patches"]
+ ]
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(return_value=test_case["current_state"])
+ pg_mock.create_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.update_placement_group = MagicMock(
+ side_effect=update_placement_group_return_vals
+ )
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(
+ side_effect=lambda op_id: OperationMock(id=op_id, success=True)
+ )
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pg.main()
+ assert excinfo.value.changed
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_not_called()
+ pg_mock.update_placement_group.assert_has_calls(
+ update_placement_group_calls, any_order=True
+ )
+ pg_mock.delete_placement_group.assert_not_called()
+ op_mock.get_operation.assert_has_calls(get_operation_calls, any_order=True)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+@pytest.mark.parametrize("failing_patch", [0, 1])
+@pytest.mark.parametrize(
+ ("raised_exception", "expected_exception"),
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_pg_update_exception(
+ pg_api_init, op_api_init, failing_patch, raised_exception, expected_exception
+):
+ module_args = {
+ "name": "placement_group1",
+ "display_name": "different_display_name",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "array": "array2",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ patches = [
+ purefusion.PlacementGroupPatch(
+ display_name=purefusion.NullableString(value="different_display_name"),
+ ),
+ purefusion.PlacementGroupPatch(
+ array=purefusion.NullableString(value="array2"),
+ ),
+ ]
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(
+ return_value=purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="placement_group1_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id", name="array1", kind="Array", self_link="some_self_link"
+ ),
+ )
+ )
+ pg_mock.create_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.update_placement_group = MagicMock(
+ side_effect=throw_on_specific_patch(patches, failing_patch, raised_exception, 0)
+ )
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(
+ side_effect=lambda op_id: OperationMock(id=op_id, success=True)
+ )
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(expected_exception):
+ fusion_pg.main()
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+@pytest.mark.parametrize("failing_patch", [0, 1])
+@pytest.mark.parametrize(
+ ("raised_exception", "expected_exception"),
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_pg_update_exception(
+ pg_api_init, op_api_init, failing_patch, raised_exception, expected_exception
+):
+ module_args = {
+ "name": "placement_group1",
+ "display_name": "different_display_name",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "array": "array2",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ patches = [
+ purefusion.PlacementGroupPatch(
+ display_name=purefusion.NullableString(value="different_display_name"),
+ ),
+ purefusion.PlacementGroupPatch(
+ array=purefusion.NullableString(value="array2"),
+ ),
+ ]
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(
+ return_value=purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="placement_group1_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id", name="array1", kind="Array", self_link="some_self_link"
+ ),
+ )
+ )
+ pg_mock.create_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.update_placement_group = MagicMock(
+ side_effect=throw_on_specific_patch(patches, failing_patch, raised_exception, 0)
+ )
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(
+ side_effect=lambda op_id: OperationMock(id=op_id, success=True)
+ )
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(expected_exception) as excinfo:
+ fusion_pg.main()
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+@pytest.mark.parametrize("failing_patch", [0, 1])
+def test_pg_update_op_fails(pg_api_init, op_api_init, failing_patch):
+ module_args = {
+ "name": "placement_group1",
+ "display_name": "different_display_name",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "region": "region1",
+ "availability_zone": "availability_zone1",
+ "storage_service": "storage_service1",
+ "array": "array2",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ patches = [
+ purefusion.PlacementGroupPatch(
+ display_name=purefusion.NullableString(value="different_display_name"),
+ ),
+ purefusion.PlacementGroupPatch(
+ array=purefusion.NullableString(value="array2"),
+ ),
+ ]
+ ops = ["op0", "op1"]
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(
+ return_value=purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="placement_group1_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id", name="array1", kind="Array", self_link="some_self_link"
+ ),
+ )
+ )
+ pg_mock.create_placement_group = MagicMock(side_effect=NotImplementedError())
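+ # Each patch maps to a deterministic operation id ("op0", "op1") so the
+ # failing operation can be traced back to the patch that produced it.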
+ pg_mock.update_placement_group = MagicMock(
+ side_effect=lambda patch, tenant_name, tenant_space_name, placement_group_name: OperationMock(
+ id="op{0}".format(patches.index(patch))
+ )
+ )
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
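+ # Only the operation matching the parametrized failing patch reports failure.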
+ op_mock.get_operation = MagicMock(
+ side_effect=lambda op_id: OperationMock(
+ id=op_id, success=ops.index(op_id) != failing_patch
+ )
+ )
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(OperationException):
+ fusion_pg.main()
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+def test_pg_delete_ok(pg_api_init, op_api_init, module_args_absent):
+ module_args = module_args_absent
+ set_module_args(module_args)
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(
+ return_value=purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="placement_group1_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id", name="array1", kind="Array", self_link="some_self_link"
+ ),
+ )
+ )
+ pg_mock.create_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.update_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.delete_placement_group = MagicMock(return_value=OperationMock(id="op1"))
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(
+ return_value=OperationMock(id="op1", success=True)
+ )
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pg.main()
+ assert excinfo.value.changed
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_not_called()
+ pg_mock.update_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+@pytest.mark.parametrize(
+ ("raised_exception", "expected_exception"),
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_pg_delete_exception(
+ pg_api_init, op_api_init, raised_exception, expected_exception, module_args_absent
+):
+ module_args = module_args_absent
+ set_module_args(module_args)
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(
+ return_value=purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="placement_group1_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id", name="array1", kind="Array", self_link="some_self_link"
+ ),
+ )
+ )
+ pg_mock.create_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.update_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.delete_placement_group = MagicMock(side_effect=raised_exception)
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(expected_exception):
+ fusion_pg.main()
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_not_called()
+ pg_mock.update_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+def test_pg_delete_op_fails(pg_api_init, op_api_init, module_args_absent):
+ module_args = module_args_absent
+ set_module_args(module_args)
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(
+ return_value=purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="placement_group1_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id", name="array1", kind="Array", self_link="some_self_link"
+ ),
+ )
+ )
+ pg_mock.create_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.update_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.delete_placement_group = MagicMock(return_value=OperationMock(id="op1"))
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(
+ return_value=OperationMock(id="op1", success=False)
+ )
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(OperationException):
+ fusion_pg.main()
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_not_called()
+ pg_mock.update_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+def test_pg_present_not_changed(pg_api_init, op_api_init, module_args_present):
+ module_args = module_args_present
+ set_module_args(module_args)
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(
+ return_value=purefusion.PlacementGroup(
+ id="placement_group1_id",
+ name="placement_group1",
+ display_name="placement_group1_display_name",
+ self_link="test_self_link",
+ tenant=purefusion.TenantRef(
+ id="tenant1_id",
+ name="tenant1",
+ kind="Tenant",
+ self_link="some_self_link",
+ ),
+ tenant_space=purefusion.TenantSpaceRef(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ kind="TenantSpace",
+ self_link="some_self_link",
+ ),
+ availability_zone=purefusion.AvailabilityZoneRef(
+ id="availability_zone1_id",
+ name="availability_zone1",
+ kind="AvailabilityZone",
+ self_link="some_self_link",
+ ),
+ placement_engine="heuristics",
+ protocols=[],
+ storage_service=purefusion.StorageServiceRef(
+ id="storage_service1_id",
+ name="storage_service",
+ kind="StorageService",
+ self_link="some_self_link",
+ ),
+ array=purefusion.ArrayRef(
+ id="array1_id", name="array1", kind="Array", self_link="some_self_link"
+ ),
+ )
+ )
+ pg_mock.create_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.update_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pg.main()
+ assert not excinfo.value.changed
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_not_called()
+ pg_mock.update_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_not_called()
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.PlacementGroupsApi")
+def test_pg_absent_not_changed(pg_api_init, op_api_init, module_args_absent):
+ module_args = module_args_absent
+ set_module_args(module_args)
+
+ pg_mock = MagicMock()
+ pg_mock.get_placement_group = MagicMock(side_effect=purefusion.rest.ApiException)
+ pg_mock.create_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.update_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_mock.delete_placement_group = MagicMock(side_effect=NotImplementedError())
+ pg_api_init.return_value = pg_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pg.main()
+ assert not excinfo.value.changed
+
+ pg_mock.get_placement_group.assert_called_with(
+ tenant_name="tenant1",
+ tenant_space_name="tenant_space1",
+ placement_group_name="placement_group1",
+ )
+ pg_mock.create_placement_group.assert_not_called()
+ pg_mock.update_placement_group.assert_not_called()
+ pg_mock.delete_placement_group.assert_not_called()
+ op_mock.get_operation.assert_not_called()
+
+
+def throw_on_specific_patch(patches, failing_patch_idx, raised_exception, op_offset):
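+ """Return an update_placement_group side effect that raises
+ raised_exception for the patch at index failing_patch_idx and returns an
+ OperationMock with id "op{op_offset + idx}" for every other patch."""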
+ patches = patches.copy()
+
+ def _update_side_effect(patch, **kwargs):
+ idx = patches.index(patch)
+ if idx == failing_patch_idx:
+ raise raised_exception()
+ return OperationMock(id="op{0}".format(op_offset + idx))
+
+ return _update_side_effect
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pp.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pp.py
new file mode 100644
index 000000000..519caea40
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pp.py
@@ -0,0 +1,528 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023 Pure Storage, Inc.
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_pp
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ OperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
+fusion_pp.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
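+# Any API endpoint a test does not explicitly mock raises immediately
+# instead of attempting a real call.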
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
+@pytest.fixture
+def module_args_present():
+ return {
+ "name": "protection_policy1",
+ "local_rpo": 43,
+ "local_retention": "2H",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+
+
+@pytest.fixture
+def module_args_absent():
+ return {
+ "name": "protection_policy1",
+ "state": "absent",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ProtectionPoliciesApi")
+@pytest.mark.parametrize(
+ ("module_args", "get_not_called"),
+ [
+ # 'name` is missing
+ (
+ {
+ "local_rpo": 10,
+ "local_retention": "10M",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ True,
+ ),
+ # 'local_rpo` is missing
+ (
+ {
+ "name": "protection_policy1",
+ "local_retention": "10M",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ False,
+ ),
+ # 'local_retention` is missing
+ (
+ {
+ "name": "protection_policy1",
+ "local_rpo": 10,
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ False,
+ ),
+ # 'local_retention` is invalid
+ (
+ {
+ "name": "protection_policy1",
+ "local_rpo": 10,
+ "local_retention": "10yen",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ False,
+ ),
+ # 'local_rpo` is invalid
+ (
+ {
+ "name": "protection_policy1",
+ "local_rpo": "10bread",
+ "local_retention": "bre",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ False,
+ ),
+ # 'state` is invalid
+ (
+ {
+ "name": "protection_policy1",
+ "local_rpo": 10,
+ "local_retention": 10,
+ "state": "past",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ False,
+ ),
+ ],
+)
+def test_module_args_wrong(pp_api_init, op_api_init, module_args, get_not_called):
+ set_module_args(module_args)
+
+ pp_mock = MagicMock()
+ pp_mock.get_protection_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ pp_mock.create_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_mock.delete_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_api_init.return_value = pp_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=purefusion.rest.ApiException)
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleFailJson):
+ fusion_pp.main()
+
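+ # When 'name' is missing, argument validation fails before the policy is
+ # ever looked up; for the other cases the lookup may or may not happen, so
+ # its arguments are only checked when it was actually called.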
+ if get_not_called:
+ pp_mock.get_protection_policy.assert_not_called()
+ if pp_mock.get_protection_policy.called:
+ pp_mock.get_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ pp_mock.create_protection_policy.assert_not_called()
+ pp_mock.delete_protection_policy.assert_not_called()
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ProtectionPoliciesApi")
+def test_pp_create_ok(pp_api_init, op_api_init, module_args_present):
+ module_args = module_args_present
+ module_args["display_name"] = "some_display_name"
+
+ set_module_args(module_args)
+
+ pp_mock = MagicMock()
+ pp_mock.get_protection_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ pp_mock.create_protection_policy = MagicMock(return_value=OperationMock("op1"))
+ pp_mock.delete_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_api_init.return_value = pp_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(return_value=OperationMock("op1", success=True))
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pp.main()
+ assert excinfo.value.changed
+
+ pp_mock.get_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
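+ # local_rpo=43 (minutes) and local_retention="2H" are normalized to the
+ # ISO 8601 durations "PT43M" and "PT120M".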
+ pp_mock.create_protection_policy.assert_called_with(
+ purefusion.ProtectionPolicyPost(
+ name="protection_policy1",
+ display_name="some_display_name",
+ objectives=[
+ purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.Retention(type="Retention", after="PT120M"),
+ ],
+ )
+ )
+ pp_mock.delete_protection_policy.assert_not_called()
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ProtectionPoliciesApi")
+def test_pp_create_without_display_name_ok(
+ pp_api_init, op_api_init, module_args_present
+):
+ module_args = module_args_present
+ set_module_args(module_args)
+
+ pp_mock = MagicMock()
+ pp_mock.get_protection_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ pp_mock.create_protection_policy = MagicMock(return_value=OperationMock("op1"))
+ pp_mock.delete_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_api_init.return_value = pp_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(return_value=OperationMock("op1", success=True))
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pp.main()
+ assert excinfo.value.changed
+
+ pp_mock.get_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ pp_mock.create_protection_policy.assert_called_with(
+ purefusion.ProtectionPolicyPost(
+ name="protection_policy1",
+ display_name="protection_policy1",
+ objectives=[
+ purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.Retention(type="Retention", after="PT120M"),
+ ],
+ )
+ )
+ pp_mock.delete_protection_policy.assert_not_called()
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ProtectionPoliciesApi")
+@pytest.mark.parametrize(
+ ("raised_exception", "expected_exception"),
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_pp_create_exception(
+ pp_api_init, op_api_init, raised_exception, expected_exception, module_args_present
+):
+ module_args = module_args_present
+ set_module_args(module_args)
+
+ pp_mock = MagicMock()
+ pp_mock.get_protection_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ pp_mock.create_protection_policy = MagicMock(side_effect=raised_exception)
+ pp_mock.delete_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_api_init.return_value = pp_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(expected_exception):
+ fusion_pp.main()
+
+ pp_mock.get_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ pp_mock.create_protection_policy.assert_called_with(
+ purefusion.ProtectionPolicyPost(
+ name="protection_policy1",
+ display_name="protection_policy1",
+ objectives=[
+ purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.Retention(type="Retention", after="PT120M"),
+ ],
+ )
+ )
+ pp_mock.delete_protection_policy.assert_not_called()
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ProtectionPoliciesApi")
+def test_pp_create_op_fails(pp_api_init, op_api_init, module_args_present):
+ module_args = module_args_present
+ set_module_args(module_args)
+
+ pp_mock = MagicMock()
+ pp_mock.get_protection_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ pp_mock.create_protection_policy = MagicMock(return_value=OperationMock(id="op1"))
+ pp_mock.delete_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_api_init.return_value = pp_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(return_value=OperationMock("op1", success=False))
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(OperationException):
+ fusion_pp.main()
+
+ pp_mock.get_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ pp_mock.create_protection_policy.assert_called_with(
+ purefusion.ProtectionPolicyPost(
+ name="protection_policy1",
+ display_name="protection_policy1",
+ objectives=[
+ purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.Retention(type="Retention", after="PT120M"),
+ ],
+ )
+ )
+ pp_mock.delete_protection_policy.assert_not_called()
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ProtectionPoliciesApi")
+def test_pp_delete_ok(pp_api_init, op_api_init, module_args_absent):
+ module_args = module_args_absent
+ set_module_args(module_args)
+
+ pp_mock = MagicMock()
+ pp_mock.get_protection_policy = MagicMock(
+ return_value=purefusion.ProtectionPolicy(
+ id="protection_policy1_id",
+ name="protection_policy1",
+ display_name="protection_policy1_display_name",
+ self_link="test_self_link",
+ objectives=[
+ purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.Retention(type="Retention", after="PT120M"),
+ ],
+ )
+ )
+ pp_mock.create_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_mock.delete_protection_policy = MagicMock(return_value=OperationMock(id="op1"))
+ pp_api_init.return_value = pp_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(
+ return_value=OperationMock(id="op1", success=True)
+ )
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pp.main()
+ assert excinfo.value.changed
+
+ pp_mock.get_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ pp_mock.create_protection_policy.assert_not_called()
+ pp_mock.delete_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ProtectionPoliciesApi")
+@pytest.mark.parametrize(
+ ("raised_exception", "expected_exception"),
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_pp_delete_exception(
+ pp_api_init, op_api_init, raised_exception, expected_exception, module_args_absent
+):
+ module_args = module_args_absent
+ set_module_args(module_args)
+
+ pp_mock = MagicMock()
+ pp_mock.get_protection_policy = MagicMock(
+ return_value=purefusion.ProtectionPolicy(
+ id="protection_policy1_id",
+ name="protection_policy1",
+ display_name="protection_policy1_display_name",
+ self_link="test_self_link",
+ objectives=[
+ purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.Retention(type="Retention", after="PT120M"),
+ ],
+ )
+ )
+ pp_mock.create_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_mock.delete_protection_policy = MagicMock(side_effect=raised_exception)
+ pp_api_init.return_value = pp_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(expected_exception):
+ fusion_pp.main()
+
+ pp_mock.get_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ pp_mock.create_protection_policy.assert_not_called()
+ pp_mock.delete_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ProtectionPoliciesApi")
+def test_pp_delete_op_fails(pp_api_init, op_api_init, module_args_absent):
+ module_args = module_args_absent
+ set_module_args(module_args)
+
+ pp_mock = MagicMock()
+ pp_mock.get_protection_policy = MagicMock(
+ return_value=purefusion.ProtectionPolicy(
+ id="protection_policy1_id",
+ name="protection_policy1",
+ display_name="protection_policy1_display_name",
+ self_link="test_self_link",
+ objectives=[
+ purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.Retention(type="Retention", after="PT120M"),
+ ],
+ )
+ )
+ pp_mock.create_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_mock.delete_protection_policy = MagicMock(return_value=OperationMock(id="op1"))
+ pp_api_init.return_value = pp_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(
+ return_value=OperationMock(id="op1", success=False)
+ )
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(OperationException):
+ fusion_pp.main()
+
+ pp_mock.get_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ pp_mock.create_protection_policy.assert_not_called()
+ pp_mock.delete_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ProtectionPoliciesApi")
+def test_pp_present_not_changed(pp_api_init, op_api_init):
+ module_args = {
+ "name": "protection_policy1",
+ "display_name": "some_display_name",
+ "local_rpo": 43,
+ "local_retention": "2H",
+ "state": "present",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ pp_mock = MagicMock()
+ pp_mock.get_protection_policy = MagicMock(
+ return_value=purefusion.ProtectionPolicy(
+ id="protection_policy1_id",
+ name="protection_policy1",
+ display_name="some_display_name",
+ self_link="test_self_link",
+ objectives=[
+ purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.Retention(type="Retention", after="PT120M"),
+ ],
+ )
+ )
+ pp_mock.create_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_mock.delete_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_api_init.return_value = pp_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pp.main()
+ assert not excinfo.value.changed
+
+ pp_mock.get_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ pp_mock.create_protection_policy.assert_not_called()
+ pp_mock.delete_protection_policy.assert_not_called()
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.ProtectionPoliciesApi")
+def test_pp_absent_not_changed(pp_api_init, op_api_init, module_args_absent):
+ module_args = module_args_absent
+ set_module_args(module_args)
+
+ pp_mock = MagicMock()
+ pp_mock.get_protection_policy = MagicMock(side_effect=purefusion.rest.ApiException)
+ pp_mock.create_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_mock.delete_protection_policy = MagicMock(side_effect=NotImplementedError())
+ pp_api_init.return_value = pp_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_pp.main()
+ assert not excinfo.value.changed
+
+ pp_mock.get_protection_policy.assert_called_with(
+ protection_policy_name="protection_policy1"
+ )
+ pp_mock.create_protection_policy.assert_not_called()
+ pp_mock.delete_protection_policy.assert_not_called()
+ op_mock.get_operation.assert_not_called()
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ra.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ra.py
new file mode 100644
index 000000000..6456fa7d7
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ra.py
@@ -0,0 +1,813 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023 Pure Storage, Inc.
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_ra
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ OperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
+fusion_ra.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
+@pytest.fixture
+def module_args_present():
+ return {
+ "state": "present",
+ "role": "az-admin",
+ "user": "user1",
+ "scope": "organization",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+
+
+@pytest.fixture
+def module_args_absent():
+ return {
+ "state": "absent",
+ "role": "az-admin",
+ "user": "user1",
+ "scope": "organization",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.RoleAssignmentsApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # 'role` is missing
+ {
+ "state": "present",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "user": "user1",
+ "scope": "tenant_space",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # 'user` is missing
+ {
+ "state": "present",
+ "role": "tenant-space-admin",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "scope": "tenant_space",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # 'scope` is invalid
+ {
+ "state": "present",
+ "role": "tenant-space-admin",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "user": "user1",
+ "scope": "bikini_bottom",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # 'state` is invalid
+ {
+ "state": "past",
+ "role": "tenant-space-admin",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "user": "user1",
+ "scope": "tenant_space",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # 'tenant` is missing #1
+ {
+ "state": "present",
+ "role": "tenant-space-admin",
+ "tenant_space": "tenant_space1",
+ "user": "user1",
+ "scope": "tenant_space",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # 'tenant` is missing #2
+ {
+ "state": "present",
+ "role": "tenant-space-admin",
+ "tenant_space": "tenant_space1",
+ "user": "user1",
+ "scope": "tenant",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # 'tenant_space` is missing
+ {
+ "state": "present",
+ "role": "tenant-space-admin",
+ "tenant": "tenant1",
+ "user": "user1",
+ "scope": "tenant_space",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # both 'principal` and `user` are specified
+ {
+ "state": "present",
+ "role": "tenant-space-admin",
+ "tenant": "tenant1",
+ "user": "user1",
+ "principal": "123456",
+ "scope": "tenant_space",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # both 'principal` and `api_client_key` are specified
+ {
+ "state": "present",
+ "role": "tenant-space-admin",
+ "tenant": "tenant1",
+ "api_client_key": "pure1:apikey:asdf123XYZ",
+ "principal": "123456",
+ "scope": "tenant_space",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_args_wrong(ra_api_init, im_api_init, op_api_init, module_args):
+ set_module_args(module_args)
+
+ ra_mock = MagicMock()
+ ra_mock.list_role_assignments = MagicMock(side_effect=NotImplementedError())
+ ra_mock.create_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_mock.delete_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_api_init.return_value = ra_mock
+
+ im_mock = MagicMock()
+ im_mock.list_users = MagicMock(
+ return_value=[
+ purefusion.User(
+ id="principal1",
+ self_link="test_value",
+ name="user1",
+ email="example@example.com",
+ )
+ ]
+ )
+ im_api_init.return_value = im_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=purefusion.rest.ApiException)
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleFailJson):
+ fusion_ra.main()
+
+ ra_mock.list_role_assignments.assert_not_called()
+ ra_mock.create_role_assignment.assert_not_called()
+ ra_mock.delete_role_assignment.assert_not_called()
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.RoleAssignmentsApi")
+def test_ra_user_does_not_exist(
+ ra_api_init, im_api_init, op_api_init, module_args_present
+):
+ module_args = module_args_present
+ set_module_args(module_args)
+
+ ra_mock = MagicMock()
+ ra_mock.list_role_assignments = MagicMock(side_effect=purefusion.rest.ApiException)
+ ra_mock.create_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_mock.delete_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_api_init.return_value = ra_mock
+
+ im_mock = MagicMock()
+ im_mock.list_users = MagicMock(return_value=[])
+ im_api_init.return_value = im_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=purefusion.rest.ApiException)
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleFailJson):
+ fusion_ra.main()
+
+ ra_mock.list_role_assignments.assert_not_called()
+ ra_mock.create_role_assignment.assert_not_called()
+ ra_mock.delete_role_assignment.assert_not_called()
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.RoleAssignmentsApi")
+@pytest.mark.parametrize(
+ "args_and_scope",
+ [
+ # organization scope
+ (
+ {
+ "state": "present",
+ "role": "az-admin",
+ "user": "user1",
+ "scope": "organization",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ "/",
+ ),
+ # tenant scope
+ (
+ {
+ "state": "present",
+ "role": "tenant-admin",
+ "user": "user1",
+ "scope": "tenant",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ "/tenants/tenant1",
+ ),
+ # tenant space scope
+ (
+ {
+ "state": "present",
+ "role": "tenant-space-admin",
+ "user": "user1",
+ "scope": "tenant_space",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ "/tenants/tenant1/tenant-spaces/tenant_space1",
+ ),
+ # principal instead of user
+ (
+ {
+ "state": "present",
+ "role": "az-admin",
+ "principal": "principal1",
+ "scope": "organization",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ "/",
+ ),
+ # api_client_key instead of user
+ (
+ {
+ "state": "present",
+ "role": "az-admin",
+ "api_client_key": "pure1:apikey:asdf123XYZ",
+ "scope": "organization",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ "/",
+ ),
+ ],
+)
+def test_ra_create_ok(ra_api_init, im_api_init, op_api_init, args_and_scope):
+ module_args = args_and_scope[0]
+ ra_scope = args_and_scope[1]
+ set_module_args(module_args)
+
+ ra_mock = MagicMock()
+ ra_mock.list_role_assignments = MagicMock(return_value=[])
+ ra_mock.create_role_assignment = MagicMock(return_value=OperationMock("op1"))
+ ra_mock.delete_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_api_init.return_value = ra_mock
+
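+ # The mocked identity manager returns a single user, "user1", whose
+ # principal id "principal1" is expected in the calls asserted below.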
+ im_mock = MagicMock()
+ im_mock.list_users = MagicMock(
+ return_value=[
+ purefusion.User(
+ id="principal1",
+ self_link="test_value",
+ name="user1",
+ email="example@example.com",
+ )
+ ]
+ )
+ im_api_init.return_value = im_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(return_value=OperationMock("op1", success=True))
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_ra.main()
+ assert excinfo.value.changed
+
+ ra_mock.list_role_assignments.assert_called_with(
+ role_name=module_args["role"], principal="principal1"
+ )
+ ra_mock.create_role_assignment.assert_called_with(
+ purefusion.RoleAssignmentPost(scope=ra_scope, principal="principal1"),
+ role_name=module_args["role"],
+ )
+ ra_mock.delete_role_assignment.assert_not_called()
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.RoleAssignmentsApi")
+@pytest.mark.parametrize(
+ ("raised_exception", "expected_exception"),
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_ra_create_exception(
+ ra_api_init,
+ im_api_init,
+ op_api_init,
+ raised_exception,
+ expected_exception,
+ module_args_present,
+):
+ module_args = module_args_present
+ set_module_args(module_args)
+
+ ra_mock = MagicMock()
+ ra_mock.list_role_assignments = MagicMock(return_value=[])
+ ra_mock.create_role_assignment = MagicMock(side_effect=raised_exception)
+ ra_mock.delete_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_api_init.return_value = ra_mock
+
+ im_mock = MagicMock()
+ im_mock.list_users = MagicMock(
+ return_value=[
+ purefusion.User(
+ id="principal1",
+ self_link="test_value",
+ name="user1",
+ email="example@example.com",
+ )
+ ]
+ )
+ im_api_init.return_value = im_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(expected_exception):
+ fusion_ra.main()
+
+ ra_mock.list_role_assignments.assert_called_with(
+ role_name="az-admin", principal="principal1"
+ )
+ ra_mock.create_role_assignment.assert_called_with(
+ purefusion.RoleAssignmentPost(scope="/", principal="principal1"),
+ role_name="az-admin",
+ )
+ ra_mock.delete_role_assignment.assert_not_called()
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.RoleAssignmentsApi")
+def test_ra_create_op_fails(ra_api_init, im_api_init, op_api_init, module_args_present):
+ module_args = module_args_present
+ set_module_args(module_args)
+
+ ra_mock = MagicMock()
+ ra_mock.list_role_assignments = MagicMock(return_value=[])
+ ra_mock.create_role_assignment = MagicMock(return_value=OperationMock(id="op1"))
+ ra_mock.delete_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_api_init.return_value = ra_mock
+
+ im_mock = MagicMock()
+ im_mock.list_users = MagicMock(
+ return_value=[
+ purefusion.User(
+ id="principal1",
+ self_link="test_value",
+ name="user1",
+ email="example@example.com",
+ )
+ ]
+ )
+ im_api_init.return_value = im_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(return_value=OperationMock("op1", success=False))
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(OperationException):
+ fusion_ra.main()
+
+ ra_mock.list_role_assignments.assert_called_with(
+ role_name="az-admin", principal="principal1"
+ )
+ ra_mock.create_role_assignment.assert_called_with(
+ purefusion.RoleAssignmentPost(scope="/", principal="principal1"),
+ role_name="az-admin",
+ )
+ ra_mock.delete_role_assignment.assert_not_called()
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.RoleAssignmentsApi")
+@pytest.mark.parametrize(
+ "args_and_scope",
+ [
+ # organization scope
+ (
+ {
+ "state": "absent",
+ "role": "az-admin",
+ "user": "user1",
+ "scope": "organization",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ purefusion.ResourceMetadata(
+ id="org_id",
+ name="org",
+ self_link="/",
+ ),
+ ),
+ # tenant scope
+ (
+ {
+ "state": "absent",
+ "role": "tenant-admin",
+ "user": "user1",
+ "scope": "tenant",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ purefusion.ResourceMetadata(
+ id="tenant1_id",
+ name="tenant1",
+ self_link="/tenants/tenant1",
+ ),
+ ),
+ # tenant space scope
+ (
+ {
+ "state": "absent",
+ "role": "tenant-space-admin",
+ "user": "user1",
+ "scope": "tenant_space",
+ "tenant": "tenant1",
+ "tenant_space": "tenant_space1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ purefusion.ResourceMetadata(
+ id="tenant_space1_id",
+ name="tenant_space1",
+ self_link="/tenants/tenant1/tenant-spaces/tenant_space1",
+ ),
+ ),
+ ],
+)
+def test_ra_delete_ok(ra_api_init, im_api_init, op_api_init, args_and_scope):
+ module_args, ra_scope = args_and_scope
+ set_module_args(module_args)
+
+ ra_mock = MagicMock()
+ ra_mock.list_role_assignments = MagicMock(
+ return_value=[
+ purefusion.RoleAssignment(
+ id="ra1_id",
+ name="ra1",
+ self_link="test_value",
+ role=purefusion.RoleRef(
+ id="role1_id",
+ name=module_args["role"],
+ kind="Role",
+ self_link="test_value",
+ ),
+ scope=ra_scope,
+ principal="principal1",
+ )
+ ]
+ )
+ ra_mock.create_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_mock.delete_role_assignment = MagicMock(return_value=OperationMock(id="op1"))
+ ra_api_init.return_value = ra_mock
+
+ im_mock = MagicMock()
+ im_mock.list_users = MagicMock(
+ return_value=[
+ purefusion.User(
+ id="principal1",
+ self_link="test_value",
+ name="user1",
+ email="example@example.com",
+ )
+ ]
+ )
+ im_api_init.return_value = im_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(
+ return_value=OperationMock(id="op1", success=True)
+ )
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_ra.main()
+ assert excinfo.value.changed
+
+ ra_mock.list_role_assignments.assert_called_with(
+ role_name=module_args["role"], principal="principal1"
+ )
+ ra_mock.create_role_assignment.assert_not_called()
+ ra_mock.delete_role_assignment.assert_called_with(
+ role_name=module_args["role"], role_assignment_name="ra1"
+ )
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.RoleAssignmentsApi")
+@pytest.mark.parametrize(
+ ("raised_exception", "expected_exception"),
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_ra_delete_exception(
+ ra_api_init,
+ im_api_init,
+ op_api_init,
+ raised_exception,
+ expected_exception,
+ module_args_absent,
+):
+ module_args = module_args_absent
+ set_module_args(module_args)
+
+ ra_mock = MagicMock()
+ ra_mock.list_role_assignments = MagicMock(
+ return_value=[
+ purefusion.RoleAssignment(
+ id="ra1_id",
+ name="ra1",
+ self_link="test_value",
+ role=purefusion.RoleRef(
+ id="role1_id",
+ name=module_args["role"],
+ kind="Role",
+ self_link="test_value",
+ ),
+ scope=purefusion.ResourceMetadata(
+ id="org_id",
+ name="org",
+ self_link="/",
+ ),
+ principal="principal1",
+ )
+ ]
+ )
+ ra_mock.create_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_mock.delete_role_assignment = MagicMock(side_effect=raised_exception)
+ ra_api_init.return_value = ra_mock
+
+ im_mock = MagicMock()
+ im_mock.list_users = MagicMock(
+ return_value=[
+ purefusion.User(
+ id="principal1",
+ self_link="test_value",
+ name="user1",
+ email="example@example.com",
+ )
+ ]
+ )
+ im_api_init.return_value = im_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(expected_exception):
+ fusion_ra.main()
+
+ ra_mock.list_role_assignments.assert_called_with(
+ role_name=module_args["role"], principal="principal1"
+ )
+ ra_mock.create_role_assignment.assert_not_called()
+ ra_mock.delete_role_assignment.assert_called_with(
+ role_name=module_args["role"], role_assignment_name="ra1"
+ )
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.RoleAssignmentsApi")
+def test_ra_delete_op_fails(ra_api_init, im_api_init, op_api_init, module_args_absent):
+ module_args = module_args_absent
+ set_module_args(module_args)
+
+ ra_mock = MagicMock()
+ ra_mock.list_role_assignments = MagicMock(
+ return_value=[
+ purefusion.RoleAssignment(
+ id="ra1_id",
+ name="ra1",
+ self_link="test_value",
+ role=purefusion.RoleRef(
+ id="role1_id",
+ name=module_args["role"],
+ kind="Role",
+ self_link="test_value",
+ ),
+ scope=purefusion.ResourceMetadata(
+ id="org_id",
+ name="org",
+ self_link="/",
+ ),
+ principal="principal1",
+ )
+ ]
+ )
+ ra_mock.create_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_mock.delete_role_assignment = MagicMock(return_value=OperationMock(id="op1"))
+ ra_api_init.return_value = ra_mock
+
+ im_mock = MagicMock()
+ im_mock.list_users = MagicMock(
+ return_value=[
+ purefusion.User(
+ id="principal1",
+ self_link="test_value",
+ name="user1",
+ email="example@example.com",
+ )
+ ]
+ )
+ im_api_init.return_value = im_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(
+ return_value=OperationMock(id="op1", success=False)
+ )
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(OperationException):
+ fusion_ra.main()
+
+ ra_mock.list_role_assignments.assert_called_with(
+ role_name=module_args["role"], principal="principal1"
+ )
+ ra_mock.create_role_assignment.assert_not_called()
+ ra_mock.delete_role_assignment.assert_called_with(
+ role_name=module_args["role"], role_assignment_name="ra1"
+ )
+ op_mock.get_operation.assert_called_with("op1")
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.RoleAssignmentsApi")
+def test_ra_present_not_changed(
+ ra_api_init, im_api_init, op_api_init, module_args_present
+):
+ module_args = module_args_present
+ set_module_args(module_args)
+
+ ra_mock = MagicMock()
+ ra_mock.list_role_assignments = MagicMock(
+ return_value=[
+ purefusion.RoleAssignment(
+ id="ra1_id",
+ name="ra1",
+ self_link="test_value",
+ role=purefusion.RoleRef(
+ id="role1_id",
+ name=module_args["role"],
+ kind="Role",
+ self_link="test_value",
+ ),
+ scope=purefusion.ResourceMetadata(
+ id="org_id",
+ name="org",
+ self_link="/",
+ ),
+ principal="principal1",
+ )
+ ]
+ )
+ ra_mock.create_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_mock.delete_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_api_init.return_value = ra_mock
+
+ im_mock = MagicMock()
+ im_mock.list_users = MagicMock(
+ return_value=[
+ purefusion.User(
+ id="principal1",
+ self_link="test_value",
+ name="user1",
+ email="example@example.com",
+ )
+ ]
+ )
+ im_api_init.return_value = im_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_ra.main()
+ assert not excinfo.value.changed
+
+ ra_mock.list_role_assignments.assert_called_with(
+ role_name=module_args["role"], principal="principal1"
+ )
+ ra_mock.create_role_assignment.assert_not_called()
+ ra_mock.delete_role_assignment.assert_not_called()
+ op_mock.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.IdentityManagerApi")
+@patch("fusion.RoleAssignmentsApi")
+def test_ra_absent_not_changed(
+ ra_api_init, im_api_init, op_api_init, module_args_absent
+):
+ module_args = module_args_absent
+ set_module_args(module_args)
+
+ ra_mock = MagicMock()
+ ra_mock.list_role_assignments = MagicMock(return_value=[])
+ ra_mock.create_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_mock.delete_role_assignment = MagicMock(side_effect=NotImplementedError())
+ ra_api_init.return_value = ra_mock
+
+ im_mock = MagicMock()
+ im_mock.list_users = MagicMock(
+ return_value=[
+ purefusion.User(
+ id="principal1",
+ self_link="test_value",
+ name="user1",
+ email="example@example.com",
+ )
+ ]
+ )
+ im_api_init.return_value = im_mock
+
+ op_mock = MagicMock()
+ op_mock.get_operation = MagicMock(side_effect=NotImplementedError())
+ op_api_init.return_value = op_mock
+
+ with pytest.raises(AnsibleExitJson) as excinfo:
+ fusion_ra.main()
+ assert not excinfo.value.changed
+
+ ra_mock.list_role_assignments.assert_called_with(
+ role_name=module_args["role"], principal="principal1"
+ )
+ ra_mock.create_role_assignment.assert_not_called()
+ ra_mock.delete_role_assignment.assert_not_called()
+ op_mock.get_operation.assert_not_called()
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_region.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_region.py
new file mode 100644
index 000000000..6b13adecf
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_region.py
@@ -0,0 +1,798 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_region
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ FailedOperationMock,
+ OperationMock,
+ SuccessfulOperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
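+# setup_fusion is stubbed so no real API client is created, ApiClient.call_api
+# raises to surface any endpoint a test forgot to mock, and exit_json/fail_json
+# are swapped for helpers that raise AnsibleExitJson/AnsibleFailJson so module
+# results can be asserted with pytest.raises.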
+fusion_region.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # required parameter 'name' is missing
+ {
+ "state": "present",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ "extra": "value",
+ },
+ # parameter 'state' has an incorrect value
+ {
+ "state": "cool",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(m_region_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_region_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_region.main()
+
+ # check api was not called at all
+ api_obj.get_region.assert_not_called()
+ api_obj.create_region.assert_not_called()
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+def test_region_create(m_region_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
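+ # distinct operation ids (1/2/3) let the assertions below verify which
+ # operation the module actually polled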
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_region.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_called_once_with(
+ purefusion.RegionPost(
+ name=module_args["name"], display_name=module_args["display_name"]
+ )
+ )
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+def test_region_create_without_display_name(m_region_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "region1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_region.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_called_once_with(
+ purefusion.RegionPost(
+ name=module_args["name"], display_name=module_args["name"]
+ )
+ )
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_region_create_exception(m_region_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_region = MagicMock(side_effect=exec_original)
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_region.main()
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_called_once_with(
+ purefusion.RegionPost(
+ name=module_args["name"], display_name=module_args["display_name"]
+ )
+ )
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+def test_region_create_op_fails(m_region_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_region.main()
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_called_once_with(
+ purefusion.RegionPost(
+ name=module_args["name"], display_name=module_args["display_name"]
+ )
+ )
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_region_create_op_exception(m_region_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_region.main()
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_called_once_with(
+ purefusion.RegionPost(
+ name=module_args["name"], display_name=module_args["display_name"]
+ )
+ )
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+def test_region_update(m_region_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_region = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(return_value=purefusion.Region(**current_region))
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_region.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_not_called()
+ api_obj.update_region.assert_called_once_with(
+ purefusion.RegionPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ region_name=module_args["name"],
+ )
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_region_update_exception(m_region_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_region = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(return_value=purefusion.Region(**current_region))
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(side_effect=exec_original)
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_region.main()
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_not_called()
+ api_obj.update_region.assert_called_once_with(
+ purefusion.RegionPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ region_name=module_args["name"],
+ )
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+def test_region_update_op_fails(m_region_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_region = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(return_value=purefusion.Region(**current_region))
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_region.main()
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_not_called()
+ api_obj.update_region.assert_called_once_with(
+ purefusion.RegionPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ region_name=module_args["name"],
+ )
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_region_update_op_exception(m_region_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_region = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(return_value=purefusion.Region(**current_region))
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_region.main()
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_not_called()
+ api_obj.update_region.assert_called_once_with(
+ purefusion.RegionPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ region_name=module_args["name"],
+ )
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+def test_region_present_not_changed(m_region_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_region = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": module_args["display_name"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(return_value=purefusion.Region(**current_region))
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_region.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_not_called()
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+def test_region_absent_not_changed(m_region_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_region.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_not_called()
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+def test_region_delete(m_region_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_region = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(return_value=purefusion.Region(**current_region))
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_region.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_not_called()
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_called_once_with(region_name=module_args["name"])
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_region_delete_exception(m_region_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_region = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(return_value=purefusion.Region(**current_region))
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(side_effect=exec_original)
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_region.main()
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_not_called()
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_called_once_with(region_name=module_args["name"])
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+def test_region_delete_op_fails(m_region_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_region = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(return_value=purefusion.Region(**current_region))
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_region.main()
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_not_called()
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_called_once_with(region_name=module_args["name"])
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.RegionsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_region_delete_op_exception(m_region_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "region1",
+ "display_name": "Region 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_region = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_region = MagicMock(return_value=purefusion.Region(**current_region))
+ api_obj.create_region = MagicMock(return_value=OperationMock(1))
+ api_obj.update_region = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_region = MagicMock(return_value=OperationMock(3))
+ m_region_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_region.main()
+
+ # check api was called correctly
+ api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
+ api_obj.create_region.assert_not_called()
+ api_obj.update_region.assert_not_called()
+ api_obj.delete_region.assert_called_once_with(region_name=module_args["name"])
+ op_obj.get_operation.assert_called_once_with(3)
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_sc.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_sc.py
new file mode 100644
index 000000000..1a2db191c
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_sc.py
@@ -0,0 +1,1240 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_sc
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ FailedOperationMock,
+ OperationMock,
+ SuccessfulOperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
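+# same harness as the other functional tests: no real client is constructed,
+# unmocked API calls raise, and exit_json/fail_json raise assertable exceptions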
+fusion_sc.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # required parameter 'name' is missing
+ {
+ "state": "present",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # required parameter 'storage_service' is missing
+ {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ "extra": "value",
+ },
+ # parameter 'state' has an incorrect value
+ {
+ "state": "cool",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(m_sc_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_sc_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_sc.main()
+
+ # check api was not called at all
+ api_obj.get_storage_class.assert_not_called()
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
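+# Expected limits assume binary (IEC) units: "256G" = 256 * 2**30 =
+# 274877906944 bytes and "2P" = 2 * 2**50 = 2251799813685248 bytes; the None
+# cases exercise the defaults (100_000_000 IOPS, 512G bandwidth, 4P size).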
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+@pytest.mark.parametrize(
+ "iops_arg,iops_exp",
+ [("2000000", 2_000_000), (None, 100_000_000)],
+)
+@pytest.mark.parametrize(
+ "bw_arg,bw_exp",
+ [("256G", 274877906944), (None, 549755813888)],
+)
+@pytest.mark.parametrize(
+ "size_arg,size_exp",
+ [("2P", 2251799813685248), (None, 4503599627370496)],
+)
+def test_sc_create(
+ m_sc_api, m_op_api, iops_arg, iops_exp, bw_arg, bw_exp, size_arg, size_exp
+):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": iops_arg,
+ "bw_limit": bw_arg,
+ "size_limit": size_arg,
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_sc.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_called_once_with(
+ purefusion.StorageClassPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ iops_limit=iops_exp,
+ bandwidth_limit=bw_exp,
+ size_limit=size_exp,
+ ),
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+def test_sc_create_without_display_name(m_sc_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
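+ # binary units: 2P = 2 * 2**50 bytes, 256G = 256 * 2**30 bytes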
+ parsed_size = 2251799813685248
+ parsed_bandwidth = 274877906944
+ parsed_iops = 2000000
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_sc.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_called_once_with(
+ purefusion.StorageClassPost(
+ name=module_args["name"],
+ display_name=module_args["name"],
+ iops_limit=parsed_iops,
+ bandwidth_limit=parsed_bandwidth,
+ size_limit=parsed_size,
+ ),
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+@pytest.mark.parametrize("iops_arg", [-100, 99, 100_000_001])
+def test_sc_create_iops_out_of_range(m_sc_api, m_op_api, iops_arg):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": iops_arg,
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+@pytest.mark.parametrize("bw_arg", ["1023K", "513G"])
+def test_sc_create_bw_out_of_range(m_sc_api, m_op_api, bw_arg):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": bw_arg,
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+@pytest.mark.parametrize("size_arg", ["1023K", "5P"])
+def test_sc_create_size_out_of_range(m_sc_api, m_op_api, size_arg):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": size_arg,
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_sc_create_exception(m_sc_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ parsed_size = 2251799813685248
+ parsed_bandwidth = 274877906944
+ parsed_iops = 2000000
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_class = MagicMock(side_effect=exec_original)
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_called_once_with(
+ purefusion.StorageClassPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ iops_limit=parsed_iops,
+ bandwidth_limit=parsed_bandwidth,
+ size_limit=parsed_size,
+ ),
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+def test_sc_create_op_fails(m_sc_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ parsed_size = 2251799813685248
+ parsed_bandwidth = 274877906944
+ parsed_iops = 2000000
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_called_once_with(
+ purefusion.StorageClassPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ iops_limit=parsed_iops,
+ bandwidth_limit=parsed_bandwidth,
+ size_limit=parsed_size,
+ ),
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_sc_create_op_exception(m_sc_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ parsed_size = 2251799813685248
+ parsed_bandwidth = 274877906944
+ parsed_iops = 2000000
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_called_once_with(
+ purefusion.StorageClassPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ iops_limit=parsed_iops,
+ bandwidth_limit=parsed_bandwidth,
+ size_limit=parsed_size,
+ ),
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+def test_sc_update(m_sc_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_sc = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "iops_limit": "2000000",
+ "bandwidth_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": module_args["storage_service"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(
+ return_value=purefusion.StorageClass(**current_sc)
+ )
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_sc.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_called_once_with(
+ purefusion.StorageClassPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_sc_update_exception(m_sc_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_sc = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "iops_limit": "2000000",
+ "bandwidth_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": module_args["storage_service"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(
+ return_value=purefusion.StorageClass(**current_sc)
+ )
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(side_effect=exec_original)
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_called_once_with(
+ purefusion.StorageClassPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+def test_sc_update_op_fails(m_sc_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_sc = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "iops_limit": "2000000",
+ "bandwidth_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": module_args["storage_service"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(
+ return_value=purefusion.StorageClass(**current_sc)
+ )
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_called_once_with(
+ purefusion.StorageClassPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
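+# In the *_op_exception variants the failure comes from polling the operation:
+# HTTPError is expected to surface wrapped as OperationException, while
+# ApiException propagates unchanged (see the parametrize pairs below).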
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_sc_update_op_exception(m_sc_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_sc = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "iops_limit": "2000000",
+ "bandwidth_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": module_args["storage_service"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(
+ return_value=purefusion.StorageClass(**current_sc)
+ )
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_called_once_with(
+ purefusion.StorageClassPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+def test_sc_present_not_changed(m_sc_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_sc = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": module_args["display_name"],
+ "iops_limit": "2000000",
+ "bandwidth_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": module_args["storage_service"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(
+ return_value=purefusion.StorageClass(**current_sc)
+ )
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_sc.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+def test_sc_absent_not_changed(m_sc_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_sc.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+def test_sc_update_limits_not_changed(m_sc_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_sc = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": module_args["display_name"],
+ "iops_limit": "1500000", # does not match but shouldn't be updated!
+ "bandwidth_limit": "300G", # does not match but shouldn't be updated!
+ "size_limit": "1P", # does not match but shouldn't be updated!
+ "storage_service": module_args["storage_service"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(
+ return_value=purefusion.StorageClass(**current_sc)
+ )
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_sc.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+def test_sc_delete(m_sc_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_sc = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "iops_limit": "2000000",
+ "bandwidth_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": module_args["storage_service"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(
+ return_value=purefusion.StorageClass(**current_sc)
+ )
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_sc.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_sc_delete_exception(m_sc_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_sc = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "iops_limit": "2000000",
+ "bandwidth_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": module_args["storage_service"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(
+ return_value=purefusion.StorageClass(**current_sc)
+ )
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(side_effect=exec_original)
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+def test_sc_delete_op_fails(m_sc_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_sc = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "iops_limit": "2000000",
+ "bandwidth_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": module_args["storage_service"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(
+ return_value=purefusion.StorageClass(**current_sc)
+ )
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageClassesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_sc_delete_op_exception(m_sc_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "sc1",
+ "display_name": "Storage Class 1",
+ "iops_limit": "2000000",
+ "bw_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": "ss1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_sc = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "iops_limit": "2000000",
+ "bandwidth_limit": "256G",
+ "size_limit": "2P",
+ "storage_service": module_args["storage_service"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_class = MagicMock(
+ return_value=purefusion.StorageClass(**current_sc)
+ )
+ api_obj.create_storage_class = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_class = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_class = MagicMock(return_value=OperationMock(3))
+ m_sc_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_sc.main()
+
+ # check api was called correctly
+ api_obj.get_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ api_obj.create_storage_class.assert_not_called()
+ api_obj.update_storage_class.assert_not_called()
+ api_obj.delete_storage_class.assert_called_once_with(
+ storage_class_name=module_args["name"],
+ storage_service_name=module_args["storage_service"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_se.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_se.py
new file mode 100644
index 000000000..a071190db
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_se.py
@@ -0,0 +1,1039 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_se
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ FailedOperationMock,
+ OperationMock,
+ SuccessfulOperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
+fusion_se.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
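+# call_api is stubbed to raise so that any endpoint a test forgets to mock
+# fails loudly instead of attempting a real request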
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
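+# Shared fixtures: 'module_args' models the playbook input and 'current_se'
+# the endpoint state the mocked API reports back; tests mutate them per scenario.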
+@pytest.fixture
+def module_args():
+ return {
+ "state": "present",
+ "name": "se1",
+ "display_name": "Storage Endpoint 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "iscsi": [
+ {
+ "address": "10.21.200.124/24",
+ "gateway": "10.21.200.1",
+ "network_interface_groups": ["subnet-0", "subnet-1"],
+ }
+ ],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+
+
+@pytest.fixture
+def current_se(module_args):
+ return {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"],
+ "display_name": module_args["display_name"],
+ "region": module_args["region"],
+ "availability_zone": module_args["availability_zone"],
+ "endpoint_type": "iscsi",
+ "iscsi": [
+ dict(discovery_interface) for discovery_interface in module_args["iscsi"]
+ ],
+ }
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # required parameter 'name' is missing
+ {
+ "state": "present",
+ "display_name": "Storage Endpoint 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "iscsi": [
+ {
+ "address": "10.21.200.124/24",
+ "gateway": "10.21.200.1",
+ "network_interface_groups": ["subnet-0", "subnet-1"],
+ }
+ ],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # required parameter 'region' is missing
+ {
+ "state": "present",
+ "name": "se1",
+ "display_name": "Storage Endpoint 1",
+ "availability_zone": "az1",
+ "iscsi": [
+ {
+ "address": "10.21.200.124/24",
+ "gateway": "10.21.200.1",
+ "network_interface_groups": ["subnet-0", "subnet-1"],
+ }
+ ],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # required parameter 'availability_zone' is missing
+ {
+ "state": "present",
+ "name": "se1",
+ "display_name": "Storage Endpoint 1",
+ "region": "region1",
+ "iscsi": [
+ {
+ "address": "10.21.200.124/24",
+ "gateway": "10.21.200.1",
+ "network_interface_groups": ["subnet-0", "subnet-1"],
+ }
+ ],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "se1",
+ "display_name": "Storage Endpoint 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "iscsi": [
+ {
+ "address": "10.21.200.124/24",
+ "gateway": "10.21.200.1",
+ "network_interface_groups": ["subnet-0", "subnet-1"],
+ }
+ ],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ "extra": "value",
+ },
+ # parameter 'state' has incorrect value
+ {
+ "state": "cool",
+ "name": "se1",
+ "display_name": "Storage Endpoint 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "iscsi": [
+ {
+ "address": "10.21.200.124/24",
+ "gateway": "10.21.200.1",
+ "network_interface_groups": ["subnet-0", "subnet-1"],
+ }
+ ],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # parameters 'iscsi' and 'cbs_azure_iscsi' are used at the same time
+ {
+ "state": "present",
+ "name": "se1",
+ "display_name": "Storage Endpoint 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "iscsi": [
+ {
+ "address": "10.21.200.124/24",
+ "gateway": "10.21.200.1",
+ "network_interface_groups": ["subnet-0", "subnet-1"],
+ }
+ ],
+ "cbs_azure_iscsi": {
+ "storage_endpoint_collection_identity": "/subscriptions/sub/resourcegroups/sec/providers/ms/userAssignedIdentities/secId",
+ "load_balancer": "/subscriptions/sub/resourcegroups/sec/providers/ms/loadBalancers/sec-lb",
+ "load_balancer_addresses": [],
+ },
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ # parameter 'cbs_azure_iscsi' has invalid address
+ {
+ "state": "present",
+ "name": "se1",
+ "display_name": "Storage Endpoint 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "cbs_azure_iscsi": {
+ "storage_endpoint_collection_identity": "/subscriptions/sub/resourcegroups/sec/providers/ms/userAssignedIdentities/secId",
+ "load_balancer": "/subscriptions/sub/resourcegroups/sec/providers/ms/loadBalancers/sec-lb",
+ "load_balancer_addresses": ["not an address"],
+ },
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ # parameter 'iscsi' has invalid 'gateway' address
+ {
+ "state": "present",
+ "name": "se1",
+ "display_name": "Storage Endpoint 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "iscsi": [
+ {
+ "address": "10.21.200.124/24",
+ "gateway": "not an address",
+ "network_interface_groups": ["subnet-0", "subnet-1"],
+ }
+ ],
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ # parameter 'iscsi' has invalid 'address' address
+ {
+ "state": "present",
+ "name": "se1",
+ "display_name": "Storage Endpoint 1",
+ "region": "region1",
+ "availability_zone": "az1",
+ "iscsi": [
+ {
+ "address": "not an address",
+ "gateway": "10.21.200.1",
+ "network_interface_groups": ["subnet-0", "subnet-1"],
+ }
+ ],
+ "app_id": "ABCD1234",
+ "key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(m_se_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_se_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_se.main()
+
+ # check api was not called at all
+ api_obj.get_storage_endpoint.assert_not_called()
+ api_obj.create_storage_endpoint.assert_not_called()
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+def test_se_create_iscsi(m_se_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_se.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
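+ # the module should translate each 'iscsi' entry into a typed
+ # discovery-interface object inside the POST body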
+ api_obj.create_storage_endpoint.assert_called_once_with(
+ purefusion.StorageEndpointPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ endpoint_type="iscsi",
+ iscsi=purefusion.StorageEndpointIscsiPost(
+ discovery_interfaces=[
+ purefusion.StorageEndpointIscsiDiscoveryInterfacePost(**endpoint)
+ for endpoint in module_args["iscsi"]
+ ]
+ ),
+ ),
+ region_name=module_args["region"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+def test_se_create_cbs_azure_iscsi(m_se_api, m_op_api, module_args):
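+ # 'iscsi' and 'cbs_azure_iscsi' are mutually exclusive, so the plain iSCSI
+ # config is swapped out for the Azure CBS variant here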
+ del module_args["iscsi"]
+ module_args["cbs_azure_iscsi"] = {
+ "storage_endpoint_collection_identity": "/subscriptions/sub/resourcegroups/sec/providers/ms/userAssignedIdentities/secId",
+ "load_balancer": "/subscriptions/sub/resourcegroups/sec/providers/ms/loadBalancers/sec-lb",
+ "load_balancer_addresses": ["234.1.2.3"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_se.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_called_once_with(
+ purefusion.StorageEndpointPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ endpoint_type="cbs-azure-iscsi",
+ cbs_azure_iscsi=purefusion.StorageEndpointCbsAzureIscsiPost(
+ storage_endpoint_collection_identity=module_args["cbs_azure_iscsi"][
+ "storage_endpoint_collection_identity"
+ ],
+ load_balancer=module_args["cbs_azure_iscsi"]["load_balancer"],
+ load_balancer_addresses=module_args["cbs_azure_iscsi"][
+ "load_balancer_addresses"
+ ],
+ ),
+ ),
+ region_name=module_args["region"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+def test_se_create_without_display_name(m_se_api, m_op_api, module_args):
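+ # with 'display_name' omitted, the module is expected to default it to 'name'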
+ del module_args["display_name"]
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_se.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_called_once_with(
+ purefusion.StorageEndpointPost(
+ name=module_args["name"],
+ display_name=module_args["name"],
+ endpoint_type="iscsi",
+ iscsi=purefusion.StorageEndpointIscsiPost(
+ discovery_interfaces=[
+ purefusion.StorageEndpointIscsiDiscoveryInterfacePost(**endpoint)
+ for endpoint in module_args["iscsi"]
+ ]
+ ),
+ ),
+ region_name=module_args["region"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_se_create_exception(
+ m_se_api, m_op_api, exec_original, exec_catch, module_args
+):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_endpoint = MagicMock(side_effect=exec_original)
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_se.main()
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_called_once_with(
+ purefusion.StorageEndpointPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ endpoint_type="iscsi",
+ iscsi=purefusion.StorageEndpointIscsiPost(
+ discovery_interfaces=[
+ purefusion.StorageEndpointIscsiDiscoveryInterfacePost(**endpoint)
+ for endpoint in module_args["iscsi"]
+ ]
+ ),
+ ),
+ region_name=module_args["region"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+def test_se_create_op_fails(m_se_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_se.main()
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_called_once_with(
+ purefusion.StorageEndpointPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ endpoint_type="iscsi",
+ iscsi=purefusion.StorageEndpointIscsiPost(
+ discovery_interfaces=[
+ purefusion.StorageEndpointIscsiDiscoveryInterfacePost(**endpoint)
+ for endpoint in module_args["iscsi"]
+ ]
+ ),
+ ),
+ region_name=module_args["region"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_se_create_op_exception(
+ m_se_api, m_op_api, exec_original, exec_catch, module_args
+):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_se.main()
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_called_once_with(
+ purefusion.StorageEndpointPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ endpoint_type="iscsi",
+ iscsi=purefusion.StorageEndpointIscsiPost(
+ discovery_interfaces=[
+ purefusion.StorageEndpointIscsiDiscoveryInterfacePost(**endpoint)
+ for endpoint in module_args["iscsi"]
+ ]
+ ),
+ ),
+ region_name=module_args["region"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+def test_se_update(m_se_api, m_op_api, module_args, current_se):
+ current_se["display_name"] = None
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(
+ return_value=purefusion.StorageEndpoint(**current_se)
+ )
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_se.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_not_called()
+ api_obj.update_storage_endpoint.assert_called_once_with(
+ purefusion.StorageEndpointPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_se_update_exception(
+ m_se_api, m_op_api, exec_original, exec_catch, module_args, current_se
+):
+ current_se["display_name"] = None
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(
+ return_value=purefusion.StorageEndpoint(**current_se)
+ )
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(side_effect=exec_original)
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_se.main()
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_not_called()
+ api_obj.update_storage_endpoint.assert_called_once_with(
+ purefusion.StorageEndpointPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+def test_se_update_op_fails(m_se_api, m_op_api, module_args, current_se):
+ current_se["display_name"] = None
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(
+ return_value=purefusion.StorageEndpoint(**current_se)
+ )
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_se.main()
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_not_called()
+ api_obj.update_storage_endpoint.assert_called_once_with(
+ purefusion.StorageEndpointPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_se_update_op_exception(
+ m_se_api, m_op_api, exec_original, exec_catch, module_args, current_se
+):
+ current_se["display_name"] = None
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(
+ return_value=purefusion.StorageEndpoint(**current_se)
+ )
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_se.main()
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_not_called()
+ api_obj.update_storage_endpoint.assert_called_once_with(
+ purefusion.StorageEndpointPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+def test_se_present_not_changed(m_se_api, m_op_api, module_args, current_se):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(
+ return_value=purefusion.StorageEndpoint(**current_se)
+ )
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_se.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_not_called()
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+def test_se_absent_not_changed(m_se_api, m_op_api, module_args, current_se):
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_se.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_not_called()
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+def test_se_delete(m_se_api, m_op_api, module_args, current_se):
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(
+ return_value=purefusion.StorageEndpoint(**current_se)
+ )
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_se.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_not_called()
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_se_delete_exception(
+ m_se_api, m_op_api, exec_original, exec_catch, module_args, current_se
+):
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(
+ return_value=purefusion.StorageEndpoint(**current_se)
+ )
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(side_effect=exec_original)
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_se.main()
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_not_called()
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+def test_se_delete_op_fails(m_se_api, m_op_api, module_args, current_se):
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(
+ return_value=purefusion.StorageEndpoint(**current_se)
+ )
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_se.main()
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_not_called()
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageEndpointsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_se_delete_op_exception(
+ m_se_api, m_op_api, exec_original, exec_catch, module_args, current_se
+):
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_endpoint = MagicMock(
+ return_value=purefusion.StorageEndpoint(**current_se)
+ )
+ api_obj.create_storage_endpoint = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_endpoint = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_endpoint = MagicMock(return_value=OperationMock(3))
+ m_se_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_se.main()
+
+ # check api was called correctly
+ api_obj.get_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ api_obj.create_storage_endpoint.assert_not_called()
+ api_obj.update_storage_endpoint.assert_not_called()
+ api_obj.delete_storage_endpoint.assert_called_once_with(
+ region_name=module_args["region"],
+ storage_endpoint_name=module_args["name"],
+ availability_zone_name=module_args["availability_zone"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ss.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ss.py
new file mode 100644
index 000000000..d784b1a52
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ss.py
@@ -0,0 +1,930 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_ss
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ FailedOperationMock,
+ OperationMock,
+ SuccessfulOperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
+fusion_ss.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # required parameter 'name' is missing
+ {
+ "state": "present",
+ "display_name": "Storage Service 1",
+ "hardware_types": ["flash-array-x"],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "hardware_types": ["flash-array-x"],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ "extra": "value",
+ },
+ # parameter 'state' has incorrect value
+ {
+ "state": "cool",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "hardware_types": ["flash-array-x"],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # parameter 'hardware_types' has incorrect value
+ {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "hardware_types": ["hdd-array-x"],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(m_ss_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_ss_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_ss.main()
+
+ # check api was not called at all
+ api_obj.get_storage_service.assert_not_called()
+ api_obj.create_storage_service.assert_not_called()
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+def test_ss_create(m_ss_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "hardware_types": ["flash-array-x"],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ss.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_called_once_with(
+ purefusion.StorageServicePost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ hardware_types=module_args["hardware_types"],
+ )
+ )
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+def test_ss_create_without_display_name(m_ss_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "ss1",
+ "hardware_types": ["flash-array-x"],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ss.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_called_once_with(
+ purefusion.StorageServicePost(
+ name=module_args["name"],
+ display_name=module_args["name"],
+ hardware_types=module_args["hardware_types"],
+ )
+ )
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+def test_ss_create_without_hardware_type(m_ss_api, m_op_api):
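+ # 'hardware_types' is required when creating a service, so the module should
+ # fail validation after the existence check and before any mutating call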
+ module_args = {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_ss_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_ss.main()
+
+ # check the module stopped after the initial lookup
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_not_called()
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_ss_create_exception(m_ss_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "hardware_types": ["flash-array-x"],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_service = MagicMock(side_effect=exec_original)
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ss.main()
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_called_once_with(
+ purefusion.StorageServicePost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ hardware_types=module_args["hardware_types"],
+ )
+ )
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+def test_ss_create_op_fails(m_ss_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "hardware_types": ["flash-array-x"],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_ss.main()
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_called_once_with(
+ purefusion.StorageServicePost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ hardware_types=module_args["hardware_types"],
+ )
+ )
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
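+# Note the asymmetry with the plain create-exception case above: an HTTPError
+# raised while polling the operation is expected to surface as an
+# OperationException (the polling helper presumably wraps transport errors),
+# whereas an ApiException propagates unchanged.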
+def test_ss_create_op_exception(m_ss_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "hardware_types": ["flash-array-x"],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ss.main()
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_called_once_with(
+ purefusion.StorageServicePost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ hardware_types=module_args["hardware_types"],
+ )
+ )
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+def test_ss_update(m_ss_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "hardware_types": ["flash-array-x"],
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ss = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "hardware_types": ["flash-array-c"], # is different but shouldn't be patched!
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(
+ return_value=purefusion.StorageService(**current_ss)
+ )
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ss.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_not_called()
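+ # display_name travels wrapped in NullableString, which is how the generated
+ # purefusion client appears to model nullable PATCH fields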
+ api_obj.update_storage_service.assert_called_once_with(
+ purefusion.StorageServicePatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ storage_service_name=module_args["name"],
+ )
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_ss_update_exception(m_ss_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ss = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "hardware_types": ["flash-array-x"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(
+ return_value=purefusion.StorageService(**current_ss)
+ )
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(side_effect=exec_original)
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ss.main()
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_not_called()
+ api_obj.update_storage_service.assert_called_once_with(
+ purefusion.StorageServicePatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ storage_service_name=module_args["name"],
+ )
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+def test_ss_update_op_fails(m_ss_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ss = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "hardware_types": ["flash-array-x"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(
+ return_value=purefusion.StorageService(**current_ss)
+ )
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_ss.main()
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_not_called()
+ api_obj.update_storage_service.assert_called_once_with(
+ purefusion.StorageServicePatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ storage_service_name=module_args["name"],
+ )
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_ss_update_op_exception(m_ss_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ss = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ "hardware_types": ["flash-array-x"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(
+ return_value=purefusion.StorageService(**current_ss)
+ )
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ss.main()
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_not_called()
+ api_obj.update_storage_service.assert_called_once_with(
+ purefusion.StorageServicePatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ storage_service_name=module_args["name"],
+ )
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+def test_ss_present_not_changed(m_ss_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ss = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": module_args["display_name"],
+ "hardware_types": ["flash-array-x"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(
+ return_value=purefusion.StorageService(**current_ss)
+ )
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ss.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_not_called()
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+def test_ss_absent_not_changed(m_ss_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ss.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_not_called()
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+def test_ss_delete(m_ss_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ss = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ "hardware_types": ["flash-array-x"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(
+ return_value=purefusion.StorageService(**current_ss)
+ )
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ss.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_not_called()
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_ss_delete_exception(m_ss_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ss = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ "hardware_types": ["flash-array-x"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(
+ return_value=purefusion.StorageService(**current_ss)
+ )
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(side_effect=exec_original)
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ss.main()
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_not_called()
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+def test_ss_delete_op_fails(m_ss_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ss = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ "hardware_types": ["flash-array-x"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(
+ return_value=purefusion.StorageService(**current_ss)
+ )
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_ss.main()
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_not_called()
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.StorageServicesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_ss_delete_op_exception(m_ss_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "ss1",
+ "display_name": "Storage Service 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ss = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ "hardware_types": ["flash-array-x"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_storage_service = MagicMock(
+ return_value=purefusion.StorageService(**current_ss)
+ )
+ api_obj.create_storage_service = MagicMock(return_value=OperationMock(1))
+ api_obj.update_storage_service = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_storage_service = MagicMock(return_value=OperationMock(3))
+ m_ss_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ss.main()
+
+ # check api was called correctly
+ api_obj.get_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ api_obj.create_storage_service.assert_not_called()
+ api_obj.update_storage_service.assert_not_called()
+ api_obj.delete_storage_service.assert_called_once_with(
+ storage_service_name=module_args["name"]
+ )
+ op_obj.get_operation.assert_called_once_with(3)
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_tenant.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_tenant.py
new file mode 100644
index 000000000..bb0521b01
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_tenant.py
@@ -0,0 +1,803 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_tenant
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ FailedOperationMock,
+ OperationMock,
+ SuccessfulOperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
+fusion_tenant.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
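+# setup_fusion is stubbed out so no real credentials are needed, and
+# ApiClient.call_api raises so that any REST call not covered by an explicit
+# mock fails loudly; exit_json/fail_json are swapped for variants that raise
+# catchable exceptions instead of ending the process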
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # required parameter 'name' is missing
+ {
+ "state": "present",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ "extra": "value",
+ },
+ # parameter 'state' has incorrect value
+ {
+ "state": "cool",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(m_tenant_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_tenant_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_tenant.main()
+
+ # check api was not called at all
+ api_obj.get_tenant.assert_not_called()
+ api_obj.create_tenant.assert_not_called()
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+def test_tenant_create(m_tenant_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_tenant.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_called_once_with(
+ purefusion.TenantPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ )
+ )
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+def test_tenant_create_without_display_name(m_tenant_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_tenant.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_called_once_with(
+ purefusion.TenantPost(
+ name=module_args["name"],
+ display_name=module_args["name"],
+ )
+ )
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_tenant_create_exception(m_tenant_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant = MagicMock(side_effect=exec_original)
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_tenant.main()
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_called_once_with(
+ purefusion.TenantPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ )
+ )
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+def test_tenant_create_op_fails(m_tenant_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_tenant.main()
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_called_once_with(
+ purefusion.TenantPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ )
+ )
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_tenant_create_op_exception(m_tenant_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_tenant.main()
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_called_once_with(
+ purefusion.TenantPost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ )
+ )
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+def test_tenant_update(m_tenant_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_tenant = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(return_value=purefusion.Tenant(**current_tenant))
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_tenant.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_not_called()
+ api_obj.update_tenant.assert_called_once_with(
+ purefusion.TenantPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ tenant_name=module_args["name"],
+ )
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_tenant_update_exception(m_tenant_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_tenant = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(return_value=purefusion.Tenant(**current_tenant))
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(side_effect=exec_original)
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_tenant.main()
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_not_called()
+ api_obj.update_tenant.assert_called_once_with(
+ purefusion.TenantPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ tenant_name=module_args["name"],
+ )
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+def test_tenant_update_op_fails(m_tenant_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_tenant = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(return_value=purefusion.Tenant(**current_tenant))
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_tenant.main()
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_not_called()
+ api_obj.update_tenant.assert_called_once_with(
+ purefusion.TenantPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ tenant_name=module_args["name"],
+ )
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_tenant_update_op_exception(m_tenant_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_tenant = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(return_value=purefusion.Tenant(**current_tenant))
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_tenant.main()
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_not_called()
+ api_obj.update_tenant.assert_called_once_with(
+ purefusion.TenantPatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ tenant_name=module_args["name"],
+ )
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+def test_tenant_present_not_changed(m_tenant_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_tenant = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": module_args["display_name"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(return_value=purefusion.Tenant(**current_tenant))
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_tenant.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_not_called()
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+def test_tenant_absent_not_changed(m_tenant_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_tenant.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_not_called()
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+def test_tenant_delete(m_tenant_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_tenant = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(return_value=purefusion.Tenant(**current_tenant))
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_tenant.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_not_called()
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_tenant_delete_exception(m_tenant_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_tenant = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(return_value=purefusion.Tenant(**current_tenant))
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(side_effect=exec_original)
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_tenant.main()
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_not_called()
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+def test_tenant_delete_op_fails(m_tenant_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_tenant = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(return_value=purefusion.Tenant(**current_tenant))
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_tenant.main()
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_not_called()
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantsApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_tenant_delete_op_exception(m_tenant_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "tenant1",
+ "display_name": "Tenant 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_tenant = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant = MagicMock(return_value=purefusion.Tenant(**current_tenant))
+ api_obj.create_tenant = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant = MagicMock(return_value=OperationMock(3))
+ m_tenant_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_tenant.main()
+
+ # check api was called correctly
+ api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ api_obj.create_tenant.assert_not_called()
+ api_obj.update_tenant.assert_not_called()
+ api_obj.delete_tenant.assert_called_once_with(tenant_name=module_args["name"])
+ op_obj.get_operation.assert_called_once_with(3)
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ts.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ts.py
new file mode 100644
index 000000000..0d9cbb25a
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ts.py
@@ -0,0 +1,922 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Andrej Pajtas (apajtas@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_ts
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ FailedOperationMock,
+ OperationMock,
+ SuccessfulOperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
+fusion_ts.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
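+# (same global mocking strategy as the other functional test modules)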
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+@pytest.mark.parametrize(
+ "module_args",
+ [
+ # required parameter 'name' is missing
+ {
+ "state": "present",
+ "display_name": "Tenanct Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # required parameter 'tenant' is missing
+ {
+ "state": "present",
+ "name": "tenantspace1",
+ "display_name": "Tenanct Space 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ # unknown parameter 'extra' is provided
+ {
+ "state": "present",
+ "name": "tenantspace1",
+ "display_name": "Tenanct Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ "extra": "value",
+ },
+ # parameter 'state' has incorrect value
+ {
+ "state": "cool",
+ "name": "tenantspace1",
+ "display_name": "Tenanct Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ },
+ ],
+)
+def test_module_fails_on_wrong_parameters(m_ts_api, m_op_api, module_args):
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+ m_ts_api.return_value = api_obj
+
+ # run module
+ with pytest.raises(AnsibleFailJson):
+ fusion_ts.main()
+
+ # check api was not called at all
+ api_obj.get_tenant_space.assert_not_called()
+ api_obj.create_tenant_space.assert_not_called()
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+def test_ts_create(m_ts_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenantspace1",
+ "display_name": "Tenanct Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ts.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
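+ # tenant spaces are nested under a tenant, so the calls are scoped by
+ # tenant_name in addition to the tenant space's own name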
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_called_once_with(
+ purefusion.TenantSpacePost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ ),
+ tenant_name=module_args["tenant"],
+ )
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+def test_ts_create_without_display_name(m_ts_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenantspace1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ts.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_called_once_with(
+ purefusion.TenantSpacePost(
+ name=module_args["name"],
+ display_name=module_args["name"],
+ ),
+ tenant_name=module_args["tenant"],
+ )
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_ts_create_exception(m_ts_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "tenantspace1",
+ "display_name": "Tenanct Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant_space = MagicMock(side_effect=exec_original)
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ts.main()
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_called_once_with(
+ purefusion.TenantSpacePost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ ),
+ tenant_name=module_args["tenant"],
+ )
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+def test_ts_create_op_fails(m_ts_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenantspace1",
+ "display_name": "Tenanct Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_ts.main()
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_called_once_with(
+ purefusion.TenantSpacePost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ ),
+ tenant_name=module_args["tenant"],
+ )
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_ts_create_op_exception(m_ts_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "tenantspace1",
+        "display_name": "Tenant Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ts.main()
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_called_once_with(
+ purefusion.TenantSpacePost(
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ ),
+ tenant_name=module_args["tenant"],
+ )
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+def test_ts_update(m_ts_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenantspace1",
+        "display_name": "Tenant Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ts = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "tenant": "tenant1",
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(
+ return_value=purefusion.TenantSpace(**current_ts)
+ )
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ts.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_not_called()
+ api_obj.update_tenant_space.assert_called_once_with(
+ purefusion.TenantSpacePatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_ts_update_exception(m_ts_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "tenantspace1",
+        "display_name": "Tenant Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ts = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "tenant": "tenant1",
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(
+ return_value=purefusion.TenantSpace(**current_ts)
+ )
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(side_effect=exec_original)
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ts.main()
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_not_called()
+ api_obj.update_tenant_space.assert_called_once_with(
+ purefusion.TenantSpacePatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+def test_ts_update_op_fails(m_ts_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenantspace1",
+        "display_name": "Tenant Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ts = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "tenant": "tenant1",
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(
+ return_value=purefusion.TenantSpace(**current_ts)
+ )
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_ts.main()
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_not_called()
+ api_obj.update_tenant_space.assert_called_once_with(
+ purefusion.TenantSpacePatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_ts_update_op_exception(m_ts_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "present",
+ "name": "tenantspace1",
+        "display_name": "Tenant Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ts = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "tenant": "tenant1",
+ "display_name": None,
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(
+ return_value=purefusion.TenantSpace(**current_ts)
+ )
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ts.main()
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_not_called()
+ api_obj.update_tenant_space.assert_called_once_with(
+ purefusion.TenantSpacePatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+def test_ts_present_not_changed(m_ts_api, m_op_api):
+ module_args = {
+ "state": "present",
+ "name": "tenantspace1",
+        "display_name": "Tenant Space 1",
+ "tenant": "tenant1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ts = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "tenant": "tenant1",
+ "display_name": module_args["display_name"],
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(
+ return_value=purefusion.TenantSpace(**current_ts)
+ )
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ts.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_not_called()
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+def test_ts_absent_not_changed(m_ts_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "tenantspace1",
+ "tenant": "tenant1",
+        "display_name": "Tenant Space 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(side_effect=purefusion.rest.ApiException)
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ts.main()
+
+ assert not exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_not_called()
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_not_called()
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+def test_ts_delete(m_ts_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "tenantspace1",
+ "tenant": "tenant1",
+        "display_name": "Tenant Space 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ts = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "tenant": "tenant1",
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(
+ return_value=purefusion.TenantSpace(**current_ts)
+ )
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(AnsibleExitJson) as exc:
+ fusion_ts.main()
+
+ assert exc.value.changed
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_not_called()
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_ts_delete_exception(m_ts_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "tenantspace1",
+ "tenant": "tenant1",
+        "display_name": "Tenant Space 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ts = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "tenant": "tenant1",
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(
+ return_value=purefusion.TenantSpace(**current_ts)
+ )
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(side_effect=exec_original)
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ts.main()
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_not_called()
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+def test_ts_delete_op_fails(m_ts_api, m_op_api):
+ module_args = {
+ "state": "absent",
+ "name": "tenantspace1",
+ "tenant": "tenant1",
+        "display_name": "Tenant Space 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ts = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "tenant": "tenant1",
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(
+ return_value=purefusion.TenantSpace(**current_ts)
+ )
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(return_value=FailedOperationMock)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(OperationException):
+ fusion_ts.main()
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_not_called()
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.TenantSpacesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_ts_delete_op_exception(m_ts_api, m_op_api, exec_original, exec_catch):
+ module_args = {
+ "state": "absent",
+ "name": "tenantspace1",
+ "tenant": "tenant1",
+        "display_name": "Tenant Space 1",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+ current_ts = {
+ "id": 1,
+ "self_link": "self_link_value",
+ "name": module_args["name"], # name must match
+ "tenant": "tenant1",
+ "display_name": "different", # display_name doesn't match but UPDATE shouldn't be called
+ }
+ set_module_args(module_args)
+
+ # mock api responses
+ api_obj = MagicMock()
+ api_obj.get_tenant_space = MagicMock(
+ return_value=purefusion.TenantSpace(**current_ts)
+ )
+ api_obj.create_tenant_space = MagicMock(return_value=OperationMock(1))
+ api_obj.update_tenant_space = MagicMock(return_value=OperationMock(2))
+ api_obj.delete_tenant_space = MagicMock(return_value=OperationMock(3))
+ m_ts_api.return_value = api_obj
+
+ # mock operation results
+ op_obj = MagicMock()
+ op_obj.get_operation = MagicMock(side_effect=exec_original)
+ m_op_api.return_value = op_obj
+
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_ts.main()
+
+ # check api was called correctly
+ api_obj.get_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ api_obj.create_tenant_space.assert_not_called()
+ api_obj.update_tenant_space.assert_not_called()
+ api_obj.delete_tenant_space.assert_called_once_with(
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["name"],
+ )
+ op_obj.get_operation.assert_called_once_with(3)
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_volume.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_volume.py
new file mode 100644
index 000000000..592bda32e
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_volume.py
@@ -0,0 +1,715 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Dmitriy Li (dmli@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, patch
+
+import fusion as purefusion
+import pytest
+from ansible.module_utils import basic
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import (
+ fusion_volume,
+)
+from ansible_collections.purestorage.fusion.tests.functional.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ OperationMock,
+ SuccessfulOperationMock,
+ exit_json,
+ fail_json,
+ set_module_args,
+)
+from urllib3.exceptions import HTTPError
+
+# GLOBAL MOCKS
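+# setup_fusion is replaced so module setup does not authenticate against a real
+# endpoint, and ApiClient.call_api is made to raise so that any API call the
+# tests forget to mock fails loudly instead of going over the network.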
+fusion_volume.setup_fusion = MagicMock(return_value=purefusion.api_client.ApiClient())
+purefusion.api_client.ApiClient.call_api = MagicMock(
+ side_effect=Exception("API call not mocked!")
+)
+basic.AnsibleModule.exit_json = exit_json
+basic.AnsibleModule.fail_json = fail_json
+
+
+@pytest.fixture
+def module_args():
+ return {
+ "name": "volume_1",
+ "state": "present",
+ "display_name": "Volume 1",
+ "tenant": "t1",
+ "tenant_space": "ts1",
+ "placement_group": "pg1",
+ "storage_class": "sc1",
+ "protection_policy": "pp1",
+ "host_access_policies": ["hap1"],
+ "eradicate": False,
+ "size": "1M",
+ "issuer_id": "ABCD1234",
+ "private_key_file": "private-key.pem",
+ }
+
+
+@pytest.fixture
+def absent_module_args(module_args):
+ module_args.update(
+ {"host_access_policies": [], "eradicate": True, "state": "absent"}
+ )
+ return module_args
+
+
+@pytest.fixture
+def volume():
+ return {
+ "name": "volume_1",
+ "display_name": "Volume 1",
+ "tenant": "t1",
+ "tenant_space": "ts1",
+ "storage_class": purefusion.StorageClassRef(
+ name="sc1", id="id_1", kind="storage_class", self_link="self_link"
+ ),
+ "placement_group": purefusion.PlacementGroupRef(
+ name="pg1", id="id_1", kind="placement_group", self_link="self_link"
+ ),
+ "protection_policy": purefusion.ProtectionPolicyRef(
+ name="pp1", id="id_1", kind="protection_policy", self_link="self_link"
+ ),
+ "host_access_policies": [
+ purefusion.HostAccessPolicyRef(
+ name="hap1", id="id_1", kind="host_access_policy", self_link="self_link"
+ )
+ ],
+ "serial_number": "sn1",
+ "destroyed": False,
+ "size": 1048576,
+ "id": "id_1",
+ "self_link": "self_link",
+ }
+
+
+@pytest.fixture
+def destroyed_volume(volume):
+ volume.update({"host_access_policies": [], "destroyed": True})
+ return volume
+
+
+@patch("fusion.VolumesApi")
+@pytest.mark.parametrize(
+ "field,expected_exception_regex",
+ [
+ (
+ "name",
+ "missing required arguments: name",
+ ),
+ (
+ "tenant",
+ "missing required arguments: tenant",
+ ),
+ (
+ "tenant_space",
+ "missing required arguments: tenant_space",
+ ),
+ (
+ "storage_class",
+ "missing parameter\\(s\\) required by 'placement_group': storage_class",
+ ),
+ (
+ "placement_group",
+ "missing required arguments: placement_group",
+ ),
+ (
+ "size",
+ "missing required arguments: size",
+ ),
+ ],
+)
+def test_module_fails_on_missing_parameters(
+ mock_volumes_api, field, expected_exception_regex, module_args
+):
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(side_effect=purefusion.rest.ApiException)
+ mock_volumes_api.return_value = volumes_api
+ del module_args[field]
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(AnsibleFailJson) as ansible_error:
+ fusion_volume.main()
+ assert ansible_error.match(expected_exception_regex)
+
+
+@patch("fusion.VolumesApi")
+@pytest.mark.parametrize(
+ "dict_update,expected_exception_regex",
+ [
+ (
+ {"extra": "value"},
+ "Unsupported parameters for.*module: extra",
+ ),
+ (
+ {"state": "absent"},
+ "Volume must have no host access policies when destroyed",
+ ),
+ (
+ {"eradicate": True},
+ "'eradicate: true' cannot be used together with 'state: present'",
+ ),
+ (
+ {"size": "1K"},
+ "Size is not within the required range",
+ ),
+ ],
+)
+def test_module_fails_on_incorrect_parameters(
+ mock_volumes_api, dict_update, expected_exception_regex, module_args
+):
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(side_effect=purefusion.rest.ApiException)
+ mock_volumes_api.return_value = volumes_api
+ module_args.update(dict_update)
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(AnsibleFailJson) as ansible_error:
+ fusion_volume.main()
+ assert ansible_error.match(expected_exception_regex)
+
+
+@patch("fusion.VolumesApi")
+def test_module_not_existent_volume_with_state_absent_not_changed(
+ mock_volumes_api, module_args
+):
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(side_effect=purefusion.rest.ApiException)
+ mock_volumes_api.return_value = volumes_api
+ del module_args["host_access_policies"]
+ module_args["state"] = "absent"
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(AnsibleExitJson) as exception:
+ fusion_volume.main()
+ assert exception.value.changed is False
+ volumes_api.get_volume.assert_called_once_with(
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+def test_volume_create_successfully(mock_volumes_api, mock_operations_api, module_args):
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(side_effect=purefusion.rest.ApiException)
+ volumes_api.create_volume = MagicMock(return_value=OperationMock(1))
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(AnsibleExitJson) as exception:
+ fusion_volume.main()
+ assert exception.value.changed is True
+ volumes_api.get_volume.assert_called_with(
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ volumes_api.create_volume.assert_called_once_with(
+ purefusion.VolumePost(
+ size=1048576,
+ storage_class=module_args["storage_class"],
+ placement_group=module_args["placement_group"],
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ protection_policy=module_args["protection_policy"],
+ ),
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ operations_api.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+def test_volume_create_without_display_name_successfully(
+ mock_volumes_api, mock_operations_api, module_args
+):
+ del module_args["display_name"]
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(side_effect=purefusion.rest.ApiException)
+ volumes_api.create_volume = MagicMock(return_value=OperationMock(1))
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(AnsibleExitJson) as exception:
+ fusion_volume.main()
+ assert exception.value.changed is True
+ volumes_api.get_volume.assert_called_with(
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ volumes_api.create_volume.assert_called_with(
+ purefusion.VolumePost(
+ size=1048576,
+ storage_class=module_args["storage_class"],
+ placement_group=module_args["placement_group"],
+ name=module_args["name"],
+ display_name=module_args["name"],
+ protection_policy=module_args["protection_policy"],
+ ),
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ operations_api.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_volume_create_throws_exception(
+ mock_volumes_api, mock_operations_api, exec_original, exec_catch, module_args
+):
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(side_effect=purefusion.rest.ApiException)
+ volumes_api.create_volume = MagicMock(side_effect=exec_original)
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_volume.main()
+ volumes_api.get_volume.assert_called_with(
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ volumes_api.create_volume.assert_called_once_with(
+ purefusion.VolumePost(
+ size=1048576,
+ storage_class=module_args["storage_class"],
+ placement_group=module_args["placement_group"],
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ protection_policy=module_args["protection_policy"],
+ ),
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ operations_api.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+@pytest.mark.parametrize(
+ "updated_volume,called_with",
+ [
+ (
+ {"destroyed": True},
+ purefusion.VolumePatch(destroyed=purefusion.NullableBoolean(False)),
+ ),
+ (
+ {"size": 1000000},
+ purefusion.VolumePatch(size=purefusion.NullableSize(1048576)),
+ ),
+ (
+ {
+ "protection_policy": purefusion.ProtectionPolicyRef(
+ name="pp2",
+ id="id_1",
+ kind="protection_policy",
+ self_link="self_link",
+ )
+ },
+ purefusion.VolumePatch(protection_policy=purefusion.NullableString("pp1")),
+ ),
+ (
+ {"display_name": "Volume"},
+ purefusion.VolumePatch(display_name=purefusion.NullableString("Volume 1")),
+ ),
+ (
+ {
+ "storage_class": purefusion.StorageClassRef(
+ name="sc2", id="id_1", kind="storage_class", self_link="self_link"
+ )
+ },
+ purefusion.VolumePatch(storage_class=purefusion.NullableString("sc1")),
+ ),
+ (
+ {
+ "placement_group": purefusion.PlacementGroupRef(
+ name="pg2", id="id_1", kind="placement_group", self_link="self_link"
+ )
+ },
+ purefusion.VolumePatch(placement_group=purefusion.NullableString("pg1")),
+ ),
+ (
+ {
+ "host_access_policies": [
+ purefusion.HostAccessPolicyRef(
+ name="hap2",
+ id="id_1",
+ kind="host_access_policy",
+ self_link="self_link",
+ )
+ ]
+ },
+ purefusion.VolumePatch(
+ host_access_policies=purefusion.NullableString("hap1")
+ ),
+ ),
+ ],
+)
+def test_volume_update_with_state_present_executed_correctly(
+ mock_volumes_api,
+ mock_operations_api,
+ updated_volume,
+ called_with,
+ module_args,
+ volume,
+):
+ volume.update(updated_volume)
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(return_value=purefusion.Volume(**volume))
+ volumes_api.update_volume = MagicMock(return_value=OperationMock(1))
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(AnsibleExitJson) as exception:
+ fusion_volume.main()
+ assert exception.value.changed is True
+ volumes_api.get_volume.assert_called_with(
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ volumes_api.update_volume.assert_called_once_with(
+ called_with,
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ operations_api.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+@pytest.mark.parametrize(
+ "updated_volume,called_with",
+ [
+ (
+ {"destroyed": False, "host_access_policies": []},
+ purefusion.VolumePatch(destroyed=purefusion.NullableBoolean(True)),
+ )
+ ],
+)
+def test_volume_update_with_state_absent_executed_correctly(
+ mock_volumes_api,
+ mock_operations_api,
+ updated_volume,
+ called_with,
+ module_args,
+ volume,
+):
+ module_args["state"] = "absent"
+ del module_args["host_access_policies"]
+ volume.update(updated_volume)
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(return_value=purefusion.Volume(**volume))
+ volumes_api.update_volume = MagicMock(return_value=OperationMock(1))
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(AnsibleExitJson) as exception:
+ fusion_volume.main()
+ assert exception.value.changed is True
+ volumes_api.get_volume.assert_called_with(
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ volumes_api.update_volume.assert_called_once_with(
+ called_with,
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ operations_api.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_volume_update_throws_exception(
+ mock_volumes_api,
+ mock_operations_api,
+ exec_original,
+ exec_catch,
+ module_args,
+ volume,
+):
+ module_args["display_name"] = "volume"
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(return_value=purefusion.Volume(**volume))
+ volumes_api.update_volume = MagicMock(side_effect=exec_original)
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_volume.main()
+
+ volumes_api.get_volume.assert_called_with(
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ volumes_api.update_volume.assert_called_once_with(
+ purefusion.VolumePatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ operations_api.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_volume_update_operation_throws_exception(
+ mock_volumes_api,
+ mock_operations_api,
+ exec_original,
+ exec_catch,
+ module_args,
+ volume,
+):
+ module_args["display_name"] = "volume"
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(return_value=purefusion.Volume(**volume))
+ volumes_api.update_volume = MagicMock(return_value=OperationMock(1))
+ operations_api.get_operation = MagicMock(side_effect=exec_original)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_volume.main()
+
+ volumes_api.get_volume.assert_called_with(
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ volumes_api.update_volume.assert_called_once_with(
+ purefusion.VolumePatch(
+ display_name=purefusion.NullableString(module_args["display_name"])
+ ),
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ operations_api.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+def test_volume_delete_throws_validation_error(
+ mock_volumes_api, mock_operations_api, absent_module_args, volume
+):
+ volume["host_access_policies"] = []
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(return_value=purefusion.Volume(**volume))
+ volumes_api.update_volume = MagicMock(return_value=OperationMock(1))
+ volumes_api.delete_volume = MagicMock(return_value=OperationMock(2))
+
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(absent_module_args)
+ # run module
+ with pytest.raises(AnsibleFailJson) as ansible_fail:
+ fusion_volume.main()
+ assert ansible_fail.match(regexp="BUG: inconsistent state, eradicate_volume")
+ volumes_api.get_volume.assert_called_with(
+ volume_name=absent_module_args["name"],
+ tenant_name=absent_module_args["tenant"],
+ tenant_space_name=absent_module_args["tenant_space"],
+ )
+ volumes_api.update_volume.assert_called_once_with(
+ purefusion.VolumePatch(destroyed=purefusion.NullableBoolean(True)),
+ volume_name=absent_module_args["name"],
+ tenant_name=absent_module_args["tenant"],
+ tenant_space_name=absent_module_args["tenant_space"],
+ )
+ volumes_api.delete_volume.assert_not_called()
+ operations_api.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+def test_volume_delete_executed_correctly(
+ mock_volumes_api, mock_operations_api, absent_module_args, destroyed_volume
+):
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(
+ return_value=purefusion.Volume(**destroyed_volume)
+ )
+ volumes_api.update_volume = MagicMock(return_value=OperationMock(1))
+ volumes_api.delete_volume = MagicMock(return_value=OperationMock(2))
+
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(absent_module_args)
+ # run module
+ with pytest.raises(AnsibleExitJson):
+ fusion_volume.main()
+ volumes_api.get_volume.assert_called_with(
+ volume_name=absent_module_args["name"],
+ tenant_name=absent_module_args["tenant"],
+ tenant_space_name=absent_module_args["tenant_space"],
+ )
+ volumes_api.update_volume.assert_not_called()
+ volumes_api.delete_volume.assert_called_once_with(
+ volume_name=absent_module_args["name"],
+ tenant_name=absent_module_args["tenant"],
+ tenant_space_name=absent_module_args["tenant_space"],
+ )
+ operations_api.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, HTTPError),
+ ],
+)
+def test_volume_delete_throws_exception(
+ mock_volumes_api,
+ mock_operations_api,
+ exec_original,
+ exec_catch,
+ absent_module_args,
+ destroyed_volume,
+):
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(
+ return_value=purefusion.Volume(**destroyed_volume)
+ )
+ volumes_api.update_volume = MagicMock(return_value=OperationMock(1))
+ volumes_api.delete_volume = MagicMock(side_effect=exec_original)
+
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(absent_module_args)
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_volume.main()
+ volumes_api.get_volume.assert_called_with(
+ volume_name=absent_module_args["name"],
+ tenant_name=absent_module_args["tenant"],
+ tenant_space_name=absent_module_args["tenant_space"],
+ )
+ volumes_api.update_volume.assert_not_called()
+ volumes_api.delete_volume.assert_called_once_with(
+ volume_name=absent_module_args["name"],
+ tenant_name=absent_module_args["tenant"],
+ tenant_space_name=absent_module_args["tenant_space"],
+ )
+
+ operations_api.get_operation.assert_not_called()
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+@pytest.mark.parametrize(
+ "exec_original,exec_catch",
+ [
+ (purefusion.rest.ApiException, purefusion.rest.ApiException),
+ (HTTPError, OperationException),
+ ],
+)
+def test_volume_delete_operation_throws_exception(
+ mock_volumes_api,
+ mock_operations_api,
+ exec_original,
+ exec_catch,
+ absent_module_args,
+ destroyed_volume,
+):
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(
+ return_value=purefusion.Volume(**destroyed_volume)
+ )
+ volumes_api.update_volume = MagicMock(return_value=OperationMock(1))
+ volumes_api.delete_volume = MagicMock(return_value=OperationMock(2))
+
+ operations_api.get_operation = MagicMock(side_effect=exec_original)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(absent_module_args)
+ # run module
+ with pytest.raises(exec_catch):
+ fusion_volume.main()
+ volumes_api.get_volume.assert_called_with(
+ volume_name=absent_module_args["name"],
+ tenant_name=absent_module_args["tenant"],
+ tenant_space_name=absent_module_args["tenant_space"],
+ )
+ volumes_api.update_volume.assert_not_called()
+ volumes_api.delete_volume.assert_called_once_with(
+ volume_name=absent_module_args["name"],
+ tenant_name=absent_module_args["tenant"],
+ tenant_space_name=absent_module_args["tenant_space"],
+ )
+ operations_api.get_operation.assert_called_once_with(2)
diff --git a/ansible_collections/purestorage/fusion/tests/functional/utils.py b/ansible_collections/purestorage/fusion/tests/functional/utils.py
new file mode 100644
index 000000000..24d6f0328
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/functional/utils.py
@@ -0,0 +1,116 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import json
+from dataclasses import dataclass
+
+from ansible.module_utils import basic
+from ansible.module_utils.common.text.converters import to_bytes
+
+
+@dataclass
+class OperationMock:
+ """
+    Mock Operation object. This object should be returned by the mocked API.
+ """
+
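+    # OperationMock(1) yields a "Pending" operation with id 1, while
+    # OperationMock(1, True) and OperationMock(1, False) yield "Succeeded"
+    # and "Failed" operations respectively.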
+ def __init__(self, id=None, success=None):
+ if success is None:
+ self.status = "Pending"
+ elif success:
+ self.status = "Succeeded"
+ else:
+ self.status = "Failed"
+ self.id = id
+
+
+class SuccessfulOperationMock:
+ """
+    Mock object for a successful operation. This object is returned by the mocked Operations API if the operation was successful.
+ """
+
+ status = "Succeeded"
+
+
+class FailedOperationMock:
+ """
+    Mock object for a failed operation. This object is returned by the mocked Operations API if the operation failed.
+ """
+
+ status = "Failed"
+
+
+def set_module_args(args):
+ """
+ Prepare arguments so that they will be picked up during module creation.
+ Docs: https://docs.ansible.com/ansible/latest/dev_guide/testing_units_modules.html
+ """
+
+ args = json.dumps({"ANSIBLE_MODULE_ARGS": args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ """
+ Exception class to be raised by module.exit_json and caught by the test case
+ Docs: https://docs.ansible.com/ansible/latest/dev_guide/testing_units_modules.html
+ """
+
+ def __init__(self, kwargs):
+ self.kwargs = kwargs
+
+ @property
+ def changed(self):
+ return self.kwargs["changed"]
+
+ @property
+ def fusion_info(self):
+ return self.kwargs["fusion_info"] if "fusion_info" in self.kwargs else None
+
+
+class AnsibleFailJson(Exception):
+ """
+ Exception class to be raised by module.fail_json and caught by the test case
+ Docs: https://docs.ansible.com/ansible/latest/dev_guide/testing_units_modules.html
+ """
+
+ def __init__(self, msg, kwargs):
+ super().__init__(msg)
+ self.kwargs = kwargs
+
+
+def exit_json(self, **kwargs):
+ """
+ Function to patch over exit_json; package return data into an exception
+ Docs: https://docs.ansible.com/ansible/latest/dev_guide/testing_units_modules.html
+ """
+
+ if "changed" not in kwargs:
+ kwargs["changed"] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(self, msg, **kwargs):
+ """
+ Function to patch over fail_json; package return data into an exception
+ Docs: https://docs.ansible.com/ansible/latest/dev_guide/testing_units_modules.html
+ """
+ kwargs["failed"] = True
+ raise AnsibleFailJson(msg, kwargs)
+
+
+def side_effects_with_exceptions(side_effects):
+ """
+    Assumes side_effects is a list. Works similarly to `MagicMock(side_effect=side_effects)`,
+    but if an item in the list is an instance of an exception, it is raised instead of returned.
+ """
+ side_effects = side_effects.copy()
+
+ def _pop_side_effect(*args, **kwargs):
+ i = side_effects.pop(0)
+ if isinstance(i, Exception):
+ raise i
+ return i
+
+ return _pop_side_effect
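+
+
+# Illustrative usage (a sketch, not used elsewhere in this module): mix plain
+# return values with exceptions in a single mock so consecutive calls first
+# succeed and then raise. HTTPError here stands for urllib3's exception type.
+#
+#   op_obj.get_operation = MagicMock(
+#       side_effect=side_effects_with_exceptions(
+#           [SuccessfulOperationMock, HTTPError()]
+#       )
+#   )
+#   op_obj.get_operation()  # returns SuccessfulOperationMock
+#   op_obj.get_operation()  # raises HTTPError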
diff --git a/ansible_collections/purestorage/fusion/tests/helpers.py b/ansible_collections/purestorage/fusion/tests/helpers.py
new file mode 100644
index 000000000..40d98cf0e
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/helpers.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Denys Denysyev (ddenysyev@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import fusion as purefusion
+
+from http import HTTPStatus
+
+
+class ApiExceptionsMockGenerator:
+ @staticmethod
+ def create_permission_denied():
+ status = HTTPStatus.FORBIDDEN
+ return purefusion.rest.ApiException(status=status, reason=status.phrase)
+
+ @staticmethod
+ def create_conflict():
+ status = HTTPStatus.CONFLICT
+ return purefusion.rest.ApiException(status=status, reason=status.phrase)
+
+ @staticmethod
+ def create_not_found():
+ status = HTTPStatus.NOT_FOUND
+ return purefusion.rest.ApiException(status=status, reason=status.phrase)
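+
+
+# Illustrative usage (a sketch): make a mocked API call simulate a 404 response.
+#
+#   api_obj.get_tenant_space = MagicMock(
+#       side_effect=ApiExceptionsMockGenerator.create_not_found()
+#   )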
diff --git a/ansible_collections/purestorage/fusion/tests/integration/README.md b/ansible_collections/purestorage/fusion/tests/integration/README.md
new file mode 100644
index 000000000..7ca8ee497
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/README.md
@@ -0,0 +1,10 @@
+# Integration tests
+
+## Running tests
+
+- Copy `tests/integration/integration_config.template` to `tests/integration/integration_config.yml` and fill out the
+ env variables
+- Run with the following command:
+ ```bash
+ ansible-test integration
+ ```
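+
+A filled-in `integration_config.yml` might look like the following; all values
+are illustrative placeholders, not real endpoints or credentials:
+
+```yaml
+---
+test_env:
+  FUSION_API_HOST: https://fusion.example.com
+  FUSION_ISSUER_ID: example-issuer-id
+  FUSION_PRIVATE_KEY_FILE: /home/user/fusion-private-key.pem
+  FUSION_TOKEN_ENDPOINT: https://auth.example.com/oauth/token
+```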
diff --git a/ansible_collections/purestorage/fusion/tests/integration/integration_config.template b/ansible_collections/purestorage/fusion/tests/integration/integration_config.template
new file mode 100644
index 000000000..3fd6e77d3
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/integration_config.template
@@ -0,0 +1,6 @@
+---
+test_env:
+ FUSION_API_HOST: <FUSION_API_HOST>
+ FUSION_ISSUER_ID: <API_ID>
+ FUSION_PRIVATE_KEY_FILE: <PRIVATE_KEY>
+  FUSION_TOKEN_ENDPOINT: <STAGING_TOKEN_ENDPOINT>
\ No newline at end of file
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_az/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_az/tasks/main.yml
new file mode 100644
index 000000000..b9c23fc8c
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_az/tasks/main.yml
@@ -0,0 +1,43 @@
+- name: Create Availability Zone
+ environment: "{{ test_env }}"
+ purestorage.fusion.fusion_az:
+ name: "test_az"
+ display_name: "foo AZ"
+ region: pure-us-west
+ register: result
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect Availability Zones and verify the zone exists
+ environment: "{{ test_env }}"
+ purestorage.fusion.fusion_info:
+ gather_subset: availability_zones
+ register: fusion_info
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'test_az' in fusion_info['fusion_info']['availability_zones']"
+
+- name: Delete AZ
+ purestorage.fusion.fusion_az:
+ name: "test_az"
+ state: absent
+ region: pure-us-west
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect Availability Zones and verify the zone does not exist
+ environment: "{{ test_env }}"
+ purestorage.fusion.fusion_info:
+ gather_subset: availability_zones
+ register: fusion_info
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'test_az' not in fusion_info['fusion_info']['availability_zones']"
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_hap/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_hap/tasks/main.yml
new file mode 100644
index 000000000..eaea92684
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_hap/tasks/main.yml
@@ -0,0 +1,42 @@
+- name: Create new Linux host access policy
+ purestorage.fusion.fusion_hap:
+ name: hap_foo
+ personality: linux
+ iqn: "iqn.2005-03.com.RedHat:linux-host1"
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect hosts and check the host exists
+ purestorage.fusion.fusion_info:
+ gather_subset: host_access_policies
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'hap_foo' in fusion_info['fusion_info']['host_access_policies']"
+
+- name: Delete host access policy
+ purestorage.fusion.fusion_hap:
+ name: hap_foo
+ state: absent
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect hosts and check the host does not exist
+ purestorage.fusion.fusion_info:
+ gather_subset: host_access_policies
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'hap_foo' not in fusion_info['fusion_info']['host_access_policies']"
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ni/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ni/tasks/main.yml
new file mode 100644
index 000000000..16b5359de
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ni/tasks/main.yml
@@ -0,0 +1,37 @@
+# TODO: Currently not possible to remove network interface group from network interface
+
+# Prepare
+- name: Create new network interface group interface_group1 in AZ az1
+ purestorage.fusion.fusion_nig:
+ name: "interface_group1"
+ availability_zone: az1
+ region: pure-us-west
+ state: present
+ mtu: 1500
+ gateway: 172.17.1.1
+ prefix: 172.17.1.0/24
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+
+# Test network interfaces
+
+- name: Patch the network interface
+ purestorage.fusion.fusion_ni:
+ name: ct0.eth4
+ region: pure-us-west
+ availability_zone: az1
+ array: doubleagent-2
+ eth: 172.17.1.2/24
+ enabled: true
+ network_interface_group: "interface_group1"
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_nig/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_nig/tasks/main.yml
new file mode 100644
index 000000000..8e3ab298b
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_nig/tasks/main.yml
@@ -0,0 +1,48 @@
+- name: Create new network interface group
+ purestorage.fusion.fusion_nig:
+ name: "foo_group"
+ availability_zone: az1
+ region: pure-us-west
+ state: present
+ mtu: 1500
+ gateway: 172.17.17.1
+ prefix: 172.17.17.0/24
+ environment: "{{ test_env }}"
+ register: result
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect network_interface_groups and check the group exists
+ purestorage.fusion.fusion_info:
+ gather_subset: network_interface_groups
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'pure-us-west/az1/foo_group' in fusion_info['fusion_info']['network_interface_groups']"
+
+- name: Delete network interface group
+ purestorage.fusion.fusion_nig:
+ name: "foo_group"
+ availability_zone: az1
+ region: pure-us-west
+ state: absent
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect network_interface_groups and check the group does not exist
+ purestorage.fusion.fusion_info:
+ gather_subset: network_interface_groups
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'pure-us-west/az1/foo_group' not in fusion_info['fusion_info']['network_interface_groups']"
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_pg/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_pg/tasks/main.yml
new file mode 100644
index 000000000..f4d50a653
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_pg/tasks/main.yml
@@ -0,0 +1,95 @@
+# Prepare dependencies
+- name: Create new tenant foo_tenant
+ purestorage.fusion.fusion_tenant:
+ name: foo_tenant
+ display_name: "tenant foo"
+ environment: "{{ test_env }}"
+
+- name: Create new tenant space foo_tenant_space for foo_tenant
+ purestorage.fusion.fusion_ts:
+ name: foo_tenant_space
+ tenant: foo_tenant
+ state: present
+ environment: "{{ test_env }}"
+
+- name: Create new storage service foo_service
+ purestorage.fusion.fusion_ss:
+ name: foo_service
+ hardware_types:
+ - flash-array-x
+ display_name: "test class"
+ environment: "{{ test_env }}"
+
+
+# Test placement groups
+
+- name: Create new placement group named foo_pg
+ purestorage.fusion.fusion_pg:
+ name: foo_pg
+ tenant: foo_tenant
+ tenant_space: foo_tenant_space
+ availability_zone: az1
+ region: pure-us-west
+ storage_service: foo_service
+ state: present
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect placement_groups and check the placement group exists
+ purestorage.fusion.fusion_info:
+ gather_subset: placement_groups
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_tenant/foo_tenant_space/foo_pg' in fusion_info['fusion_info']['placement_groups']"
+
+- name: Delete placement group foo_pg
+ purestorage.fusion.fusion_pg:
+ name: foo_pg
+ tenant: foo_tenant
+ tenant_space: foo_tenant_space
+ state: absent
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect placement_groups and check the placement group does not exist
+ purestorage.fusion.fusion_info:
+ gather_subset: placement_groups
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_tenant/foo_tenant_space/foo_pg' not in fusion_info['fusion_info']['placement_groups']"
+
+
+# Teardown dependencies
+
+- name: Delete storage service foo_service
+ purestorage.fusion.fusion_ss:
+ name: foo_service
+ state: absent
+ environment: "{{ test_env }}"
+
+- name: Delete foo_tenant_space
+ purestorage.fusion.fusion_ts:
+ name: foo_tenant_space
+ tenant: foo_tenant
+ state: absent
+ environment: "{{ test_env }}"
+
+- name: Delete tenant foo_tenant
+ purestorage.fusion.fusion_tenant:
+ name: foo_tenant
+ state: absent
+ environment: "{{ test_env }}"
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_pp/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_pp/tasks/main.yml
new file mode 100644
index 000000000..f8a126662
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_pp/tasks/main.yml
@@ -0,0 +1,43 @@
+- name: Create new protection policy foo_pp
+ purestorage.fusion.fusion_pp:
+ name: foo_pp
+ local_rpo: 10
+ local_retention: 4d
+ display_name: "foo pp"
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect protection policies and check the policy exists
+ purestorage.fusion.fusion_info:
+ gather_subset: protection_policies
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_pp' in fusion_info['fusion_info']['protection_policies']"
+
+- name: Delete protection policy foo_pp
+ purestorage.fusion.fusion_pp:
+ name: foo_pp
+ state: absent
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect protection policies and check the policy does not exist
+ purestorage.fusion.fusion_info:
+ gather_subset: protection_policies
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_pp' not in fusion_info['fusion_info']['protection_policies']"
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_region/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_region/tasks/main.yml
new file mode 100644
index 000000000..8e0112f6d
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_region/tasks/main.yml
@@ -0,0 +1,53 @@
+- name: Create Region foo_region
+ purestorage.fusion.fusion_region:
+ name: "foo_region"
+ display_name: "foo Region"
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect Regions and verify the region exists
+ environment: "{{ test_env }}"
+ purestorage.fusion.fusion_info:
+ gather_subset: regions
+ register: fusion_info
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_region' in fusion_info['fusion_info']['regions']"
+
+- name: Update Region display_name
+ purestorage.fusion.fusion_region:
+ name: "foo_region"
+ display_name: "new foo Region"
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Delete Region
+ purestorage.fusion.fusion_region:
+ name: "foo_region"
+ state: absent
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect Regions and verify the region does not exist
+ environment: "{{ test_env }}"
+ purestorage.fusion.fusion_info:
+ gather_subset: regions
+ register: fusion_info
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_region' not in fusion_info['fusion_info']['regions']"
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_sc/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_sc/tasks/main.yml
new file mode 100644
index 000000000..6420db28c
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_sc/tasks/main.yml
@@ -0,0 +1,94 @@
+# Prepare dependencies
+- name: Create new tenant foo_tenant
+ purestorage.fusion.fusion_tenant:
+ name: foo_tenant
+ display_name: "tenant foo"
+ environment: "{{ test_env }}"
+
+- name: Create new tenant space foo_tenant_space for foo_tenant
+ purestorage.fusion.fusion_ts:
+ name: foo_tenant_space
+ tenant: foo_tenant
+ state: present
+ environment: "{{ test_env }}"
+
+- name: Create new storage service foo_service
+ purestorage.fusion.fusion_ss:
+ name: foo_service
+ hardware_types:
+ - flash-array-x
+ display_name: "test service"
+ environment: "{{ test_env }}"
+
+
+# Test storage classes
+
+- name: Create new storage class foo_sc
+ purestorage.fusion.fusion_sc:
+ name: foo_sc
+ size_limit: 100G
+ iops_limit: 100000
+ bw_limit: 25M
+ storage_service: foo_service
+ display_name: "test class"
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+
+- name: Collect storage classes and verify the class exists
+ environment: "{{ test_env }}"
+ purestorage.fusion.fusion_info:
+ gather_subset: storage_classes
+ register: fusion_info
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_sc' in fusion_info['fusion_info']['storage_classes']"
+
+- name: Delete storage class
+ purestorage.fusion.fusion_sc:
+ name: foo_sc
+ storage_service: foo_service
+ state: absent
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect storage classes and verify the class does not exist
+ environment: "{{ test_env }}"
+ purestorage.fusion.fusion_info:
+ gather_subset: storage_classes
+ register: fusion_info
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_sc' not in fusion_info['fusion_info']['storage_classes']"
+
+
+# Teardown dependencies
+
+- name: Delete storage service foo_service
+ purestorage.fusion.fusion_ss:
+ name: foo_service
+ state: absent
+ environment: "{{ test_env }}"
+
+- name: Delete foo_tenant_space
+ purestorage.fusion.fusion_ts:
+ name: foo_tenant_space
+ tenant: foo_tenant
+ state: absent
+ environment: "{{ test_env }}"
+
+- name: Delete tenant foo_tenant
+ purestorage.fusion.fusion_tenant:
+ name: foo_tenant
+ state: absent
+ environment: "{{ test_env }}"
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_se/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_se/tasks/main.yml
new file mode 100644
index 000000000..a900712ec
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_se/tasks/main.yml
@@ -0,0 +1,100 @@
+# Prepare dependencies
+
+- name: Create Availability Zone
+ environment: "{{ test_env }}"
+ purestorage.fusion.fusion_az:
+ name: "test_az"
+ display_name: "foo AZ"
+ region: pure-us-west
+ register: result
+
+- name: Create new network interface group
+ purestorage.fusion.fusion_nig:
+ name: "foo_group"
+ availability_zone: test_az
+ region: pure-us-west
+ state: present
+ mtu: 1500
+ gateway: 172.17.17.1
+ prefix: 172.17.17.0/24
+ environment: "{{ test_env }}"
+
+
+# Test storage endpoints
+
+- name: Create new Storage Endpoint
+ purestorage.fusion.fusion_se:
+ state: present # or absent
+ region: pure-us-west
+ name: foo_se
+ display_name: "foo se"
+ availability_zone: test_az
+ endpoint_type: iscsi
+ iscsi:
+ - address: "172.17.1.2/24"
+ gateway: "172.17.1.1"
+ network_interface_groups: ["foo_group"]
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect storage endpoints and verify the endpoint exists
+ environment: "{{ test_env }}"
+ purestorage.fusion.fusion_info:
+ gather_subset: storage_endpoints
+ register: fusion_info
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'pure-us-west/test_az/foo_se' in fusion_info['fusion_info']['storage_endpoints']"
+
+- name: Delete Storage Endpoint
+ purestorage.fusion.fusion_se:
+ state: absent
+ region: pure-us-west
+ name: foo_se
+ display_name: "foo se"
+ availability_zone: test_az
+ endpoint_type: iscsi
+ iscsi:
+ - address: "172.17.1.2/24"
+ gateway: "172.17.1.1"
+ network_interface_groups: ["foo_group"]
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect storage endpoints and verify the endpoint does not exist
+ environment: "{{ test_env }}"
+ purestorage.fusion.fusion_info:
+ gather_subset: storage_endpoints
+ register: fusion_info
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'pure-us-west/test_az/foo_se' not in fusion_info['fusion_info']['storage_endpoints']"
+
+
+# Teardown dependencies
+
+- name: Delete network interface group
+ purestorage.fusion.fusion_nig:
+ name: "foo_group"
+ availability_zone: test_az
+ region: pure-us-west
+ state: absent
+ environment: "{{ test_env }}"
+
+- name: Delete AZ
+ purestorage.fusion.fusion_az:
+ name: "test_az"
+ state: absent
+ region: pure-us-west
+ register: result
+ environment: "{{ test_env }}"
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ss/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ss/tasks/main.yml
new file mode 100644
index 000000000..26332fcf7
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ss/tasks/main.yml
@@ -0,0 +1,77 @@
+# Prepare dependencies
+- name: Create new tenant foo_tenant
+ purestorage.fusion.fusion_tenant:
+ name: foo_tenant
+ display_name: "tenant foo"
+ environment: "{{ test_env }}"
+
+- name: Create new tenant space foo_tenant_space for foo_tenant
+ purestorage.fusion.fusion_ts:
+ name: foo_tenant_space
+ tenant: foo_tenant
+ state: present
+ environment: "{{ test_env }}"
+
+
+# Test storage services
+
+- name: Create new storage service foo_service
+ purestorage.fusion.fusion_ss:
+ name: foo_service
+ hardware_types:
+ - flash-array-x
+ display_name: "test service"
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+
+- name: Collect storage services and check the service exists
+ purestorage.fusion.fusion_info:
+ gather_subset: storage_services
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_service' in fusion_info['fusion_info']['storage_services']"
+
+- name: Delete storage service foo_service
+ purestorage.fusion.fusion_ss:
+ name: foo_service
+ state: absent
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect storage services and check the service does not exist
+ purestorage.fusion.fusion_info:
+ gather_subset: storage_services
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_service' not in fusion_info['fusion_info']['storage_services']"
+
+
+# Teardown dependencies
+
+- name: Delete foo_tenant_space
+ purestorage.fusion.fusion_ts:
+ name: foo_tenant_space
+ tenant: foo_tenant
+ state: absent
+ environment: "{{ test_env }}"
+
+- name: Delete tenant foo_tenant
+ purestorage.fusion.fusion_tenant:
+ name: foo_tenant
+ state: absent
+ environment: "{{ test_env }}"
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_tenant/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_tenant/tasks/main.yml
new file mode 100644
index 000000000..262c74a73
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_tenant/tasks/main.yml
@@ -0,0 +1,41 @@
+- name: Create new tenant foo_tenant
+ purestorage.fusion.fusion_tenant:
+ name: foo_tenant
+ display_name: "tenant foo"
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect tenants and check the tenant exists
+ purestorage.fusion.fusion_info:
+ gather_subset: tenants
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_tenant' in fusion_info['fusion_info']['tenants']"
+
+- name: Delete tenant foo_tenant
+ purestorage.fusion.fusion_tenant:
+ name: foo_tenant
+ state: absent
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect tenants and check the tenant does not exist
+ purestorage.fusion.fusion_info:
+ gather_subset: tenants
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_tenant' not in fusion_info['fusion_info']['tenants']"
diff --git a/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ts/tasks/main.yml b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ts/tasks/main.yml
new file mode 100644
index 000000000..101c3f251
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/integration/targets/fusion_ts/tasks/main.yml
@@ -0,0 +1,62 @@
+# Prepare dependencies
+- name: Create new tenant foo_tenant
+ purestorage.fusion.fusion_tenant:
+ name: foo_tenant
+ display_name: "tenant foo"
+ environment: "{{ test_env }}"
+
+
+# Test tenant spaces
+
+- name: Create new tenant space foo_tenant_space for foo_tenant
+ purestorage.fusion.fusion_ts:
+ name: foo_tenant_space
+ tenant: foo_tenant
+ state: present
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect tenant spaces and check the tenant space exists
+ purestorage.fusion.fusion_info:
+ gather_subset: tenant_spaces
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_tenant/foo_tenant_space' in fusion_info['fusion_info']['tenant_spaces']"
+
+- name: Delete foo_tenant_space
+ purestorage.fusion.fusion_ts:
+ name: foo_tenant_space
+ tenant: foo_tenant
+ state: absent
+ register: result
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Collect tenant spaces and check the tenant space does not exist
+ purestorage.fusion.fusion_info:
+ gather_subset: tenant_spaces
+ register: fusion_info
+ environment: "{{ test_env }}"
+- name: Validate the task
+ ansible.builtin.assert:
+ that: "'foo_tenant/foo_tenant_space' not in fusion_info['fusion_info']['tenant_spaces']"
+
+
+# Teardown dependencies
+
+- name: Delete tenant foo_tenant
+ purestorage.fusion.fusion_tenant:
+ name: foo_tenant
+ state: absent
+ environment: "{{ test_env }}"
diff --git a/ansible_collections/purestorage/fusion/tests/unit/README.md b/ansible_collections/purestorage/fusion/tests/unit/README.md
new file mode 100644
index 000000000..248a608ba
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/unit/README.md
@@ -0,0 +1,37 @@
+# Unit tests
+
+Unit tests aim to test specific functions of modules.
+
+Each module as a whole should be covered by functional tests.
+
+## Running tests
+
+```bash
+pytest tests/unit
+```
+
+## Adding new tests
+
+See the existing tests for inspiration; a minimal sketch is shown below.
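+
+The sketch below is illustrative only and not part of the test suite; it
+exercises the `parse_minutes` helper from `plugins/module_utils/parsing.py`
+with a stub module, mirroring the pattern used in `test_parsing.py`.
+
+```python
+from ansible_collections.purestorage.fusion.plugins.module_utils.parsing import (
+    parse_minutes,
+)
+
+
+class MockModule:
+    # parse_minutes() reports bad input through fail_json(); raising here
+    # lets a test assert on invalid values with pytest.raises
+    def fail_json(self, msg):
+        raise Exception(msg)
+
+
+def test_parse_minutes_hours():
+    # "2h" means two hours, i.e. 120 minutes
+    assert parse_minutes(MockModule(), "2h") == 120
+```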
diff --git a/ansible_collections/purestorage/fusion/tests/unit/mocks/__init__.py b/ansible_collections/purestorage/fusion/tests/unit/mocks/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/unit/mocks/__init__.py
diff --git a/ansible_collections/purestorage/fusion/tests/unit/mocks/module_mock.py b/ansible_collections/purestorage/fusion/tests/unit/mocks/module_mock.py
new file mode 100644
index 000000000..cb6d489e1
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/unit/mocks/module_mock.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Denys Denysyev (ddenysyev@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock
+
+
+class ModuleSucceeded(Exception):
+ pass
+
+
+class ModuleFailed(Exception):
+ pass
+
+
+class ModuleMock(MagicMock):
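+    """Minimal stand-in for AnsibleModule used by the unit tests."""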
+ def __init__(self, params, check_mode=False):
+ super().__init__()
+
+ self.params = params
+ self.check_mode = check_mode
+
+ # mock exit_json so tests can check whether it was called
+ self.exit_json = MagicMock()
+
+ def fail_json(self, **kwargs):
+ raise ModuleFailed(str(kwargs))
+
+ def fail_on_missing_params(self, required_params=None):
+ if required_params is not None:
+ for param in required_params:
+ if param not in self.params:
+ raise ModuleFailed(f"Parameter '{param}' is missing")
diff --git a/ansible_collections/purestorage/fusion/tests/unit/mocks/operation_mock.py b/ansible_collections/purestorage/fusion/tests/unit/mocks/operation_mock.py
new file mode 100644
index 000000000..99487ddfa
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/unit/mocks/operation_mock.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Denys Denysyev (ddenysyev@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from enum import Enum
+
+
+class OperationStatus(str, Enum):
+ PENDING = "Pending"
+ ABORTING = "Aborting"
+ FAILED = "Failed"
+ SUCCEDED = "Succeeded"
+
+
+class OperationMock:
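+    """Mimics an operation object returned by the Fusion Operations API."""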
+ def __init__(self, id, status, retry_in=1):
+ self.id = id
+ self.status = status
+ self.retry_in = retry_in
diff --git a/ansible_collections/purestorage/fusion/tests/unit/module_utils/__init__.py b/ansible_collections/purestorage/fusion/tests/unit/module_utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/unit/module_utils/__init__.py
diff --git a/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_networking.py b/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_networking.py
new file mode 100644
index 000000000..13437456a
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_networking.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Jan Kodera (jkodera@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.networking import (
+ is_valid_address,
+ is_valid_network,
+ is_address_in_network,
+)
+
+
+def test_valid_address():
+ assert is_valid_address("0.0.0.0")
+ assert is_valid_address("1.1.1.1")
+ assert is_valid_address("192.168.1.2")
+ assert is_valid_address("255.255.255.255")
+
+
+def test_invalid_address():
+ assert not is_valid_address("256.1.1.1")
+ assert not is_valid_address("1.256.1.1")
+ assert not is_valid_address("1.1.256.1")
+ assert not is_valid_address("1.1.1.256")
+ assert not is_valid_address("1.1.1.256")
+ assert not is_valid_address("010.010.010.010")
+ assert not is_valid_address("1.1.1")
+ assert not is_valid_address("hostname")
+ assert not is_valid_address("0x1.0x2.0x3.0x4")
+
+
+def test_valid_network():
+ assert is_valid_network("0.0.0.0/8")
+ assert is_valid_network("1.1.1.1/12")
+ assert is_valid_network("192.168.1.2/24")
+ assert is_valid_network("255.255.255.255/32")
+
+
+def test_invalid_network():
+ assert not is_valid_network("1.1.1.1")
+ assert not is_valid_network("1.1.1.1/")
+ assert not is_valid_network("1.1.1.1/1")
+ assert not is_valid_network("1.1.1.1/7")
+ assert not is_valid_network("1.1.1.1/33")
+
+
+def test_address_is_in_network():
+ assert is_address_in_network("1.1.1.1", "1.1.0.0/16")
+ assert is_address_in_network("1.1.1.1", "1.1.1.1/32")
+
+
+def test_address_is_not_in_network():
+ assert not is_address_in_network("1.1.1.1", "1.2.0.0/16")
+ assert not is_address_in_network("1.1.1.1", "1.1.1.2/32")
diff --git a/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_operations.py b/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_operations.py
new file mode 100644
index 000000000..6b42eb35f
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_operations.py
@@ -0,0 +1,231 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Denys Denysyev (ddenysyev@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+import time
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.tests.helpers import (
+ ApiExceptionsMockGenerator,
+)
+from ansible_collections.purestorage.fusion.tests.unit.mocks.operation_mock import (
+ OperationMock,
+ OperationStatus,
+)
+
+__metaclass__ = type
+
+import fusion as purefusion
+from urllib3.exceptions import HTTPError
+
+from unittest.mock import Mock, MagicMock, call, patch
+import pytest
+from ansible_collections.purestorage.fusion.plugins.module_utils import operations
+
+time.sleep = MagicMock() # mock time.sleep function globally
+current_module = (
+ "ansible_collections.purestorage.fusion.tests.unit.module_utils.test_operations"
+)
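+# @patch targets below reach purefusion via this test module's "operations" import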
+
+
+class TestAwaitOperations:
+ @patch(f"{current_module}.operations.purefusion.OperationsApi.__new__")
+ def test_await_success_op(self, mock_op_api):
+ """
+ Should return operation
+ """
+ # Mock operation
+ op = OperationMock("1", OperationStatus.SUCCEDED)
+
+ # Mock operations api
+ mock_op_api_obj = MagicMock()
+ mock_op_api.return_value = mock_op_api_obj
+ mock_op_api_obj.get_operation = Mock(return_value=op)
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Test function
+ op1 = operations.await_operation(fusion_mock, op)
+
+ # Assertions
+ assert op == op1
+ mock_op_api_obj.get_operation.assert_called_once_with(op.id)
+
+ @patch(f"{current_module}.operations.purefusion.OperationsApi.__new__")
+ def test_await_failed_op(self, mock_op_api):
+ """
+ Should raise OperationException
+ """
+ # Mock operation
+ op = OperationMock("1", OperationStatus.FAILED)
+
+ # Mock exception
+ op_exception = OperationException(op, None)
+
+ # Mock operations api
+ mock_op_api_obj = MagicMock()
+ mock_op_api.return_value = mock_op_api_obj
+ mock_op_api_obj.get_operation = Mock(return_value=op)
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Test function
+ with pytest.raises(Exception) as exception:
+ operations.await_operation(fusion_mock, op)
+
+ # Assertions
+ assert (
+ type(exception.value) is type(op_exception)
+ and exception.value.args == op_exception.args
+ )
+ mock_op_api_obj.get_operation.assert_called_once_with(op.id)
+
+ @patch(f"{current_module}.operations.purefusion.OperationsApi.__new__")
+ def test_await_pending_op(self, mock_op_api):
+ """
+ Should return operation
+ """
+ # Mock operation
+ op1 = OperationMock("1", OperationStatus.PENDING)
+ op2 = OperationMock("1", OperationStatus.SUCCEDED)
+
+ # Mock operations api
+ mock_op_api_obj = MagicMock()
+ mock_op_api.return_value = mock_op_api_obj
+ mock_op_api_obj.get_operation = Mock(side_effect=[op1, op2])
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Test function
+ op = operations.await_operation(fusion_mock, op1)
+
+ # Assertions
+ assert op == op2
+ calls = [call(op1.id), call(op1.id)]
+ mock_op_api_obj.get_operation.assert_has_calls(calls)
+
+ @patch(f"{current_module}.operations.purefusion.OperationsApi.__new__")
+ def test_await_failed_pending_op(self, mock_op_api):
+ """
+ Should raise OperationException
+ """
+ # Mock operation
+ op1 = OperationMock("1", OperationStatus.PENDING)
+ op2 = OperationMock("1", OperationStatus.FAILED)
+
+ # Mock exception
+ op_exception = OperationException(op2, None)
+
+ # Mock operations api
+ mock_op_api_obj = MagicMock()
+ mock_op_api.return_value = mock_op_api_obj
+ mock_op_api_obj.get_operation = Mock(side_effect=[op1, op2])
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Test function
+ with pytest.raises(Exception) as exception:
+ operations.await_operation(fusion_mock, op1)
+
+ # Assertions
+ assert (
+ type(exception.value) is type(op_exception)
+ and exception.value.args == op_exception.args
+ )
+ calls = [call(op1.id), call(op1.id)]
+ mock_op_api_obj.get_operation.assert_has_calls(calls)
+
+ @patch(f"{current_module}.operations.purefusion.OperationsApi.__new__")
+ def test_await_api_exception(self, mock_op_api):
+ """
+ Should raise ApiException
+ """
+ # Mock exceptions
+ api_exception = ApiExceptionsMockGenerator.create_conflict()
+
+ # Mock operation
+ op = OperationMock("1", OperationStatus.FAILED)
+
+ # Mock operations api
+ mock_op_api_obj = MagicMock()
+ mock_op_api.return_value = mock_op_api_obj
+ mock_op_api_obj.get_operation = Mock(side_effect=api_exception)
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Test function
+ with pytest.raises(purefusion.rest.ApiException) as exception:
+ operations.await_operation(fusion_mock, op)
+
+ # Assertions
+ assert (
+ type(exception.value) is type(api_exception)
+ and exception.value.args == api_exception.args
+ )
+ mock_op_api_obj.get_operation.assert_called_once_with(op.id)
+
+ @patch(f"{current_module}.operations.purefusion.OperationsApi.__new__")
+ def test_await_http_exception(self, mock_op_api):
+ """
+ Should raise OperationException
+ """
+ # Mock operation
+ op = OperationMock("1", OperationStatus.FAILED)
+
+ # Mock exceptions
+ http_error = HTTPError()
+ op_exception = OperationException(op, http_error)
+ # Mock operations api
+ mock_op_api_obj = MagicMock()
+ mock_op_api.return_value = mock_op_api_obj
+ mock_op_api_obj.get_operation = Mock(side_effect=http_error)
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Test function
+ with pytest.raises(OperationException) as exception:
+ operations.await_operation(fusion_mock, op)
+
+ # Assertions
+ assert (
+ type(exception.value) is type(op_exception)
+ and exception.value.args == op_exception.args
+ )
+ mock_op_api_obj.get_operation.assert_called_once_with(op.id)
+
+ @patch(f"{current_module}.operations.purefusion.OperationsApi.__new__")
+ def test_await_failed_op_without_failing(self, mock_op_api):
+ """
+ Should return failed operation
+ """
+ # Mock operation
+ op = OperationMock("1", OperationStatus.FAILED)
+
+ # Mock operations api
+ mock_op_api_obj = MagicMock()
+ mock_op_api.return_value = mock_op_api_obj
+ mock_op_api_obj.get_operation = Mock(return_value=op)
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Test function
+ op_res = operations.await_operation(
+ fusion_mock, op, fail_playbook_if_operation_fails=False
+ )
+
+ # Assertions
+ assert op_res == op
+ mock_op_api_obj.get_operation.assert_called_once_with(op.id)
diff --git a/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_parsing.py b/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_parsing.py
new file mode 100644
index 000000000..7e2a1cc78
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_parsing.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Jan Kodera (jkodera@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.parsing import (
+ parse_number_with_metric_suffix,
+ parse_minutes,
+)
+
+import pytest
+
+
+class MockException(Exception):
+ pass
+
+
+class MockModule:
+ def fail_json(self, msg):
+ raise MockException()
+
+
+def test_parsing_valid_number():
+ module = MockModule()
+ assert parse_number_with_metric_suffix(module, "0") == 0
+ assert parse_number_with_metric_suffix(module, "1") == 1
+ assert parse_number_with_metric_suffix(module, "1K") == 1024
+ assert parse_number_with_metric_suffix(module, "1 K") == 1024
+ assert parse_number_with_metric_suffix(module, "124 M") == 124 * 1024 * 1024
+ assert parse_number_with_metric_suffix(module, "10 G") == 10 * 1024 * 1024 * 1024
+ assert (
+ parse_number_with_metric_suffix(module, "20 T")
+ == 20 * 1024 * 1024 * 1024 * 1024
+ )
+ assert (
+ parse_number_with_metric_suffix(module, "30 P")
+ == 30 * 1024 * 1024 * 1024 * 1024 * 1024
+ )
+ assert (
+ parse_number_with_metric_suffix(module, "30000 P")
+ == 30000 * 1024 * 1024 * 1024 * 1024 * 1024
+ )
+ assert parse_number_with_metric_suffix(module, "0", factor=1000) == 0
+ assert parse_number_with_metric_suffix(module, "1", factor=1000) == 1
+ assert parse_number_with_metric_suffix(module, "1K", factor=1000) == 1000
+ assert (
+ parse_number_with_metric_suffix(module, "124M", factor=1000)
+ == 124 * 1000 * 1000
+ )
+ assert parse_number_with_metric_suffix(module, "1.5K", factor=1000) == 1500
+ assert parse_number_with_metric_suffix(module, "1.5K", factor=1024) == 1536
+
+
+def test_parsing_invalid_number():
+ module = MockModule()
+ with pytest.raises(MockException):
+ assert parse_number_with_metric_suffix(module, "")
+ with pytest.raises(MockException):
+ assert parse_number_with_metric_suffix(module, "102X")
+ with pytest.raises(MockException):
+ assert parse_number_with_metric_suffix(module, "102 N")
+ with pytest.raises(MockException):
+ assert parse_number_with_metric_suffix(module, "102 N", factor=1000)
+ with pytest.raises(MockException):
+ assert parse_number_with_metric_suffix(module, "million")
+ with pytest.raises(MockException):
+ assert parse_number_with_metric_suffix(module, "K")
+ with pytest.raises(MockException):
+ assert parse_number_with_metric_suffix(module, "K1")
+ with pytest.raises(MockException):
+ assert parse_number_with_metric_suffix(module, "1K1")
+ with pytest.raises(MockException):
+ assert parse_number_with_metric_suffix(module, "1 K1")
+ with pytest.raises(MockException):
+ assert parse_number_with_metric_suffix(module, "M")
+ with pytest.raises(MockException):
+ assert parse_number_with_metric_suffix(module, "hello world")
+
+
+def test_parsing_valid_time_period():
+ module = MockModule()
+ assert parse_minutes(module, "10") == 10
+ assert parse_minutes(module, "2h") == 120
+ assert parse_minutes(module, "2H") == 120
+ assert parse_minutes(module, "14D") == 14 * 24 * 60
+ assert parse_minutes(module, "1W") == 7 * 24 * 60
+ assert parse_minutes(module, "12Y") == 12 * 365 * 24 * 60
+ assert (
+ parse_minutes(module, "10Y20W30D40H50M")
+ == 10 * 365 * 24 * 60 + 20 * 7 * 24 * 60 + 30 * 24 * 60 + 40 * 60 + 50
+ )
+ assert (
+ parse_minutes(module, "10Y20W30D40H")
+ == 10 * 365 * 24 * 60 + 20 * 7 * 24 * 60 + 30 * 24 * 60 + 40 * 60
+ )
+ assert (
+ parse_minutes(module, "10Y20W30D")
+ == 10 * 365 * 24 * 60 + 20 * 7 * 24 * 60 + 30 * 24 * 60
+ )
+ assert parse_minutes(module, "10Y20W") == 10 * 365 * 24 * 60 + 20 * 7 * 24 * 60
+ assert (
+ parse_minutes(module, "20W30D40H50M")
+ == 20 * 7 * 24 * 60 + 30 * 24 * 60 + 40 * 60 + 50
+ )
+ assert parse_minutes(module, "30D40H50M") == 30 * 24 * 60 + 40 * 60 + 50
+ assert parse_minutes(module, "40H50M") == 40 * 60 + 50
+ assert parse_minutes(module, "30D50M") == 30 * 24 * 60 + 50
+ assert parse_minutes(module, "20W40H") == 20 * 7 * 24 * 60 + 40 * 60
+
+
+def test_parsing_invalid_time_period():
+ module = MockModule()
+ with pytest.raises(MockException):
+ assert parse_minutes(module, "")
+ with pytest.raises(MockException):
+ assert parse_minutes(module, "1s")
+ with pytest.raises(MockException):
+ assert parse_minutes(module, "1S")
+ with pytest.raises(MockException):
+ assert parse_minutes(module, "1V")
+ with pytest.raises(MockException):
+ assert parse_minutes(module, "0M")
+ with pytest.raises(MockException):
+ assert parse_minutes(module, "0H10M")
+ with pytest.raises(MockException):
+ assert parse_minutes(module, "0H10M")
+ with pytest.raises(MockException):
+ assert parse_minutes(module, "0D10H10M")
+ with pytest.raises(MockException):
+ assert parse_minutes(module, "01W10D10H10M")
+ with pytest.raises(MockException):
+ assert parse_minutes(module, "01Y0H10M")
+ with pytest.raises(MockException):
+ assert parse_minutes(module, "1V")
diff --git a/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_prerequisites.py b/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_prerequisites.py
new file mode 100644
index 000000000..0158878cf
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_prerequisites.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Jan Kodera (jkodera@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.prerequisites import (
+ _parse_version,
+ _parse_version_requirements,
+ _version_satisfied,
+)
+
+import pytest
+
+
+def test_version():
+ # VALID
+ assert _parse_version("1.0") == (1, 0, None)
+ assert _parse_version("1.0.0") == (1, 0, 0)
+ assert _parse_version("2.3.4") == (2, 3, 4)
+ assert _parse_version("2.3.5a") == (2, 3, 5)
+ assert _parse_version("2.3.6-release") == (2, 3, 6)
+ # INVALID
+ assert _parse_version("1") is None
+ assert _parse_version("1.a") is None
+ assert _parse_version("1.1a") is None
+ assert _parse_version("a.1") is None
+ assert _parse_version("1.") is None
+ assert _parse_version("1..") is None
+ assert _parse_version("1.0.1.0") is None
+ assert _parse_version("1.0.1.a") is None
+
+
+def test_requirements():
+ # VALID
+ assert _parse_version_requirements(">= 1.0") == [(">=", (1, 0, None))]
+ assert _parse_version_requirements(">=1.0.1") == [(">=", (1, 0, 1))]
+ assert _parse_version_requirements(">= 2.0.2-release") == [(">=", (2, 0, 2))]
+ assert _parse_version_requirements(" >=3.0.3b") == [(">=", (3, 0, 3))]
+ assert _parse_version_requirements("<= 3.3.3") == [("<=", (3, 3, 3))]
+ assert _parse_version_requirements("= 3.0.3") == [("=", (3, 0, 3))]
+ assert _parse_version_requirements("== 5.3.1") == [("==", (5, 3, 1))]
+ assert _parse_version_requirements("< 4.1.2") == [("<", (4, 1, 2))]
+ assert _parse_version_requirements("> 1.3.4") == [(">", (1, 3, 4))]
+ assert _parse_version_requirements("> 1.3.4, < 2.0") == [
+ (">", (1, 3, 4)),
+ ("<", (2, 0, None)),
+ ]
+ assert _parse_version_requirements(">1.3.4 , <2.0") == [
+ (">", (1, 3, 4)),
+ ("<", (2, 0, None)),
+ ]
+ assert _parse_version_requirements("> 1.3.4 ,< 2.0") == [
+ (">", (1, 3, 4)),
+ ("<", (2, 0, None)),
+ ]
+ assert _parse_version_requirements(">1.3.4,<2.0") == [
+ (">", (1, 3, 4)),
+ ("<", (2, 0, None)),
+ ]
+ assert _parse_version_requirements(">1.3.4,<2.0, != 3.4.1") == [
+ (">", (1, 3, 4)),
+ ("<", (2, 0, None)),
+ ("!=", (3, 4, 1)),
+ ]
+ # INVALID
+ with pytest.raises(ValueError):
+ _parse_version_requirements(">>1.3.4")
+ with pytest.raises(ValueError):
+ _parse_version_requirements("<<1.3.4")
+ with pytest.raises(ValueError):
+ _parse_version_requirements("=>1.3.4,,3.0")
+ with pytest.raises(ValueError):
+ _parse_version_requirements("=<1.3.4,")
+ with pytest.raises(ValueError):
+ _parse_version_requirements("=<1.3.4")
+
+
+def test_version_satisfied():
+ assert _version_satisfied("1.0", ">=1.0, <2.0") is True
+ assert _version_satisfied("1.0.1", ">=1.0, <2.0") is True
+ assert _version_satisfied("2.0", ">=1.0, <2.0") is False
+ assert _version_satisfied("2.0.0", ">=1.0, <2.0") is False
+ assert _version_satisfied("2.0.1", ">=1.0, <2.0") is False
+ assert _version_satisfied("1.0.0", ">=1.0.0") is True
+ assert _version_satisfied("1.0", ">=1.0.0") is True
+ assert _version_satisfied("1.0", ">=1.0") is True
+ assert _version_satisfied("1.0.1", ">=1.0") is True
+ assert _version_satisfied("1.0.1", ">=1.0.0") is True
+ assert _version_satisfied("1.0.1", "<=1.0.0") is False
+ assert _version_satisfied("1.0.0", "<=1.0.0") is True
+ assert _version_satisfied("1.0", "<=1.0.0") is True
+ assert _version_satisfied("1.0", "<=1.0.1") is True
+ assert _version_satisfied("1.0", "<=1.0") is True
+ assert _version_satisfied("1.0", "<1.0") is False
+ assert _version_satisfied("1.0.0", "<1.0") is False
+ assert _version_satisfied("1.0.0", "<1.1") is True
+ assert _version_satisfied("1.0.0", "<1.0.1") is True
+ assert _version_satisfied("1.0", ">1.0") is False
+ assert _version_satisfied("1.0.1", ">1.0") is False
+ assert _version_satisfied("1.0", ">1.0.0") is False
+ assert _version_satisfied("1.0.0", ">1.0.0") is False
+ assert _version_satisfied("1.0.1", ">1.0.0") is True
+ assert _version_satisfied("1.0", "==1.0") is True
+ assert _version_satisfied("1.0", "=1.0") is True
+ assert _version_satisfied("1.0.0", "==1.0") is True
+ assert _version_satisfied("1.0.1", "==1.0") is True
+ assert _version_satisfied("1.0", "==1.0.0") is True
+ assert _version_satisfied("1.0", "==1.0.1") is False
+ assert _version_satisfied("1.0", "!=1.0.1") is True
+ assert _version_satisfied("1.0", "!=1.0.0") is False
+ assert _version_satisfied("1.0.1", "!=1.0") is False
+ assert _version_satisfied("1.0", "!=1.0") is False
diff --git a/ansible_collections/purestorage/fusion/tests/unit/modules/__init__.py b/ansible_collections/purestorage/fusion/tests/unit/modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/unit/modules/__init__.py
diff --git a/ansible_collections/purestorage/fusion/tests/unit/modules/test_fusion_az.py b/ansible_collections/purestorage/fusion/tests/unit/modules/test_fusion_az.py
new file mode 100644
index 000000000..a384506d8
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/tests/unit/modules/test_fusion_az.py
@@ -0,0 +1,446 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Denys Denysyev (ddenysyev@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING.GPLv3 or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from unittest.mock import MagicMock, Mock, patch
+
+import fusion as purefusion
+import pytest
+from ansible_collections.purestorage.fusion.plugins.module_utils.errors import (
+ OperationException,
+)
+from ansible_collections.purestorage.fusion.plugins.modules import fusion_az
+from ansible_collections.purestorage.fusion.tests.helpers import (
+ ApiExceptionsMockGenerator,
+)
+from ansible_collections.purestorage.fusion.tests.unit.mocks.module_mock import (
+ ModuleMock,
+)
+from ansible_collections.purestorage.fusion.tests.unit.mocks.operation_mock import (
+ OperationMock,
+ OperationStatus,
+)
+
+fusion_az.setup_fusion = MagicMock()
+current_module = (
+ "ansible_collections.purestorage.fusion.tests.unit.modules.test_fusion_az"
+)
+
+
+def default_module_az_params(state="present", display_name="foo_az"):
+ module_params = {
+ "state": state,
+ "name": "foo",
+ "region": "region1",
+ "display_name": display_name,
+ "issuer_id": "ABCD12345",
+ "private_key_file": "az-admin-private-key.pem",
+ }
+ return module_params
+
+
+class TestCreateAZ:
+ @patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
+ @patch(f"{current_module}.fusion_az.await_operation")
+ def test_create_az_without_disp_name(self, await_operation_mock, mock_az_api):
+ """
+ Should create az successfully
+ """
+ # Mock operation
+ op = OperationMock("1", OperationStatus.SUCCEDED)
+
+ # Mock az api
+ mock_az_api_obj = MagicMock()
+ mock_az_api.return_value = mock_az_api_obj
+ mock_az_api_obj.create_availability_zone = MagicMock(return_value=op)
+
+ # Mock await operation
+ await_operation_mock.return_value = op
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Mock Module
+ module_params = default_module_az_params("present", None)
+ moduleMock = ModuleMock(module_params)
+
+ # Test function
+ fusion_az.create_az(moduleMock, fusion_mock)
+
+ # Assertions
+ azone = purefusion.AvailabilityZonePost(
+ name=module_params["name"],
+ display_name=module_params["name"],
+ )
+ mock_az_api_obj.create_availability_zone.assert_called_with(
+ azone, region_name=module_params["region"]
+ )
+ await_operation_mock.assert_called_once_with(fusion_mock, op)
+ moduleMock.exit_json.assert_called_once_with(changed=True)
+
+ @patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
+ @patch(f"{current_module}.fusion_az.await_operation")
+ def test_create_az_check_mode(self, await_operation_mock, mock_az_api):
+ """
+ Should only exit_json
+ """
+ # Mock operation
+ op = OperationMock("1", OperationStatus.SUCCEDED)
+
+ # Mock az api
+ mock_az_api_obj = MagicMock()
+ mock_az_api.return_value = mock_az_api_obj
+ mock_az_api_obj.create_availability_zone = MagicMock(return_value=op)
+
+ # Mock await operation
+ await_operation_mock.return_value = op
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Mock Module
+ module_params = default_module_az_params("present")
+ moduleMock = ModuleMock(module_params, check_mode=True)
+
+ # Test function
+ fusion_az.create_az(moduleMock, fusion_mock)
+
+ # Assertions
+ mock_az_api_obj.create_availability_zone.assert_not_called()
+ await_operation_mock.assert_not_called()
+ moduleMock.exit_json.assert_called_once_with(changed=True)
+
+ @patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
+ @patch(f"{current_module}.fusion_az.await_operation")
+ def test_create_az_with_disp_name(self, await_operation_mock, mock_az_api):
+ """
+ Should create az successfully
+ """
+ # Mock operation
+ op = OperationMock("1", OperationStatus.SUCCEDED)
+
+ # Mock az api
+ mock_az_api_obj = MagicMock()
+ mock_az_api.return_value = mock_az_api_obj
+ mock_az_api_obj.create_availability_zone = MagicMock(return_value=op)
+
+ # Mock await operation
+ await_operation_mock.return_value = op
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Mock Module
+ module_params = default_module_az_params("present")
+ moduleMock = ModuleMock(module_params)
+
+ # Test function
+ fusion_az.create_az(moduleMock, fusion_mock)
+
+ # Assertions
+ azone = purefusion.AvailabilityZonePost(
+ name=module_params["name"],
+ display_name=module_params["display_name"],
+ )
+ mock_az_api_obj.create_availability_zone.assert_called_with(
+ azone, region_name=module_params["region"]
+ )
+ await_operation_mock.assert_called_once_with(fusion_mock, op)
+ moduleMock.exit_json.assert_called_once_with(changed=True)
+
+ @patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
+ @patch(f"{current_module}.fusion_az.await_operation")
+ def test_create_az_conflict(self, await_operation_mock, mock_az_api):
+ """
+ Should raise api exception
+ """
+ # Mock exceptions
+ api_exception = ApiExceptionsMockGenerator.create_conflict()
+
+ # Mock operation
+ op = OperationMock("1", OperationStatus.FAILED)
+
+ # Mock az api
+ mock_az_api_obj = MagicMock()
+ mock_az_api.return_value = mock_az_api_obj
+ mock_az_api_obj.create_availability_zone = Mock(side_effect=api_exception)
+
+ # Mock await operation
+ await_operation_mock.return_value = op
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Mock Module
+ module_params = default_module_az_params("present")
+ moduleMock = ModuleMock(module_params)
+
+ # Test function
+ azone = purefusion.AvailabilityZonePost(
+ name=module_params["name"],
+ display_name=module_params["display_name"],
+ )
+ with pytest.raises(purefusion.rest.ApiException) as exception:
+ fusion_az.create_az(moduleMock, fusion_mock)
+
+ # Assertions
+ assert (
+ type(exception.value) is type(api_exception)
+ and exception.value.args == api_exception.args
+ )
+ mock_az_api_obj.create_availability_zone.assert_called_with(
+ azone, region_name=module_params["region"]
+ )
+ await_operation_mock.assert_not_called()
+ moduleMock.exit_json.assert_not_called()
+
+ @patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
+ @patch(f"{current_module}.fusion_az.await_operation")
+ def test_create_az_not_found(self, await_operation_mock, mock_az_api):
+ """
+ Should raise api exception
+ """
+ # Mock exceptions
+ api_exception = ApiExceptionsMockGenerator.create_not_found()
+
+ # Mock operation
+ op = OperationMock("1", OperationStatus.FAILED)
+
+ # Mock az api
+ mock_az_api_obj = MagicMock()
+ mock_az_api.return_value = mock_az_api_obj
+ mock_az_api_obj.create_availability_zone = Mock(side_effect=api_exception)
+
+ # Mock await operation
+ await_operation_mock.return_value = op
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Mock Module
+ module_params = default_module_az_params("present")
+ moduleMock = ModuleMock(module_params)
+
+ # Test function
+ azone = purefusion.AvailabilityZonePost(
+ name=module_params["name"],
+ display_name=module_params["display_name"],
+ )
+ with pytest.raises(purefusion.rest.ApiException) as exception:
+ fusion_az.create_az(moduleMock, fusion_mock)
+
+ # Assertions
+ assert (
+ type(exception.value) is type(api_exception)
+ and exception.value.args == api_exception.args
+ )
+ mock_az_api_obj.create_availability_zone.assert_called_with(
+ azone, region_name=module_params["region"]
+ )
+ await_operation_mock.assert_not_called()
+ moduleMock.exit_json.assert_not_called()
+
+ @patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
+ @patch(f"{current_module}.fusion_az.await_operation")
+ def test_create_az_op_fails(self, await_operation_mock, mock_az_api):
+ """
+ Should raise operation exception
+ """
+ # Mock operation
+ op = OperationMock("1", OperationStatus.FAILED)
+
+ # Mock exception
+ op_exception = OperationException(op, None)
+
+ # Mock az api
+ mock_az_api_obj = MagicMock()
+ mock_az_api.return_value = mock_az_api_obj
+ mock_az_api_obj.create_availability_zone = MagicMock(return_value=op)
+
+ # Mock await operation
+ await_operation_mock.side_effect = op_exception
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Mock Module
+ module_params = default_module_az_params("present")
+ moduleMock = ModuleMock(module_params)
+
+ # Test function
+ azone = purefusion.AvailabilityZonePost(
+ name=module_params["name"],
+ display_name=module_params["display_name"],
+ )
+ with pytest.raises(Exception) as exception:
+ fusion_az.create_az(moduleMock, fusion_mock)
+
+ # Assertions
+ assert (
+ type(exception.value) is type(op_exception)
+ and exception.value.args == op_exception.args
+ )
+ mock_az_api_obj.create_availability_zone.assert_called_with(
+ azone, region_name=module_params["region"]
+ )
+ await_operation_mock.assert_called_once_with(fusion_mock, op)
+ moduleMock.exit_json.assert_not_called()
+
+
+class TestDeleteAZ:
+ @patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
+ @patch(f"{current_module}.fusion_az.await_operation")
+ def test_delete_az_successfully(self, await_operation_mock, mock_az_api):
+ """
+ Should delete az successfully
+ """
+ # Mock operation
+ op = OperationMock("1", OperationStatus.SUCCEDED)
+
+ # Mock az api
+ mock_az_api_obj = MagicMock()
+ mock_az_api.return_value = mock_az_api_obj
+ mock_az_api_obj.delete_availability_zone = MagicMock(return_value=op)
+
+ # Mock await operation
+ await_operation_mock.return_value = op
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Mock Module
+ module_params = default_module_az_params("absent")
+ moduleMock = ModuleMock(module_params)
+
+ # Test function
+ fusion_az.delete_az(moduleMock, fusion_mock)
+
+ # Assertions
+ mock_az_api_obj.delete_availability_zone.assert_called_with(
+ availability_zone_name=module_params["name"],
+ region_name=module_params["region"],
+ )
+ await_operation_mock.assert_called_once_with(fusion_mock, op)
+ moduleMock.exit_json.assert_called_once_with(changed=True)
+
+ @patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
+ @patch(f"{current_module}.fusion_az.await_operation")
+ def test_delete_az_conflict(self, await_operation_mock, mock_az_api):
+ """
+ Should raise api exception
+ """
+ # Mock exceptions
+ api_exception = ApiExceptionsMockGenerator.create_conflict()
+
+ # Mock operation
+ op = OperationMock("1", OperationStatus.FAILED)
+
+ # Mock az api
+ mock_az_api_obj = MagicMock()
+ mock_az_api.return_value = mock_az_api_obj
+ mock_az_api_obj.delete_availability_zone = Mock(side_effect=api_exception)
+
+ # Mock await operation
+ await_operation_mock.return_value = op
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Mock Module
+ module_params = default_module_az_params("absent")
+ moduleMock = ModuleMock(module_params)
+
+ # Test function
+ with pytest.raises(purefusion.rest.ApiException) as exception:
+ fusion_az.delete_az(moduleMock, fusion_mock)
+
+ # Assertions
+ assert (
+ type(exception.value) is type(api_exception)
+ and exception.value.args == api_exception.args
+ )
+ mock_az_api_obj.delete_availability_zone.assert_called_with(
+ region_name=module_params["region"],
+ availability_zone_name=module_params["name"],
+ )
+ await_operation_mock.assert_not_called()
+ moduleMock.exit_json.assert_not_called()
+
+ @patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
+ @patch(f"{current_module}.fusion_az.await_operation")
+ def test_delete_az_op_fails(self, await_operation_mock, mock_az_api):
+ """
+ Should raise operation exception
+ """
+ # Mock operation
+ op = OperationMock("1", OperationStatus.FAILED)
+
+ # Mock exception
+ op_exception = OperationException(op, None)
+
+ # Mock az api
+ mock_az_api_obj = MagicMock()
+ mock_az_api.return_value = mock_az_api_obj
+ mock_az_api_obj.delete_availability_zone = MagicMock(return_value=op)
+
+ # Mock await operation
+ await_operation_mock.side_effect = op_exception
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Mock Module
+ module_params = default_module_az_params("absent")
+ moduleMock = ModuleMock(module_params)
+
+ # Test function
+ with pytest.raises(OperationException) as exception:
+ fusion_az.delete_az(moduleMock, fusion_mock)
+ # Assertions
+ assert (
+ type(exception.value) is type(op_exception)
+ and exception.value.args == op_exception.args
+ )
+ mock_az_api_obj.delete_availability_zone.assert_called_with(
+ region_name=module_params["region"],
+ availability_zone_name=module_params["name"],
+ )
+ await_operation_mock.assert_called_once_with(fusion_mock, op)
+ moduleMock.exit_json.assert_not_called()
+
+ @patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
+ @patch(f"{current_module}.fusion_az.await_operation")
+ def test_delete_az_check_mode(self, await_operation_mock, mock_az_api):
+ """
+ Should only exit_json
+ """
+ # Mock operation
+ op = OperationMock("1", OperationStatus.SUCCEDED)
+
+ # Mock az api
+ mock_az_api_obj = MagicMock()
+ mock_az_api.return_value = mock_az_api_obj
+ mock_az_api_obj.delete_availability_zone = MagicMock(return_value=op)
+
+ # Mock await operation
+ await_operation_mock.return_value = op
+
+ # Mock fusion
+ fusion_mock = MagicMock()
+
+ # Mock Module
+ module_params = default_module_az_params("absent")
+ moduleMock = ModuleMock(module_params, check_mode=True)
+
+ # Test function
+ fusion_az.delete_az(moduleMock, fusion_mock)
+
+ # Assertions
+ mock_az_api_obj.delete_availability_zone.assert_not_called()
+ await_operation_mock.assert_not_called()
+ moduleMock.exit_json.assert_called_once_with(changed=True)