summaryrefslogtreecommitdiffstats
path: root/ansible_collections/purestorage
diff options
context:
space:
mode:
Diffstat (limited to 'ansible_collections/purestorage')
-rw-r--r--ansible_collections/purestorage/flasharray/.github/pull_request_template.md1
-rw-r--r--ansible_collections/purestorage/flasharray/.github/workflows/ansible-lint.yml6
-rw-r--r--ansible_collections/purestorage/flasharray/.github/workflows/black.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/.github/workflows/main.yml23
-rw-r--r--ansible_collections/purestorage/flasharray/.github/workflows/stale.yml2
-rw-r--r--ansible_collections/purestorage/flasharray/.pylintrc587
-rw-r--r--ansible_collections/purestorage/flasharray/CHANGELOG.rst187
-rw-r--r--ansible_collections/purestorage/flasharray/FILES.json1508
-rw-r--r--ansible_collections/purestorage/flasharray/MANIFEST.json4
-rw-r--r--ansible_collections/purestorage/flasharray/README.md16
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml17
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/changelog.yaml234
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/1_27_summary.yaml7
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/411_nfs_user_mapping.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/412_fix_snapshot_suffix_handling.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/413_eradicate_pgsnap.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/415_autodir_policies.yaml3
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/420_proxy_protocol.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/422_sched_enable_fix.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/428_promotion.yaml4
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/429_host_balance.yaml3
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/430_throttle_support.yaml3
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/431_offload_profile.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/433_certs.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/436_snap_fix.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/440_null_suffix.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/441_v2_version.yaml3
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/444_euc_fix.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/445_py39.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/448_add_subs.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/450_no_gateway.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/452_throttle_fix.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/459_fix_eradication_timer_info.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/460_eradicaton.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/461_ntp_keys.yaml3
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/462_info_update.yaml6
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/463_nfs_version.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/464_fix_ds_add.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/468_missing_subset.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/469_fix_missing_bind_password.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/471_fix_ip_protocol.yaml3
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/474_network_fixes.yaml6
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/480_rename_vg.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/482_schedule.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/483_missing_replicate.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/484_fix_repl_sched.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/485_fix_host.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/487_pgrename.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/488_fix_pgsnap_eradication.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/495_add_del_pgroup_info.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/496_fix_cert_signing.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/498_fix_pg_creation.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/499_rest_227.yaml3
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/505_dns_attribute.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/506_disable_pgsched.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/509_check_peer.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/513_remote_snapshot_suffix.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/516_fix_throttle.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/518_nfs_security.yaml3
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/519_add_cloud_capacity.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/520_add_distro.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/523_nfs_policies.yaml5
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/524_empty_ds.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/527_pgsnap_rest2.yaml4
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/529_eula_v2.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/530_ntp_rest2.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/531_ra_rest.yaml4
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/536_inv_rest2.yaml4
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/536_syslog_rest.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/538_arrayname_rest.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/539_rest2_vnc.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/541_r2_offload.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/545_4kcert.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/547_lacp_neighbor_info.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/changelogs/fragments/548_uptime.yaml2
-rw-r--r--ansible_collections/purestorage/flasharray/meta/runtime.yml2
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py1
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/module_utils/common.py89
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py48
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/module_utils/version.py344
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py25
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py11
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py11
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py25
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py25
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_cbsexpand.py153
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py218
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py46
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py12
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py11
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py15
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py7
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py115
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py86
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py72
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py11
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_file.py183
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py16
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_hardware.py110
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py16
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py686
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py345
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py11
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py13
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py56
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py452
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py91
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py314
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py107
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py87
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py471
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py29
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py508
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py14
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py84
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py11
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py11
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py65
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py10
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py11
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py58
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py149
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py11
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py49
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py93
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py82
-rw-r--r--ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py55
-rw-r--r--ansible_collections/purestorage/flasharray/requirements.txt1
-rw-r--r--ansible_collections/purestorage/flasharray/tests/config.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml4
-rw-r--r--ansible_collections/purestorage/flashblade/.github/workflows/black.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/.github/workflows/main.yml24
-rw-r--r--ansible_collections/purestorage/flashblade/.github/workflows/stale.yml2
-rw-r--r--ansible_collections/purestorage/flashblade/.pylintrc587
-rw-r--r--ansible_collections/purestorage/flashblade/CHANGELOG.rst98
-rw-r--r--ansible_collections/purestorage/flashblade/FILES.json894
-rw-r--r--ansible_collections/purestorage/flashblade/MANIFEST.json6
-rw-r--r--ansible_collections/purestorage/flashblade/README.md8
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml7
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/changelog.yaml105
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/222_bucket_type_fix.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/223_add_drive_type.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/224_smb_policies.yaml4
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/225_delete_rl.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/227_s3acc_human_quota.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/230_prom_fix.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/232_multiple_keys.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/237_info_policy.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/238_user_policy.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/239_access_rights.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/242_cascade.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/243_policy_desc.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/244_add_deny.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/245_quota_plus.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/246_smb_encrypt.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/247_space_consistency.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/252_object_lock_info.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/254_update_212_info.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/255_smb_ca.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/257_mode_change.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/258_add_public_buckets.yaml4
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/263_fix_multiple_modules_idempotency.yaml4
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/266_bucket_fix.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/changelogs/fragments/268_multi-chassis-lag.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/meta/runtime.yml2
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py48
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py304
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py63
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py8
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py28
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py174
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py40
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_hardware.py187
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py658
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py60
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py23
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py908
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py167
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py69
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py2
-rw-r--r--ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py1
-rw-r--r--ansible_collections/purestorage/flashblade/tests/config.yaml2
-rw-r--r--ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt2
-rw-r--r--ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt2
-rw-r--r--ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt2
-rw-r--r--ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt2
-rw-r--r--ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt2
-rw-r--r--ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt2
-rw-r--r--ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt2
-rw-r--r--ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt2
-rw-r--r--ansible_collections/purestorage/fusion/.github/workflows/ansible-lint.yaml4
-rw-r--r--ansible_collections/purestorage/fusion/.github/workflows/black.yaml2
-rw-r--r--ansible_collections/purestorage/fusion/.github/workflows/create-release.yaml4
-rw-r--r--ansible_collections/purestorage/fusion/.github/workflows/main.yml25
-rw-r--r--ansible_collections/purestorage/fusion/.github/workflows/rh_automation_hub_token_keep_alive.yml19
-rw-r--r--ansible_collections/purestorage/fusion/.github/workflows/stale.yml2
-rw-r--r--ansible_collections/purestorage/fusion/CHANGELOG.rst23
-rw-r--r--ansible_collections/purestorage/fusion/FILES.json135
-rw-r--r--ansible_collections/purestorage/fusion/MANIFEST.json4
-rw-r--r--ansible_collections/purestorage/fusion/README.md12
-rw-r--r--ansible_collections/purestorage/fusion/changelogs/.plugin-cache.yaml2
-rw-r--r--ansible_collections/purestorage/fusion/changelogs/changelog.yaml36
-rw-r--r--ansible_collections/purestorage/fusion/meta/runtime.yml2
-rw-r--r--ansible_collections/purestorage/fusion/plugins/module_utils/errors.py2
-rw-r--r--ansible_collections/purestorage/fusion/plugins/module_utils/parsing.py2
-rw-r--r--ansible_collections/purestorage/fusion/plugins/module_utils/prerequisites.py2
-rw-r--r--ansible_collections/purestorage/fusion/plugins/module_utils/snapshots.py29
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_api_client.py11
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_array.py28
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_az.py8
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_hap.py6
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_ni.py7
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_nig.py8
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_pg.py37
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_pp.py35
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_ra.py13
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_region.py8
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_sc.py8
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_se.py9
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_ss.py9
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_tenant.py8
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_ts.py8
-rw-r--r--ansible_collections/purestorage/fusion/plugins/modules/fusion_volume.py157
-rw-r--r--ansible_collections/purestorage/fusion/test/config.yaml2
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_api_client.py15
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_array.py25
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_az.py3
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_hap.py3
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_info.py1
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_nig.py5
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_pg.py5
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_pp.py21
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_ra.py2
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_region.py4
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_sc.py4
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_se.py5
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_ss.py4
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_tenant.py4
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_ts.py4
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/test_fusion_volume.py150
-rw-r--r--ansible_collections/purestorage/fusion/tests/functional/utils.py15
-rw-r--r--ansible_collections/purestorage/fusion/tests/helpers.py8
-rw-r--r--ansible_collections/purestorage/fusion/tests/unit/mocks/operation_mock.py14
-rw-r--r--ansible_collections/purestorage/fusion/tests/unit/module_utils/test_parsing.py47
-rw-r--r--ansible_collections/purestorage/fusion/tests/unit/modules/test_fusion_az.py10
248 files changed, 9325 insertions, 4521 deletions
diff --git a/ansible_collections/purestorage/flasharray/.github/pull_request_template.md b/ansible_collections/purestorage/flasharray/.github/pull_request_template.md
index 27079cb18..5872c8ad6 100644
--- a/ansible_collections/purestorage/flasharray/.github/pull_request_template.md
+++ b/ansible_collections/purestorage/flasharray/.github/pull_request_template.md
@@ -10,6 +10,7 @@
- Feature Pull Request
- New Module Pull Request
- New Role Pull Request
+- REST 2 Pull Request
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below -->
diff --git a/ansible_collections/purestorage/flasharray/.github/workflows/ansible-lint.yml b/ansible_collections/purestorage/flasharray/.github/workflows/ansible-lint.yml
index 0b2102184..7e79f4e7d 100644
--- a/ansible_collections/purestorage/flasharray/.github/workflows/ansible-lint.yml
+++ b/ansible_collections/purestorage/flasharray/.github/workflows/ansible-lint.yml
@@ -1,5 +1,5 @@
-name: Ansible Lint # feel free to pick your own name
-on: [push, pull_request]
+name: Ansible Lint # feel free to pick your own name
+"on": [push, pull_request]
jobs:
build:
@@ -7,4 +7,4 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Run ansible-lint
- uses: ansible-community/ansible-lint-action@main
+ uses: ansible/ansible-lint-action@main
diff --git a/ansible_collections/purestorage/flasharray/.github/workflows/black.yaml b/ansible_collections/purestorage/flasharray/.github/workflows/black.yaml
index e5f9711f6..19b2b01d3 100644
--- a/ansible_collections/purestorage/flasharray/.github/workflows/black.yaml
+++ b/ansible_collections/purestorage/flasharray/.github/workflows/black.yaml
@@ -1,6 +1,6 @@
name: Lint
-on: [push, pull_request]
+"on": [push, pull_request]
jobs:
lint:
diff --git a/ansible_collections/purestorage/flasharray/.github/workflows/main.yml b/ansible_collections/purestorage/flasharray/.github/workflows/main.yml
index e41c0e099..529e1fa88 100644
--- a/ansible_collections/purestorage/flasharray/.github/workflows/main.yml
+++ b/ansible_collections/purestorage/flasharray/.github/workflows/main.yml
@@ -1,6 +1,6 @@
name: Pure Storage Ansible CI
-on:
+"on":
pull_request:
push:
schedule:
@@ -13,31 +13,18 @@ jobs:
strategy:
matrix:
ansible:
- - stable-2.11
- - stable-2.12
- - stable-2.13
- stable-2.14
- stable-2.15
+ - stable-2.16
- devel
python-version:
- - 3.8
- 3.9
- "3.10"
- "3.11"
exclude:
- - python-version: "3.11"
- ansible: stable-2.11
- - python-version: "3.11"
- ansible: stable-2.12
- - python-version: "3.11"
- ansible: stable-2.13
- - python-version: "3.10"
- ansible: stable-2.11
- - python-version: 3.8
- ansible: stable-2.14
- - python-version: 3.8
- ansible: stable-2.15
- - python-version: 3.8
+ - python-version: 3.9
+ ansible: stable-2.16
+ - python-version: 3.9
ansible: devel
steps:
- name: Check out code
diff --git a/ansible_collections/purestorage/flasharray/.github/workflows/stale.yml b/ansible_collections/purestorage/flasharray/.github/workflows/stale.yml
index 7bbc0505b..ee7c9796e 100644
--- a/ansible_collections/purestorage/flasharray/.github/workflows/stale.yml
+++ b/ansible_collections/purestorage/flasharray/.github/workflows/stale.yml
@@ -1,6 +1,6 @@
name: Mark stale issues and pull requests
-on:
+"on":
schedule:
- cron: "0 0 * * *"
diff --git a/ansible_collections/purestorage/flasharray/.pylintrc b/ansible_collections/purestorage/flasharray/.pylintrc
deleted file mode 100644
index cc8d948d4..000000000
--- a/ansible_collections/purestorage/flasharray/.pylintrc
+++ /dev/null
@@ -1,587 +0,0 @@
-[MASTER]
-
-# A comma-separated list of package or module names from where C extensions may
-# be loaded. Extensions are loading into the active Python interpreter and may
-# run arbitrary code
-extension-pkg-whitelist=
-
-# Add files or directories to the blacklist. They should be base names, not
-# paths.
-ignore=CVS
-
-# Add files or directories matching the regex patterns to the blacklist. The
-# regex matches against base names, not paths.
-ignore-patterns=
-
-# Python code to execute, usually for sys.path manipulation such as
-# pygtk.require().
-#init-hook=
-
-# Use multiple processes to speed up Pylint.
-jobs=1
-
-# List of plugins (as comma separated values of python modules names) to load,
-# usually to register additional checkers.
-load-plugins=
-
-# Pickle collected data for later comparisons.
-persistent=yes
-
-# Specify a configuration file.
-#rcfile=
-
-# When enabled, pylint would attempt to guess common misconfiguration and emit
-# user-friendly hints instead of false-positive error messages
-suggestion-mode=yes
-
-# Allow loading of arbitrary C extensions. Extensions are imported into the
-# active Python interpreter and may run arbitrary code.
-unsafe-load-any-extension=no
-
-
-[MESSAGES CONTROL]
-
-# Only show warnings with the listed confidence levels. Leave empty to show
-# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
-confidence=
-
-# Disable the message, report, category or checker with the given id(s). You
-# can either give multiple identifiers separated by comma (,) or put this
-# option multiple times (only on the command line, not in the configuration
-# file where it should appear only once).You can also use "--disable=all" to
-# disable everything first and then reenable specific checks. For example, if
-# you want to run only the similarities checker, you can use "--disable=all
-# --enable=similarities". If you want to run only the classes checker, but have
-# no Warning level messages displayed, use"--disable=all --enable=classes
-# --disable=W"
-disable=
- abstract-method,
- access-member-before-definition,
- ansible-deprecated-version,
- arguments-differ,
- assignment-from-no-return,
- assignment-from-none,
- attribute-defined-outside-init,
- bad-continuation,
- bad-indentation,
- bad-mcs-classmethod-argument,
- broad-except,
- c-extension-no-member,
- cell-var-from-loop,
- chained-comparison,
- comparison-with-callable,
- consider-iterating-dictionary,
- consider-merging-isinstance,
- consider-using-dict-comprehension,
- consider-using-enumerate,
- consider-using-get,
- consider-using-in,
- consider-using-set-comprehension,
- consider-using-ternary,
- deprecated-lambda,
- deprecated-method,
- deprecated-module,
- eval-used,
- exec-used,
- expression-not-assigned,
- fixme,
- function-redefined,
- global-statement,
- global-variable-undefined,
- import-error,
- import-self,
- inconsistent-return-statements,
- invalid-envvar-default,
- invalid-name,
- invalid-sequence-index,
- keyword-arg-before-vararg,
- len-as-condition,
- line-too-long,
- literal-comparison,
- locally-disabled,
- method-hidden,
- misplaced-comparison-constant,
- missing-docstring,
- no-else-raise,
- no-else-return,
- no-init,
- no-member,
- no-name-in-module,
- no-self-use,
- no-value-for-parameter,
- non-iterator-returned,
- not-a-mapping,
- not-an-iterable,
- not-callable,
- old-style-class,
- pointless-statement,
- pointless-string-statement,
- possibly-unused-variable,
- protected-access,
- redefined-argument-from-local,
- redefined-builtin,
- redefined-outer-name,
- redefined-variable-type,
- reimported,
- relative-import,
- signature-differs,
- simplifiable-if-expression,
- simplifiable-if-statement,
- subprocess-popen-preexec-fn,
- super-init-not-called,
- superfluous-parens,
- too-few-public-methods,
- too-many-ancestors,
- too-many-arguments,
- too-many-boolean-expressions,
- too-many-branches,
- too-many-function-args,
- too-many-instance-attributes,
- too-many-lines,
- too-many-locals,
- too-many-nested-blocks,
- too-many-public-methods,
- too-many-return-statements,
- too-many-statements,
- trailing-comma-tuple,
- trailing-comma-tuple,
- try-except-raise,
- unbalanced-tuple-unpacking,
- undefined-loop-variable,
- unexpected-keyword-arg,
- ungrouped-imports,
- unidiomatic-typecheck,
- unnecessary-pass,
- unsubscriptable-object,
- unsupported-assignment-operation,
- unsupported-delete-operation,
- unsupported-membership-test,
- unused-argument,
- unused-import,
- unused-variable,
- used-before-assignment,
- useless-object-inheritance,
- useless-return,
- useless-super-delegation,
- wrong-import-order,
- wrong-import-position,
-
-# Enable the message, report, category or checker with the given id(s). You can
-# either give multiple identifier separated by comma (,) or put this option
-# multiple time (only on the command line, not in the configuration file where
-# it should appear only once). See also the "--disable" option for examples.
-enable=c-extension-no-member
-
-
-[REPORTS]
-
-# Python expression which should return a note less than 10 (10 is the highest
-# note). You have access to the variables errors warning, statement which
-# respectively contain the number of errors / warnings messages and the total
-# number of statements analyzed. This is used by the global evaluation report
-# (RP0004).
-evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-
-# Template used to display messages. This is a python new-style format string
-# used to format the message information. See doc for all details
-#msg-template=
-
-# Set the output format. Available formats are text, parseable, colorized, json
-# and msvs (visual studio).You can also give a reporter class, eg
-# mypackage.mymodule.MyReporterClass.
-output-format=text
-
-# Tells whether to display a full report or only the messages
-reports=no
-
-# Activate the evaluation score.
-score=yes
-
-
-[REFACTORING]
-
-# Maximum number of nested blocks for function / method body
-max-nested-blocks=5
-
-# Complete name of functions that never returns. When checking for
-# inconsistent-return-statements if a never returning function is called then
-# it will be considered as an explicit return statement and no message will be
-# printed.
-never-returning-functions=optparse.Values,sys.exit
-
-
-[VARIABLES]
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-additional-builtins=
-
-# Tells whether unused global variables should be treated as a violation.
-allow-global-unused-variables=yes
-
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,
- _cb
-
-# A regular expression matching the name of dummy variables (i.e. expectedly
-# not used).
-dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
-
-# Argument names that match this expression will be ignored. Default to name
-# with leading underscore
-ignored-argument-names=_.*|^ignored_|^unused_
-
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-
-# List of qualified module names which can have objects that can redefine
-# builtins.
-redefining-builtins-modules=six.moves,past.builtins,future.builtins,io,builtins
-
-
-[BASIC]
-
-# Naming style matching correct argument names
-argument-naming-style=snake_case
-
-# Regular expression matching correct argument names. Overrides argument-
-# naming-style
-#argument-rgx=
-
-# Naming style matching correct attribute names
-attr-naming-style=snake_case
-
-# Regular expression matching correct attribute names. Overrides attr-naming-
-# style
-#attr-rgx=
-
-# Bad variable names which should always be refused, separated by a comma
-bad-names=foo,
- bar,
- baz,
- toto,
- tutu,
- tata,
- _,
-
-# Naming style matching correct class attribute names
-class-attribute-naming-style=any
-
-# Regular expression matching correct class attribute names. Overrides class-
-# attribute-naming-style
-#class-attribute-rgx=
-
-# Naming style matching correct class names
-class-naming-style=PascalCase
-
-# Regular expression matching correct class names. Overrides class-naming-style
-#class-rgx=
-
-# Naming style matching correct constant names
-const-naming-style=UPPER_CASE
-
-# Regular expression matching correct constant names. Overrides const-naming-
-# style
-#const-rgx=
-
-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-docstring-min-length=-1
-
-# Naming style matching correct function names
-function-naming-style=snake_case
-
-# Regular expression matching correct function names. Overrides function-
-# naming-style
-#function-rgx=
-
-# Good variable names which should always be accepted, separated by a comma
-good-names=i,
- j,
- k,
- f,
- e,
- ex,
- Run,
- C,
- __metaclass__,
-
-# Include a hint for the correct naming format with invalid-name
-include-naming-hint=no
-
-# Naming style matching correct inline iteration names
-inlinevar-naming-style=any
-
-# Regular expression matching correct inline iteration names. Overrides
-# inlinevar-naming-style
-#inlinevar-rgx=
-
-# Naming style matching correct method names
-method-naming-style=snake_case
-
-# Regular expression matching correct method names. Overrides method-naming-
-# style
-#method-rgx=
-
-# Naming style matching correct module names
-module-naming-style=snake_case
-
-# Regular expression matching correct module names. Overrides module-naming-
-# style
-#module-rgx=
-module-rgx=[a-z_][a-z0-9_-]{2,40}$
-method-rgx=[a-z_][a-z0-9_]{2,40}$
-function-rgx=[a-z_][a-z0-9_]{2,40}$
-
-# Colon-delimited sets of names that determine each other's naming style when
-# the name regexes allow several styles.
-name-group=
-
-# Regular expression which should only match function or class names that do
-# not require a docstring.
-no-docstring-rgx=^_
-
-# List of decorators that produce properties, such as abc.abstractproperty. Add
-# to this list to register other decorators that produce valid properties.
-property-classes=abc.abstractproperty
-
-# Naming style matching correct variable names
-variable-naming-style=snake_case
-
-# Regular expression matching correct variable names. Overrides variable-
-# naming-style
-#variable-rgx=
-
-
-[SPELLING]
-
-# Limits count of emitted suggestions for spelling mistakes
-max-spelling-suggestions=4
-
-# Spelling dictionary name. Available dictionaries: none. To make it working
-# install python-enchant package.
-spelling-dict=
-
-# List of comma separated words that should not be checked.
-spelling-ignore-words=
-
-# A path to a file that contains private dictionary; one word per line.
-spelling-private-dict-file=
-
-# Tells whether to store unknown words to indicated private dictionary in
-# --spelling-private-dict-file option instead of raising a message.
-spelling-store-unknown-words=no
-
-
-[FORMAT]
-
-# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
-expected-line-ending-format=
-
-# Regexp for a line that is allowed to be longer than the limit.
-ignore-long-lines=^\s*(# )?<?https?://\S+>?$
-
-# Number of spaces of indent required inside a hanging or continued line.
-indent-after-paren=4
-
-# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
-# tab).
-indent-string=' '
-
-# Maximum number of characters on a single line.
-max-line-length=160
-
-# Maximum number of lines in a module
-max-module-lines=1000
-
-# List of optional constructs for which whitespace checking is disabled. `dict-
-# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
-# `trailing-comma` allows a space between comma and closing bracket: (a, ).
-# `empty-line` allows space-only lines.
-no-space-check=trailing-comma,
- dict-separator
-
-# Allow the body of a class to be on the same line as the declaration if body
-# contains single statement.
-single-line-class-stmt=no
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-
-[TYPECHECK]
-
-# List of decorators that produce context managers, such as
-# contextlib.contextmanager. Add to this list to register other decorators that
-# produce valid context managers.
-contextmanager-decorators=contextlib.contextmanager
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E1101 when accessed. Python regular
-# expressions are accepted.
-generated-members=
-
-# Tells whether missing members accessed in mixin class should be ignored. A
-# mixin class is detected if its name ends with "mixin" (case insensitive).
-ignore-mixin-members=yes
-
-# This flag controls whether pylint should warn about no-member and similar
-# checks whenever an opaque object is returned when inferring. The inference
-# can return multiple potential results while evaluating a Python object, but
-# some branches might not be evaluated, which results in partial inference. In
-# that case, it might be useful to still emit no-member and other checks for
-# the rest of the inferred objects.
-ignore-on-opaque-inference=yes
-
-# List of class names for which member attributes should not be checked (useful
-# for classes with dynamically set attributes). This supports the use of
-# qualified names.
-ignored-classes=optparse.Values,thread._local,_thread._local
-
-# List of module names for which member attributes should not be checked
-# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis. It
-# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=
- _MovedItems,
-# Show a hint with possible names when a member name was not found. The aspect
-# of finding the hint is based on edit distance.
-missing-member-hint=yes
-
-# The minimum edit distance a name should have in order to be considered a
-# similar match for a missing member name.
-missing-member-hint-distance=1
-
-# The total number of similar names that should be taken in consideration when
-# showing a hint for a missing member.
-missing-member-max-choices=1
-
-
-[SIMILARITIES]
-
-# Ignore comments when computing similarities.
-ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-ignore-imports=no
-
-# Minimum lines number of a similarity.
-min-similarity-lines=4
-
-
-[LOGGING]
-
-# Logging modules to check that the string format arguments are in logging
-# function parameter format
-logging-modules=logging
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,
- XXX,
- TODO
-
-
-[CLASSES]
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,
- __new__,
- setUp
-
-# List of member names, which should be excluded from the protected access
-# warning.
-exclude-protected=_asdict,
- _fields,
- _replace,
- _source,
- _make
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=mcs
-
-
-[DESIGN]
-
-# Maximum number of arguments for function / method
-max-args=5
-
-# Maximum number of attributes for a class (see R0902).
-max-attributes=7
-
-# Maximum number of boolean expressions in a if statement
-max-bool-expr=5
-
-# Maximum number of branch for function / method body
-max-branches=12
-
-# Maximum number of locals for function / method body
-max-locals=15
-
-# Maximum number of parents for a class (see R0901).
-max-parents=7
-
-# Maximum number of public methods for a class (see R0904).
-max-public-methods=20
-
-# Maximum number of return / yield for function / method body
-max-returns=6
-
-# Maximum number of statements in function / method body
-max-statements=50
-
-# Minimum number of public methods for a class (see R0903).
-min-public-methods=2
-
-
-[IMPORTS]
-
-# Allow wildcard imports from modules that define __all__.
-allow-wildcard-with-all=no
-
-# Analyse import fallback blocks. This can be used to support both Python 2 and
-# 3 compatible code, which means that the block might have code that exists
-# only in one or another interpreter, leading to false positives when analysed.
-analyse-fallback-blocks=no
-
-# Deprecated modules which should not be used, separated by a comma
-deprecated-modules=regsub,
- TERMIOS,
- Bastion,
- rexec
-
-# Create a graph of external dependencies in the given file (report RP0402 must
-# not be disabled)
-ext-import-graph=
-
-# Create a graph of every (i.e. internal and external) dependencies in the
-# given file (report RP0402 must not be disabled)
-import-graph=
-
-# Create a graph of internal dependencies in the given file (report RP0402 must
-# not be disabled)
-int-import-graph=
-
-# Force import order to recognize a module as part of the standard
-# compatibility libraries.
-known-standard-library=
-
-# Force import order to recognize a module as part of a third party library.
-known-third-party=enchant
-
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when being caught. Defaults to
-# "Exception"
-overgeneral-exceptions=Exception
diff --git a/ansible_collections/purestorage/flasharray/CHANGELOG.rst b/ansible_collections/purestorage/flasharray/CHANGELOG.rst
index 60e168bbd..71991a731 100644
--- a/ansible_collections/purestorage/flasharray/CHANGELOG.rst
+++ b/ansible_collections/purestorage/flasharray/CHANGELOG.rst
@@ -5,6 +5,193 @@ Purestorage.Flasharray Release Notes
.. contents:: Topics
+v1.27.0
+=======
+
+Release Summary
+---------------
+
+| This release changes the minimum supported Purity//FA version.
+|
+| The minimum supported Purity//FA version increases to 6.1.0.
+| All previous versions are classed as EOL by Pure Storage support.
+|
+| This change is to support the full integration to Purity//FA REST v2.x
+
+
+Minor Changes
+-------------
+
+- purefa_arrayname - Convert to REST v2
+- purefa_eula - Only sign if not previously signed. From REST 2.30 name, title and company are no longer required
+- purefa_info - Add support for controller uptime from Purity//FA 6.6.3
+- purefa_inventory - Convert to REST v2
+- purefa_ntp - Convert to REST v2
+- purefa_offload - Convert to REST v2
+- purefa_pgsnap - Module now requires minimum FlashArray Purity//FA 6.1.0
+- purefa_ra - Add ``present`` and ``absent`` as valid ``state`` options
+- purefa_ra - Add connecting as valid status of RA to perform operations on
+- purefa_ra - Convert to REST v2
+- purefa_syslog - ``name`` becomes a required parameter as module converts to full REST 2 support
+- purefa_vnc - Convert to REST v2
+
+Bugfixes
+--------
+
+- purefa_certs - Allow certificates of over 3000 characters to be imported.
+- purefa_info - Resolved issue with KeyError when LACP bonds are in use
+- purefa_inventory - Fix issue with iSCSI-only FlashArrays
+- purefa_pgsnap - Add support for restoring volumes connected to hosts in a host-based protection group and hosts in a hostgroup-based protection group.
+
+v1.26.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_policy - Add SMB user based enumeration parameter
+- purefa_policy - Remove default setting for nfs_version to allow for change of version at policy level
+
+Bugfixes
+--------
+
+- purefa_ds - Fix issue with SDK returning empty data for data directory services even when it does exist
+- purefa_policy - Fix incorrect call of post instead of patch for NFS policies
+
+v1.25.0
+=======
+
+Minor Changes
+-------------
+
+- all - ``distro`` package added as a pre-requisite
+- multiple - Remove packaging pre-requisite.
+- multiple - Where only REST 2.x endpoints are used, convert to REST 2.x methodology.
+- purefa_info - Expose NFS security flavor for policies
+- purefa_info - Expose cloud capacity details if array is a Cloud Block Store.
+- purefa_policy - Added NFS security flavors for accessing files in the mount point.
+
+v1.24.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_dns - Added facility to add a CA certificate to management DNS and check peer.
+- purefa_snap - Add support for suffix on remote offload snapshots
+
+Bugfixes
+--------
+
+- purefa_dns - Fixed attribute error on deletion of management DNS
+- purefa_pgsched - Fixed issue with disabling schedules
+- purefa_pgsnap - Fixed incorrect parameter name
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_hardware - Manage FlashArray Hardware Identification
+
+v1.23.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_info - Add NSID value for NVMe namespace in `hosts` response
+- purefa_info - Subset `pgroups` now also provides a new dict called `deleted_pgroups`
+- purefa_offload - Remove `nfs` as an option when Purity//FA 6.6.0 or higher is detected
+
+Bugfixes
+--------
+
+- purefa_cert - Fixed issue where parts of the subject were not included in the CSR if they did not exist in the currently used cert.
+- purefa_pg - Allows a protection group to be correctly created when `target` is specified as well as other objects, such as `volumes` or `hosts`
+
+v1.22.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_eradication - Added support for disabled and enabled timers from Purity//FA 6.4.10
+- purefa_info - Add array subscription data
+- purefa_info - Added `nfs_version` to policies and rules from Purity//FA 6.4.10
+- purefa_info - Added `total_used` to multiple sections from Purity//FA 6.4.10
+- purefa_info - Provide array timezone from Purity//FA 6.4.10
+- purefa_info - Report NTP Symmetric key presence from Purity//FA 6.4.10
+- purefa_network - Add support for creating/modifying VIF and LACP_BOND interfaces
+- purefa_network - `enabled` option added. This must now be used instead of state=absent to disable a physical interface as state=absent can now fully delete a non-physical interface
+- purefa_ntp - Added support for NTP Symmetric Key from Purity//FA 6.4.10
+- purefa_pgsched - Change `snap_at` and `replicate_at` to be AM or PM hourly
+- purefa_pgsnap - Add protection group snapshot rename functionality
+- purefa_policy - Added support for multiple NFS versions from Purity//FA 6.4.10
+- purefa_vg - Add rename parameter
+
+Bugfixes
+--------
+
+- purefa_ds - Fixes error when enabling directory services while a bind_user is set on the array and a bind_password is not.
+- purefa_ds - Fixes issue with creating a new ds configuration while setting force_bind_password as "false".
+- purefa_host - Fix incorrect calling of "module.params".
+- purefa_info - Added missing alerts subset name
+- purefa_info - Fixed attribute errors after EUC changes
+- purefa_info - Fixed issue with replica links in unknown state
+- purefa_info - Fixed parameter error when enabled and disabled timers are different values on purity 6.4.10+ arrays.
+- purefa_info - Fixed py39 specific bug with multiple DNS entries
+- purefa_network - Allow `gateway` to be set as `0.0.0.0` to remove an existing gateway address
+- purefa_network - Fixed IPv6 support issues
+- purefa_network - Fixed idempotency issue when gateway not modified
+- purefa_pgsched - Fixed bug with an unnecessary substitution
+- purefa_pgsnap - Enabled to eradicate destroyed snapshots.
+- purefa_pgsnap - Ensure that `now` and `remote` are mutually exclusive.
+- purefa_snap - Fixed incorrect calling logic causing failure on remote snapshot creation
+- purefa_subnet - Fixed IPv4 gateway removal issue.
+- purefa_subnet - Fixed IPv6 support issues.
+
+New Modules
+-----------
+
+- purestorage.flasharray.purefa_file - Manage FlashArray File Copies
+
+v1.21.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_info - Add `port_connectivity` information for hosts
+- purefa_info - Add promotion status information for volumes
+- purefa_offload - Added a new profile parameter.
+- purefa_pgsnap - Added new parameter to support snapshot throttling
+- purefa_snap - Added new parameter to support snapshot throttling
+
+Bugfixes
+--------
+
+- purefa_certs - Resolved CSR issue and require export_file for state sign.
+- purefa_info - Fix serial number generation issue for vVols
+- purefa_snap - Fixed issue with remote snapshot retrieve. Mainly a workaround to an issue with Purity REST 1.x when remote snapshots are searched.
+- purefa_volume - Fixed bug with NULL suffix for multiple volume creation.
+
+v1.20.0
+=======
+
+Minor Changes
+-------------
+
+- purefa_info - Added support for autodir policies
+- purefa_policy - Added support for autodir policies
+- purefa_proxy - Add new protocol parameter, defaults to https
+
+Bugfixes
+--------
+
+- purefa_pgsched - Resolved idempotency issue with snap and replication enabled flags
+- purefa_pgsnap - Fixed issue with eradicating deleted pgsnapshot
+- purefa_pgsnap - Update the accepted suffixes to include also numbers only. Fixed the logic to retrieve the latest completed snapshot
+- purefa_policy - Set user_mapping parameter default to True
+
v1.19.1
=======
diff --git a/ansible_collections/purestorage/flasharray/FILES.json b/ansible_collections/purestorage/flasharray/FILES.json
index 8bb40c24c..5beee0936 100644
--- a/ansible_collections/purestorage/flasharray/FILES.json
+++ b/ansible_collections/purestorage/flasharray/FILES.json
@@ -8,458 +8,479 @@
"format": 1
},
{
- "name": "roles",
+ "name": "changelogs",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "roles/.keep",
+ "name": "changelogs/210_add_rename_hgroup.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "8589783011b4145d3eb5099a7d5e025e9fd2cbf50319d426f0b5b6f8e1b637af",
"format": 1
},
{
- "name": "tests",
+ "name": "changelogs/fragments",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": ".github",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/360_fix_volume.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "214e7d076ebe88080ae3b674f9218f3fd82c3f624c12c47c1e0f5b25a25cedff",
"format": 1
},
{
- "name": ".github/CONTRIBUTING.md",
+ "name": "changelogs/fragments/294_user_map_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "69fb16d49892fb5d60316a051f1d27d741e71fc84f18c14ff7d388616925535e",
+ "chksum_sha256": "a8303eade7f404a0454044ac907e369cf12ab8a715dae17873ef482f959b55ce",
"format": 1
},
{
- "name": ".github/feature_request_template.md",
+ "name": "changelogs/fragments/342_add_vol_promotion.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4731d199ca9cbe66b2b6de02846b4860ccfb4fd0ebb2872fe6452b6cf5b73ce2",
+ "chksum_sha256": "9bcce181fe8efb221844d176b0afa0afceeec84a3fdb252c4b6e9b60d262d800",
"format": 1
},
{
- "name": ".github/workflows",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/506_disable_pgsched.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9de4a4a3dbd6f1d1d5ce98b60c936cad9bfa10a10c81ee106131541f5256429c",
"format": 1
},
{
- "name": ".github/workflows/black.yaml",
+ "name": "changelogs/fragments/441_v2_version.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6fb3e0af2e41fb0618586a2990e6645fb9b29d1a7b64b7168c5d27af320569c8",
+ "chksum_sha256": "e2ef872d9429b76ec5d282503e51e165303e0a742ee5826efe210542cfecaa2e",
"format": 1
},
{
- "name": ".github/workflows/ansible-lint.yml",
+ "name": "changelogs/fragments/214_join_ou.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4c85688d98b71e3a6594530a362cd5d2cf83842ceaccd0e0fc76e233777c1cef",
+ "chksum_sha256": "b14ab70f9bd3756c7aca4c28f7d0bf7c2e40815710275232deb7d90239108b57",
"format": 1
},
{
- "name": ".github/workflows/stale.yml",
+ "name": "changelogs/fragments/428_promotion.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0bdef4889afabcd627fc30711a0809c7468b8c9e64cbcebe1334f794a41e7bd9",
+ "chksum_sha256": "f5eeb622fc74c1f0abe26e024c16868b3d475aa878071844b15f914720c3f013",
"format": 1
},
{
- "name": ".github/workflows/main.yml",
+ "name": "changelogs/fragments/270_add_priority_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2c584c3cb803d47f9b2325a565f92bea429be2f5e2a1241824ed1b2a0a99ebaf",
+ "chksum_sha256": "3ce6bf60d3a1efd2f708490a654ec6c34e1617bb80f5114c170d683dee794f56",
"format": 1
},
{
- "name": ".github/ISSUE_TEMPLATE",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/116_add_policies.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "308c90a9b130b29db36fe30970cc4e83a86f72d909d979c251cdfa9ea37cc17d",
"format": 1
},
{
- "name": ".github/ISSUE_TEMPLATE/feature_request.md",
+ "name": "changelogs/fragments/259_fix_gateway_check.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1f48c52f209a971b8e7eae4120144d28fcf8ee38a7778a7b4d8cf1ab356617d2",
+ "chksum_sha256": "89448cb7b64a91aaeee486714624ffa318333f43cfda73519129081df032e01a",
"format": 1
},
{
- "name": ".github/ISSUE_TEMPLATE/bug_report.md",
+ "name": "changelogs/fragments/141_add_remote_snapshot.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0c8d64f29fb4536513653bf8c97da30f3340e2041b91c8952db1515d6b23a7b3",
+ "chksum_sha256": "658ef54ac8bea8cb8a3737ace02ac93e301ac1195044ba8a474eab5ed9f68fe4",
"format": 1
},
{
- "name": ".github/pull_request_template.md",
+ "name": "changelogs/fragments/310_hg_vol_idempotency.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "565ead1b588caaa10cd6f2ed1bb6c809eb2ad93bf75da3a198690cac778432d6",
+ "chksum_sha256": "9aba6c636b1732e62a10fa765a6c3e7e1c5c25f4be439d1d603b5769356c4a02",
"format": 1
},
{
- "name": ".github/bug_report_template.md",
+ "name": "changelogs/fragments/139_pgsnap_ac_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b4eb8821158c73fa62944e91e917f1d1b81fafed3adfe0e6ea373f99902bdf1d",
+ "chksum_sha256": "e44ab022a764253dabc565481afd94e7a5a2cb0e37a638bfe76a8d0e59139bdf",
"format": 1
},
{
- "name": "meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/518_nfs_security.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bcb6ca1d611f013d3c1a12bbfdcb1381b1122158138054d9522a7c70b9978ac",
"format": 1
},
{
- "name": "meta/runtime.yml",
+ "name": "changelogs/fragments/420_proxy_protocol.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f0b38b3ecfc8b98b6957f123c01aea90d068bc4349210b126758f8a009062a82",
+ "chksum_sha256": "7b0d6dfcb36aa94fdb2254482b73bf06facc0a4c854b3560017fcd33a53b28cf",
"format": 1
},
{
- "name": "meta/execution-environment.yml",
+ "name": "changelogs/fragments/169_add_certs.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a6458a579fbece249677d5d5473a58da36c7c8ab0a23b136891551f96e2c9b4e",
+ "chksum_sha256": "355b59a19cff9aede4aab5de882b471171cda7853e50aec1089e196c7df16e28",
"format": 1
},
{
- "name": ".yamllint",
+ "name": "changelogs/fragments/536_syslog_rest.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2970fa4875092f99825ac0da3c82d2413ce973087b9945e68fdfa7b3b1e2012e",
+ "chksum_sha256": "8fc95a321302077490598c5d57d224dd83a4afc4b6c9338f33d89516755169c4",
"format": 1
},
{
- "name": "README.md",
+ "name": "changelogs/fragments/145_fix_missing_move_variable.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1de49e694e4dde079633a7fd592588252a1f37f9d9e687e9ed66acaf82248ca5",
+ "chksum_sha256": "a212d6e202a5231066c55f1ef24f962bd544d31220b75c6820d58616b3ba3a20",
"format": 1
},
{
- "name": "requirements.txt",
+ "name": "changelogs/fragments/429_host_balance.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d19402afeb70da85f47229c6d2c91666784d4c9f4f2b3171a4d9921dc1aaa48e",
+ "chksum_sha256": "2f26eddba192d79e62a6e680da2917ff6067b5bb31cb1cd82dc8fa35bca8a518",
"format": 1
},
{
- "name": "docs",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/412_fix_snapshot_suffix_handling.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea3612f016a451961a56457389c8a277e7e09aaf244fb5f8058eced860d27ea7",
"format": 1
},
{
- "name": "docs/docsite",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/487_pgrename.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5dcfbd333705d454559b8d6819a89fefa18eebafabd012cdd1d79afeea839274",
"format": 1
},
{
- "name": "docs/docsite/links.yml",
+ "name": "changelogs/fragments/231_syslog_settings.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ba87531c24128b4584a8f5dc481594ff232c9c19f1324e315149e17fe685baec",
+ "chksum_sha256": "8554f5bb3da3ca0f967b61188806535a8d4161371ba5abcae56c3fbef98981d3",
"format": 1
},
{
- "name": "playbooks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/229_snapsuffix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76c0cbc363953ab6384119e2b69f15be5ffb4f8b251966da6906cf397fb13c0a",
"format": 1
},
{
- "name": "playbooks/roles",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/308_add_vm.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fde532f4cb23df09dba53e242f40d796ac82bbc0bb5e2208a3642288708cdd65",
"format": 1
},
{
- "name": "playbooks/.keep",
+ "name": "changelogs/fragments/440_null_suffix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "231b9105fcff58da4b829bcd76e0910685cd4bfadf5d2f278453ecc1b81887da",
"format": 1
},
{
- "name": "playbooks/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/367_fix_vg.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0a9b281dad34acdf964d12a23ab6644168adf1104b1cf51c0af829fbebf9333",
"format": 1
},
{
- "name": "playbooks/templates/.keep",
+ "name": "changelogs/fragments/202_add_sso.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "2558b4d0697775d62ab7e12c6149fb1be85a2e56be9dafe561ee02aac2bf3920",
"format": 1
},
{
- "name": "playbooks/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/207_fix_disable_for_remote_assist.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f3e2568e1937dcbd0ce31e3a87ce1da72af881c77995e55e8673ec487832696",
"format": 1
},
{
- "name": "playbooks/files/.keep",
+ "name": "changelogs/fragments/398_hgoup_alias.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "8e001322d964bedd03a888574a51c7b967a91a295b80e0f62bcca1426e58d716",
"format": 1
},
{
- "name": "playbooks/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/351_fix_rest_check.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9247247ec40002f6c1cbfa6cade93009a439529a61f88b7b3d6541f2cdf2f80",
"format": 1
},
{
- "name": "playbooks/vars/.keep",
+ "name": "changelogs/fragments/110_add_apiclient_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "a91901335be037584e59a521f7201e27431567170240b027ef9bb9f7220bf3d0",
"format": 1
},
{
- "name": "playbooks/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/152_fix_user.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "855ffc4ffbd24286e556666da375a79f19a3ec7da829ffa46d8f1983335f96d2",
"format": 1
},
{
- "name": "playbooks/tasks/.keep",
+ "name": "changelogs/fragments/436_snap_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "3fff9af259a27393498695b348fec1d30ed60e966caaccea6f3e3a18562f7df9",
"format": 1
},
{
- "name": "CHANGELOG.rst",
+ "name": "changelogs/fragments/124_sdk_handshake.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "36c7a096434b1733c4fa08958f557f3dbbedf562d8f1bdd0ea1c469c8d5a0823",
+ "chksum_sha256": "09dd30cee672d1bfcf0852933b8db73124d3465fe15be02d4b77cfe93df58c51",
"format": 1
},
{
- "name": "settings.json",
+ "name": "changelogs/fragments/111_add_filesystem_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "02d67ecc8a46b6b4ee0955afb6bcc8e8be5739c7cc9552e0d084cb8d2dda79dd",
+ "chksum_sha256": "e99a34a5a71e458de587f9741aadfb712c00f98ac28795c23929d97c32468550",
"format": 1
},
{
- "name": ".gitignore",
+ "name": "changelogs/fragments/393_offload_recover.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "301bd6ff5bc1dea2fe8e6b9295d4757ea0b569143b3ae21e3fb6cfe458e3c46d",
+ "chksum_sha256": "99d7fa900a916022865378fcc1f766e71bcf5a8f3b9a98b272eff891de04b481",
"format": 1
},
{
- "name": "changelogs",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/520_add_distro.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f660b1cc27e66116661b2a7c1a026d26a5f0726c9d43c02fdfe3a7a4a35ca579",
"format": 1
},
{
- "name": "changelogs/config.yaml",
+ "name": "changelogs/fragments/422_sched_enable_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "787229dd578477fe1009b0d84411b5c9678bf474c0c89642bd7381d6c4803c19",
+ "chksum_sha256": "e5237c8f88098a0fe4c626d7ad8657745bf42f58a4f8fce2da10bac4c187175f",
"format": 1
},
{
- "name": "changelogs/210_add_rename_hgroup.yaml",
+ "name": "changelogs/fragments/134_ac_pg_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8589783011b4145d3eb5099a7d5e025e9fd2cbf50319d426f0b5b6f8e1b637af",
+ "chksum_sha256": "eb72b8d852fda09db8bfcd0742081fecbabd6d68c97b0311a29a26765ed67307",
"format": 1
},
{
- "name": "changelogs/.plugin-cache.yaml",
+ "name": "changelogs/fragments/547_lacp_neighbor_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3df7f8ef3b35247657661745b3ed47c672699a9965e556f26aa763910b6087eb",
+ "chksum_sha256": "8451b969e6f2a71c80ed8e8ba94a038723d5f55c1bad9e88eba05b9c4fff9333",
"format": 1
},
{
- "name": "changelogs/changelog.yaml",
+ "name": "changelogs/fragments/272_volume_prio.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dc5530e8f118081f497ab074144452b06f24b4771f8fa6332e0e367d86fc0f4d",
+ "chksum_sha256": "d5126118e5a4e014fe832b11a84fe2f0004496a45d3bcf47be4645f3fa85c11e",
"format": 1
},
{
- "name": "changelogs/211_fix_clearing_host_inititators.yaml",
+ "name": "changelogs/fragments/107_host_case_clarity.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8ce58291d0256cb22a7a8cb015ebfc4775474f594d5c724225875c495213d259",
+ "chksum_sha256": "14cdfe46c920bce4daf2066105d43bd974d27b7398f0c6021a4c7409c53ecbe9",
"format": 1
},
{
- "name": "changelogs/fragments",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/388_remove_27.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79481f3b61a45b1d006a43850b16ca12b356b3157005dd866ae6b84c73279d55",
"format": 1
},
{
- "name": "changelogs/fragments/145_fix_missing_move_variable.yaml",
+ "name": "changelogs/fragments/292_fix_ds_password.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a212d6e202a5231066c55f1ef24f962bd544d31220b75c6820d58616b3ba3a20",
+ "chksum_sha256": "3c65285fc6514bcb540d9f89b7e7786e1e5c5c40dc835d459e333e87be2070b1",
"format": 1
},
{
- "name": "changelogs/fragments/116_add_policies.yaml",
+ "name": "changelogs/fragments/348_add_default_prot.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "308c90a9b130b29db36fe30970cc4e83a86f72d909d979c251cdfa9ea37cc17d",
+ "chksum_sha256": "70909ac3544b7e941da1c84e9ba75ecd1e01898f0da9f5a58bef8f372222dbac",
"format": 1
},
{
- "name": "changelogs/fragments/317_add_all_squash.yaml",
+ "name": "changelogs/fragments/513_remote_snapshot_suffix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d8a2830079f133a5e1f3876f720599a8b974f9337a7b7fec2d7c2957c7a0b238",
+ "chksum_sha256": "13821b3072fc7532cc784e6199a90e578394f533cbfdd14786069dc97c9f3220",
"format": 1
},
{
- "name": "changelogs/fragments/259_fix_gateway_check.yaml",
+ "name": "changelogs/fragments/113_add_exports_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "89448cb7b64a91aaeee486714624ffa318333f43cfda73519129081df032e01a",
+ "chksum_sha256": "5966655368d1bd6f13a19deb5ff00c2884e3015eea1fe054393e47c0a367343b",
"format": 1
},
{
- "name": "changelogs/fragments/387_no_volume_failure.yaml",
+ "name": "changelogs/fragments/344_fix_smtp.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6489276ccdaba3a849b7e24a8a5a6069fd4b62505857d2ebdd7945492b25c5ca",
+ "chksum_sha256": "eb2f427e4bc4f43a9dcf4cfcb70387974e2239613d4363adb8d4d022a9b0cb6e",
"format": 1
},
{
- "name": "changelogs/fragments/140_pod_case.yaml",
+ "name": "changelogs/fragments/318_vol_defaults.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8314f833498e81e803152d5a6b8fa4992b690a8954a7b75e4f78f55f3e6281f1",
+ "chksum_sha256": "7542c7e94b9cd44f3f946c5a4518e0ddf23fc02ebfc48d032b483e5d6534b8e0",
"format": 1
},
{
- "name": "changelogs/fragments/369_fix_host.yaml",
+ "name": "changelogs/fragments/226_deprecate_protocol.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b504c0859681ad87a0b32f5d8e8671c523fdf1514ec4c46f815758a192971012",
+ "chksum_sha256": "47123e52a081b333ae710eb005535c5fa84469d33a0d30ca7ec83e207d7d46c1",
"format": 1
},
{
- "name": "changelogs/fragments/319_lockout.yaml",
+ "name": "changelogs/fragments/224_add_nguid_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4e8e8f895f0e131b6129c80938763ba06a483e419ad5be1cf5e8e262e32d7fd4",
+ "chksum_sha256": "afee325ee45ede0ecadb3d8cfcfc8b11caeb91c257e6fc69b221e972ae80415f",
"format": 1
},
{
- "name": "changelogs/fragments/334_fix_vg_qos.yaml",
+ "name": "changelogs/fragments/121_add_multi_volume_creation.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9963dd088bdc4526e0306809822f4137b6c60fc92df70bd59a535ccf66d70cd0",
+ "chksum_sha256": "7d63a0ff3a88738493bf7f0afee3600f0b236b28f592694bd686d38a94bdd7d7",
"format": 1
},
{
- "name": "changelogs/fragments/238_add_dirsnap_rename.yaml",
+ "name": "changelogs/fragments/444_euc_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7dfe5fa7c0d3f44e723d764cf28d28438255e6d92fb0351537b1b75aeb4cde37",
+ "chksum_sha256": "d77c63e49dc36b2aaab0f38c1c907d43a36c6d3d64da01b18981a6c00a5b94fd",
"format": 1
},
{
- "name": "changelogs/fragments/320_completed_snaps.yaml",
+ "name": "changelogs/fragments/516_fix_throttle.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "304300ac00c30ab4b188befe164c2d9a89bb9eb92f1ea1580d4866abdbb00c3a",
+ "chksum_sha256": "f8e12e7a99fbc8168ec12a835c27e54941ecfb15dc3696e09054571ad4302724",
"format": 1
},
{
- "name": "changelogs/fragments/261_fix_bad_arrays.yaml",
+ "name": "changelogs/fragments/284_volfact_for_recover.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "10bcda38e7401292d7eb21bfbea276bbcdaa70279475b57ead65200878682562",
+ "chksum_sha256": "cafe8f028e3c83d63d9dd1270249266c7fba52f22c193424e4feb5ae8c73c4d3",
"format": 1
},
{
- "name": "changelogs/fragments/397_parialconnect_bug.yaml",
+ "name": "changelogs/fragments/213_add_kmip.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dcb8da9aace9eaed868a579288728ec27c58fe43de3a0ac4b2f26c0f2c045749",
+ "chksum_sha256": "a25a4a59e2fe39675f54af4b041187ff50f2a84419ecf0ad33597f9a870e347c",
"format": 1
},
{
- "name": "changelogs/fragments/239_safe_mode.yaml",
+ "name": "changelogs/fragments/448_add_subs.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "50e022d852e1bc93695d7980834f02c62fe584f160ceab7c6fde0a50909f215b",
+ "chksum_sha256": "687bba4eefbc30ab258e8c7b507101deea354529cd7504274aa07b12aabe437b",
"format": 1
},
{
- "name": "changelogs/fragments/113_add_exports_support.yaml",
+ "name": "changelogs/fragments/265_fix_multiple_nfs_rules.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5966655368d1bd6f13a19deb5ff00c2884e3015eea1fe054393e47c0a367343b",
+ "chksum_sha256": "92a7032c57aa30e06e3b7f3a8b14f1bc4a1dbd3c0f91e13ee862f9f91d894d30",
"format": 1
},
{
- "name": "changelogs/fragments/284_volfact_for_recover.yaml",
+ "name": "changelogs/fragments/527_pgsnap_rest2.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cafe8f028e3c83d63d9dd1270249266c7fba52f22c193424e4feb5ae8c73c4d3",
+ "chksum_sha256": "35b80dbb6d79fbe84eeff5acd53ef6d398582c28d94b55926cfed82f3aaa2351",
"format": 1
},
{
- "name": "changelogs/fragments/228_nguid_to_volfact.yaml",
+ "name": "changelogs/fragments/337_fix_non-prod_versions.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "286446286f7b817d37b3ecd99bf40347dc9ca9e76835dae68f5042088f0ccad0",
+ "chksum_sha256": "9461a330780061d2f543cda7721e4818e8c7e2245439c1737a0f4ea8d095018f",
"format": 1
},
{
- "name": "changelogs/fragments/302_fix_pg_recover_and_target_update.yaml",
+ "name": "changelogs/fragments/523_nfs_policies.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "24bd01c41044777cec8a08dceab1e2adffefeef7454fa3856d9e90aac81986a2",
+ "chksum_sha256": "3a8f974f616c2729c0b1ed99aa60e66b23c246eb2e91c8ff0e02af38f460f0f7",
"format": 1
},
{
- "name": "changelogs/fragments/351_fix_rest_check.yaml",
+ "name": "changelogs/fragments/153_syslog_update.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e9247247ec40002f6c1cbfa6cade93009a439529a61f88b7b3d6541f2cdf2f80",
+ "chksum_sha256": "ee87138221741514acab7cee37c3f0c41772129b130a8100acdcea2ec732495f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/341_pg_400s.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f3e1a18a9d91068015a1874425c8516bd1c8b7dd82bf5f4c4db8af24ce010eb",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/293_add_chassis_inventory.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4f7b2c60b0fdddb85d2c7ac76f6f71c09b0e1de21908d4945d88203e9895d30",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/294_dns_ntp_idempotency_absent.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3621e04c4076595ad61dbd05bbdc9810304336354c40938e82be177b89e5e029",
"format": 1
},
{
@@ -470,45 +491,52 @@
"format": 1
},
{
- "name": "changelogs/fragments/342_add_vol_promotion.yaml",
+ "name": "changelogs/fragments/174_null_gateway.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9bcce181fe8efb221844d176b0afa0afceeec84a3fdb252c4b6e9b60d262d800",
+ "chksum_sha256": "bec4fcd5e2b2ec84d89caf3e6513843980489d6cc60c02fe8e01fbbebb658b31",
"format": 1
},
{
- "name": "changelogs/fragments/249_allow_cert_reimport.yaml",
+ "name": "changelogs/fragments/196_fix_activedr_api_version.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b1b80b8daae56e7b84f267a875b689f1ca8b1b69e9b061261a18121ebab54908",
+ "chksum_sha256": "da06077e7148e0a5647cbb4db66dbf57a87199b3ecd8961f7c070f0a223b49f6",
"format": 1
},
{
- "name": "changelogs/fragments/293_add_chassis_inventory.yaml",
+ "name": "changelogs/fragments/205_policy_protocl.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c4f7b2c60b0fdddb85d2c7ac76f6f71c09b0e1de21908d4945d88203e9895d30",
+ "chksum_sha256": "13fe566546b6523cbea2cdf17fd43b01886664a7e016f093b575f107cf65f400",
"format": 1
},
{
- "name": "changelogs/fragments/227_missing_regex.yaml",
+ "name": "changelogs/fragments/459_fix_eradication_timer_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ab1065f5ec052ecd0bc4d6c1416cac3f1a2783478fdcb8ca434747d099ba746a",
+ "chksum_sha256": "0aee632fba57abe3de04034e9338703fa45225cf888e9015db78d7cecaa4463d",
"format": 1
},
{
- "name": "changelogs/fragments/220_capacity_info.yaml",
+ "name": "changelogs/fragments/387_no_volume_failure.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2676555e4ab199c39f247eb8186470544e3f392bfea10b44bbdf27b59d963f9b",
+ "chksum_sha256": "6489276ccdaba3a849b7e24a8a5a6069fd4b62505857d2ebdd7945492b25c5ca",
"format": 1
},
{
- "name": "changelogs/fragments/202_add_sso.yaml",
+ "name": "changelogs/fragments/328_policy_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2558b4d0697775d62ab7e12c6149fb1be85a2e56be9dafe561ee02aac2bf3920",
+ "chksum_sha256": "e2241976b8065bcfaad829f6b6b682e932fe24f6750dd4467588fe2aff709a48",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/483_missing_replicate.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "684d4ac8100833002dee8245bd1cb18b69d960414b5c193cd241b16e71fee31d",
"format": 1
},
{
@@ -519,80 +547,101 @@
"format": 1
},
{
- "name": "changelogs/fragments/134_ac_pg_support.yaml",
+ "name": "changelogs/fragments/243_sso_to_admin.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "eb72b8d852fda09db8bfcd0742081fecbabd6d68c97b0311a29a26765ed67307",
+ "chksum_sha256": "3d50baeb61cf616fa61f24d6df31a7d75e1f742cb831843548dad5863267310e",
"format": 1
},
{
- "name": "changelogs/fragments/246_python_precedence.yaml",
+ "name": "changelogs/fragments/312_pg_alias.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c4901627b1f73f31126c2ff78bb5ddbcd40d8b5d8f6c76fde19eb39147764875",
+ "chksum_sha256": "048f69ebae940761460f18b2c0c5b84f789dd5fa127a0ff8d6d8aafa89d7f1b7",
"format": 1
},
{
- "name": "changelogs/fragments/279_pg_safemode.yaml",
+ "name": "changelogs/fragments/461_ntp_keys.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2c3658161baa8290c76c8ae550c7b3e267df82d78c26f11a36fb63f5ad0e3551",
+ "chksum_sha256": "64309a372f6a0f8e886b2ddad8593f6fc6e56400abb19b24ccad2f37f7c2a825",
"format": 1
},
{
- "name": "changelogs/fragments/271_vgroup_prio.yaml",
+ "name": "changelogs/fragments/450_no_gateway.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9aef1c5e381f895a7096f7f4a14d5004445a54c3e9048f9176b77615e3f98861",
+ "chksum_sha256": "794edfa3a274fa0739d392c586d608f2d45f314b887b3a5bf561f5d09c6d7e3b",
"format": 1
},
{
- "name": "changelogs/fragments/203_add_eradication_timer.yaml",
+ "name": "changelogs/fragments/445_py39.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fabd383ba2343e7b5a9e4ac4818546ef8d991de7e93d14c90bd75f58d7f05a45",
+ "chksum_sha256": "b922ead0b741d02d100828781683d2054d43d188b383f0ff1a5eeaedf499aa59",
"format": 1
},
{
- "name": "changelogs/fragments/292_fix_ds_password.yaml",
+ "name": "changelogs/fragments/354_fix_promotion.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3c65285fc6514bcb540d9f89b7e7786e1e5c5c40dc835d459e333e87be2070b1",
+ "chksum_sha256": "d7864f83fbfd6f9f8e5bc05b184b4fbd0a64c8c3f7c2819cabd1a0cb2cda5568",
"format": 1
},
{
- "name": "changelogs/fragments/135_no_cbs_ntp.yaml",
+ "name": "changelogs/fragments/320_completed_snaps.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1ce568cbe1255ecfcdb3e6e66146dcf52e8bcc5cfcc600b856958785b4c8a820",
+ "chksum_sha256": "304300ac00c30ab4b188befe164c2d9a89bb9eb92f1ea1580d4866abdbb00c3a",
"format": 1
},
{
- "name": "changelogs/fragments/193_duplicate_initiators.yaml",
+ "name": "changelogs/fragments/495_add_del_pgroup_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "040a717fd2d50545967a195dfefa3143d746d80b614349a18f6da93ac084398f",
+ "chksum_sha256": "279c330a0ea28864c213250b3dd40bf752e6c6074741993848546ed1c76649bd",
"format": 1
},
{
- "name": "changelogs/fragments/247_fix_smb_policy_rules.yaml",
+ "name": "changelogs/fragments/343_fix_ds.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "de2fd709d27d85d6b8e983f60b16a3aab0dd0f2bb02f44d7ccfcc99915ba3fee",
+ "chksum_sha256": "3fa3323ca7bf1ee194ad96413ad37d4dc1018d0b10dd2fff226fc233120737a6",
"format": 1
},
{
- "name": "changelogs/fragments/348_add_default_prot.yaml",
+ "name": "changelogs/fragments/349_add_alerts.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "70909ac3544b7e941da1c84e9ba75ecd1e01898f0da9f5a58bef8f372222dbac",
+ "chksum_sha256": "0508cb660cfe0dab94258230a0378e2185e1bda6c0eb05ab362352215c91a800",
"format": 1
},
{
- "name": "changelogs/fragments/141_add_remote_snapshot.yaml",
+ "name": "changelogs/fragments/237_fix_network.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "658ef54ac8bea8cb8a3737ace02ac93e301ac1195044ba8a474eab5ed9f68fe4",
+ "chksum_sha256": "3f561b0d4d78d98e16c0b6dd42d3d1360fd2f0b8b57644a72578e9f294bf63b9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/227_missing_regex.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab1065f5ec052ecd0bc4d6c1416cac3f1a2783478fdcb8ca434747d099ba746a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/234_add_vol_info_on_nochange.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4032b2d3fb21ea9f94f9ed246d926049d5d7ce8daf094fa22703ba73b1f26caf",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/469_fix_missing_bind_password.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "989c52d6564fe13a42f5479b408a49e674b2a4ad691cac401815ca3df094277b",
"format": 1
},
{
@@ -603,311 +652,339 @@
"format": 1
},
{
- "name": "changelogs/fragments/315_spf_details.yaml",
+ "name": "changelogs/fragments/296_ad_tls.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "86798075e98bf22253d023307d6cf1e8361b9a221d826c5f4a742d715a1058c9",
+ "chksum_sha256": "12284c1bee381d7b76410aad520b578b814182839662f5cd35ae917f720c89c7",
"format": 1
},
{
- "name": "changelogs/fragments/169_add_certs.yaml",
+ "name": "changelogs/fragments/548_uptime.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "355b59a19cff9aede4aab5de882b471171cda7853e50aec1089e196c7df16e28",
+ "chksum_sha256": "93269f030ad6bc92383376ff7b98e7d6fc3709b6e4d16f593aa4d86d361f3a0e",
"format": 1
},
{
- "name": "changelogs/fragments/124_sdk_handshake.yaml",
+ "name": "changelogs/fragments/415_autodir_policies.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "09dd30cee672d1bfcf0852933b8db73124d3465fe15be02d4b77cfe93df58c51",
+ "chksum_sha256": "5c7eeb95d5372ae7986c243afb87f626c203fedb6edac1e71986ec49ca9266f4",
"format": 1
},
{
- "name": "changelogs/fragments/308_add_vm.yaml",
+ "name": "changelogs/fragments/288_zero_params.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fde532f4cb23df09dba53e242f40d796ac82bbc0bb5e2208a3642288708cdd65",
+ "chksum_sha256": "630fa677c01443af17039493a324e335da583109dca296e1f5162b12e65c4014",
"format": 1
},
{
- "name": "changelogs/fragments/252_add_saml2.yaml",
+ "name": "changelogs/fragments/238_add_dirsnap_rename.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ca47ce440b28f64030239cdf8e3d0d246092e93b0552043bbea7c4c769ceab8f",
+ "chksum_sha256": "7dfe5fa7c0d3f44e723d764cf28d28438255e6d92fb0351537b1b75aeb4cde37",
"format": 1
},
{
- "name": "changelogs/fragments/132_fc_replication.yaml",
+ "name": "changelogs/fragments/530_ntp_rest2.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "31119ca85b86a8e72c64dd998590a231499f135595052c25a73df1f5b9a1965e",
+ "chksum_sha256": "6528b34d1eab43edced573ac73006e0aa8f576bab9592aba84c674dbe37ed96e",
"format": 1
},
{
- "name": "changelogs/fragments/354_fix_promotion.yaml",
+ "name": "changelogs/fragments/411_nfs_user_mapping.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d7864f83fbfd6f9f8e5bc05b184b4fbd0a64c8c3f7c2819cabd1a0cb2cda5568",
+ "chksum_sha256": "b2061f97f76b8bdffb5c73bb4a39146bf88df324cf63567cabd97a1622bedc41",
"format": 1
},
{
- "name": "changelogs/fragments/364_fc_targets.yaml",
+ "name": "changelogs/fragments/463_nfs_version.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "aa25ae0317807b32b3adf7fb3d2fb2ee28677408e8fb8fc5a5ec8ee47d217c6c",
+ "chksum_sha256": "8ff09f0f258184ab8d4ee4f6d9517872defffa5efd9338ecee017f14d3da50d7",
"format": 1
},
{
- "name": "changelogs/fragments/122_add_multi_host_creation.yaml",
+ "name": "changelogs/fragments/383_network_idemp.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f330217bef8d7c34061545e4071b372538afb841b2b013e6934e6f872af35a58",
+ "chksum_sha256": "891ed6fb13b7a9450d646736929c46d9fd657035b0a3a60858faac3c51a32cf9",
"format": 1
},
{
- "name": "changelogs/fragments/208_add_directory_quota_support.yaml",
+ "name": "changelogs/fragments/452_throttle_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "aa55d46f1d02d324beff7238a2cf14255774b32bc4ff34921f682b5edc7b4061",
+ "chksum_sha256": "5004483a7037d8d8bb3c9fbd74544592e97db06599f0baf1645998b4b6ed39c4",
"format": 1
},
{
- "name": "changelogs/fragments/370_add_user_role.yaml",
+ "name": "changelogs/fragments/125_dns_idempotency.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "713d2d7066e76f6967b51a67aa59e8dfdd666cd90b8538f7f00827fe3b7b2231",
+ "chksum_sha256": "27bba2cd35c66b07bbd99031704cce7e3234305d883af0e9841cb35dbefb14f0",
"format": 1
},
{
- "name": "changelogs/fragments/174_null_gateway.yaml",
+ "name": "changelogs/fragments/531_ra_rest.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bec4fcd5e2b2ec84d89caf3e6513843980489d6cc60c02fe8e01fbbebb658b31",
+ "chksum_sha256": "e18a53173d1f64ff016f65275a0ebb51b7d9e50be1d42340381d672c8b1ddaaf",
"format": 1
},
{
- "name": "changelogs/fragments/393_offload_recover.yaml",
+ "name": "changelogs/fragments/249_allow_cert_reimport.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "99d7fa900a916022865378fcc1f766e71bcf5a8f3b9a98b272eff891de04b481",
+ "chksum_sha256": "b1b80b8daae56e7b84f267a875b689f1ca8b1b69e9b061261a18121ebab54908",
"format": 1
},
{
- "name": "changelogs/fragments/149_volumes_demoted_pods_fix.yaml",
+ "name": "changelogs/fragments/162_pgsnap_info_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8cf22b39b47e9256017c2a10f64e327db8e87d9f2aa4c75c081e3709fce3330e",
+ "chksum_sha256": "4dbaadcb3b3f5f5cfcdac7c5a7a0434e5ee06ad88e245b22cb2857a13eea79d2",
"format": 1
},
{
- "name": "changelogs/fragments/214_join_ou.yaml",
+ "name": "changelogs/fragments/379_cap_compat.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b14ab70f9bd3756c7aca4c28f7d0bf7c2e40815710275232deb7d90239108b57",
+ "chksum_sha256": "1c135bcff0cf73fd477bc81cce7903bc90d0ed27554b4b70f938673a10141d61",
"format": 1
},
{
- "name": "changelogs/fragments/182_allow_pgroup_with_create.yaml",
+ "name": "changelogs/fragments/193_duplicate_initiators.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b2d30f4f5efeb55578f94bd60575f7998104bedd70f9c3a4112de08918c6430a",
+ "chksum_sha256": "040a717fd2d50545967a195dfefa3143d746d80b614349a18f6da93ac084398f",
"format": 1
},
{
- "name": "changelogs/fragments/341_pg_400s.yaml",
+ "name": "changelogs/fragments/471_fix_ip_protocol.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2f3e1a18a9d91068015a1874425c8516bd1c8b7dd82bf5f4c4db8af24ce010eb",
+ "chksum_sha256": "6a5e48d2ef1218a47211a7608e89ee7eb5eb49e8f8884500f40510b56e59d88c",
"format": 1
},
{
- "name": "changelogs/fragments/v1.4.0_summary.yaml",
+ "name": "changelogs/fragments/305_fix_target_dempo.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "876598a7a2135b855db8a38e69902920412966606847157fd97d8bb49fc479d4",
+ "chksum_sha256": "f71d18b40505148907262f5f577ca2f50a881379a44ec5a95aa81bfb1b5b27f8",
"format": 1
},
{
- "name": "changelogs/fragments/109_fa_files_support_purefa_info.yaml",
+ "name": "changelogs/fragments/132_fc_replication.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3523ae2ba986b989876bab0ff2182888a48c112cab18373b70e17528c330c3c5",
+ "chksum_sha256": "31119ca85b86a8e72c64dd998590a231499f135595052c25a73df1f5b9a1965e",
"format": 1
},
{
- "name": "changelogs/fragments/133_purefa_info_v6_replication.yaml",
+ "name": "changelogs/fragments/347_dns_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cbb12d92a0c8c763b7e9057fe0d7e8bef92b644c016d6da016c7bda7494b6d53",
+ "chksum_sha256": "83f875d82f6573107b0a3428870ea83f37a24c97b8fb0b60732530d2bcc4d09e",
"format": 1
},
{
- "name": "changelogs/fragments/375_fix_remote_hosts.yaml",
+ "name": "changelogs/fragments/175_check_pgname.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bf689dc94a6aa93b1a5caefebd9bc03581fa83fa22ecdcc1f639c855dd1d3659",
+ "chksum_sha256": "c9a31f2e60103d8c62690888c0e05c6bbb7ae74faef2705783f6091996275009",
"format": 1
},
{
- "name": "changelogs/fragments/384_update_vol_facts.yaml",
+ "name": "changelogs/fragments/364_fc_targets.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d8eb9673ceca70f3c28a1f90d394719d69bd2a0654c880a59e09fb10ea9b1bd3",
+ "chksum_sha256": "aa25ae0317807b32b3adf7fb3d2fb2ee28677408e8fb8fc5a5ec8ee47d217c6c",
"format": 1
},
{
- "name": "changelogs/fragments/126_fix_volume_move.yaml",
+ "name": "changelogs/fragments/170_pgsnap_stretch_pod_fail.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d0b47177157c3ab8b7f3676d9bf5f5d6dd6fecb2d7850b612067bc0dbaf457fe",
+ "chksum_sha256": "af14d8b057b9a6947e71bf6f5ccfc1a89e8cd926a2aeb2e21561e9859d074540",
"format": 1
},
{
- "name": "changelogs/fragments/337_fix_non-prod_versions.yml",
+ "name": "changelogs/fragments/482_schedule.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9461a330780061d2f543cda7721e4818e8c7e2245439c1737a0f4ea8d095018f",
+ "chksum_sha256": "e29a5531872449ad519872cea91ad29252d36df6f28fe47d16650a97fd5d7edf",
"format": 1
},
{
- "name": "changelogs/fragments/213_add_kmip.yaml",
+ "name": "changelogs/fragments/279_pg_safemode.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a25a4a59e2fe39675f54af4b041187ff50f2a84419ecf0ad33597f9a870e347c",
+ "chksum_sha256": "2c3658161baa8290c76c8ae550c7b3e267df82d78c26f11a36fb63f5ad0e3551",
"format": 1
},
{
- "name": "changelogs/fragments/201_increase_krb_count.yaml",
+ "name": "changelogs/fragments/160_rename_pg.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4c9af9fb7823e936ee5da520bca07919e280832c868d8e808819967cc02b2776",
+ "chksum_sha256": "889571c492bfad0e49d4384c0ab89948ed4ef26b3838523d1e2d00b59c006b6e",
"format": 1
},
{
- "name": "changelogs/fragments/199_add_fc_port_enable.yaml",
+ "name": "changelogs/fragments/468_missing_subset.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1eb08594ec67b6f68fdc33cbeba0929733a4d762ad1e5775651ce28aeb8be577",
+ "chksum_sha256": "decb4c312bfd5d8a20e0fc2f06a77b63bdb8eb1c5df4a4708ff9684adf7375a5",
"format": 1
},
{
- "name": "changelogs/fragments/136_add_vol_get_send_info.yaml",
+ "name": "changelogs/fragments/462_info_update.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "901f750d427f4fdd282bdaac8ea955e84980af5ab61cb3537842552142fa7831",
+ "chksum_sha256": "515dcf5d8cf7c7a349b2f1a5bce7859b38eed16a9f603335fc6eea2a93f17317",
"format": 1
},
{
- "name": "changelogs/fragments/328_policy_fix.yaml",
+ "name": "changelogs/fragments/334_fix_vg_qos.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e2241976b8065bcfaad829f6b6b682e932fe24f6750dd4467588fe2aff709a48",
+ "chksum_sha256": "9963dd088bdc4526e0306809822f4137b6c60fc92df70bd59a535ccf66d70cd0",
"format": 1
},
{
- "name": "changelogs/fragments/125_dns_idempotency.yaml",
+ "name": "changelogs/fragments/135_no_cbs_ntp.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "27bba2cd35c66b07bbd99031704cce7e3234305d883af0e9841cb35dbefb14f0",
+ "chksum_sha256": "1ce568cbe1255ecfcdb3e6e66146dcf52e8bcc5cfcc600b856958785b4c8a820",
"format": 1
},
{
- "name": "changelogs/fragments/398_hgoup_alias.yaml",
+ "name": "changelogs/fragments/485_fix_host.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8e001322d964bedd03a888574a51c7b967a91a295b80e0f62bcca1426e58d716",
+ "chksum_sha256": "5c55238368291000abb54e4d42aa6e42a6e73ae50ccdb1f970c0e0ba5b16258e",
"format": 1
},
{
- "name": "changelogs/fragments/365_pod_pgsched.yaml",
+ "name": "changelogs/fragments/317_add_all_squash.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1935c18a1605977c8d6449336c30c7022829f7263ec53f61dc004e3f87f10c5a",
+ "chksum_sha256": "d8a2830079f133a5e1f3876f720599a8b974f9337a7b7fec2d7c2957c7a0b238",
"format": 1
},
{
- "name": "changelogs/fragments/272_volume_prio.yaml",
+ "name": "changelogs/fragments/330_extend_vlan.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d5126118e5a4e014fe832b11a84fe2f0004496a45d3bcf47be4645f3fa85c11e",
+ "chksum_sha256": "14826f46f85309e3d52b4eeeecf757e36cb0ec71115db6aba41efdf8e8b9119d",
"format": 1
},
{
- "name": "changelogs/fragments/229_snapsuffix.yaml",
+ "name": "changelogs/fragments/161_offline_offload_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "76c0cbc363953ab6384119e2b69f15be5ffb4f8b251966da6906cf397fb13c0a",
+ "chksum_sha256": "954b6c0a6350782c3bc461de4e3813d05d2c441f20e906d73e74bc10ba2a5783",
"format": 1
},
{
- "name": "changelogs/fragments/366_add_nvme_types.yaml",
+ "name": "changelogs/fragments/252_add_saml2.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7e157e8e6dc8d2311fa1b7de576b4ac4a23875a76ab8faaab657b6b77b5c057f",
+ "chksum_sha256": "ca47ce440b28f64030239cdf8e3d0d246092e93b0552043bbea7c4c769ceab8f",
"format": 1
},
{
- "name": "changelogs/fragments/367_fix_vg.yaml",
+ "name": "changelogs/fragments/156_snap_suffix_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f0a9b281dad34acdf964d12a23ab6644168adf1104b1cf51c0af829fbebf9333",
+ "chksum_sha256": "69187a1d2f2a0ceba0494eaeb883cb74ca8503f68020d29341a27e462a8dd6ea",
"format": 1
},
{
- "name": "changelogs/fragments/160_rename_pg.yaml",
+ "name": "changelogs/fragments/302_fix_pg_recover_and_target_update.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "889571c492bfad0e49d4384c0ab89948ed4ef26b3838523d1e2d00b59c006b6e",
+ "chksum_sha256": "24bd01c41044777cec8a08dceab1e2adffefeef7454fa3856d9e90aac81986a2",
"format": 1
},
{
- "name": "changelogs/fragments/318_vol_defaults.yaml",
+ "name": "changelogs/fragments/430_throttle_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7542c7e94b9cd44f3f946c5a4518e0ddf23fc02ebfc48d032b483e5d6534b8e0",
+ "chksum_sha256": "1d8b93ee3c1f2dfcc6fedd6a3ee71a8ca7e574fd1e2ff6e25224ed1300d4d342",
"format": 1
},
{
- "name": "changelogs/fragments/107_host_case_clarity.yaml",
+ "name": "changelogs/fragments/545_4kcert.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "14cdfe46c920bce4daf2066105d43bd974d27b7398f0c6021a4c7409c53ecbe9",
+ "chksum_sha256": "d1ef5ef2b59240b97e1774cfecfa3555b7d6afd03045f4ffa89596de1d5bed76",
"format": 1
},
{
- "name": "changelogs/fragments/296_ad_tls.yaml",
+ "name": "changelogs/fragments/496_fix_cert_signing.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "12284c1bee381d7b76410aad520b578b814182839662f5cd35ae917f720c89c7",
+ "chksum_sha256": "f9916161d203b84982579681a8d16e11263f05a5a54e7b2ec7bd901e25ea0cc5",
"format": 1
},
{
- "name": "changelogs/fragments/168_dsrole_fix.yaml",
+ "name": "changelogs/fragments/199_add_fc_port_enable.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a952b14eb16b084e107dc1e809347680de1bbabae470751ce53c1fbe8c00f7b9",
+ "chksum_sha256": "1eb08594ec67b6f68fdc33cbeba0929733a4d762ad1e5775651ce28aeb8be577",
"format": 1
},
{
- "name": "changelogs/fragments/347_dns_fix.yaml",
+ "name": "changelogs/fragments/280_multihost_no_suffix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83f875d82f6573107b0a3428870ea83f37a24c97b8fb0b60732530d2bcc4d09e",
+ "chksum_sha256": "c31e72389decf9391a7ce5ea2504e6490b3afe0be780106152cb473ec3dd5d1b",
"format": 1
},
{
- "name": "changelogs/fragments/163_add_maintenance_windows.yaml",
+ "name": "changelogs/fragments/519_add_cloud_capacity.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cae0c49297590fa895d7944d8ca2c9ed8ee936cfb2a7eb1e7480ddd8d363790d",
+ "chksum_sha256": "d960cd2c1b0d2e241e04b302f50c8628aea1408ddb8e69a2a7e9897c193356df",
"format": 1
},
{
- "name": "changelogs/fragments/205_policy_protocl.yaml",
+ "name": "changelogs/fragments/460_eradicaton.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "13fe566546b6523cbea2cdf17fd43b01886664a7e016f093b575f107cf65f400",
+ "chksum_sha256": "ee6d8e2070da03b4a99edc98a568c36c53ee37335ce0fd9e060ce352af64c096",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/247_fix_smb_policy_rules.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de2fd709d27d85d6b8e983f60b16a3aab0dd0f2bb02f44d7ccfcc99915ba3fee",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/187_add_ad.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bdf293cc4cf7f96dfa5e017da8988dd7dbcd5103f7d624c882870938ed92f78",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/474_network_fixes.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38bb04fb49d107927480ab3ab8d1bf0eb2e28f9ad921e09899417df556994e06",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/345_user_map.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1dfe18617cd617abc6ad45ca549ffce2fa88493506f3516deba1d9b6a4108a15",
"format": 1
},
{
@@ -918,122 +995,122 @@
"format": 1
},
{
- "name": "changelogs/fragments/111_add_filesystem_support.yaml",
+ "name": "changelogs/fragments/381_change_booleans.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e99a34a5a71e458de587f9741aadfb712c00f98ac28795c23929d97c32468550",
+ "chksum_sha256": "f04fd18a42e321cb3818a579e14cc50a6d27935196ff04632e2db44f7b807322",
"format": 1
},
{
- "name": "changelogs/fragments/175_check_pgname.yaml",
+ "name": "changelogs/fragments/498_fix_pg_creation.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c9a31f2e60103d8c62690888c0e05c6bbb7ae74faef2705783f6091996275009",
+ "chksum_sha256": "a76d85d910376a46b3e6be6a54d7573b58faad66e973f2cac21428287e0a39b8",
"format": 1
},
{
- "name": "changelogs/fragments/108_fix_eradicate_idempotency.yaml",
+ "name": "changelogs/fragments/235_eula.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "56a1a21cbd2337826c0b74c2e0b8fdc7488726ee48fa1039e9621bc8035ae01b",
+ "chksum_sha256": "1f47001145eba84245d432bb58679d15acaf7065794bd486ce6f4b47d15ccc5f",
"format": 1
},
{
- "name": "changelogs/fragments/280_multihost_no_suffix.yaml",
+ "name": "changelogs/fragments/366_add_nvme_types.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c31e72389decf9391a7ce5ea2504e6490b3afe0be780106152cb473ec3dd5d1b",
+ "chksum_sha256": "7e157e8e6dc8d2311fa1b7de576b4ac4a23875a76ab8faaab657b6b77b5c057f",
"format": 1
},
{
- "name": "changelogs/fragments/270_add_priority_info.yaml",
+ "name": "changelogs/fragments/140_pod_case.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3ce6bf60d3a1efd2f708490a654ec6c34e1617bb80f5114c170d683dee794f56",
+ "chksum_sha256": "8314f833498e81e803152d5a6b8fa4992b690a8954a7b75e4f78f55f3e6281f1",
"format": 1
},
{
- "name": "changelogs/fragments/153_syslog_update.yaml",
+ "name": "changelogs/fragments/108_fix_eradicate_idempotency.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ee87138221741514acab7cee37c3f0c41772129b130a8100acdcea2ec732495f",
+ "chksum_sha256": "56a1a21cbd2337826c0b74c2e0b8fdc7488726ee48fa1039e9621bc8035ae01b",
"format": 1
},
{
- "name": "changelogs/fragments/288_zero_params.yaml",
+ "name": "changelogs/fragments/370_add_user_role.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "630fa677c01443af17039493a324e335da583109dca296e1f5162b12e65c4014",
+ "chksum_sha256": "713d2d7066e76f6967b51a67aa59e8dfdd666cd90b8538f7f00827fe3b7b2231",
"format": 1
},
{
- "name": "changelogs/fragments/176_fix_promote_api_issue.yaml",
+ "name": "changelogs/fragments/109_fa_files_support_purefa_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e1643ed3af96a2f4177225b32510025dad4fa8b2374279f80d960c1a8208bfa7",
+ "chksum_sha256": "3523ae2ba986b989876bab0ff2182888a48c112cab18373b70e17528c330c3c5",
"format": 1
},
{
- "name": "changelogs/fragments/187_add_ad.yaml",
+ "name": "changelogs/fragments/374_offload_pgsnap.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7bdf293cc4cf7f96dfa5e017da8988dd7dbcd5103f7d624c882870938ed92f78",
+ "chksum_sha256": "1c57da1f0407cfd608f8dba3ab67f68abcb140d190cdf48072b7d629979492d7",
"format": 1
},
{
- "name": "changelogs/fragments/305_fix_target_dempo.yaml",
+ "name": "changelogs/fragments/118_rename_host.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f71d18b40505148907262f5f577ca2f50a881379a44ec5a95aa81bfb1b5b27f8",
+ "chksum_sha256": "5fad30e620947f3c5878545aa55f223370e4e769ec28002f662efdf9ffd1358a",
"format": 1
},
{
- "name": "changelogs/fragments/194_vg_qos.yaml",
+ "name": "changelogs/fragments/163_add_maintenance_windows.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0e9835e8e6d6dd32ec0af6c42f7d3c38fa496eb3afb3314f80c66fa189268558",
+ "chksum_sha256": "cae0c49297590fa895d7944d8ca2c9ed8ee936cfb2a7eb1e7480ddd8d363790d",
"format": 1
},
{
- "name": "changelogs/fragments/344_fix_smtp.yaml",
+ "name": "changelogs/fragments/220_capacity_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "eb2f427e4bc4f43a9dcf4cfcb70387974e2239613d4363adb8d4d022a9b0cb6e",
+ "chksum_sha256": "2676555e4ab199c39f247eb8186470544e3f392bfea10b44bbdf27b59d963f9b",
"format": 1
},
{
- "name": "changelogs/fragments/330_extend_vlan.yaml",
+ "name": "changelogs/fragments/200_add_DAR_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "14826f46f85309e3d52b4eeeecf757e36cb0ec71115db6aba41efdf8e8b9119d",
+ "chksum_sha256": "d30ccf31e256808a9bfeef72c50100fed7fa0922b49c31e893f277890733dd1a",
"format": 1
},
{
- "name": "changelogs/fragments/162_pgsnap_info_fix.yaml",
+ "name": "changelogs/fragments/194_vg_qos.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4dbaadcb3b3f5f5cfcdac7c5a7a0434e5ee06ad88e245b22cb2857a13eea79d2",
+ "chksum_sha256": "0e9835e8e6d6dd32ec0af6c42f7d3c38fa496eb3afb3314f80c66fa189268558",
"format": 1
},
{
- "name": "changelogs/fragments/170_pgsnap_stretch_pod_fail.yaml",
+ "name": "changelogs/fragments/488_fix_pgsnap_eradication.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "af14d8b057b9a6947e71bf6f5ccfc1a89e8cd926a2aeb2e21561e9859d074540",
+ "chksum_sha256": "1c930c51c905b94b4921e8c37c2b5609206176d8382ed933a31f86956849f037",
"format": 1
},
{
- "name": "changelogs/fragments/310_hg_vol_idempotency.yaml",
+ "name": "changelogs/fragments/365_pod_pgsched.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9aba6c636b1732e62a10fa765a6c3e7e1c5c25f4be439d1d603b5769356c4a02",
+ "chksum_sha256": "1935c18a1605977c8d6449336c30c7022829f7263ec53f61dc004e3f87f10c5a",
"format": 1
},
{
- "name": "changelogs/fragments/234_add_vol_info_on_nochange.yaml",
+ "name": "changelogs/fragments/1_27_summary.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4032b2d3fb21ea9f94f9ed246d926049d5d7ce8daf094fa22703ba73b1f26caf",
+ "chksum_sha256": "4e5b4b3ca97eae9f96b6bb84b9c8a5877756bec23bb1b5b2419cf142f7197f20",
"format": 1
},
{
@@ -1044,73 +1121,73 @@
"format": 1
},
{
- "name": "changelogs/fragments/343_fix_ds.yaml",
+ "name": "changelogs/fragments/133_purefa_info_v6_replication.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3fa3323ca7bf1ee194ad96413ad37d4dc1018d0b10dd2fff226fc233120737a6",
+ "chksum_sha256": "cbb12d92a0c8c763b7e9057fe0d7e8bef92b644c016d6da016c7bda7494b6d53",
"format": 1
},
{
- "name": "changelogs/fragments/243_sso_to_admin.yaml",
+ "name": "changelogs/fragments/257_fqcn.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3d50baeb61cf616fa61f24d6df31a7d75e1f742cb831843548dad5863267310e",
+ "chksum_sha256": "dff369a116a9ba4014989743704667ccbafdf9dbb792bc1387642284c2df8a1b",
"format": 1
},
{
- "name": "changelogs/fragments/139_pgsnap_ac_support.yaml",
+ "name": "changelogs/fragments/394_neighbors.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e44ab022a764253dabc565481afd94e7a5a2cb0e37a638bfe76a8d0e59139bdf",
+ "chksum_sha256": "5c60e10bb81cef310e5ac44a3faf04d192e2be272cd0e5b967dcf53a83148fd0",
"format": 1
},
{
- "name": "changelogs/fragments/237_fix_network.yaml",
+ "name": "changelogs/fragments/529_eula_v2.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3f561b0d4d78d98e16c0b6dd42d3d1360fd2f0b8b57644a72578e9f294bf63b9",
+ "chksum_sha256": "d5a9df37d095b8b77793b6833db65aba5f887544c6d49138af04c4499d003dc1",
"format": 1
},
{
- "name": "changelogs/fragments/304_host_vlan.yaml",
+ "name": "changelogs/fragments/115_add_gcp_offload.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2453f6516a40120b2c06ad1672e860b14b9c0276b12ae9c97852194d07a2a1ef",
+ "chksum_sha256": "500050720f1fb56e313fcd73dc7a98b06abce63cdca08b37cf11a1f8d7d01a49",
"format": 1
},
{
- "name": "changelogs/fragments/277_add_fs_repl.yaml",
+ "name": "changelogs/fragments/384_update_vol_facts.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "94698812175494446d89f92951b8bb0b8794eb1b8b1453aa85dbdf23e0b1522b",
+ "chksum_sha256": "d8eb9673ceca70f3c28a1f90d394719d69bd2a0654c880a59e09fb10ea9b1bd3",
"format": 1
},
{
- "name": "changelogs/fragments/118_rename_host.yaml",
+ "name": "changelogs/fragments/319_lockout.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5fad30e620947f3c5878545aa55f223370e4e769ec28002f662efdf9ffd1358a",
+ "chksum_sha256": "4e8e8f895f0e131b6129c80938763ba06a483e419ad5be1cf5e8e262e32d7fd4",
"format": 1
},
{
- "name": "changelogs/fragments/307_multiple_dns.yaml",
+ "name": "changelogs/fragments/126_fix_volume_move.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "63b254118808a320dd69dbc885cdb3ffd20bf8a60c2fa636e5235b15bbb8fa7f",
+ "chksum_sha256": "d0b47177157c3ab8b7f3676d9bf5f5d6dd6fecb2d7850b612067bc0dbaf457fe",
"format": 1
},
{
- "name": "changelogs/fragments/360_fix_volume.yaml",
+ "name": "changelogs/fragments/136_add_vol_get_send_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "214e7d076ebe88080ae3b674f9218f3fd82c3f624c12c47c1e0f5b25a25cedff",
+ "chksum_sha256": "901f750d427f4fdd282bdaac8ea955e84980af5ab61cb3537842552142fa7831",
"format": 1
},
{
- "name": "changelogs/fragments/112_add_directory_support.yaml",
+ "name": "changelogs/fragments/505_dns_attribute.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6ea1e0a2ce1457141a4ce999d0538dce6943fd07f9d203dc46728fdd17121c77",
+ "chksum_sha256": "fccee6e53d139f63be14aa1f268572126de4eea82ce79c4ced931d01d63d7bd1",
"format": 1
},
{
@@ -1121,164 +1198,171 @@
"format": 1
},
{
- "name": "changelogs/fragments/294_dns_ntp_idempotency_absent.yaml",
+ "name": "changelogs/fragments/268_fix_quotas_issues.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3621e04c4076595ad61dbd05bbdc9810304336354c40938e82be177b89e5e029",
+ "chksum_sha256": "c59529e1bf783c3e450ce897d8285253d9e660598604e81981d2cb1417c5d728",
"format": 1
},
{
- "name": "changelogs/fragments/388_remove_27.yaml",
+ "name": "changelogs/fragments/v1.4.0_summary.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "79481f3b61a45b1d006a43850b16ca12b356b3157005dd866ae6b84c73279d55",
+ "chksum_sha256": "876598a7a2135b855db8a38e69902920412966606847157fd97d8bb49fc479d4",
"format": 1
},
{
- "name": "changelogs/fragments/257_fqcn.yaml",
+ "name": "changelogs/fragments/130_info_ds_update.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dff369a116a9ba4014989743704667ccbafdf9dbb792bc1387642284c2df8a1b",
+ "chksum_sha256": "947189a4487b75926ef5cd535900916d0d2107159243e41b3677dc13adcbbc84",
"format": 1
},
{
- "name": "changelogs/fragments/200_add_DAR_info.yaml",
+ "name": "changelogs/fragments/363_overwrite_combo.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d30ccf31e256808a9bfeef72c50100fed7fa0922b49c31e893f277890733dd1a",
+ "chksum_sha256": "8b03125168fec5bdc6800055674a23646d2d1318f68666bf3647c9588a9934ff",
"format": 1
},
{
- "name": "changelogs/fragments/268_fix_quotas_issues.yaml",
+ "name": "changelogs/fragments/206_add_naa_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c59529e1bf783c3e450ce897d8285253d9e660598604e81981d2cb1417c5d728",
+ "chksum_sha256": "45e9f15e85c52453f42909582a7c48be2af6c5291f470c6c11861ad6d924bfb3",
"format": 1
},
{
- "name": "changelogs/fragments/312_pg_alias.yaml",
+ "name": "changelogs/fragments/208_add_directory_quota_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "048f69ebae940761460f18b2c0c5b84f789dd5fa127a0ff8d6d8aafa89d7f1b7",
+ "chksum_sha256": "aa55d46f1d02d324beff7238a2cf14255774b32bc4ff34921f682b5edc7b4061",
"format": 1
},
{
- "name": "changelogs/fragments/235_eula.yaml",
+ "name": "changelogs/fragments/413_eradicate_pgsnap.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1f47001145eba84245d432bb58679d15acaf7065794bd486ce6f4b47d15ccc5f",
+ "chksum_sha256": "cfd3bbb30efde5755653c8c8aabdbfb30a26f3371493af7fad638133dfcf64a2",
"format": 1
},
{
- "name": "changelogs/fragments/254_sam2_info.yaml",
+ "name": "changelogs/fragments/203_add_eradication_timer.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83cf6a33a4429f2f17d2dc63f4b198e0a1cd7b2f4cb02f86f08a2deb5d5ccb66",
+ "chksum_sha256": "fabd383ba2343e7b5a9e4ac4818546ef8d991de7e93d14c90bd75f58d7f05a45",
"format": 1
},
{
- "name": "changelogs/fragments/121_add_multi_volume_creation.yaml",
+ "name": "changelogs/fragments/315_spf_details.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7d63a0ff3a88738493bf7f0afee3600f0b236b28f592694bd686d38a94bdd7d7",
+ "chksum_sha256": "86798075e98bf22253d023307d6cf1e8361b9a221d826c5f4a742d715a1058c9",
"format": 1
},
{
- "name": "changelogs/fragments/130_info_ds_update.yaml",
+ "name": "changelogs/fragments/464_fix_ds_add.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "947189a4487b75926ef5cd535900916d0d2107159243e41b3677dc13adcbbc84",
+ "chksum_sha256": "3a399f8c87f1e5c8a637122908eb90f957fa4d3311e984171ed0dddca9f2b226",
"format": 1
},
{
- "name": "changelogs/fragments/379_cap_compat.yaml",
+ "name": "changelogs/fragments/375_fix_remote_hosts.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1c135bcff0cf73fd477bc81cce7903bc90d0ed27554b4b70f938673a10141d61",
+ "chksum_sha256": "bf689dc94a6aa93b1a5caefebd9bc03581fa83fa22ecdcc1f639c855dd1d3659",
"format": 1
},
{
- "name": "changelogs/fragments/345_user_map.yaml",
+ "name": "changelogs/fragments/149_volumes_demoted_pods_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1dfe18617cd617abc6ad45ca549ffce2fa88493506f3516deba1d9b6a4108a15",
+ "chksum_sha256": "8cf22b39b47e9256017c2a10f64e327db8e87d9f2aa4c75c081e3709fce3330e",
"format": 1
},
{
- "name": "changelogs/fragments/394_neighbors.yaml",
+ "name": "changelogs/fragments/254_sam2_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5c60e10bb81cef310e5ac44a3faf04d192e2be272cd0e5b967dcf53a83148fd0",
+ "chksum_sha256": "83cf6a33a4429f2f17d2dc63f4b198e0a1cd7b2f4cb02f86f08a2deb5d5ccb66",
"format": 1
},
{
- "name": "changelogs/fragments/383_network_idemp.yaml",
+ "name": "changelogs/fragments/228_nguid_to_volfact.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "891ed6fb13b7a9450d646736929c46d9fd657035b0a3a60858faac3c51a32cf9",
+ "chksum_sha256": "286446286f7b817d37b3ecd99bf40347dc9ca9e76835dae68f5042088f0ccad0",
"format": 1
},
{
- "name": "changelogs/fragments/152_fix_user.yaml",
+ "name": "changelogs/fragments/307_multiple_dns.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "855ffc4ffbd24286e556666da375a79f19a3ec7da829ffa46d8f1983335f96d2",
+ "chksum_sha256": "63b254118808a320dd69dbc885cdb3ffd20bf8a60c2fa636e5235b15bbb8fa7f",
"format": 1
},
{
- "name": "changelogs/fragments/231_syslog_settings.yaml",
+ "name": "changelogs/fragments/536_inv_rest2.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8554f5bb3da3ca0f967b61188806535a8d4161371ba5abcae56c3fbef98981d3",
+ "chksum_sha256": "49bd6e5881062ebfd0ea1303d35a498fcf4f0bf2d92009c2d244c3f448326b56",
"format": 1
},
{
- "name": "changelogs/fragments/207_fix_disable_for_remote_assist.yaml",
+ "name": "changelogs/fragments/122_add_multi_host_creation.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6f3e2568e1937dcbd0ce31e3a87ce1da72af881c77995e55e8673ec487832696",
+ "chksum_sha256": "f330217bef8d7c34061545e4071b372538afb841b2b013e6934e6f872af35a58",
"format": 1
},
{
- "name": "changelogs/fragments/242_multi_offload.yaml",
+ "name": "changelogs/fragments/201_increase_krb_count.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d92fd1123f4e28eb21c3a150a3d00b34a474896405a53fe5d7f7bcd8e8463a23",
+ "chksum_sha256": "4c9af9fb7823e936ee5da520bca07919e280832c868d8e808819967cc02b2776",
"format": 1
},
{
- "name": "changelogs/fragments/156_snap_suffix_fix.yaml",
+ "name": "changelogs/fragments/484_fix_repl_sched.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "69187a1d2f2a0ceba0494eaeb883cb74ca8503f68020d29341a27e462a8dd6ea",
+ "chksum_sha256": "6dfa49e4b529929874bb184c39689ffdbea4ee4ca51a25e8a9f2bdc46e050d85",
"format": 1
},
{
- "name": "changelogs/fragments/381_change_booleans.yaml",
+ "name": "changelogs/fragments/397_parialconnect_bug.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f04fd18a42e321cb3818a579e14cc50a6d27935196ff04632e2db44f7b807322",
+ "chksum_sha256": "dcb8da9aace9eaed868a579288728ec27c58fe43de3a0ac4b2f26c0f2c045749",
"format": 1
},
{
- "name": "changelogs/fragments/265_fix_multiple_nfs_rules.yaml",
+ "name": "changelogs/fragments/271_vgroup_prio.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "92a7032c57aa30e06e3b7f3a8b14f1bc4a1dbd3c0f91e13ee862f9f91d894d30",
+ "chksum_sha256": "9aef1c5e381f895a7096f7f4a14d5004445a54c3e9048f9176b77615e3f98861",
"format": 1
},
{
- "name": "changelogs/fragments/115_add_gcp_offload.yaml",
+ "name": "changelogs/fragments/480_rename_vg.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "500050720f1fb56e313fcd73dc7a98b06abce63cdca08b37cf11a1f8d7d01a49",
+ "chksum_sha256": "8cc8080ddb7e0ae0ba1ad6d628e5ebaa4ee003516bc2bde7b71cd5539ee27cda",
"format": 1
},
{
- "name": "changelogs/fragments/363_overwrite_combo.yaml",
+ "name": "changelogs/fragments/541_r2_offload.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8b03125168fec5bdc6800055674a23646d2d1318f68666bf3647c9588a9934ff",
+ "chksum_sha256": "df277134d1d8bb97a10833bd1f6acc6c87ee9f7bac92e55b4454952c8badc26a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/168_dsrole_fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a952b14eb16b084e107dc1e809347680de1bbabae470751ce53c1fbe8c00f7b9",
"format": 1
},
{
@@ -1289,38 +1373,101 @@
"format": 1
},
{
- "name": "changelogs/fragments/161_offline_offload_fix.yaml",
+ "name": "changelogs/fragments/239_safe_mode.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "954b6c0a6350782c3bc461de4e3813d05d2c441f20e906d73e74bc10ba2a5783",
+ "chksum_sha256": "50e022d852e1bc93695d7980834f02c62fe584f160ceab7c6fde0a50909f215b",
"format": 1
},
{
- "name": "changelogs/fragments/226_deprecate_protocol.yaml",
+ "name": "changelogs/fragments/246_python_precedence.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "47123e52a081b333ae710eb005535c5fa84469d33a0d30ca7ec83e207d7d46c1",
+ "chksum_sha256": "c4901627b1f73f31126c2ff78bb5ddbcd40d8b5d8f6c76fde19eb39147764875",
"format": 1
},
{
- "name": "changelogs/fragments/110_add_apiclient_support.yaml",
+ "name": "changelogs/fragments/524_empty_ds.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a91901335be037584e59a521f7201e27431567170240b027ef9bb9f7220bf3d0",
+ "chksum_sha256": "074e821b0829379c4e21fcde46f0a64c4d733f485abfd1aeec17f271e1d0f591",
"format": 1
},
{
- "name": "changelogs/fragments/206_add_naa_info.yaml",
+ "name": "changelogs/fragments/261_fix_bad_arrays.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "45e9f15e85c52453f42909582a7c48be2af6c5291f470c6c11861ad6d924bfb3",
+ "chksum_sha256": "10bcda38e7401292d7eb21bfbea276bbcdaa70279475b57ead65200878682562",
"format": 1
},
{
- "name": "changelogs/fragments/299_fix_pgsched_zero_support.yaml",
+ "name": "changelogs/fragments/499_rest_227.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "794dcb8c905190621276322754f9a0c6766337e58a89e19734b8db5adc359688",
+ "chksum_sha256": "474ca8fea2b15197be43b37aa10bf2353bea1ade1e9086059c94c791badd8bf1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/538_arrayname_rest.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78dbb4a2ac91f7c19c63b82814536f4bbe65afc55200521b5550d7b4bd5c9d39",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/369_fix_host.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b504c0859681ad87a0b32f5d8e8671c523fdf1514ec4c46f815758a192971012",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/182_allow_pgroup_with_create.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2d30f4f5efeb55578f94bd60575f7998104bedd70f9c3a4112de08918c6430a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/433_certs.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95d42c0dfb19c722930e0af5f397da9514e3f2472cc32425b53a9cdb761f886c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/242_multi_offload.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d92fd1123f4e28eb21c3a150a3d00b34a474896405a53fe5d7f7bcd8e8463a23",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/176_fix_promote_api_issue.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1643ed3af96a2f4177225b32510025dad4fa8b2374279f80d960c1a8208bfa7",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/277_add_fs_repl.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94698812175494446d89f92951b8bb0b8794eb1b8b1453aa85dbdf23e0b1522b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/509_check_peer.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e2ebc389d5dd627995777ab4719829c1cab77f5483994239f7179fcb879f261",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/431_offload_profile.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ec0bd0df5441b4e0de8925121989b1ec31f2252b1027665294b70eaf5fa6618",
"format": 1
},
{
@@ -1331,45 +1478,73 @@
"format": 1
},
{
- "name": "changelogs/fragments/196_fix_activedr_api_version.yaml",
+ "name": "changelogs/fragments/112_add_directory_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "da06077e7148e0a5647cbb4db66dbf57a87199b3ecd8961f7c070f0a223b49f6",
+ "chksum_sha256": "6ea1e0a2ce1457141a4ce999d0538dce6943fd07f9d203dc46728fdd17121c77",
"format": 1
},
{
- "name": "changelogs/fragments/349_add_alerts.yaml",
+ "name": "changelogs/fragments/299_fix_pgsched_zero_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0508cb660cfe0dab94258230a0378e2185e1bda6c0eb05ab362352215c91a800",
+ "chksum_sha256": "794dcb8c905190621276322754f9a0c6766337e58a89e19734b8db5adc359688",
"format": 1
},
{
- "name": "changelogs/fragments/294_user_map_support.yaml",
+ "name": "changelogs/fragments/539_rest2_vnc.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a8303eade7f404a0454044ac907e369cf12ab8a715dae17873ef482f959b55ce",
+ "chksum_sha256": "1070f2c0ca85c5267559603b6a8840b958454980e88bcdd6e3020ce1488989ec",
"format": 1
},
{
- "name": "changelogs/fragments/374_offload_pgsnap.yaml",
+ "name": "changelogs/fragments/304_host_vlan.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1c57da1f0407cfd608f8dba3ab67f68abcb140d190cdf48072b7d629979492d7",
+ "chksum_sha256": "2453f6516a40120b2c06ad1672e860b14b9c0276b12ae9c97852194d07a2a1ef",
"format": 1
},
{
- "name": "changelogs/fragments/224_add_nguid_info.yaml",
+ "name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "afee325ee45ede0ecadb3d8cfcfc8b11caeb91c257e6fc69b221e972ae80415f",
+ "chksum_sha256": "b301bed7223921e76ea8c916463b769bb824f548df463e372920d8424d19dc2f",
"format": 1
},
{
- "name": "LICENSE",
+ "name": "changelogs/211_fix_clearing_host_inititators.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "chksum_sha256": "8ce58291d0256cb22a7a8cb015ebfc4775474f594d5c724225875c495213d259",
+ "format": 1
+ },
+ {
+ "name": "changelogs/.plugin-cache.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc866613dd4087ce500e2278a17bcd5aa16faf970f135ce9390b3d98605ec035",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "787229dd578477fe1009b0d84411b5c9678bf474c0c89642bd7381d6c4803c19",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a009a349eaaf78c93ff56072d2ef171937bdb884e4976592ab5aaa9c68e1044",
"format": 1
},
{
@@ -1380,6 +1555,111 @@
"format": 1
},
{
+ "name": "playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/templates/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/vars/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/files/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/links.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba87531c24128b4584a8f5dc481594ff232c9c19f1324e315149e17fe685baec",
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "049b14af474bcb549405765b4e99f8865b5a7e8d912155a20cbb062e25334a93",
+ "format": 1
+ },
+ {
"name": "plugins",
"ftype": "dir",
"chksum_type": null,
@@ -1394,17 +1674,73 @@
"format": 1
},
{
- "name": "plugins/modules/purefa_fs.py",
+ "name": "plugins/modules/purefa_timeout.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dc191939ce23b0ffb74d36022d868823f02eecdeb5b56e4702669a4ba3115747",
+ "chksum_sha256": "f6b7ead22ec616b084204ffdd2cc234410da1c76eeb04535fa4e073c699ba647",
"format": 1
},
{
- "name": "plugins/modules/purefa_user.py",
+ "name": "plugins/modules/purefa_maintenance.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d9c8bba3a64f111c817d53f8a83a373588fb54be24bdd34bd486151015c5df8d",
+ "chksum_sha256": "773f2559819b5d54f9e95aa4b29713d58c69ec0747b8ab9d194717f6032a3da1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_eula.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f4be0b18be4216da5a285e408f442b04a9bc87a6c554014fba1074919e9f77e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_kmip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc6531160ea0990f3fbc1ee2469ec2b1b32f379ee4529ceac43be413b63174c5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_smis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0d7809a15efefbc2a7648585f0aa8c15e126c193ff1d0a5eb72f30b3820fd68",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_logging.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72ff82ff72e5c0f11f3d0ddb3e24529c93e220c7e6aabecbde097caf207d0ef5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_pod.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8af993da00b2df3d9b9f3dfb7d95162ffe60e1436c2cdc5d156d25dfc334b9e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_banner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a599924ac2135480b5392520994e11f2dc39a7f1c4b1fd7ae478e8f87f0b6ab8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_volume_tags.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "470c400d52f4092667d8fbb58cf7961b978d9967524e6f0fbde852bfe6e20d9d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_smtp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4eab45b866e8f0aea35287282cfb1fb2efdcf3e433a6842380e7fa0b62228c9",
"format": 1
},
{
@@ -1415,80 +1751,80 @@
"format": 1
},
{
- "name": "plugins/modules/purefa_host.py",
+ "name": "plugins/modules/purefa_pgsnap.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7f57e4149defcf7a109b05921be116d0fd0f109d9fe7e45ca0188679654977c7",
+ "chksum_sha256": "5be7db8f57c6d2a33052e0df6019660ade480093139078d89e4b8410d655529b",
"format": 1
},
{
- "name": "plugins/modules/purefa_eradication.py",
+ "name": "plugins/modules/purefa_vnc.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d590409734be78cadc78c08cf4cc5fa36c23b71208ed785cdd435e82c190673d",
+ "chksum_sha256": "ea1372101e956d36b99fa36acc8b4b7bfb311e03e94d0c4d6a4ad39ab9c237bc",
"format": 1
},
{
- "name": "plugins/modules/purefa_saml.py",
+ "name": "plugins/modules/purefa_ad.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "30758391ec21522ff48ec934552999ef5a538747784554a84eb3cfbbeea9b020",
+ "chksum_sha256": "d53d7b7a95d0ecdcbee255db028a52c62a15e85bdb9541bafcd8270c3bd97b48",
"format": 1
},
{
- "name": "plugins/modules/purefa_vg.py",
+ "name": "plugins/modules/purefa_policy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1466a88fec2fc2ce37f5906b38e7e9d93671f00e30ff7f7990abaa0cbc696c67",
+ "chksum_sha256": "eb24acd1b8171f2a4f746db11574f9973956393ca450507029e08bd0a4de9f52",
"format": 1
},
{
- "name": "plugins/modules/purefa_ntp.py",
+ "name": "plugins/modules/purefa_hardware.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "aca0a4d618f09f623df6064d06e33c76bd26569c3e940580c7f608c2f90e5453",
+ "chksum_sha256": "51aa2083e0a857355fa1ec1fafc1edffe0885f6733152574f5136cd1d502300e",
"format": 1
},
{
- "name": "plugins/modules/purefa_offload.py",
+ "name": "plugins/modules/purefa_hg.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c760847bf396a868a0b06b5ade07f19caaca54dbf1cdf7ed7ea93748e1562350",
+ "chksum_sha256": "7f45c42a72542be609cc8c68b99b44c4947683050b113f4477881c70d9e00bad",
"format": 1
},
{
- "name": "plugins/modules/purefa_pg.py",
+ "name": "plugins/modules/purefa_dsrole.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3e981db2b3ec21ae7a15078361642c74f511a335c0f7cbca9d4217cd25b96283",
+ "chksum_sha256": "f70148d126d365a5041de1cd4f51bde24c105024ca1c617621685b9d163e2ad7",
"format": 1
},
{
- "name": "plugins/modules/purefa_directory.py",
+ "name": "plugins/modules/purefa_snap.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d18b300ba799994b7c608cd6315255b7336c82632616c5b565960cd52d5b19c0",
+ "chksum_sha256": "610aa55d1d868577c75f28b27953d90e9775d1cf93ffd03c1555a2ccb84c0ec9",
"format": 1
},
{
- "name": "plugins/modules/purefa_vnc.py",
+ "name": "plugins/modules/purefa_user.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d53b0375a25ccd9894ab3ac39b7220a373220e149eaf303362e472f82d77ee92",
+ "chksum_sha256": "d9c8bba3a64f111c817d53f8a83a373588fb54be24bdd34bd486151015c5df8d",
"format": 1
},
{
- "name": "plugins/modules/purefa_pgsched.py",
+ "name": "plugins/modules/purefa_default_protection.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f1d6de2fe9a6f374ae371d2dc8702a7ad7228796421bc31d97b685bd5a192589",
+ "chksum_sha256": "b8afe24acc1a9d88e774c16da71bd9ded4f6aae6d794ba0dc1eb94086595ec92",
"format": 1
},
{
- "name": "plugins/modules/purefa_kmip.py",
+ "name": "plugins/modules/purefa_apiclient.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3c9c462829b542b572d7127c31e004e4114fc10cf8bf99cb645b7455f998080b",
+ "chksum_sha256": "334c936be4a9a953d49fd3c543c8d2fe8ecf0156993187a66d0364988745257e",
"format": 1
},
{
@@ -1499,59 +1835,59 @@
"format": 1
},
{
- "name": "plugins/modules/purefa_admin.py",
+ "name": "plugins/modules/purefa_arrayname.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "89a58a1b0c90b0eb840250eb710ab6e5fa769ad4b2ad0051de7e2d1b6c27e82f",
+ "chksum_sha256": "c563f710d872e54ddc387e3c63b4b3dc568e0f417eb0b7b4d6be78d31073f95b",
"format": 1
},
{
- "name": "plugins/modules/purefa_logging.py",
+ "name": "plugins/modules/purefa_certs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ea585c8404999eb52ad4b8a89728f7cf578cb7fe0f1af3199bb86fdff5eb1e06",
+ "chksum_sha256": "cf60871211a3ba7a171ed1829f109bcbb77676ec7bb5c3217cb6ee0e1171794e",
"format": 1
},
{
"name": "plugins/modules/purefa_syslog_settings.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c065df17138b49ac114974aa45884aadb851db971f5c47b5e627aa46dfb6d778",
+ "chksum_sha256": "2af3fe94191e27d571e73bc224da4fbb383c98e3cc9aef387a067cd231dc64ec",
"format": 1
},
{
- "name": "plugins/modules/purefa_subnet.py",
+ "name": "plugins/modules/purefa_token.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "88983155d06a3e6dd71f5b298c01eb65296bf95b8e0d59c0fef7d69fd70ac261",
+ "chksum_sha256": "34d1f3b93908a9a552ee214188a014993444421e6c7640cdda3ab98dae7bd54f",
"format": 1
},
{
- "name": "plugins/modules/purefa_export.py",
+ "name": "plugins/modules/purefa_network.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7700653d2520ef7152ace19d13ba116b5c57c300d468bb30e54adecce0d78931",
+ "chksum_sha256": "0e9a2651b81bb8edd1fa026e1a1d9f2496478f207405178b47f5fab8c2e27ae9",
"format": 1
},
{
- "name": "plugins/modules/purefa_timeout.py",
+ "name": "plugins/modules/purefa_fs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f6b7ead22ec616b084204ffdd2cc234410da1c76eeb04535fa4e073c699ba647",
+ "chksum_sha256": "eef1a20e20aca70f1bb79338517351f2dec9db1aeb1d706af7f12ab8426f9f3f",
"format": 1
},
{
- "name": "plugins/modules/purefa_dirsnap.py",
+ "name": "plugins/modules/purefa_snmp_agent.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "55c1a87a354f48ae3cdd27e1b61483176789a4623b8317f5b540bc02fe98be75",
+ "chksum_sha256": "13940ca1a13c4b6b0e4d0df82898a9a5ae40845d55afa11ed0b11ec4b3131361",
"format": 1
},
{
- "name": "plugins/modules/purefa_dns.py",
+ "name": "plugins/modules/purefa_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b6e7c15606f73a0f95a6f82a729020f397cdf49ffff31fb1ebf7d2287c08e395",
+ "chksum_sha256": "028203768fcd1f50267ca0fcf343ad03f5f4a5e8a80698567f55cf5b0d71e770",
"format": 1
},
{
@@ -1562,269 +1898,367 @@
"format": 1
},
{
- "name": "plugins/modules/purefa_ad.py",
+ "name": "plugins/modules/purefa_offload.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3968634b5f92292c64ea18869880c7f8413e3a42c4a7fc9ab6a6515ab1de5664",
+ "chksum_sha256": "32fbfec0cb4b6e8052cc556c3e50fd55ebaba16f051d017a2266cd7bb1a6dc1c",
"format": 1
},
{
- "name": "plugins/modules/purefa_console.py",
+ "name": "plugins/modules/purefa_file.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d78dd7b52fbe4576d59e70c1d6c6212727358df4bd4b42b8f5c38c67133f10cf",
+ "chksum_sha256": "c89f6ee6ddd085d909fdacc4c683558d1b9d11e01aadc821a6ed21777ece18c8",
"format": 1
},
{
- "name": "plugins/modules/purefa_default_protection.py",
+ "name": "plugins/modules/purefa_eradication.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "238d08203ef9b095069d1b4ab96fe4cddcd3485c49e3af0aea19f8c93f15d9dd",
+ "chksum_sha256": "081a643d76472cf27e34b7e9c8c00071137f47dfa1f5c571bd1c7bfca29b7651",
"format": 1
},
{
- "name": "plugins/modules/purefa_pod.py",
+ "name": "plugins/modules/purefa_dirsnap.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b864c6938ecf935c3747329b69e41feef8cb1edcaf7cf317bdb1e9bb3dcc7097",
+ "chksum_sha256": "fa642695dca67d19034374480f331eb23f5fac03d65fb94a785db1ba94ee3687",
"format": 1
},
{
- "name": "plugins/modules/purefa_pgsnap.py",
+ "name": "plugins/modules/purefa_connect.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e661145dabdf3380a512c6d50953a7e6291d54830f04bf962e6974330516512c",
+ "chksum_sha256": "99230b1bc6935ae62de5a61c581b5d93575cf9a9853c8cffdacfb5db1bc7b384",
"format": 1
},
{
- "name": "plugins/modules/purefa_token.py",
+ "name": "plugins/modules/purefa_host.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d6455d9d708a74e221e29b60997836459ddde22f8f31cac745f6a348dd321b68",
+ "chksum_sha256": "d913dcc81fa1cde2809c459cd4b59eafd14e41911dd1be71b935f34dc6cfd732",
"format": 1
},
{
- "name": "plugins/modules/purefa_snmp.py",
+ "name": "plugins/modules/purefa_subnet.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8eb07ce1ca80cba4e41c5b00de2e3c288197b773f30235326c24b74189b74151",
+ "chksum_sha256": "868abe5b53903e5f413f5d59561432b62137f83b6bb2a22d1884c973ff6f6d4a",
"format": 1
},
{
- "name": "plugins/modules/purefa_volume.py",
+ "name": "plugins/modules/purefa_directory.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1409c4e1eb8eb3cb78013f3def7d96fc6922a530c0da658a370df74d8a87e8d7",
+ "chksum_sha256": "8a4ee2cc6a29c0a3e2b127b36ee4b72732951e9d1779d938f48e32e92c1b8390",
"format": 1
},
{
- "name": "plugins/modules/purefa_snmp_agent.py",
+ "name": "plugins/modules/purefa_export.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "17f0434a486ed7094f00731ca23b50556a0ef9e59c40d373b3ae1983fd9b34ef",
+ "chksum_sha256": "7d0adb2958eb5721ad25d0d61307f6747541140a63a9f2818181f1d0c0c27f13",
"format": 1
},
{
- "name": "plugins/modules/purefa_smtp.py",
+ "name": "plugins/modules/purefa_pod_replica.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a4eab45b866e8f0aea35287282cfb1fb2efdcf3e433a6842380e7fa0b62228c9",
+ "chksum_sha256": "f3dd36f84ce48befeaddad54b3ed1226cb27f916dc6876137a5b3553323b2070",
"format": 1
},
{
- "name": "plugins/modules/purefa_apiclient.py",
+ "name": "plugins/modules/purefa_vg.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "808b6e4d0510c8f5aca0f714785cbe38cc5dd4b3720e0f13c18395f72d7e3396",
+ "chksum_sha256": "a7d58e6007481afe022c81422f013fd8e2f43efeb02440c0eb1f6d3fed7c3d02",
"format": 1
},
{
- "name": "plugins/modules/purefa_connect.py",
+ "name": "plugins/modules/purefa_inventory.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c7eb7946c8b5b4fa077ddb1ea15b38080e04dc711ca04f9877ec51a73db062b7",
+ "chksum_sha256": "7053973a6c856ebc0c893053459745585bf77199897db44acdbcbf2418eecc2c",
"format": 1
},
{
- "name": "plugins/modules/purefa_maintenance.py",
+ "name": "plugins/modules/purefa_proxy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "773f2559819b5d54f9e95aa4b29713d58c69ec0747b8ab9d194717f6032a3da1",
+ "chksum_sha256": "1bb5ea8dbad2b3314d6a328fca691d0c4839484dcec8ce1e362b82f848efcbf5",
"format": 1
},
{
- "name": "plugins/modules/purefa_ds.py",
+ "name": "plugins/modules/purefa_syslog.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2469f765525af21529e903b6b082f520770f0d4fe14182a37542aec9b5203fcf",
+ "chksum_sha256": "4c4c7f88ec261b0b93f40fd233413caa78fc81d00aa9216a039cf4026c482833",
"format": 1
},
{
- "name": "plugins/modules/purefa_dsrole.py",
+ "name": "plugins/modules/purefa_console.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f70148d126d365a5041de1cd4f51bde24c105024ca1c617621685b9d163e2ad7",
+ "chksum_sha256": "d78dd7b52fbe4576d59e70c1d6c6212727358df4bd4b42b8f5c38c67133f10cf",
"format": 1
},
{
- "name": "plugins/modules/purefa_inventory.py",
+ "name": "plugins/modules/purefa_pgsched.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "041b8bd3902a5b9c0736a4ab121af986588207c8ac8c15864531d6409be64c37",
+ "chksum_sha256": "786bb308f8a4be62d7567ba0637cf4641a59a422c09d4836b9b6c44eb42f5767",
"format": 1
},
{
- "name": "plugins/modules/purefa_hg.py",
+ "name": "plugins/modules/purefa_saml.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7f45c42a72542be609cc8c68b99b44c4947683050b113f4477881c70d9e00bad",
+ "chksum_sha256": "b4296e6410ef984d4730a9ed08d62f76a79061fe195755ff10f1823be9c3c584",
"format": 1
},
{
- "name": "plugins/modules/purefa_vlan.py",
+ "name": "plugins/modules/purefa_snmp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "747134ba0527d5cf676606763fe7ebf1202c258d3324247419fc9f25c9b9fdf2",
+ "chksum_sha256": "8eb07ce1ca80cba4e41c5b00de2e3c288197b773f30235326c24b74189b74151",
"format": 1
},
{
- "name": "plugins/modules/purefa_volume_tags.py",
+ "name": "plugins/modules/purefa_ra.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "470c400d52f4092667d8fbb58cf7961b978d9967524e6f0fbde852bfe6e20d9d",
+ "chksum_sha256": "554d87178984b42a750651cf56d8c798e2e1b1a1d53b97921dd95e73b5a40a67",
"format": 1
},
{
- "name": "plugins/modules/purefa_policy.py",
+ "name": "plugins/modules/purefa_dns.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3c1c34e351721c170b5a9e45c39cbdc6738311744b395e4058ba8346898b0b73",
+ "chksum_sha256": "5a86b07c48dbbe8bd54016d6180ce9bc476d14e867e508f8d39624b308c15035",
"format": 1
},
{
- "name": "plugins/modules/purefa_proxy.py",
+ "name": "plugins/modules/purefa_messages.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "278ae5411bfdb29a2f0c684784884b749aca4de72dcc60734f59e1371514002e",
+ "chksum_sha256": "52e56dcbd616be68dc6b3406ad1871c54c39c4d14b05fe1993e02e88d4508c1e",
"format": 1
},
{
- "name": "plugins/modules/purefa_network.py",
+ "name": "plugins/modules/purefa_admin.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "35f602c303d1e241904287a39195257b54644114a1c9b565b5c8d58b7ad6a63a",
+ "chksum_sha256": "bd8989ead01355ffa1041e3e37fce69e38a76b740c294c7e3b6d9c0fe473c483",
"format": 1
},
{
- "name": "plugins/modules/purefa_info.py",
+ "name": "plugins/modules/purefa_ds.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dce27f771e4622e9e880365f28ff2720d1669229b712f0dd71723b6e5208e942",
+ "chksum_sha256": "60de25b1f95ae1ec4662210e7ee9d1c4f399badcdd2064e0259ef85f2444b9f7",
"format": 1
},
{
- "name": "plugins/modules/purefa_smis.py",
+ "name": "plugins/modules/purefa_ntp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f76d9ac6936e9f1a13710fb9f8bee193107af7f20ad5b413a7b5cdace61913a6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "67b94b93742f3aa99c5602f2ae7a22b74eeae6ceaa9655633fb5cd9f43a2a6b7",
+ "chksum_sha256": "de685054de14726a87c3c9f2ef257770b8e82384e7fe024c97efd72f04b51ef2",
"format": 1
},
{
"name": "plugins/modules/purefa_sso.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "09c113c73e42683195efaeb9fe205409617330c751a1404c25ab2d942c8917d9",
+ "chksum_sha256": "cdec272790d6eb33af87025b1d5763a448caaa270b3f33404eba1068bb24e50c",
"format": 1
},
{
- "name": "plugins/modules/purefa_messages.py",
+ "name": "plugins/modules/purefa_vlan.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "08fb06ea180440c13a828e36ced36f3c59d0475e3c0768d48dcbd82874deb66c",
+ "chksum_sha256": "747134ba0527d5cf676606763fe7ebf1202c258d3324247419fc9f25c9b9fdf2",
"format": 1
},
{
- "name": "plugins/modules/purefa_syslog.py",
+ "name": "plugins/modules/purefa_pg.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0d2140dfe303eeaf5e41dd869822baf88cddc5c97d1e21c9af3264e0ebf83662",
+ "chksum_sha256": "5412fdd646fe83be3147d917bfc6e7b67eca14f96e11aca16696e5f62ceb9a6a",
"format": 1
},
{
- "name": "plugins/modules/purefa_ra.py",
+ "name": "plugins/modules/purefa_cbsexpand.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "14bbb8a463dbdd8da75a64df6b2fa24aa17366b3f738836345a9f06a0a239013",
+ "chksum_sha256": "4edf2994e2705c4eab90cd9a165d16586b5c09c7cb94dca811e34a4eaec9ba61",
"format": 1
},
{
- "name": "plugins/modules/purefa_snap.py",
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/common.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2d14cb9e4c31dac479497d42c2fa4d0f1519c6ad338fadb1c25ec0aa6caf4da4",
+ "chksum_sha256": "8d38e66486a3122632d121c34d0bc22e3f9dc1b8052654ed2d9ce9c5086cf733",
"format": 1
},
{
- "name": "plugins/modules/purefa_banner.py",
+ "name": "plugins/module_utils/version.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "597776c8e075ce163f7c0dea404935db420e4ce2460bdb585c958011e846395e",
+ "chksum_sha256": "da42772669215aa2e1592bfcba0b4cef17d06cdbcdcfeb0ae05e431252fc5a16",
"format": 1
},
{
- "name": "plugins/modules/purefa_eula.py",
+ "name": "plugins/module_utils/purefa.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7e1091138f4ba58dc36c4d229c3162d8cc4f06d1b1cd840805e31bd668c20963",
+ "chksum_sha256": "c4330a72fc18beb9a23bcda90c08e1ab1505a9039cba25ceb3525134042928e8",
"format": 1
},
{
- "name": "plugins/modules/purefa_certs.py",
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/purestorage.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "11c420d24ffe53d41f0ff8d3295a7d706b10c4641d2bd2c231a3fae3e5fe518f",
+ "chksum_sha256": "9163ab6dff1c0498d18c3af8279428798105270fa6f655f266b9e5d4ad8614b0",
"format": 1
},
{
- "name": "plugins/modules/purefa_arrayname.py",
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/pull_request_template.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a9eb4479369a95fe0f603e771c918ae77ed387f8b4602a2f0d9b070f04557ded",
+ "chksum_sha256": "72bc9a2e0da280485e59fcd58c6c16519047b564d02cd5a3d45514b0f3863806",
"format": 1
},
{
- "name": "plugins/modules/purefa_pod_replica.py",
+ "name": ".github/bug_report_template.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f3dd36f84ce48befeaddad54b3ed1226cb27f916dc6876137a5b3553323b2070",
+ "chksum_sha256": "b4eb8821158c73fa62944e91e917f1d1b81fafed3adfe0e6ea373f99902bdf1d",
"format": 1
},
{
- "name": "plugins/doc_fragments",
+ "name": ".github/ISSUE_TEMPLATE",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "plugins/doc_fragments/purestorage.py",
+ "name": ".github/ISSUE_TEMPLATE/bug_report.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cf8dc8c5bef5ff629b260985f336da06abc4a129b26b1f978d14b6e1346eb393",
+ "chksum_sha256": "0c8d64f29fb4536513653bf8c97da30f3340e2041b91c8952db1515d6b23a7b3",
"format": 1
},
{
- "name": "plugins/module_utils",
+ "name": ".github/ISSUE_TEMPLATE/feature_request.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f48c52f209a971b8e7eae4120144d28fcf8ee38a7778a7b4d8cf1ab356617d2",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "plugins/module_utils/purefa.py",
+ "name": ".github/workflows/ansible-lint.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c7df98c21d28ad97baa0fe277a77d47d9d884be577800869c9bde4aeb7e280e",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/stale.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "544ccc9f17e16d9087802e3dcec69741e6ff79e31cf7302947ce2c08126ce1d4",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/black.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "803b5d6a6d7448701e1b7eb09595f783cb7ca83bd4d298f91c60ce7143c3607b",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3fbd0b2490b2c53dcf65cac2ca22b246ba7a447ffbe89366de942686de8e54a3",
+ "format": 1
+ },
+ {
+ "name": ".github/feature_request_template.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4731d199ca9cbe66b2b6de02846b4860ccfb4fd0ebb2872fe6452b6cf5b73ce2",
+ "format": 1
+ },
+ {
+ "name": ".github/CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69fb16d49892fb5d60316a051f1d27d741e71fc84f18c14ff7d388616925535e",
+ "format": 1
+ },
+ {
+ "name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "22661a8c1f12c0ff4ff416bd5df0a763dd23b33c4eb35c0145cdf68607e56346",
+ "chksum_sha256": "78c320e467ed14bb56976077daad539d0016f60275826f69ea2cce845167aed0",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a215a07e8eb5923a9ad3ab77c7f18090860e0ce10af48e97c0ba13c2dc3ca354",
+ "format": 1
+ },
+ {
+ "name": "meta/execution-environment.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6458a579fbece249677d5d5473a58da36c7c8ab0a23b136891551f96e2c9b4e",
"format": 1
},
{
@@ -1835,10 +2269,38 @@
"format": 1
},
{
- "name": ".pylintrc",
+ "name": "LICENSE",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "301bd6ff5bc1dea2fe8e6b9295d4757ea0b569143b3ae21e3fb6cfe458e3c46d",
+ "format": 1
+ },
+ {
+ "name": "roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/.keep",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1bcdf66af9559f83da18c688dba3088f7212923b3174f6283de19860c716362e",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "settings.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02d67ecc8a46b6b4ee0955afb6bcc8e8be5739c7cc9552e0d084cb8d2dda79dd",
"format": 1
},
{
@@ -1847,6 +2309,20 @@
"chksum_type": "sha256",
"chksum_sha256": "ac6beea8907c4dec17a6c8ccf9cf7728865c8323a2fa0ef1c7e3e79a3c283433",
"format": 1
+ },
+ {
+ "name": ".yamllint",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2970fa4875092f99825ac0da3c82d2413ce973087b9945e68fdfa7b3b1e2012e",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d37a2cad23124cafaeb9cff76e42675e03e970925ef84ac6623a6ebdcf93536",
+ "format": 1
}
],
"format": 1
diff --git a/ansible_collections/purestorage/flasharray/MANIFEST.json b/ansible_collections/purestorage/flasharray/MANIFEST.json
index 3a7fdd581..7f4348785 100644
--- a/ansible_collections/purestorage/flasharray/MANIFEST.json
+++ b/ansible_collections/purestorage/flasharray/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "purestorage",
"name": "flasharray",
- "version": "1.19.1",
+ "version": "1.27.0",
"authors": [
"Pure Storage Ansible Team <pure-ansible-team@purestorage.com>"
],
@@ -29,7 +29,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "76a62745a3665d2ddc02ed6c76f491ad07853698b1c5b53b253084e41a21938a",
+ "chksum_sha256": "0b1b2d1f58f2481ee5654c9c884302a3e2c330cabf5a720b81a8e44543082dc7",
"format": 1
},
"format": 1
diff --git a/ansible_collections/purestorage/flasharray/README.md b/ansible_collections/purestorage/flasharray/README.md
index 43caf944a..e0ec9614f 100644
--- a/ansible_collections/purestorage/flasharray/README.md
+++ b/ansible_collections/purestorage/flasharray/README.md
@@ -10,18 +10,19 @@ The Pure Storage FlashArray collection consists of the latest versions of the Fl
## Supported Platforms
-- Pure Storage FlashArray with Purity 4.6 or later
+- Pure Storage FlashArray with Purity 6.1.0 or later
- Certain modules and functionality require higher versions of Purity. Modules will inform you if your Purity version is not high enough to use a module.
## Prerequisites
-- Ansible 2.9 or later
-- Pure Storage FlashArray system running Purity 4.6 or later
+- Ansible 2.14 or later
+- Pure Storage FlashArray system running Purity 6.1.0 or later
- some modules require higher versions of Purity
- Some modules require specific Purity versions
+- distro
- purestorage
- py-pure-client
-- python >= 3.6
+- python >= 3.9
- netaddr
- requests
- pycountry
@@ -31,10 +32,6 @@ The Pure Storage FlashArray collection consists of the latest versions of the Fl
All modules are idempotent with the exception of modules that change or set passwords. Due to security requirements existing passwords can be validated against and therefore will always be modified, even if there is no change.
-## Notes
-
-The Pure Storage Ansible modules force all host and volume names to use kebab-case. Any parameters that use camelCase or PascalCase will be lowercased to ensure consistency across all modules.
-
## Available Modules
- purefa_ad - manage FlashArray Active Directory accounts
@@ -43,6 +40,7 @@ The Pure Storage Ansible modules force all host and volume names to use kebab-ca
- purefa_apiclient - manage FlashArray API clients
- purefa_arrayname - manage the name of the FlashArray
- purefa_banner - manage the CLI and GUI login banner of the FlashArray
+- purefa_cbsexpand - manage CBS FlashArray capacity expansion
- purefa_certs - manage FlashArray SSL certificates
- purefa_connect - manage FlashArrays connecting for replication purposes
- purefa_console - manage Console Lock setting for the FlashArray
@@ -56,7 +54,9 @@ The Pure Storage Ansible modules force all host and volume names to use kebab-ca
- purefa_eradication - manage eradication timer for deleted items
- purefa_eula - sign, or resign, FlashArray EULA
- purefa_export - manage FlashArray managed file system exports
+- purefa_file - copy file between managed directories
- purefa_fs - manage FlashArray managed file systems
+- purefa_hardware - manage component identification LEDs
- purefa_hg - manage hostgroups on the FlashArray
- purefa_host - manage hosts on the FlashArray
- purefa_info - get information regarding the configuration of the Flasharray
diff --git a/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml b/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml
index 8719c5637..57f7e73f4 100644
--- a/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml
+++ b/ansible_collections/purestorage/flasharray/changelogs/.plugin-cache.yaml
@@ -41,6 +41,11 @@ plugins:
name: purefa_banner
namespace: ''
version_added: 1.0.0
+ purefa_cbsexpand:
+ description: Modify the CBS array capacity
+ name: purefa_cbsexpand
+ namespace: ''
+ version_added: 1.0.0
purefa_certs:
description: Manage FlashArray SSL Certificates
name: purefa_certs
@@ -106,11 +111,21 @@ plugins:
name: purefa_export
namespace: ''
version_added: 1.5.0
+ purefa_file:
+ description: Manage FlashArray File Copies
+ name: purefa_file
+ namespace: ''
+ version_added: 1.22.0
purefa_fs:
description: Manage FlashArray File Systems
name: purefa_fs
namespace: ''
version_added: 1.5.0
+ purefa_hardware:
+ description: Manage FlashArray Hardware Identification
+ name: purefa_hardware
+ namespace: ''
+ version_added: 1.24.0
purefa_hg:
description: Manage hostgroups on Pure Storage FlashArrays
name: purefa_hg
@@ -307,4 +322,4 @@ plugins:
strategy: {}
test: {}
vars: {}
-version: 1.19.1
+version: 1.27.0
diff --git a/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml b/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml
index ba3ad5ba1..6a0cac383 100644
--- a/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml
+++ b/ansible_collections/purestorage/flasharray/changelogs/changelog.yaml
@@ -348,6 +348,240 @@ releases:
bugfixes:
- purefa_info - Fixed missing arguments for google_offload and pods
release_date: '2023-05-19'
+ 1.20.0:
+ changes:
+ bugfixes:
+ - purefa_pgsched - Resolved idempotency issue with snap and replication enabled
+ flags
+ - purefa_pgsnap - Fixed issue with eradicating deleted pgsnapshot
+ - purefa_pgsnap - Update the accepted suffixes to include also numbers only.
+ Fixed the logic to retrieve the latest completed snapshot
+ - purefa_policy - Set user_mapping parameter default to True
+ minor_changes:
+ - purefa_info - Added support for autodir policies
+ - purefa_policy - Added support for autodir policies
+ - purefa_proxy - Add new protocol parameter, defaults to https
+ fragments:
+ - 411_nfs_user_mapping.yaml
+ - 412_fix_snapshot_suffix_handling.yaml
+ - 413_eradicate_pgsnap.yaml
+ - 415_autodir_policies.yaml
+ - 420_proxy_protocol.yaml
+ - 422_sched_enable_fix.yaml
+ release_date: '2023-07-10'
+ 1.21.0:
+ changes:
+ bugfixes:
+ - purefa_certs - Resolved CSR issue and require export_file for state sign.
+ - purefa_info - Fix serial number generation issue for vVols
+ - purefa_snap - Fixed issue with remote snapshot retrieve. Mainly a workaround
+ to an issue with Purity REST 1.x when remote snapshots are searched.
+ - purefa_volume - Fixed bug with NULL suffix for multiple volume creation.
+ minor_changes:
+ - purefa_info - Add `port_connectivity` information for hosts
+ - purefa_info - Add promotion status information for volumes
+ - purefa_offload - Added a new profile parameter.
+ - purefa_pgsnap - Added new parameter to support snapshot throttling
+ - purefa_snap - Added new parameter to support snapshot throttling
+ fragments:
+ - 428_promotion.yaml
+ - 429_host_balance.yaml
+ - 430_throttle_support.yaml
+ - 431_offload_profile.yaml
+ - 433_certs.yaml
+ - 436_snap_fix.yaml
+ - 440_null_suffix.yaml
+ release_date: '2023-09-06'
+ 1.22.0:
+ changes:
+ bugfixes:
+ - purefa_ds - Fixes error when enabling directory services while a bind_user
+ is set on the array and a bind_password is not.
+ - purefa_ds - Fixes issue with creating a new ds configuration while setting
+ force_bind_password as "false".
+ - purefa_host - Fix incorrect calling of "module.params".
+ - purefa_info - Added missing alerts subset name
+ - purefa_info - Fixed attribute errors after EUC changes
+ - purefa_info - Fixed issue with replica links in unknown state
+ - purefa_info - Fixed parameter error when enabled and disabled timers are different
+ values on purity 6.4.10+ arrays.
+ - purefa_info - Fixed py39 specific bug with multiple DNS entries
+ - purefa_network - Allow `gateway` to be set as `0.0.0.0` to remove an existing
+ gateway address
+ - purefa_network - Fixed IPv6 support issues
+ - purefa_network - Fixed idempotency issue when gateway not modified
+ - purefa_pgsched - Fixed bug with an unnecessary substitution
+ - purefa_pgsnap - Enabled to eradicate destroyed snapshots.
+ - purefa_pgsnap - Ensure that `now` and `remote` are mutually exclusive.
+ - purefa_snap - Fixed incorrect calling logic causing failure on remote snapshot
+ creation
+ - purefa_subnet - Fixed IPv4 gateway removal issue.
+ - purefa_subnet - Fixed IPv6 support issues.
+ minor_changes:
+ - purefa_eradication - Added support for disabled and enabled timers from Purity//FA
+ 6.4.10
+ - purefa_info - Add array subscription data
+ - purefa_info - Added `nfs_version` to policies and rules from Purity//FA 6.4.10
+ - purefa_info - Added `total_used` to multiple sections from Purity//FA 6.4.10
+ - purefa_info - Provide array timezone from Purity//FA 6.4.10
+ - purefa_info - Report NTP Symmetric key presence from Purity//FA 6.4.10
+ - purefa_network - Add support for creating/modifying VIF and LACP_BOND interfaces
+ - purefa_network - `enabled` option added. This must now be used instead of
+ state=absent to disable a physical interface as state=absent can now fully
+ delete a non-physical interface
+ - purefa_ntp - Added support for NTP Symmetric Key from Purity//FA 6.4.10s
+ - purefa_pgsched - Change `snap_at` and `replicate_at` to be AM or PM hourly
+ - purefa_pgsnap - Add protection group snapshot rename functionality
+ - purefa_policy - Added support for multiple NFS versions from Purity//FA 6.4.10
+ - purefa_vg - Add rename parameter
+ fragments:
+ - 444_euc_fix.yaml
+ - 445_py39.yaml
+ - 448_add_subs.yaml
+ - 450_no_gateway.yaml
+ - 452_throttle_fix.yaml
+ - 459_fix_eradication_timer_info.yaml
+ - 460_eradicaton.yaml
+ - 461_ntp_keys.yaml
+ - 462_info_update.yaml
+ - 463_nfs_version.yaml
+ - 464_fix_ds_add.yaml
+ - 468_missing_subset.yaml
+ - 469_fix_missing_bind_password.yaml
+ - 471_fix_ip_protocol.yaml
+ - 474_network_fixes.yaml
+ - 480_rename_vg.yaml
+ - 482_schedule.yaml
+ - 483_missing_replicate.yaml
+ - 484_fix_repl_sched.yaml
+ - 485_fix_host.yaml
+ - 487_pgrename.yaml
+ - 488_fix_pgsnap_eradication.yaml
+ modules:
+ - description: Manage FlashArray File Copies
+ name: purefa_file
+ namespace: ''
+ release_date: '2023-10-25'
+ 1.23.0:
+ changes:
+ bugfixes:
+ - purefa_cert - Fixed issue where parts of the subject where not included in
+ the CSR if they did not exist in the currently used cert.
+ - purefa_pg - Allows a protection group to be correctly created when `target`
+ is specified as well as other objects, such as `volumes` or `hosts`
+ minor_changes:
+ - purefa_info - Add NSID value for NVMe namespace in `hosts` response
+ - purefa_info - Subset `pgroups` now also provides a new dict called `deleted_pgroups`
+ - purefa_offload - Remove `nfs` as an option when Purity//FA 6.6.0 or higher
+ is detected
+ fragments:
+ - 495_add_del_pgroup_info.yaml
+ - 496_fix_cert_signing.yaml
+ - 498_fix_pg_creation.yaml
+ - 499_rest_227.yaml
+ release_date: '2023-11-07'
+ 1.24.0:
+ changes:
+ bugfixes:
+ - purefa_dns - Fixed attribute error on deletion of management DNS
+ - purefa_pgsched - Fixed issue with disabling schedules
+ - purefa_pgsnap - Fixed incorrect parameter name
+ minor_changes:
+ - purefa_dns - Added facility to add a CA certificate to management DNS and check
+ peer.
+ - purefa_snap - Add support for suffix on remote offload snapshots
+ fragments:
+ - 505_dns_attribute.yaml
+ - 506_disable_pgsched.yaml
+ - 509_check_peer.yaml
+ - 513_remote_snapshot_suffix.yaml
+ - 516_fix_throttle.yaml
+ modules:
+ - description: Manage FlashArray Hardware Identification
+ name: purefa_hardware
+ namespace: ''
+ release_date: '2023-12-01'
+ 1.25.0:
+ changes:
+ minor_changes:
+ - all - ``distro`` package added as a pre-requisite
+ - multiple - Remove packaging pre-requisite.
+ - multiple - Where only REST 2.x endpoints are used, convert to REST 2.x methodology.
+ - purefa_info - Expose NFS security flavor for policies
+ - purefa_info - Expose cloud capacity details if array is a Cloud Block Store.
+ - purefa_policy - Added NFS security flavors for accessing files in the mount
+ point.
+ fragments:
+ - 441_v2_version.yaml
+ - 518_nfs_security.yaml
+ - 519_add_cloud_capacity.yaml
+ - 520_add_distro.yaml
+ release_date: '2023-12-20'
+ 1.26.0:
+ changes:
+ bugfixes:
+ - purefa_ds - Fix issue with SDK returning empty data for data directory services
+ even when it does exist
+ - purefa_policy - Fix incorrect call of post instead of patch for NFS policies
+ minor_changes:
+ - purefa_policy - Add SMB user based enumeration parameter
+ - purefa_policy - Remove default setting for nfs_version to allow for change
+ of version at policy level
+ fragments:
+ - 523_nfs_policies.yaml
+ - 524_empty_ds.yaml
+ release_date: '2024-01-12'
+ 1.27.0:
+ changes:
+ bugfixes:
+ - purefa_certs - Allow certificates of over 3000 characters to be imported.
+ - purefa_info - Resolved issue with KeyError when LACP bonds are in use
+ - purefa_inventory - Fix issue with iSCSI-only FlashArrays
+ - purefa_pgsnap - Add support for restoring volumes connected to hosts in a
+ host-based protection group and hosts in a hostgroup-based protection group.
+ minor_changes:
+ - purefa_arrayname - Convert to REST v2
+ - purefa_eula - Only sign if not previously signed. From REST 2.30 name, title
+ and company are no longer required
+ - purefa_info - Add support for controller uptime from Purity//FA 6.6.3
+ - purefa_inventory - Convert to REST v2
+ - purefa_ntp - Convert to REST v2
+ - purefa_offload - Convert to REST v2
+ - purefa_pgsnap - Module now requires minimum FlashArray Purity//FA 6.1.0
+ - purefa_ra - Add ``present`` and ``absent`` as valid ``state`` options
+ - purefa_ra - Add connecting as valid status of RA to perform operations on
+ - purefa_ra - Convert to REST v2
+ - purefa_syslog - ``name`` becomes a required parameter as module converts to
+ full REST 2 support
+ - purefa_vnc - Convert to REST v2
+ release_summary: '| This release changes the minimum supported Purity//FA version.
+
+ |
+
+ | The minimum supported Purity//FA version increases to 6.1.0.
+
+ | All previous versions are classed as EOL by Pure Storage support.
+
+ |
+
+ | This change is to support the full integration to Purity//FA REST v2.x
+
+ '
+ fragments:
+ - 1_27_summary.yaml
+ - 527_pgsnap_rest2.yaml
+ - 529_eula_v2.yaml
+ - 530_ntp_rest2.yaml
+ - 531_ra_rest.yaml
+ - 536_inv_rest2.yaml
+ - 536_syslog_rest.yaml
+ - 538_arrayname_rest.yaml
+ - 539_rest2_vnc.yaml
+ - 541_r2_offload.yaml
+ - 545_4kcert.yaml
+ - 547_lacp_neighbor_info.yaml
+ - 548_uptime.yaml
+ release_date: '2024-03-08'
1.4.0:
changes:
bugfixes:
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/1_27_summary.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/1_27_summary.yaml
new file mode 100644
index 000000000..5a9d9e179
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/1_27_summary.yaml
@@ -0,0 +1,7 @@
+release_summary: |
+ | This release changes the minimum supported Purity//FA version.
+ |
+ | The minimum supported Purity//FA version increases to 6.1.0.
+ | All previous versions are classed as EOL by Pure Storage support.
+ |
+ | This change is to support the full integration to Purity//FA REST v2.x
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/411_nfs_user_mapping.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/411_nfs_user_mapping.yaml
new file mode 100644
index 000000000..f75cc91f8
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/411_nfs_user_mapping.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_policy - Set user_mapping parameter default to True
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/412_fix_snapshot_suffix_handling.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/412_fix_snapshot_suffix_handling.yaml
new file mode 100644
index 000000000..0a8ef7aa7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/412_fix_snapshot_suffix_handling.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsnap - Update the accepted suffixes to include also numbers only. Fixed the logic to retrieve the latest completed snapshot
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/413_eradicate_pgsnap.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/413_eradicate_pgsnap.yaml
new file mode 100644
index 000000000..ca408bfed
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/413_eradicate_pgsnap.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsnap - Fixed issue with eradicating deleted pgsnapshot
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/415_autodir_policies.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/415_autodir_policies.yaml
new file mode 100644
index 000000000..2eb300bff
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/415_autodir_policies.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_policy - Added support for autodir policies
+ - purefa_info - Added support for autodir policies
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/420_proxy_protocol.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/420_proxy_protocol.yaml
new file mode 100644
index 000000000..3f8be079f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/420_proxy_protocol.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_proxy - Add new protocol parameter, defaults to https
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/422_sched_enable_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/422_sched_enable_fix.yaml
new file mode 100644
index 000000000..c95ee9781
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/422_sched_enable_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsched - Resolved idempotency issue with snap and replication enabled flags
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/428_promotion.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/428_promotion.yaml
new file mode 100644
index 000000000..d4580a0f5
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/428_promotion.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - purefa_info - Add promotion status information for volumes
+bugfixes:
+ - purefa_info - Fix serial number generation issue for vVols
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/429_host_balance.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/429_host_balance.yaml
new file mode 100644
index 000000000..cde858936
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/429_host_balance.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_info - Add `hosts_balance` subset
+ - purefa_info - Add `port_connectivity` information for hosts
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/430_throttle_support.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/430_throttle_support.yaml
new file mode 100644
index 000000000..66a37035f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/430_throttle_support.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_snap - Added new parameter to support snapshot throttling
+ - purefa_pgsnap - Added new parameter to support snapshot throttling
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/431_offload_profile.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/431_offload_profile.yaml
new file mode 100644
index 000000000..40c9b03e3
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/431_offload_profile.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_offload - Added a new profile parameter.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/433_certs.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/433_certs.yaml
new file mode 100644
index 000000000..b3e1b2065
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/433_certs.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_certs - Resolved CSR issue and require export_file for state sign.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/436_snap_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/436_snap_fix.yaml
new file mode 100644
index 000000000..777630ce8
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/436_snap_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_snap - Fixed issue with remote snapshot retrieve. Mainly a workaround to an issue with Purity REST 1.x when remote snapshots are searched.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/440_null_suffix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/440_null_suffix.yaml
new file mode 100644
index 000000000..4eb46cac4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/440_null_suffix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_volume - Fixed bug with NULL suffix for multiple volume creation.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/441_v2_version.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/441_v2_version.yaml
new file mode 100644
index 000000000..435c572c1
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/441_v2_version.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - multiple - Where only REST 2.x endpoints are used, convert to REST 2.x methodology.
+ - multiple - Remove packaging pre-requisite.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/444_euc_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/444_euc_fix.yaml
new file mode 100644
index 000000000..b253141ef
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/444_euc_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Fixed attribute errors after EUC changes
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/445_py39.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/445_py39.yaml
new file mode 100644
index 000000000..28e89a3f7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/445_py39.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Fixed py39 specific bug with multiple DNS entries
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/448_add_subs.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/448_add_subs.yaml
new file mode 100644
index 000000000..3f49816ff
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/448_add_subs.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add array subscription data
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/450_no_gateway.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/450_no_gateway.yaml
new file mode 100644
index 000000000..3264afaf1
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/450_no_gateway.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_network - Allow `gateway` to be set as `0.0.0.0` to remove an existing gateway address
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/452_throttle_fix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/452_throttle_fix.yaml
new file mode 100644
index 000000000..af66fc2c4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/452_throttle_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_snap - Fixed incorrect calling logic causing failure on remote snapshot creation
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/459_fix_eradication_timer_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/459_fix_eradication_timer_info.yaml
new file mode 100644
index 000000000..2913543d9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/459_fix_eradication_timer_info.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Fixed parameter error when enabled and disabled timers are different values on purity 6.4.10+ arrays.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/460_eradicaton.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/460_eradicaton.yaml
new file mode 100644
index 000000000..fe35f87ec
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/460_eradicaton.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_eradication - Added support for disabled and enabled timers from Purity//FA 6.4.10
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/461_ntp_keys.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/461_ntp_keys.yaml
new file mode 100644
index 000000000..943e5c169
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/461_ntp_keys.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_info - Report NTP Symmetric key presence from Purity//FA 6.4.10
+ - purefa_ntp - Added support for NTP Symmetric Key from Purity//FA 6.4.10
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/462_info_update.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/462_info_update.yaml
new file mode 100644
index 000000000..06f1cb24e
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/462_info_update.yaml
@@ -0,0 +1,6 @@
+minor_changes:
+ - purefa_info - Provide array timezone from Purity//FA 6.4.10
+ - purefa_info - Added `total_used` to multiple sections from Purity//FA 6.4.10
+ - purefa_info - Added `nfs_version` to policies and rules from Purity//FA 6.4.10
+bugfixes:
+ - purefa_info - Fixed issue with replica links in unknown state
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/463_nfs_version.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/463_nfs_version.yaml
new file mode 100644
index 000000000..88bc6ac0a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/463_nfs_version.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_policy - Added support for multiple NFS versions from Purity//FA 6.4.10
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/464_fix_ds_add.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/464_fix_ds_add.yaml
new file mode 100644
index 000000000..6703f3295
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/464_fix_ds_add.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_ds - Fixes issue with creating a new ds configuration while setting force_bind_password as "false".
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/468_missing_subset.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/468_missing_subset.yaml
new file mode 100644
index 000000000..2eb13b8ca
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/468_missing_subset.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Added missing alerts subset name
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/469_fix_missing_bind_password.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/469_fix_missing_bind_password.yaml
new file mode 100644
index 000000000..e3c22ebf4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/469_fix_missing_bind_password.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_ds - Fixes error when enabling directory services while a bind_user is set on the array and a bind_password is not.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/471_fix_ip_protocol.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/471_fix_ip_protocol.yaml
new file mode 100644
index 000000000..42ebcecb2
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/471_fix_ip_protocol.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - purefa_subnet - Fixed IPv6 support issues.
+ - purefa_subnet - Fixed IPv4 gateway removal issue.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/474_network_fixes.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/474_network_fixes.yaml
new file mode 100644
index 000000000..50b504984
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/474_network_fixes.yaml
@@ -0,0 +1,6 @@
+bugfixes:
+ - purefa_network - Fixed IPv6 support issues
+ - purefa_network - Fixed idempotency issue when gateway not modified
+minor_changes:
+ - purefa_network - Add support for creating/modifying VIF and LACP_BOND interfaces
+ - purefa_network - enabled option added. This must now be used instead of state=absent to disable a physical interface as state=absent can now fully delete a non-physical interface
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/480_rename_vg.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/480_rename_vg.yaml
new file mode 100644
index 000000000..58b8a9c93
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/480_rename_vg.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_vg - Add rename parameter
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/482_schedule.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/482_schedule.yaml
new file mode 100644
index 000000000..ddccfec3c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/482_schedule.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_pgsched - Change `snap_at` and `replicate_at` to be AM or PM hourly number rather than 24-hour time.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/483_missing_replicate.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/483_missing_replicate.yaml
new file mode 100644
index 000000000..5ab397e16
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/483_missing_replicate.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsnap - Ensure that `now` and `remote` are mutually exclusive.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/484_fix_repl_sched.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/484_fix_repl_sched.yaml
new file mode 100644
index 000000000..48de9ace8
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/484_fix_repl_sched.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsched - Fixed bug with an unnecessary substitution
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/485_fix_host.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/485_fix_host.yaml
new file mode 100644
index 000000000..95986d3c4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/485_fix_host.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_host - Fix incorrect calling of "module.params".
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/487_pgrename.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/487_pgrename.yaml
new file mode 100644
index 000000000..001fc4e1b
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/487_pgrename.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_pgsnap - Add protection group snapshot rename functionality
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/488_fix_pgsnap_eradication.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/488_fix_pgsnap_eradication.yaml
new file mode 100644
index 000000000..ebd3ff7b1
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/488_fix_pgsnap_eradication.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsnap - Enabled to eradicate destroyed snapshots.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/495_add_del_pgroup_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/495_add_del_pgroup_info.yaml
new file mode 100644
index 000000000..e720eaf76
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/495_add_del_pgroup_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Subset `pgroups` now also provides a new dict called `deleted_pgroups`
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/496_fix_cert_signing.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/496_fix_cert_signing.yaml
new file mode 100644
index 000000000..69a2bb545
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/496_fix_cert_signing.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_cert - Fixed issue where parts of the subject were not included in the CSR if they did not exist in the currently used cert.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/498_fix_pg_creation.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/498_fix_pg_creation.yaml
new file mode 100644
index 000000000..a22a752c6
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/498_fix_pg_creation.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pg - Allows a protection group to be correctly created when `target` is specified as well as other objects, such as `volumes` or `hosts`
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/499_rest_227.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/499_rest_227.yaml
new file mode 100644
index 000000000..103c4d3d9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/499_rest_227.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_info - Add NSID value for NVMe namespace in `hosts` response
+ - purefa_offload - Remove `nfs` as an option when Purity//FA 6.6.0 or higher is detected
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/505_dns_attribute.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/505_dns_attribute.yaml
new file mode 100644
index 000000000..ce5f037b7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/505_dns_attribute.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_dns - Fixed attribute error on deletion of management DNS
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/506_disable_pgsched.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/506_disable_pgsched.yaml
new file mode 100644
index 000000000..c12ab6baf
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/506_disable_pgsched.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsched - Fixed issue with disabling schedules
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/509_check_peer.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/509_check_peer.yaml
new file mode 100644
index 000000000..653db16a0
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/509_check_peer.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_dns - Added facility to add a CA certificate to management DNS and check peer.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/513_remote_snapshot_suffix.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/513_remote_snapshot_suffix.yaml
new file mode 100644
index 000000000..4b32c10d0
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/513_remote_snapshot_suffix.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_snap - Add support for suffix on remote offload snapshots
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/516_fix_throttle.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/516_fix_throttle.yaml
new file mode 100644
index 000000000..9652dbbb7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/516_fix_throttle.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_pgsnap - Fixed incorrect parameter name
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/518_nfs_security.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/518_nfs_security.yaml
new file mode 100644
index 000000000..10981085f
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/518_nfs_security.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - purefa_policy - Added NFS security flavors for accessing files in the mount point.
+ - purefa_info - Expose NFS security flavor for policies
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/519_add_cloud_capacity.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/519_add_cloud_capacity.yaml
new file mode 100644
index 000000000..2c4b0e769
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/519_add_cloud_capacity.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Expose cloud capacity details if array is a Cloud Block Store.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/520_add_distro.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/520_add_distro.yaml
new file mode 100644
index 000000000..e7731af50
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/520_add_distro.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - all - ``distro`` package added as a pre-requisite
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/523_nfs_policies.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/523_nfs_policies.yaml
new file mode 100644
index 000000000..1513fa51d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/523_nfs_policies.yaml
@@ -0,0 +1,5 @@
+bugfixes:
+ - purefa_policy - Fix incorrect call of post instead of patch for NFS policies
+minor_changes:
+ - purefa_policy - Add SMB user based enumeration parameter
+ - purefa_policy - Remove default setting for nfs_version to allow for change of version at policy level
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/524_empty_ds.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/524_empty_ds.yaml
new file mode 100644
index 000000000..857d4d4c9
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/524_empty_ds.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_ds - Fix issue with SDK returning empty data for data directory services even when it does exist
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/527_pgsnap_rest2.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/527_pgsnap_rest2.yaml
new file mode 100644
index 000000000..f0a22b4e4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/527_pgsnap_rest2.yaml
@@ -0,0 +1,4 @@
+bugfixes:
+ - purefa_pgsnap - Add support for restoring volumes connected to hosts in a host-based protection group and hosts in a hostgroup-based protection group.
+minor_changes:
+ - purefa_pgsnap - Module now requires minimum FlashArray Purity//FA 6.1.0
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/529_eula_v2.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/529_eula_v2.yaml
new file mode 100644
index 000000000..4d8d74084
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/529_eula_v2.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_eula - Only sign if not previously signed. From REST 2.30 name, title and company are no longer required
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/530_ntp_rest2.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/530_ntp_rest2.yaml
new file mode 100644
index 000000000..5102181c7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/530_ntp_rest2.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_ntp - Convert to REST v2
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/531_ra_rest.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/531_ra_rest.yaml
new file mode 100644
index 000000000..9e393df1a
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/531_ra_rest.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - purefa_ra - Convert to REST v2
+ - purefa_ra - Add ``present`` and ``absent`` as valid ``state`` options
+ - purefa_ra - Add connecting as valid status of RA to perform operations on
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/536_inv_rest2.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/536_inv_rest2.yaml
new file mode 100644
index 000000000..8a672cac7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/536_inv_rest2.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - purefa_inventory - Convert to REST v2
+bugfixes:
+ - purefa_inventory - Fix issue with iSCSI-only FlashArrays
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/536_syslog_rest.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/536_syslog_rest.yaml
new file mode 100644
index 000000000..5289de1a7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/536_syslog_rest.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_syslog - ``name`` becomes a required parameter as module converts to full REST 2 support
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/538_arrayname_rest.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/538_arrayname_rest.yaml
new file mode 100644
index 000000000..0ac0c53df
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/538_arrayname_rest.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_arrayname - Convert to REST v2
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/539_rest2_vnc.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/539_rest2_vnc.yaml
new file mode 100644
index 000000000..ce2daca7d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/539_rest2_vnc.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_vnc - Convert to REST v2
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/541_r2_offload.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/541_r2_offload.yaml
new file mode 100644
index 000000000..cc7a70853
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/541_r2_offload.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_offload - Convert to REST v2
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/545_4kcert.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/545_4kcert.yaml
new file mode 100644
index 000000000..3a2fe8b6c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/545_4kcert.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_certs - Allow certificates of over 3000 characters to be imported.
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/547_lacp_neighbor_info.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/547_lacp_neighbor_info.yaml
new file mode 100644
index 000000000..968aa802d
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/547_lacp_neighbor_info.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefa_info - Resolved issue with KeyError when LACP bonds are in use
diff --git a/ansible_collections/purestorage/flasharray/changelogs/fragments/548_uptime.yaml b/ansible_collections/purestorage/flasharray/changelogs/fragments/548_uptime.yaml
new file mode 100644
index 000000000..4c7a02012
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/changelogs/fragments/548_uptime.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefa_info - Add support for controller uptime from Purity//FA 6.6.3
diff --git a/ansible_collections/purestorage/flasharray/meta/runtime.yml b/ansible_collections/purestorage/flasharray/meta/runtime.yml
index f664dcbd0..15af60575 100644
--- a/ansible_collections/purestorage/flasharray/meta/runtime.yml
+++ b/ansible_collections/purestorage/flasharray/meta/runtime.yml
@@ -1,5 +1,5 @@
---
-requires_ansible: ">=2.9.10"
+requires_ansible: ">=2.14.0"
plugin_routing:
modules:
purefa_sso:
diff --git a/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py b/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py
index 7c19925e6..e05cbf6a7 100644
--- a/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py
+++ b/ansible_collections/purestorage/flasharray/plugins/doc_fragments/purestorage.py
@@ -42,5 +42,4 @@ requirements:
- netaddr
- requests
- pycountry
- - packaging
"""
diff --git a/ansible_collections/purestorage/flasharray/plugins/module_utils/common.py b/ansible_collections/purestorage/flasharray/plugins/module_utils/common.py
new file mode 100644
index 000000000..ddc093731
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/module_utils/common.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024 Simon Dodsley, <simon@purestorage.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+"""
+This module adds shared functions for the FlashArray modules
+"""
+
+
+def human_to_bytes(size):
+ """Given a human-readable byte string (e.g. 2G, 30M),
+ return the number of bytes. Will return 0 if the argument has
+ unexpected form.
+ """
+ bytes = size[:-1]
+ unit = size[-1].upper()
+ if bytes.isdigit():
+ bytes = int(bytes)
+ if unit == "P":
+ bytes *= 1125899906842624
+ elif unit == "T":
+ bytes *= 1099511627776
+ elif unit == "G":
+ bytes *= 1073741824
+ elif unit == "M":
+ bytes *= 1048576
+ elif unit == "K":
+ bytes *= 1024
+ else:
+ bytes = 0
+ else:
+ bytes = 0
+ return bytes
+
+
+def human_to_real(iops):
+ """Given a human-readable string (e.g. 2K, 30M IOPs),
+ return the real number. Will return 0 if the argument has
+ unexpected form.
+ """
+ digit = iops[:-1]
+ unit = iops[-1].upper()
+ if unit.isdigit():
+ digit = iops
+ elif digit.isdigit():
+ digit = int(digit)
+ if unit == "M":
+ digit *= 1000000
+ elif unit == "K":
+ digit *= 1000
+ else:
+ digit = 0
+ else:
+ digit = 0
+ return digit
+
+
+def convert_to_millisecs(hour):
+ """Convert a 12-hour clock to milliseconds from
+ midnight"""
+ if hour[-2:].upper() == "AM" and hour[:2] == "12":
+ return 0
+ elif hour[-2:].upper() == "AM":
+ return int(hour[:-2]) * 3600000
+ elif hour[-2:].upper() == "PM" and hour[:2] == "12":
+ return 43200000
+ return (int(hour[:-2]) + 12) * 3600000
+
+
+def convert_time_to_millisecs(time):
+ """Convert a time period in milliseconds"""
+ if time[-1:].lower() not in ["w", "d", "h", "m", "s"]:
+ return 0
+ try:
+ if time[-1:].lower() == "w":
+ return int(time[:-1]) * 7 * 86400000
+ elif time[-1:].lower() == "d":
+ return int(time[:-1]) * 86400000
+ elif time[-1:].lower() == "h":
+ return int(time[:-1]) * 3600000
+ elif time[-1:].lower() == "m":
+ return int(time[:-1]) * 60000
+ except Exception:
+ return 0
diff --git a/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py b/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py
index b85ce0e29..82d048bcb 100644
--- a/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py
+++ b/ansible_collections/purestorage/flasharray/plugins/module_utils/purefa.py
@@ -32,6 +32,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
+HAS_DISTRO = True
+try:
+ import distro
+except ImportError:
+ HAS_DISTRO = False
+
HAS_PURESTORAGE = True
try:
from purestorage import purestorage
@@ -47,18 +53,26 @@ except ImportError:
from os import environ
import platform
-VERSION = 1.4
+VERSION = 1.5
USER_AGENT_BASE = "Ansible"
def get_system(module):
"""Return System Object or Fail"""
- user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
- "base": USER_AGENT_BASE,
- "class": __name__,
- "version": VERSION,
- "platform": platform.platform(),
- }
+ if HAS_DISTRO:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": distro.name(pretty=True),
+ }
+ else:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
array_name = module.params["fa_url"]
api = module.params["api_token"]
if HAS_PURESTORAGE:
@@ -91,12 +105,20 @@ def get_system(module):
def get_array(module):
"""Return System Object or Fail"""
- user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
- "base": USER_AGENT_BASE,
- "class": __name__,
- "version": VERSION,
- "platform": platform.platform(),
- }
+ if HAS_DISTRO:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": distro.name(pretty=True),
+ }
+ else:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
array_name = module.params["fa_url"]
api = module.params["api_token"]
if HAS_PYPURECLIENT:
diff --git a/ansible_collections/purestorage/flasharray/plugins/module_utils/version.py b/ansible_collections/purestorage/flasharray/plugins/module_utils/version.py
new file mode 100644
index 000000000..d91cf3ab4
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/module_utils/version.py
@@ -0,0 +1,344 @@
+# Vendored copy of distutils/version.py from CPython 3.9.5
+#
+# Implements multiple version numbering conventions for the
+# Python Module Distribution Utilities.
+#
+# PSF License (see PSF-license.txt or https://opensource.org/licenses/Python-2.0)
+#
+
+"""Provides classes to represent module version numbers (one class for
+each style of version numbering). There are currently two such classes
+implemented: StrictVersion and LooseVersion.
+
+Every version number class implements the following interface:
+ * the 'parse' method takes a string and parses it to some internal
+ representation; if the string is an invalid version number,
+ 'parse' raises a ValueError exception
+ * the class constructor takes an optional string argument which,
+ if supplied, is passed to 'parse'
+ * __str__ reconstructs the string that was passed to 'parse' (or
+ an equivalent string -- ie. one that will generate an equivalent
+ version number instance)
+ * __repr__ generates Python code to recreate the version number instance
+ * _cmp compares the current instance with either another instance
+ of the same class or a string (which will be parsed to an instance
+ of the same class, thus must follow the same rules)
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import re
+
+try:
+ RE_FLAGS = re.VERBOSE | re.ASCII
+except AttributeError:
+ RE_FLAGS = re.VERBOSE
+
+
+class Version:
+ """Abstract base class for version numbering classes. Just provides
+ constructor (__init__) and reproducer (__repr__), because those
+ seem to be the same for all version numbering classes; and route
+ rich comparisons to _cmp.
+ """
+
+ def __init__(self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def __repr__(self):
+ return "%s ('%s')" % (self.__class__.__name__, str(self))
+
+ def __eq__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c == 0
+
+ def __lt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c < 0
+
+ def __le__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c <= 0
+
+ def __gt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c > 0
+
+ def __ge__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c >= 0
+
+
+# Interface for version-number classes -- must be implemented
+# by the following classes (the concrete ones -- Version should
+# be treated as an abstract class).
+# __init__ (string) - create and take same action as 'parse'
+# (string parameter is optional)
+# parse (string) - convert a string representation to whatever
+# internal representation is appropriate for
+# this style of version numbering
+# __str__ (self) - convert back to a string; should be very similar
+# (if not identical to) the string supplied to parse
+# __repr__ (self) - generate Python code to recreate
+# the instance
+# _cmp (self, other) - compare two version numbers ('other' may
+# be an unparsed version string, or another
+# instance of your version class)
+
+
+class StrictVersion(Version):
+ """Version numbering for anal retentives and software idealists.
+ Implements the standard interface for version number classes as
+ described above. A version number consists of two or three
+ dot-separated numeric components, with an optional "pre-release" tag
+ on the end. The pre-release tag consists of the letter 'a' or 'b'
+ followed by a number. If the numeric components of two version
+ numbers are equal, then one with a pre-release tag will always
+ be deemed earlier (lesser) than one without.
+
+ The following are valid version numbers (shown in the order that
+ would be obtained by sorting according to the supplied cmp function):
+
+ 0.4 0.4.0 (these two are equivalent)
+ 0.4.1
+ 0.5a1
+ 0.5b3
+ 0.5
+ 0.9.6
+ 1.0
+ 1.0.4a3
+ 1.0.4b1
+ 1.0.4
+
+ The following are examples of invalid version numbers:
+
+ 1
+ 2.7.2.2
+ 1.3.a4
+ 1.3pl1
+ 1.3c4
+
+ The rationale for this version numbering system will be explained
+ in the distutils documentation.
+ """
+
+ version_re = re.compile(r"^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$", RE_FLAGS)
+
+ def parse(self, vstring):
+ match = self.version_re.match(vstring)
+ if not match:
+ raise ValueError("invalid version number '%s'" % vstring)
+
+ (major, minor, patch, prerelease, prerelease_num) = match.group(1, 2, 4, 5, 6)
+
+ if patch:
+ self.version = tuple(map(int, [major, minor, patch]))
+ else:
+ self.version = tuple(map(int, [major, minor])) + (0,)
+
+ if prerelease:
+ self.prerelease = (prerelease[0], int(prerelease_num))
+ else:
+ self.prerelease = None
+
+ def __str__(self):
+ if self.version[2] == 0:
+ vstring = ".".join(map(str, self.version[0:2]))
+ else:
+ vstring = ".".join(map(str, self.version))
+
+ if self.prerelease:
+ vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
+
+ return vstring
+
+ def _cmp(self, other):
+ if isinstance(other, str):
+ other = StrictVersion(other)
+ elif not isinstance(other, StrictVersion):
+ return NotImplemented
+
+ if self.version != other.version:
+ # numeric versions don't match
+ # prerelease stuff doesn't matter
+ if self.version < other.version:
+ return -1
+ else:
+ return 1
+
+ # have to compare prerelease
+ # case 1: neither has prerelease; they're equal
+ # case 2: self has prerelease, other doesn't; other is greater
+ # case 3: self doesn't have prerelease, other does: self is greater
+ # case 4: both have prerelease: must compare them!
+
+ if not self.prerelease and not other.prerelease:
+ return 0
+ elif self.prerelease and not other.prerelease:
+ return -1
+ elif not self.prerelease and other.prerelease:
+ return 1
+ elif self.prerelease and other.prerelease:
+ if self.prerelease == other.prerelease:
+ return 0
+ elif self.prerelease < other.prerelease:
+ return -1
+ else:
+ return 1
+ else:
+ raise AssertionError("never get here")
+
+
+# end class StrictVersion
+
+# The rules according to Greg Stein:
+# 1) a version number has 1 or more numbers separated by a period or by
+# sequences of letters. If only periods, then these are compared
+# left-to-right to determine an ordering.
+# 2) sequences of letters are part of the tuple for comparison and are
+# compared lexicographically
+# 3) recognize the numeric components may have leading zeroes
+#
+# The LooseVersion class below implements these rules: a version number
+# string is split up into a tuple of integer and string components, and
+# comparison is a simple tuple comparison. This means that version
+# numbers behave in a predictable and obvious way, but a way that might
+# not necessarily be how people *want* version numbers to behave. There
+# wouldn't be a problem if people could stick to purely numeric version
+# numbers: just split on period and compare the numbers as tuples.
+# However, people insist on putting letters into their version numbers;
+# the most common purpose seems to be:
+# - indicating a "pre-release" version
+# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
+# - indicating a post-release patch ('p', 'pl', 'patch')
+# but of course this can't cover all version number schemes, and there's
+# no way to know what a programmer means without asking him.
+#
+# The problem is what to do with letters (and other non-numeric
+# characters) in a version number. The current implementation does the
+# obvious and predictable thing: keep them as strings and compare
+# lexically within a tuple comparison. This has the desired effect if
+# an appended letter sequence implies something "post-release":
+# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
+#
+# However, if letters in a version number imply a pre-release version,
+# the "obvious" thing isn't correct. Eg. you would expect that
+# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
+# implemented here, this just isn't so.
+#
+# Two possible solutions come to mind. The first is to tie the
+# comparison algorithm to a particular set of semantic rules, as has
+# been done in the StrictVersion class above. This works great as long
+# as everyone can go along with bondage and discipline. Hopefully a
+# (large) subset of Python module programmers will agree that the
+# particular flavour of bondage and discipline provided by StrictVersion
+# provides enough benefit to be worth using, and will submit their
+# version numbering scheme to its domination. The free-thinking
+# anarchists in the lot will never give in, though, and something needs
+# to be done to accommodate them.
+#
+# Perhaps a "moderately strict" version class could be implemented that
+# lets almost anything slide (syntactically), and makes some heuristic
+# assumptions about non-digits in version number strings. This could
+# sink into special-case-hell, though; if I was as talented and
+# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
+# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
+# just as happy dealing with things like "2g6" and "1.13++". I don't
+# think I'm smart enough to do it right though.
+#
+# In any case, I've coded the test suite for this module (see
+# ../test/test_version.py) specifically to fail on things like comparing
+# "1.2a2" and "1.2". That's not because the *code* is doing anything
+# wrong, it's because the simple, obvious design doesn't match my
+# complicated, hairy expectations for real-world version numbers. It
+# would be a snap to fix the test suite to say, "Yep, LooseVersion does
+# the Right Thing" (ie. the code matches the conception). But I'd rather
+# have a conception that matches common notions about version numbers.
+
+
+class LooseVersion(Version):
+ """Version numbering for anarchists and software realists.
+ Implements the standard interface for version number classes as
+ described above. A version number consists of a series of numbers,
+ separated by either periods or strings of letters. When comparing
+ version numbers, the numeric components will be compared
+ numerically, and the alphabetic components lexically. The following
+ are all valid version numbers, in no particular order:
+
+ 1.5.1
+ 1.5.2b2
+ 161
+ 3.10a
+ 8.02
+ 3.4j
+ 1996.07.12
+ 3.2.pl0
+ 3.1.1.6
+ 2g6
+ 11g
+ 0.960923
+ 2.2beta29
+ 1.13++
+ 5.5.kw
+ 2.0b1pl0
+
+ In fact, there is no such thing as an invalid version number under
+ this scheme; the rules for comparison are simple and predictable,
+ but may not always give the results you want (for some definition
+ of "want").
+ """
+
+ component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE)
+
+ def __init__(self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def parse(self, vstring):
+ # I've given up on thinking I can reconstruct the version string
+ # from the parsed tuple -- so I just store the string here for
+ # use by __str__
+ self.vstring = vstring
+ components = [x for x in self.component_re.split(vstring) if x and x != "."]
+ for i, obj in enumerate(components):
+ try:
+ components[i] = int(obj)
+ except ValueError:
+ pass
+
+ self.version = components
+
+ def __str__(self):
+ return self.vstring
+
+ def __repr__(self):
+ return "LooseVersion ('%s')" % str(self)
+
+ def _cmp(self, other):
+ if isinstance(other, str):
+ other = LooseVersion(other)
+ elif not isinstance(other, LooseVersion):
+ return NotImplemented
+
+ if self.version == other.version:
+ return 0
+ if self.version < other.version:
+ return -1
+ if self.version > other.version:
+ return 1
+
+
+# end class LooseVersion
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py
index d9eee96ac..35530bdf8 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ad.py
@@ -152,8 +152,10 @@ except ImportError:
HAS_PURESTORAGE = False
from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
@@ -205,7 +207,7 @@ def update_account(module, array):
def create_account(module, array, api_version):
"""Create Active Directory Account"""
changed = True
- if MIN_JOIN_OU_API_VERSION not in api_version:
+ if LooseVersion(MIN_JOIN_OU_API_VERSION) > LooseVersion(api_version):
ad_config = ActiveDirectoryPost(
computer_name=module.params["computer"],
directory_servers=module.params["directory_servers"],
@@ -214,7 +216,7 @@ def create_account(module, array, api_version):
user=module.params["username"],
password=module.params["password"],
)
- elif MIN_TLS_API_VERSION in api_version:
+ elif LooseVersion(MIN_TLS_API_VERSION) <= LooseVersion(api_version):
ad_config = ActiveDirectoryPost(
computer_name=module.params["computer"],
directory_servers=module.params["directory_servers"],
@@ -284,15 +286,14 @@ def main():
if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ array = get_array(module)
+ api_version = array.get_rest_version()
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
)
state = module.params["state"]
- array = get_array(module)
exists = bool(
array.get_active_directory(names=[module.params["name"]]).status_code == 200
)
@@ -300,18 +301,22 @@ def main():
if not module.params["computer"]:
module.params["computer"] = module.params["name"].replace("_", "-")
if module.params["kerberos_servers"]:
- if SERVER_API_VERSION in api_version:
+ if LooseVersion(SERVER_API_VERSION) <= LooseVersion(api_version):
module.params["kerberos_servers"] = module.params["kerberos_servers"][0:3]
else:
module.params["kerberos_servers"] = module.params["kerberos_servers"][0:1]
if module.params["directory_servers"]:
- if SERVER_API_VERSION in api_version:
+ if LooseVersion(SERVER_API_VERSION) <= LooseVersion(api_version):
module.params["directory_servers"] = module.params["directory_servers"][0:3]
else:
module.params["directory_servers"] = module.params["directory_servers"][0:1]
if not exists and state == "present":
create_account(module, array, api_version)
- elif exists and state == "present" and MIN_TLS_API_VERSION in api_version:
+ elif (
+ exists
+ and state == "present"
+ and LooseVersion(MIN_TLS_API_VERSION) <= LooseVersion(api_version)
+ ):
update_account(module, array)
elif exists and state == "absent":
delete_account(module, array)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py
index becb86893..21eac3896 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_admin.py
@@ -70,10 +70,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_API_VERSION = "2.2"
@@ -95,11 +97,10 @@ def main():
module.fail_json(msg="py-pure-client sdk is required for this module")
if module.params["lockout"] and not 1 <= module.params["lockout"] <= 7776000:
module.fail_json(msg="Lockout must be between 1 and 7776000 seconds")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
changed = False
- if MIN_API_VERSION in api_version:
- array = get_array(module)
+ if LooseVersion(MIN_API_VERSION) <= LooseVersion(api_version):
current_settings = list(array.get_admins_settings().items)[0]
if (
module.params["sso"]
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py
index 12970dddb..9334c6733 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_apiclient.py
@@ -109,10 +109,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.1"
@@ -219,15 +221,14 @@ def main():
if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
)
- array = get_array(module)
state = module.params["state"]
try:
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py
index cf5202c6f..550ae1401 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_arrayname.py
@@ -52,10 +52,17 @@ EXAMPLES = r"""
RETURN = r"""
"""
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import Arrays
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
+ get_array,
purefa_argument_spec,
)
@@ -64,11 +71,12 @@ def update_name(module, array):
"""Change aray name"""
changed = True
if not module.check_mode:
- try:
- array.set(name=module.params["name"])
- except Exception:
+ res = array.patch_arrays(array=Arrays(name=module.params["name"]))
+ if res.status_code != 200:
module.fail_json(
- msg="Failed to change array name to {0}".format(module.params["name"])
+ msg="Failed to change array name to {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
)
module.exit_json(changed=changed)
@@ -85,7 +93,10 @@ def main():
module = AnsibleModule(argument_spec, supports_check_mode=True)
- array = get_system(module)
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_array(module)
pattern = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,54}[a-zA-Z0-9])?$")
if not pattern.match(module.params["name"]):
module.fail_json(
@@ -93,7 +104,7 @@ def main():
module.params["name"]
)
)
- if module.params["name"] != array.get()["array_name"]:
+ if module.params["name"] != list(array.get_arrays().items)[0].name:
update_name(module, array)
module.exit_json(changed=False)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py
index bd7a367a5..c3c2346b3 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_banner.py
@@ -61,10 +61,16 @@ RETURN = r"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
+ get_array,
purefa_argument_spec,
)
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import Arrays
+except ImportError:
+ HAS_PURESTORAGE = False
+
def set_banner(module, array):
"""Set MOTD banner text"""
@@ -72,9 +78,8 @@ def set_banner(module, array):
if not module.params["banner"]:
module.fail_json(msg="Invalid MOTD banner given")
if not module.check_mode:
- try:
- array.set(banner=module.params["banner"])
- except Exception:
+ res = array.patch_arrays(array=Arrays(banner=module.params["banner"]))
+ if res.status_code != 200:
module.fail_json(msg="Failed to set MOTD banner text")
module.exit_json(changed=changed)
@@ -84,9 +89,8 @@ def delete_banner(module, array):
"""Delete MOTD banner text"""
changed = True
if not module.check_mode:
- try:
- array.set(banner="")
- except Exception:
+ res = array.patch_arrays(array=Arrays(banner=""))
+ if res.status_code != 200:
module.fail_json(msg="Failed to delete current MOTD banner text")
module.exit_json(changed=changed)
@@ -106,9 +110,12 @@ def main():
argument_spec, required_if=required_if, supports_check_mode=True
)
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
state = module.params["state"]
- array = get_system(module)
- current_banner = array.get(banner=True)["banner"]
+ array = get_array(module)
+ current_banner = list(array.get_arrays().items)[0].banner
# set banner if empty value or value differs
if state == "present" and (
not current_banner or current_banner != module.params["banner"]
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_cbsexpand.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_cbsexpand.py
new file mode 100644
index 000000000..4221e9013
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_cbsexpand.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2023, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_cbsexpand
+version_added: '1.27.0'
+short_description: Modify the CBS array capacity
+description:
+- Expand the CBS array capacity. Capacity can only be updated to specific values.
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ state:
+ description:
+ - Action to be performed on the CBS array.
+    - I(show) will provide the options that I(capacity), in bytes, can be set to.
+ default: show
+ choices: [ show, expand ]
+ type: str
+ capacity:
+ description:
+ - Requested capacity of CBS array in bytes.
+ type: int
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Show available expansion capacities
+ purestorage.flasharray.purefa_cbsexpand:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Expand CBS to new capacity
+ purestorage.flasharray.purefa_cbsexpand:
+ state: expand
+ capacity: 10995116277760
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ purefa_argument_spec,
+)
+
+
+EXPAND_API_VERSION = "2.29"
+
+
+def _is_cbs(array):
+ """Is the selected array a Cloud Block Store"""
+ model = list(array.get_hardware(filter="type='controller'").items)[0].model
+ is_cbs = bool("CBS" in model)
+ return is_cbs
+
+
+def list_capacity(module, array):
+ """Get avaible expansion points"""
+ steps = list(array.get_arrays_cloud_capacity_supported_steps().items)
+ available = []
+ for step in range(0, len(steps)):
+ available.append(steps[step].supported_capacity)
+    module.exit_json(changed=False, available=available)
+
+
+def update_capacity(module, array):
+ """Expand CBS capacity"""
+ steps = list(array.get_arrays_cloud_capacity_supported_steps().items)
+ available = []
+ for step in range(0, len(steps)):
+ available.append(steps[step].supported_capacity)
+ if module.params["capacity"] not in available:
+ module.fail_json(
+ msg="Selected capacity is not available. "
+ "Run this module with `list` to get available capapcity points."
+ )
+ expanded = array.patch_arrays_cloud_capacity(
+ capacity=flasharray.CloudCapacityStatus(
+ requested_capacity=module.params["capacity"]
+ )
+ )
+    if expanded.status_code != 200:
+ module.fail_json(
+ msg="Expansion request failed. Error: {0}".format(
+ expanded.errors[0].message
+ )
+ )
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type="str", default="show", choices=["show", "expand"]),
+ capacity=dict(type="int"),
+ )
+ )
+
+ required_if = [["state", "expand", ["capacity"]]]
+ module = AnsibleModule(
+ argument_spec, required_if=required_if, supports_check_mode=True
+ )
+
+    if not HAS_PURESTORAGE:
+        module.fail_json(
+            msg="py-pure-client sdk is required for this module"
+        )
+    array = get_array(module)
+ if not _is_cbs(array):
+ module.fail_json(msg="Module only valid on Cloud Block Store array")
+ api_version = array.get_rest_version()
+ if LooseVersion(EXPAND_API_VERSION) > LooseVersion(api_version):
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(EXPAND_API_VERSION)
+ )
+ if module.params["state"] == "show":
+ list_capacity(module, array)
+ else:
+ update_capacity(module, array)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py
index 33ffb60cc..bcc602610 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_certs.py
@@ -151,6 +151,13 @@ EXAMPLES = r"""
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Request CSR with updated fields
+ purestorage.flasharray.purefa_certs:
+ state: sign
+ org_unit: Development
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
- name: Regenerate key for SSL foo
purestorage.flasharray.purefa_certs:
generate: true
@@ -187,81 +194,95 @@ except ImportError:
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.4"
def update_cert(module, array):
"""Update existing SSL Certificate"""
- changed = True
+ changed = False
current_cert = list(array.get_certificates(names=[module.params["name"]]).items)[0]
- try:
- if module.params["common_name"] != current_cert.common_name:
- module.params["common_name"] = current_cert.common_name
- except AttributeError:
- pass
- try:
- if module.params["country"] != current_cert.country:
- module.params["country"] = current_cert.country
- except AttributeError:
- pass
- try:
- if module.params["email"] != current_cert.email:
- module.params["email"] = current_cert.email
- except AttributeError:
- pass
- try:
- if module.params["key_size"] != current_cert.key_size:
- module.params["key_size"] = current_cert.key_size
- except AttributeError:
- pass
- try:
- if module.params["locality"] != current_cert.locality:
- module.params["locality"] = current_cert.locality
- except AttributeError:
- pass
- try:
- if module.params["province"] != current_cert.state:
- module.params["province"] = current_cert.state
- except AttributeError:
- pass
- try:
- if module.params["organization"] != current_cert.organization:
- module.params["organization"] = current_cert.organization
- except AttributeError:
- pass
- try:
- if module.params["org_unit"] != current_cert.organizational_unit:
- module.params["org_unit"] = current_cert.organizational_unit
- except AttributeError:
- pass
- certificate = flasharray.CertificatePost(
- common_name=module.params["common_name"],
- country=module.params["country"],
- email=module.params["email"],
- key_size=module.params["key_size"],
- locality=module.params["locality"],
- organization=module.params["organization"],
- organizational_unit=module.params["org_unit"],
- state=module.params["province"],
- days=module.params["days"],
- )
- if not module.check_mode:
- res = array.patch_certificates(
- names=[module.params["name"]],
- certificate=certificate,
- generate_new_key=module.params["generate"],
+ new_cert = current_cert
+ if module.params["common_name"] and module.params["common_name"] != getattr(
+ current_cert, "common_name", None
+ ):
+ new_cert.common_name = module.params["common_name"]
+ else:
+ new_cert.common_name = getattr(current_cert, "common_name", None)
+ if module.params["country"] and module.params["country"] != getattr(
+ current_cert, "country", None
+ ):
+ new_cert.country = module.params["country"]
+ else:
+        new_cert.country = getattr(current_cert, "country", None)
+ if module.params["email"] and module.params["email"] != getattr(
+ current_cert, "email", None
+ ):
+ new_cert.email = module.params["email"]
+ else:
+ new_cert.email = getattr(current_cert, "email", None)
+ if module.params["key_size"] and module.params["key_size"] != getattr(
+ current_cert, "key_size", None
+ ):
+ new_cert.key_size = module.params["key_size"]
+ else:
+ new_cert.key_size = getattr(current_cert, "key_size", None)
+ if module.params["locality"] and module.params["locality"] != getattr(
+ current_cert, "locality", None
+ ):
+ new_cert.locality = module.params["locality"]
+ else:
+ new_cert.locality = getattr(current_cert, "locality", None)
+ if module.params["province"] and module.params["province"] != getattr(
+ current_cert, "state", None
+ ):
+ new_cert.state = module.params["province"]
+ else:
+ new_cert.state = getattr(current_cert, "state", None)
+ if module.params["organization"] and module.params["organization"] != getattr(
+ current_cert, "organization", None
+ ):
+ new_cert.organization = module.params["organization"]
+ else:
+ new_cert.organization = getattr(current_cert, "organization", None)
+ if module.params["org_unit"] and module.params["org_unit"] != getattr(
+ current_cert, "organizational_unit", None
+ ):
+ new_cert.organizational_unit = module.params["org_unit"]
+ else:
+ new_cert.organizational_unit = getattr(
+ current_cert, "organizational_unit", None
)
- if res.status_code != 200:
- module.fail_json(
- msg="Updating existing SSL certificate {0} failed. Error: {1}".format(
- module.params["name"], res.errors[0].message
- )
+ if new_cert != current_cert:
+ changed = True
+ certificate = flasharray.CertificatePost(
+ common_name=new_cert.common_name,
+ country=getattr(new_cert, "country", None),
+ email=getattr(new_cert, "email", None),
+ key_size=getattr(new_cert, "key_size", None),
+ locality=getattr(new_cert, "locality", None),
+ organization=getattr(new_cert, "organization", None),
+ organizational_unit=getattr(new_cert, "organizational_unit", None),
+ state=getattr(new_cert, "state", None),
+ )
+ if not module.check_mode:
+ res = array.patch_certificates(
+ names=[module.params["name"]],
+ certificate=certificate,
+ generate_new_key=module.params["generate"],
)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Updating existing SSL certificate {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
module.exit_json(changed=changed)
@@ -312,12 +333,11 @@ def delete_cert(module, array):
def import_cert(module, array, reimport=False):
"""Import a CA provided SSL certificate"""
changed = True
- if len(module.params["certificate"]) > 3000:
- module.fail_json(msg="Imported Certificate exceeds 3000 characters")
certificate = flasharray.CertificatePost(
certificate=module.params["certificate"],
intermediate_certificate=module.params["intermeadiate_cert"],
key=module.params["key"],
+ key_size=module.params["key_size"],
passphrase=module.params["passphrase"],
status="imported",
)
@@ -364,50 +384,64 @@ def create_csr(module, array):
changed = True
current_attr = list(array.get_certificates(names=[module.params["name"]]).items)[0]
try:
- if module.params["common_name"] != current_attr.common_name:
- module.params["common_name"] = current_attr.common_name
+ if module.params["common_name"] and module.params["common_name"] != getattr(
+ current_attr, "common_name", None
+ ):
+ current_attr.common_name = module.params["common_name"]
except AttributeError:
pass
try:
- if module.params["country"] != current_attr.country:
- module.params["country"] = current_attr.country
+ if module.params["country"] and module.params["country"] != getattr(
+ current_attr, "country", None
+ ):
+ current_attr.country = module.params["country"]
except AttributeError:
pass
try:
- if module.params["email"] != current_attr.email:
- module.params["email"] = current_attr.email
+ if module.params["email"] and module.params["email"] != getattr(
+ current_attr, "email", None
+ ):
+ current_attr.email = module.params["email"]
except AttributeError:
pass
try:
- if module.params["locality"] != current_attr.locality:
- module.params["locality"] = current_attr.locality
+ if module.params["locality"] and module.params["locality"] != getattr(
+ current_attr, "locality", None
+ ):
+ current_attr.locality = module.params["locality"]
except AttributeError:
pass
try:
- if module.params["province"] != current_attr.state:
- module.params["province"] = current_attr.state
+ if module.params["province"] and module.params["province"] != getattr(
+ current_attr, "state", None
+ ):
+ current_attr.state = module.params["province"]
except AttributeError:
pass
try:
- if module.params["organization"] != current_attr.organization:
- module.params["organization"] = current_attr.organization
+ if module.params["organization"] and module.params["organization"] != getattr(
+ current_attr, "organization", None
+ ):
+ current_attr.organization = module.params["organization"]
except AttributeError:
pass
try:
- if module.params["org_unit"] != current_attr.organization_unit:
- module.params["org_unit"] = current_attr.organization_unit
+ if module.params["org_unit"] and module.params["org_unit"] != getattr(
+ current_attr, "organizational_unit", None
+ ):
+ current_attr.organizational_unit = module.params["org_unit"]
except AttributeError:
pass
if not module.check_mode:
certificate = flasharray.CertificateSigningRequestPost(
- certificate={"name": "management"},
- common_name=module.params["common_name"],
- country=module.params["country"],
- email=module.params["email"],
- locality=module.params["locality"],
- state=module.params["province"],
- organization=module.params["organization"],
- organization_unit=module.params["org_unit"],
+ certificate={"name": module.params["name"]},
+ common_name=getattr(current_attr, "common_name", None),
+ country=getattr(current_attr, "country", None),
+ email=getattr(current_attr, "email", None),
+ locality=getattr(current_attr, "locality", None),
+ state=getattr(current_attr, "state", None),
+ organization=getattr(current_attr, "organization", None),
+ organizational_unit=getattr(current_attr, "organizational_unit", None),
)
csr = list(
array.post_certificates_certificate_signing_requests(
@@ -452,6 +486,7 @@ def main():
required_if = [
["state", "import", ["certificate"]],
["state", "export", ["export_file"]],
+ ["state", "sign", ["export_file"]],
]
module = AnsibleModule(
@@ -468,16 +503,15 @@ def main():
module.fail_json(msg="pycountry sdk is required for this module")
email_pattern = r"^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$"
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
)
- array = get_array(module)
if module.params["email"]:
if not re.search(email_pattern, module.params["email"]):
module.fail_json(
@@ -493,7 +527,7 @@ def main():
)
)
state = module.params["state"]
- if state in ["present", "sign"]:
+ if state in ["present"]:
if not module.params["common_name"]:
module.params["common_name"] = list(array.get_arrays().items)[0].name
module.params["common_name"] = module.params["common_name"][:64]
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py
index 3148ea482..d4d65c20a 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_connect.py
@@ -56,13 +56,20 @@ extends_documentation_fragment:
"""
EXAMPLES = r"""
-- name: Create an async connection to remote array
+- name: Create an IPv4 async connection to remote array
purestorage.flasharray.purefa_connect:
target_url: 10.10.10.20
target_api: 9c0b56bc-f941-f7a6-9f85-dcc3e9a8f7d6
connection: async
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
+- name: Create an IPv6 async connection to remote array
+ purestorage.flasharray.purefa_connect:
+ target_url: "[2001:db8:abcd:12::10]"
+ target_api: 9c0b56bc-f941-f7a6-9f85-dcc3e9a8f7d6
+ connection: async
+ fa_url: "[2001:db8:abcd:12::13]"
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Delete connection to remote array
purestorage.flasharray.purefa_connect:
state: absent
@@ -87,14 +94,19 @@ try:
except ImportError:
HAS_PYPURECLIENT = False
-import platform
+HAS_DISTRO = True
+try:
+ import distro
+except ImportError:
+ HAS_DISTRO = False
+
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
get_array,
get_system,
purefa_argument_spec,
)
-
+import platform
P53_API_VERSION = "1.17"
FC_REPL_VERSION = "2.4"
@@ -107,14 +119,14 @@ def _check_connected(module, array):
if P53_API_VERSION in api_version:
if (
connected_arrays[target]["management_address"]
- == module.params["target_url"]
+ == module.params["target_url"].strip("[]")
and "connected" in connected_arrays[target]["status"]
):
return connected_arrays[target]
else:
if (
connected_arrays[target]["management_address"]
- == module.params["target_url"]
+ == module.params["target_url"].strip("[]")
and connected_arrays[target]["connected"]
):
return connected_arrays[target]
@@ -145,12 +157,20 @@ def create_connection(module, array):
"""Create connection between arrays"""
changed = True
remote_array = module.params["target_url"]
- user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
- "base": "Ansible",
- "class": __name__,
- "version": 1.2,
- "platform": platform.platform(),
- }
+ if HAS_DISTRO:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": "Ansible",
+ "class": __name__,
+ "version": 1.5,
+ "platform": distro.name(pretty=True),
+ }
+ else:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": "Ansible",
+ "class": __name__,
+ "version": 1.5,
+ "platform": platform.platform(),
+ }
try:
remote_system = FlashArray(
module.params["target_url"],
@@ -171,7 +191,7 @@ def create_connection(module, array):
)
array_connection = flasharray.ArrayConnectionPost(
type="sync-replication",
- management_address=module.params["target_url"],
+ management_address=module.params["target_url"].strip("[]"),
replication_transport="fc",
connection_key=connection_key,
)
@@ -187,7 +207,7 @@ def create_connection(module, array):
else:
if not module.check_mode:
array.connect_array(
- module.params["target_url"],
+ module.params["target_url"].strip("[]"),
connection_key,
[module.params["connection"]],
)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py
index 5038de423..cdb953c0c 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_default_protection.py
@@ -99,11 +99,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
-
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
DEFAULT_API_VERSION = "2.16"
@@ -282,14 +283,13 @@ def main():
module.fail_json(
msg="py-pure-client sdk is required to support 'count' parameter"
)
- arrayv5 = get_system(module)
module.params["name"] = sorted(module.params["name"])
- api_version = arrayv5._list_available_rest_versions()
- if DEFAULT_API_VERSION not in api_version:
+ array = get_array(module)
+ api_version = array.get_rest_version()
+ if LooseVersion(DEFAULT_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="Default Protection is not supported. Purity//FA 6.3.4, or higher, is required."
)
- array = get_array(module)
if module.params["scope"] == "pod":
if not _get_pod(module, array):
module.fail_json(
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py
index 125b84172..2cd769771 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_directory.py
@@ -90,10 +90,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.2"
@@ -190,14 +192,13 @@ def main():
if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ array = get_array(module)
+ api_version = array.get_rest_version()
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
)
- array = get_array(module)
state = module.params["state"]
try:
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py
index 4c090bde8..1c1c11a18 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dirsnap.py
@@ -173,10 +173,12 @@ except ImportError:
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.2"
MIN_RENAME_API_VERSION = "2.10"
@@ -413,19 +415,20 @@ def main():
)
)
- array = get_system(module)
- api_version = array._list_available_rest_versions()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ array = get_array(module)
+ api_version = array.get_rest_version()
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
)
- if module.params["rename"] and MIN_RENAME_API_VERSION not in api_version:
+ if module.params["rename"] and LooseVersion(MIN_RENAME_API_VERSION) > LooseVersion(
+ api_version
+ ):
module.fail_json(
msg="Directory snapshot rename not supported. "
"Minimum Purity//FA version required: 6.2.1"
)
- array = get_array(module)
state = module.params["state"]
snapshot_root = module.params["filesystem"] + ":" + module.params["name"]
if bool(
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py
index 746a4ed52..7085a19f6 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_dns.py
@@ -207,12 +207,9 @@ def delete_multi_dns(module, array):
"""Delete a DNS configuration"""
changed = True
if module.params["name"] == "management":
- res = array.update_dns(
+ res = array.patch_dns(
names=[module.params["name"]],
- dns=flasharray.DnsPatch(
- domain=module.params["domain"],
- nameservers=module.params["nameservers"],
- ),
+ dns=flasharray.DnsPatch(domain="", nameservers=[]),
)
if res.status_code != 200:
module.fail_json(
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py
index 195aa2155..ce96b1afb 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ds.py
@@ -139,6 +139,23 @@ options:
I(shadowAccount) for OpenLDAP servers dependent on the group type
of the server, or person for all other directory servers.
- Supported from Purity 6.0 or higher.
+ check_peer:
+ type: bool
+ description:
+ - Whether or not server authenticity is enforced when a certificate
+ is provided
+ default: false
+ version_added: 1.24.0
+ certificate:
+ type: str
+ description:
+ - The certificate of the Certificate Authority (CA) that signed the
+ certificates of the directory servers, which is used to validate the
+ authenticity of the configured servers
+      - A valid signed certificate in PEM format (Base64 encoded)
+ - Includes the "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----" lines
+ - Does not exceed 3000 characters in length
+ version_added: 1.24.0
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
@@ -209,6 +226,15 @@ EXAMPLES = r"""
bind_password: password
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Upload CA certificate for management DNS and check peer
+ purestorage.flasharray.purefa_ds:
+ enable: true
+ dstype: management
+ certificate: "{{lookup('file', 'ca_cert.pem') }}"
+ check_peer: True
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
"""
RETURN = r"""
@@ -409,33 +435,32 @@ def update_ds_v6(module, array):
changed = False
ds_change = False
password_required = False
- dirserv = list(
- array.get_directory_services(
- filter="name='" + module.params["dstype"] + "'"
- ).items
- )[0]
- current_ds = dirserv
+ current_ds = []
+ dirservlist = list(array.get_directory_services().items)
+ for dirs in range(0, len(dirservlist)):
+ if dirservlist[dirs].name == module.params["dstype"]:
+ current_ds = dirservlist[dirs]
if module.params["uri"] and current_ds.uris is None:
password_required = True
- if current_ds.uris != module.params["uri"]:
+ if module.params["uri"] and current_ds.uris != module.params["uri"]:
uris = module.params["uri"]
ds_change = True
else:
uris = current_ds.uris
- try:
- base_dn = current_ds.base_dn
- except AttributeError:
- base_dn = ""
- try:
- bind_user = current_ds.bind_user
- except AttributeError:
- bind_user = ""
- if module.params["base_dn"] != "" and module.params["base_dn"] != base_dn:
+
+ base_dn = getattr(current_ds, "base_dn", "")
+ bind_user = getattr(current_ds, "bind_user", "")
+ cert = getattr(current_ds, "ca_certificate", None)
+ if module.params["base_dn"] and module.params["base_dn"] != base_dn:
base_dn = module.params["base_dn"]
ds_change = True
- if module.params["bind_user"] != "":
- bind_user = module.params["bind_user"]
+ if module.params["enable"] != current_ds.enabled:
+ ds_change = True
+ if getattr(current_ds, "bind_password", None) is None:
+ password_required = True
+ if module.params["bind_user"] is not None:
if module.params["bind_user"] != bind_user:
+ bind_user = module.params["bind_user"]
password_required = True
ds_change = True
elif module.params["force_bind_password"]:
@@ -444,19 +469,27 @@ def update_ds_v6(module, array):
if module.params["bind_password"] is not None and password_required:
bind_password = module.params["bind_password"]
ds_change = True
- if module.params["enable"] != current_ds.enabled:
- ds_change = True
if password_required and not module.params["bind_password"]:
module.fail_json(msg="'bind_password' must be provided for this task")
if module.params["dstype"] == "management":
- try:
- user_login = current_ds.management.user_login_attribute
- except AttributeError:
- user_login = ""
- try:
- user_object = current_ds.management.user_object_class
- except AttributeError:
- user_object = ""
+ if module.params["certificate"] is not None:
+ if cert is None and module.params["certificate"] != "":
+ cert = module.params["certificate"]
+ ds_change = True
+ elif cert is None and module.params["certificate"] == "":
+ pass
+ elif module.params["certificate"] != cert:
+ cert = module.params["certificate"]
+ ds_change = True
+ if module.params["check_peer"] and not cert:
+ module.warn(
+ "Cannot check_peer without a CA certificate. Disabling check_peer"
+ )
+ module.params["check_peer"] = False
+ if module.params["check_peer"] != current_ds.check_peer:
+ ds_change = True
+ user_login = getattr(current_ds.management, "user_login_attribute", "")
+ user_object = getattr(current_ds.management, "user_object_class", "")
if (
module.params["user_object"] is not None
and user_object != module.params["user_object"]
@@ -481,6 +514,8 @@ def update_ds_v6(module, array):
enabled=module.params["enable"],
services=module.params["dstype"],
management=management,
+ check_peer=module.params["check_peer"],
+ ca_certificate=cert,
)
else:
directory_service = flasharray.DirectoryService(
@@ -490,6 +525,8 @@ def update_ds_v6(module, array):
enabled=module.params["enable"],
services=module.params["dstype"],
management=management,
+ check_peer=module.params["check_peer"],
+ ca_certificate=cert,
)
else:
if password_required:
@@ -544,6 +581,8 @@ def main():
dstype=dict(
type="str", default="management", choices=["management", "data"]
),
+ check_peer=dict(type="bool", default=False),
+ certificate=dict(type="str"),
)
)
@@ -571,15 +610,17 @@ def main():
state = module.params["state"]
ds_exists = False
if FAFILES_API_VERSION in api_version:
- dirserv = list(
- arrayv6.get_directory_services(
- filter="name='" + module.params["dstype"] + "'"
- ).items
- )[0]
- if state == "absent" and dirserv.uris != []:
- delete_ds_v6(module, arrayv6)
- else:
- update_ds_v6(module, arrayv6)
+ dirserv = []
+ dirservlist = list(arrayv6.get_directory_services().items)
+ for dirs in range(0, len(dirservlist)):
+ if dirservlist[dirs].name == module.params["dstype"]:
+ dirserv = dirservlist[dirs]
+ if dirserv:
+ if state == "absent":
+ if dirserv.uris != []:
+ delete_ds_v6(module, arrayv6)
+ else:
+ update_ds_v6(module, arrayv6)
else:
dirserv = array.get_directory_service()
ds_enabled = dirserv["enabled"]
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py
index ea7bd48bc..52cef0bae 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eradication.py
@@ -31,6 +31,24 @@ options:
- Allowed values are integers from 1 to 30. Default is 1
default: 1
type: int
+ disabled_delay:
+ description:
+ - Configures the eradication delay
+ for destroyed objects that I(are) protected by SafeMode (objects for which
+ eradication is disabled)
+ - Allowed values are integers from 1 to 30. Default is 1
+ default: 1
+ type: int
+ version_added: "1.22.0"
+ enabled_delay:
+ description:
+ - Configures the eradication delay
+ for destroyed objects that I(are not) protected by SafeMode (objects for which
+      eradication is enabled)
+ - Allowed values are integers from 1 to 30. Default is 1
+ default: 1
+ type: int
+ version_added: "1.22.0"
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
@@ -60,39 +78,69 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
SEC_PER_DAY = 86400000
ERADICATION_API_VERSION = "2.6"
+DELAY_API_VERSION = "2.26"
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(
dict(
- timer=dict(type="int", default="1"),
+ timer=dict(type="int", default=1),
+ disabled_delay=dict(type="int", default=1),
+ enabled_delay=dict(type="int", default=1),
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not 30 >= module.params["timer"] >= 1:
module.fail_json(msg="Eradication Timer must be between 1 and 30 days.")
+ if not 30 >= module.params["disabled_delay"] >= 1:
+ module.fail_json(msg="disabled_delay must be between 1 and 30 days.")
+ if not 30 >= module.params["enabled_delay"] >= 1:
+ module.fail_json(msg="enabled_delay must be between 1 and 30 days.")
if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
changed = False
- if ERADICATION_API_VERSION in api_version:
+ current_disabled = None
+ current_enabled = None
+ if LooseVersion(ERADICATION_API_VERSION) <= LooseVersion(api_version):
array = get_array(module)
- current_timer = (
- list(array.get_arrays().items)[0].eradication_config.eradication_delay
- / SEC_PER_DAY
+ base_eradication_timer = getattr(
+ list(array.get_arrays().items)[0].eradication_config,
+ "eradication_delay",
+ None,
)
- if module.params["timer"] != current_timer:
+ if base_eradication_timer:
+ current_eradication_timer = base_eradication_timer / SEC_PER_DAY
+ if (
+ LooseVersion(DELAY_API_VERSION) <= LooseVersion(api_version)
+ and not base_eradication_timer
+ ):
+ current_disabled = (
+ list(array.get_arrays().items)[0].eradication_config.disabled_delay
+ / SEC_PER_DAY
+ )
+ current_enabled = (
+ list(array.get_arrays().items)[0].eradication_config.enabled_delay
+ / SEC_PER_DAY
+ )
+
+ if (
+ base_eradication_timer
+ and module.params["timer"] != current_eradication_timer
+ ):
changed = True
if not module.check_mode:
new_timer = SEC_PER_DAY * module.params["timer"]
@@ -106,6 +154,26 @@ def main():
res.errors[0].message
)
)
+ if current_disabled and (
+ module.params["enabled_delay"] != current_enabled
+ or module.params["disabled_delay"] != current_disabled
+ ):
+ changed = True
+ if not module.check_mode:
+ new_disabled = SEC_PER_DAY * module.params["disabled_delay"]
+ new_enabled = SEC_PER_DAY * module.params["enabled_delay"]
+ eradication_config = EradicationConfig(
+ enabled_delay=new_enabled, disabled_delay=new_disabled
+ )
+ res = array.patch_arrays(
+ array=Arrays(eradication_config=eradication_config)
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change Eradication Timers. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
else:
module.fail_json(
msg="Purity version does not support changing Eradication Timer"
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py
index 8d4d9536c..c810708b1 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_eula.py
@@ -29,19 +29,16 @@ options:
- Full legal name of the entity.
- The value must be between 1 and 64 characters in length.
type: str
- required: true
name:
description:
- Full legal name of the individual at the company who has the authority to accept the terms of the agreement.
- The value must be between 1 and 64 characters in length.
type: str
- required: true
title:
description:
- Individual's job title at the company.
- The value must be between 1 and 64 characters in length.
type: str
- required: true
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
@@ -61,36 +58,48 @@ RETURN = r"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
+ get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import Eula, EulaSignature
+except ImportError:
+ HAS_PURESTORAGE = False
-EULA_API_VERSION = "1.17"
+EULA_V2 = "2.30"
def set_eula(module, array):
"""Sign EULA"""
changed = False
try:
- current_eula = array.get_eula()
+ current_eula = list(array.get_arrays_eula().items)[0]
except Exception:
module.fail_json(msg="Failed to get current EULA")
- if (
- current_eula["acceptance"]["company"] != module.params["company"]
- or current_eula["acceptance"]["title"] != module.params["title"]
- or current_eula["acceptance"]["name"] != module.params["name"]
- ):
- try:
- changed = True
- if not module.check_mode:
- array.set_eula(
- company=module.params["company"],
- title=module.params["title"],
- name=module.params["name"],
+ if not current_eula.signature.accepted:
+ changed = True
+ if not module.check_mode:
+ res = array.patch_arrays_eula(
+ eula=Eula(
+ signature=EulaSignature(
+ company=module.params["company"],
+ title=module.params["title"],
+ name=module.params["name"],
+ )
+ )
+ )
+ if res.status_code != 200:
+ module.fail_json(
+                msg="Signing EULA failed. Error: {0}".format(res.errors[0].message)
)
- except Exception:
- module.fail_json(msg="Signing EULA failed")
+ else:
+ module.warn("EULA already signed")
module.exit_json(changed=changed)
@@ -98,18 +107,27 @@ def main():
argument_spec = purefa_argument_spec()
argument_spec.update(
dict(
- company=dict(type="str", required=True),
- name=dict(type="str", required=True),
- title=dict(type="str", required=True),
+ company=dict(type="str"),
+ name=dict(type="str"),
+ title=dict(type="str"),
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_array(module)
+ api_version = array.get_rest_version()
+ if LooseVersion(EULA_V2) > LooseVersion(api_version):
+ if not (
+ module.params["company"]
+ and module.params["title"]
+ and module.params["name"]
+ ):
+ module.fail_json(msg="missing required arguments: company, name, title")
+ set_eula(module, array)
- array = get_system(module)
- api_version = array._list_available_rest_versions()
- if EULA_API_VERSION in api_version:
- set_eula(module, array)
module.exit_json(changed=False)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py
index 5188dbd96..ba8b29041 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_export.py
@@ -91,10 +91,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.3"
@@ -224,14 +226,13 @@ def main():
if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ array = get_array(module)
+ api_version = array.get_rest_version()
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
)
- array = get_array(module)
state = module.params["state"]
exists = bool(
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_file.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_file.py
new file mode 100644
index 000000000..70371602c
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_file.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_file
+version_added: '1.22.0'
+short_description: Manage FlashArray File Copies
+description:
+- Copy FlashArray File from one filesystem to another
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ source_file:
+ description:
+ - Name of the file to copy
+ - Include full path from the perspective of the source managed directory
+ type: str
+ required: true
+ source_dir:
+ description:
+ - Name of the source managed directory containing the source file to be copied
+ type: str
+ required: true
+ target_file:
+ description:
+ - Name of the file to copy to
+ - Include full path from the perspective of the target managed directory
+ - If not provided the file will be copied to the relative path specified by I(name)
+ type: str
+ target_dir:
+ description:
+ - Name of the target managed directory containing the source file to be copied
+ - If not provided will use managed directory specified by I(source_dir)
+ type: str
+ overwrite:
+ description:
+ - Define whether to overwrite an existing target file
+ type: bool
+ default: false
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Copy a file from dir foo to dir bar
+ purestorage.flasharray.purefa_file:
+ source_file: "/directory1/file1"
+ source_dir: "fs1:root"
+ target_file: "/diff_dir/file1"
+ target_dir: "fs1:root"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Copy a file in a directory to the same directory with a different name
+ purestorage.flasharray.purefa_file:
+ source_file: "/directory1/file1"
+ source_dir: "fs1:root"
+ target_file: "/directory_1/file2"
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Copy a file in a directory to an existing file with overwrite
+ purestorage.flasharray.purefa_file:
+ source_file: "/directory1/file1"
+ source_dir: "fs1:root"
+ target_file: "/diff_dir/file1"
+ target_dir: "fs2:root"
+ overwrite: true
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ purefa_argument_spec,
+)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
+
+MIN_REQUIRED_API_VERSION = "2.26"
+
+
+def _check_dirs(module, array):
+ if array.get_directories(names=[module.params["source_dir"]]).status_code != 200:
+ module.fail_json(
+ msg="Source directory {0} does not exist".format(
+ module.params["source_dir"]
+ )
+ )
+ if array.get_directories(names=[module.params["target_dir"]]).status_code != 200:
+ module.fail_json(
+ msg="Target directory {0} does not exist".format(
+ module.params["target_dir"]
+ )
+ )
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ overwrite=dict(type="bool", default=False),
+ source_file=dict(type="str", required=True),
+ source_dir=dict(type="str", required=True),
+ target_file=dict(type="str"),
+ target_dir=dict(type="str"),
+ )
+ )
+
+ required_one_of = [["target_file", "target_dir"]]
+ module = AnsibleModule(
+ argument_spec, required_one_of=required_one_of, supports_check_mode=True
+ )
+
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_array(module)
+ api_version = array.get_rest_version()
+
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
+ module.fail_json(
+ msg="FlashArray REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+
+ if not module.params["target_file"]:
+ module.params["target_file"] = module.params["source_file"]
+ if not module.params["target_dir"]:
+ module.params["target_dir"] = module.params["source_dir"]
+ if ":" not in module.params["target_dir"]:
+        module.fail_json(msg="Target Directory is not formatted correctly")
+ if ":" not in module.params["source_dir"]:
+        module.fail_json(msg="Source Directory is not formatted correctly")
+ _check_dirs(module, array)
+ changed = True
+ if not module.check_mode:
+ res = array.post_files(
+ source_file=flasharray.FilePost(
+ source=flasharray.ReferenceWithType(
+ name=module.params["source_dir"], resource_type="directories"
+ ),
+ source_path=module.params["source_file"],
+ ),
+ overwrite=module.params["overwrite"],
+ paths=[module.params["target_file"]],
+ directory_names=[module.params["target_dir"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to copy file. Error: {0}".format(res.errors[0].message)
+ )
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py
index 05fbcb29b..1936bf0ff 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_fs.py
@@ -93,10 +93,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.2"
REPL_SUPPORT_API = "2.13"
@@ -295,19 +297,21 @@ def main():
if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
)
- if REPL_SUPPORT_API not in api_version and "::" in module.params["name"]:
+ if (
+ LooseVersion(REPL_SUPPORT_API) > LooseVersion(api_version)
+ and "::" in module.params["name"]
+ ):
module.fail_json(
msg="Filesystem Replication is only supported in Purity//FA 6.3.0 or higher"
)
- array = get_array(module)
state = module.params["state"]
try:
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hardware.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hardware.py
new file mode 100644
index 000000000..ffe9718fe
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_hardware.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefa_hardware
+version_added: '1.24.0'
+short_description: Manage FlashArray Hardware Identification
+description:
+- Enable or disable FlashArray visual identification lights
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of hardware component
+ type: str
+ required: true
+ enabled:
+ description:
+ - State of the component identification LED
+ type: bool
+extends_documentation_fragment:
+- purestorage.flasharray.purestorage.fa
+"""
+
+EXAMPLES = r"""
+- name: Enable identification LED
+ purestorage.flasharray.purefa_hardware:
+ name: "CH1.FB1"
+ enabled: True
+ fa_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Disable identification LED
+ purestorage.flasharray.purefa_hardware:
+ name: "CH1.FB1"
+ enabled: False
+ fa_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flasharray
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
+ get_array,
+ purefa_argument_spec,
+)
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(
+ dict(
+ enabled=dict(type="bool"),
+ name=dict(type="str", required=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ array = get_array(module)
+ changed = False
+ res = array.get_hardware(names=[module.params["name"]])
+ if res.status_code == 200:
+ id_state = getattr(list(res.items)[0], "identify_enabled", None)
+ if id_state is not None and id_state != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ res = array.patch_hardware(
+ names=[module.params["name"]],
+ hardware=flasharray.Hardware(
+ identify_enabled=module.params["enabled"]
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set identification LED for {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
index 9054d8f30..c396975a2 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_host.py
@@ -479,7 +479,7 @@ def _set_chap_security(module, array):
host_password=module.params["host_password"],
)
except Exception:
- module.params(msg="Failed to set CHAP host username and password")
+ module.fail_json(msg="Failed to set CHAP host username and password")
if module.params["target_user"]:
if not pattern.match(module.params["target_password"]):
module.fail_json(
@@ -492,7 +492,7 @@ def _set_chap_security(module, array):
target_password=module.params["target_password"],
)
except Exception:
- module.params(msg="Failed to set CHAP target username and password")
+ module.fail_json(msg="Failed to set CHAP target username and password")
def _update_chap_security(module, array, answer=False):
@@ -507,7 +507,7 @@ def _update_chap_security(module, array, answer=False):
try:
array.set_host(module.params["name"], host_user="")
except Exception:
- module.params(
+ module.fail_json(
msg="Failed to clear CHAP host username and password"
)
else:
@@ -524,7 +524,9 @@ def _update_chap_security(module, array, answer=False):
host_password=module.params["host_password"],
)
except Exception:
- module.params(msg="Failed to set CHAP host username and password")
+ module.fail_json(
+ msg="Failed to set CHAP host username and password"
+ )
if module.params["target_user"]:
if module.params["target_password"] == "clear":
if chap["target_user"]:
@@ -533,7 +535,7 @@ def _update_chap_security(module, array, answer=False):
try:
array.set_host(module.params["name"], target_user="")
except Exception:
- module.params(
+ module.fail_json(
msg="Failed to clear CHAP target username and password"
)
else:
@@ -550,7 +552,9 @@ def _update_chap_security(module, array, answer=False):
target_password=module.params["target_password"],
)
except Exception:
- module.params(msg="Failed to set CHAP target username and password")
+ module.fail_json(
+ msg="Failed to set CHAP target username and password"
+ )
return answer
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
index de7f05002..262d227be 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_info.py
@@ -24,7 +24,7 @@ description:
Purity//FA operating system. By default, the module will collect basic
information including hosts, host groups, protection
groups and volume counts. Additional information can be collected
- based on the configured set of arguements.
+ based on the configured set of arguments.
author:
- Pure Storage ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
@@ -35,7 +35,7 @@ options:
capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
admins, volumes, snapshots, pods, replication, vgroups, offload, apps,
arrays, certs, kmip, clients, policies, dir_snaps, filesystems,
- alerts and virtual_machines.
+ alerts, virtual_machines, hosts_balance and subscriptions.
type: list
elements: str
required: false
@@ -90,16 +90,15 @@ from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa impo
get_system,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
-HAS_PACKAGING = True
-try:
- from packaging import version
-except ImportError:
- HAS_PACKAGING = False
try:
from purestorage import purestorage
except ImportError:
purestorage = None
+from datetime import datetime
import time
SEC_TO_DAY = 86400000
@@ -129,6 +128,18 @@ VM_VERSION = "2.14"
VLAN_VERSION = "2.17"
NEIGHBOR_API_VERSION = "2.22"
POD_QUOTA_VERSION = "2.23"
+AUTODIR_API_VERSION = "2.24"
+SUBS_API_VERSION = "2.26"
+NSID_API_VERSION = "2.27"
+NFS_SECURITY_VERSION = "2.29"
+UPTIME_API_VERSION = "2.30"
+
+
+def _is_cbs(array):
+ """Is the selected array a Cloud Block Store"""
+ model = list(array.get_hardware(filter="type='controller'").items)[0].model
+ is_cbs = bool("CBS" in model)
+ return is_cbs
def generate_default_dict(module, array):
@@ -164,14 +175,38 @@ def generate_default_dict(module, array):
default_info["encryption_algorithm"] = encryption.data_at_rest.algorithm
default_info["encryption_module_version"] = encryption.module_version
eradication = array_data.eradication_config
- default_info["eradication_days_timer"] = int(
- eradication.eradication_delay / SEC_TO_DAY
- )
+ if SUBS_API_VERSION in api_version:
+ default_info["eradication_disabled_days_timer"] = int(
+ eradication.disabled_delay / SEC_TO_DAY
+ )
+ default_info["eradication_enabled_days_timer"] = int(
+ eradication.enabled_delay / SEC_TO_DAY
+ )
+ eradication_delay = getattr(eradication, "eradication_delay", None)
+ if eradication_delay is not None:
+ default_info["eradication_days_timer"] = int(
+ eradication_delay / SEC_TO_DAY
+ )
if SAFE_MODE_VERSION in api_version:
if eradication.manual_eradication == "all-enabled":
default_info["safe_mode"] = "Disabled"
else:
default_info["safe_mode"] = "Enabled"
+ if UPTIME_API_VERSION in api_version:
+ default_info["controller_uptime"] = []
+ controllers = list(arrayv6.get_controllers().items)
+ timenow = datetime.fromtimestamp(time.time())
+ for controller in range(0, len(controllers)):
+ boottime = datetime.fromtimestamp(
+ controllers[controller].mode_since / 1000
+ )
+ delta = timenow - boottime
+ default_info["controller_uptime"].append(
+ {
+ "controller": controllers[controller].name,
+ "uptime": str(delta),
+ }
+ )
if AC_REQUIRED_API_VERSION in api_version:
default_info["volume_groups"] = len(array.list_vgroups())
default_info["connected_arrays"] = len(array.list_array_connections())
@@ -282,12 +317,9 @@ def generate_config_dict(module, array):
"nameservers": dns_configs[config].nameservers,
"domain": dns_configs[config].domain,
}
- try:
- config_info["dns"][dns_configs[config].services[0]][
- "source"
- ] = dns_configs[config].source["name"]
- except Exception:
- pass
+ config_info["dns"][dns_configs[config].services[0]]["source"] = getattr(
+ dns_configs[config].source, "name", None
+ )
if SAML2_VERSION in api_version:
config_info["saml2sso"] = {}
saml2 = list(arrayv6.get_sso_saml2_idps().items)
@@ -355,6 +387,12 @@ def generate_config_dict(module, array):
.name,
}
)
+ if SUBS_API_VERSION in api_version:
+ array_info = list(arrayv6.get_arrays().items)[0]
+ config_info["ntp_keys"] = bool(
+ getattr(array_info, "ntp_symmetric_key", None)
+ )
+ config_info["timezone"] = array_info.time_zone
else:
config_info["directory_service"] = {}
@@ -419,6 +457,10 @@ def generate_filesystems_dict(array):
),
"exports": {},
}
+ if LooseVersion(SUBS_API_VERSION) <= LooseVersion(array.get_rest_version()):
+ files_info[fs_name]["directories"][d_name]["total_used"] = directories[
+ directory
+ ].space.total_used
exports = list(
array.get_directory_exports(
directory_names=[
@@ -485,6 +527,8 @@ def generate_dir_snaps_dict(array):
snapshots[snapshot].space, "used_provisioned", None
),
}
+ if LooseVersion(SUBS_API_VERSION) <= LooseVersion(array.get_rest_version()):
+ dir_snaps_info[s_name]["total_used"] = snapshots[snapshot].space.total_used
try:
dir_snaps_info[s_name]["policy"] = snapshots[snapshot].policy.name
except Exception:
@@ -496,7 +540,7 @@ def generate_dir_snaps_dict(array):
return dir_snaps_info
-def generate_policies_dict(array, quota_available, nfs_user_mapping):
+def generate_policies_dict(array, quota_available, autodir_available, nfs_user_mapping):
policy_info = {}
policies = list(array.get_policies().items)
for policy in range(0, len(policies)):
@@ -528,6 +572,18 @@ def generate_policies_dict(array, quota_available, nfs_user_mapping):
policy_info[p_name][
"user_mapping_enabled"
] = nfs_policy.user_mapping_enabled
+ if LooseVersion(SUBS_API_VERSION) <= LooseVersion(
+ array.get_rest_version()
+ ):
+ policy_info[p_name]["nfs_version"] = getattr(
+ nfs_policy, "nfs_version", None
+ )
+ if LooseVersion(NFS_SECURITY_VERSION) <= LooseVersion(
+ array.get_rest_version()
+ ):
+ policy_info[p_name]["security"] = getattr(
+ nfs_policy, "security", None
+ )
rules = list(
array.get_policies_nfs_client_rules(policy_names=[p_name]).items
)
@@ -537,14 +593,16 @@ def generate_policies_dict(array, quota_available, nfs_user_mapping):
"permission": rules[rule].permission,
"client": rules[rule].client,
}
+ if LooseVersion(SUBS_API_VERSION) <= LooseVersion(
+ array.get_rest_version()
+ ):
+ nfs_rules_dict["nfs_version"] = rules[rule].nfs_version
policy_info[p_name]["rules"].append(nfs_rules_dict)
if policies[policy].policy_type == "snapshot":
- if HAS_PACKAGING:
- suffix_enabled = version.parse(
- array.get_rest_version()
- ) >= version.parse(SHARED_CAP_API_VERSION)
- else:
- suffix_enabled = False
+ suffix_enabled = bool(
+ LooseVersion(array.get_rest_version())
+ >= LooseVersion(SHARED_CAP_API_VERSION)
+ )
rules = list(array.get_policies_snapshot_rules(policy_names=[p_name]).items)
for rule in range(0, len(rules)):
try:
@@ -576,6 +634,8 @@ def generate_policies_dict(array, quota_available, nfs_user_mapping):
"notifications": rules[rule].notifications,
}
policy_info[p_name]["rules"].append(quota_rules_dict)
+ if policies[policy].policy_type == "autodir" and autodir_available:
+ pass # there are currently no rules for autodir policies
return policy_info
@@ -657,8 +717,144 @@ def generate_network_dict(module, array):
for neighbor in range(0, len(neighbors)):
neighbor_info = neighbors[neighbor]
int_name = neighbor_info.local_port.name
- net_info[int_name].update(
- {
+ try:
+ net_info[int_name].update(
+ {
+ "neighbor": {
+ "initial_ttl_in_sec": neighbor_info.initial_ttl_in_sec,
+ "neighbor_port": {
+ "description": getattr(
+ neighbor_info.neighbor_port, "description", None
+ ),
+ "name": getattr(
+ neighbor_info.neighbor_chassis, "name", None
+ ),
+ "id": getattr(
+ neighbor_info.neighbor_port.id, "value", None
+ ),
+ },
+ "neighbor_chassis": {
+ "addresses": getattr(
+ neighbor_info.neighbor_chassis, "addresses", None
+ ),
+ "description": getattr(
+ neighbor_info.neighbor_chassis, "description", None
+ ),
+ "name": getattr(
+ neighbor_info.neighbor_chassis, "name", None
+ ),
+ "bridge": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.bridge,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.bridge,
+ "supported",
+ False,
+ ),
+ },
+ "repeater": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.repeater,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.repeater,
+ "supported",
+ False,
+ ),
+ },
+ "router": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.router,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.router,
+ "supported",
+ False,
+ ),
+ },
+ "station_only": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.station_only,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.station_only,
+ "supported",
+ False,
+ ),
+ },
+ "telephone": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.telephone,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.telephone,
+ "supported",
+ False,
+ ),
+ },
+ "wlan_access_point": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.wlan_access_point,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.wlan_access_point,
+ "supported",
+ False,
+ ),
+ },
+ "docsis_cable_device": {
+ "enabled": getattr(
+ neighbor_info.neighbor_chassis.docsis_cable_device,
+ "enabled",
+ False,
+ ),
+ "supported": getattr(
+ neighbor_info.neighbor_chassis.docsis_cable_device,
+ "supported",
+ False,
+ ),
+ },
+ "id": {
+ "type": getattr(
+ neighbor_info.neighbor_chassis.id,
+ "type",
+ None,
+ ),
+ "value": getattr(
+ neighbor_info.neighbor_chassis.id,
+ "value",
+ None,
+ ),
+ },
+ },
+ }
+ }
+ )
+ except KeyError:
+ net_info[int_name] = {
+ "hwaddr": None,
+ "mtu": None,
+ "enabled": None,
+ "speed": None,
+ "address": None,
+ "slaves": None,
+ "services": None,
+ "gateway": None,
+ "netmask": None,
+ "subnet": None,
"neighbor": {
"initial_ttl_in_sec": neighbor_info.initial_ttl_in_sec,
"neighbor_port": {
@@ -779,9 +975,9 @@ def generate_network_dict(module, array):
),
},
},
- }
+ },
}
- )
+
return net_info
@@ -830,6 +1026,8 @@ def generate_capacity_dict(module, array):
capacity_info["used_provisioned"] = getattr(
capacity.space, "used_provisioned", 0
)
+ if SUBS_API_VERSION in api_version:
+ capacity_info["total_used"] = capacity.space.total_used
else:
capacity_info["provisioned_space"] = capacity.space["total_provisioned"]
capacity_info["free_space"] = (
@@ -843,6 +1041,13 @@ def generate_capacity_dict(module, array):
capacity_info["thin_provisioning"] = capacity.space["thin_provisioning"]
capacity_info["total_reduction"] = capacity.space["total_reduction"]
capacity_info["replication"] = capacity.space["replication"]
+ if NFS_SECURITY_VERSION in api_version and _is_cbs(arrayv6):
+ cloud = list(arrayv6.get_arrays_cloud_capacity().items)[0]
+ capacity_info["cloud_capacity"] = {
+ "current_capacity": cloud.current_capacity,
+ "requested_capacity": cloud.requested_capacity,
+ "status": cloud.status,
+ }
elif CAP_REQUIRED_API_VERSION in api_version:
volumes = array.list_volumes(pending=True)
capacity_info["provisioned_space"] = sum(item["size"] for item in volumes)
@@ -890,9 +1095,11 @@ def generate_snap_dict(module, array):
].space.total_provisioned
snap_info[snapshot]["unique_space"] = snapsv6[snap].space.unique
if SHARED_CAP_API_VERSION in api_version:
- snap_info[snapshot]["snapshots_effective"] = snapsv6[
- snap
- ].space.snapshots_effective
+ snap_info[snapshot]["snapshots_effective"] = getattr(
+ snapsv6[snap].space, "snapshots_effective", None
+ )
+ if SUBS_API_VERSION in api_version:
+ snap_info[snapshot]["total_used"] = snapsv6[snap].space.total_used
offloads = list(arrayv6.get_offloads().items)
for offload in range(0, len(offloads)):
offload_name = offloads[offload].name
@@ -976,6 +1183,8 @@ def generate_del_snap_dict(module, array):
snap
].space.total_provisioned
snap_info[snapshot]["unique_space"] = snapsv6[snap].space.unique
+ if SUBS_API_VERSION in api_version:
+ snap_info[snapshot]["total_used"] = snapsv6[snap].space.total_used
offloads = list(arrayv6.get_offloads().items)
for offload in range(0, len(offloads)):
offload_name = offloads[offload].name
@@ -1073,15 +1282,17 @@ def generate_del_vol_dict(module, array):
vol
].space.thin_provisioning
if SHARED_CAP_API_VERSION in api_version:
- volume_info[name]["snapshots_effective"] = vols_space[
- vol
- ].space.snapshots_effective
- volume_info[name]["unique_effective"] = vols_space[
- vol
- ].space.unique_effective
+ volume_info[name]["snapshots_effective"] = getattr(
+ vols_space[vol].space, "snapshots_effective", None
+ )
+ volume_info[name]["unique_effective"] = getattr(
+ vols_space[vol].space, "unique_effective", None
+ )
volume_info[name]["used_provisioned"] = (
getattr(vols_space[vol].space, "used_provisioned", None),
)
+ if SUBS_API_VERSION in api_version:
+ volume_info[name]["total_used"] = vols_space[vol].space.total_used
if ACTIVE_DR_API in api_version:
voltags = array.list_volumes(tags=True, pending_only=True)
for voltag in range(0, len(voltags)):
@@ -1094,16 +1305,22 @@ def generate_del_vol_dict(module, array):
"namespace": voltags[voltag]["namespace"],
}
volume_info[volume]["tags"].append(tagdict)
- if SAFE_MODE_VERSION in api_version:
+ if V6_MINIMUM_API_VERSION in api_version:
volumes = list(arrayv6.get_volumes(destroyed=True).items)
for vol in range(0, len(volumes)):
name = volumes[vol].name
- volume_info[name]["priority"] = volumes[vol].priority
- volume_info[name]["priority_adjustment"] = volumes[
+ volume_info[name]["promotion_status"] = volumes[vol].promotion_status
+ volume_info[name]["requested_promotion_state"] = volumes[
vol
- ].priority_adjustment.priority_adjustment_operator + str(
- volumes[vol].priority_adjustment.priority_adjustment_value
- )
+ ].requested_promotion_state
+ if SAFE_MODE_VERSION in api_version:
+ volume_info[name]["subtype"] = volumes[vol].subtype
+ volume_info[name]["priority"] = volumes[vol].priority
+ volume_info[name]["priority_adjustment"] = volumes[
+ vol
+ ].priority_adjustment.priority_adjustment_operator + str(
+ volumes[vol].priority_adjustment.priority_adjustment_value
+ )
return volume_info
@@ -1146,18 +1363,20 @@ def generate_vol_dict(module, array):
vol
].space.total_physical
if SHARED_CAP_API_VERSION in api_version:
- volume_info[name]["snapshots_effective"] = vols_space[
- vol
- ].space.snapshots_effective
- volume_info[name]["unique_effective"] = vols_space[
- vol
- ].space.unique_effective
- volume_info[name]["total_effective"] = vols_space[
- vol
- ].space.total_effective
+ volume_info[name]["snapshots_effective"] = getattr(
+ vols_space[vol].space, "snapshots_effective", None
+ )
+ volume_info[name]["unique_effective"] = getattr(
+ vols_space[vol].space, "unique_effective", None
+ )
+ volume_info[name]["total_effective"] = getattr(
+ vols_space[vol].space, "total_effective", None
+ )
volume_info[name]["used_provisioned"] = (
getattr(vols_space[vol].space, "used_provisioned", None),
)
+ if SUBS_API_VERSION in api_version:
+ volume_info[name]["total_used"] = vols_space[vol].space.total_used
if AC_REQUIRED_API_VERSION in api_version:
qvols = array.list_volumes(qos=True)
for qvol in range(0, len(qvols)):
@@ -1176,9 +1395,9 @@ def generate_vol_dict(module, array):
"source": vvols[vvol]["source"],
"serial": vvols[vvol]["serial"],
"nvme_nguid": "eui.00"
- + vols[vol]["serial"][0:14].lower()
+ + vvols[vvol]["serial"][0:14].lower()
+ "24a937"
- + vols[vol]["serial"][-10:].lower(),
+ + vvols[vvol]["serial"][-10:].lower(),
"page83_naa": PURE_OUI + vvols[vvol]["serial"],
"tags": [],
"hosts": [],
@@ -1190,16 +1409,22 @@ def generate_vol_dict(module, array):
volume_info[volume]["host_encryption_key_status"] = e2ees[e2ee][
"host_encryption_key_status"
]
- if SAFE_MODE_VERSION in api_version:
+ if V6_MINIMUM_API_VERSION in api_version:
volumes = list(arrayv6.get_volumes(destroyed=False).items)
for vol in range(0, len(volumes)):
name = volumes[vol].name
- volume_info[name]["priority"] = volumes[vol].priority
- volume_info[name]["priority_adjustment"] = volumes[
+ volume_info[name]["promotion_status"] = volumes[vol].promotion_status
+ volume_info[name]["requested_promotion_state"] = volumes[
vol
- ].priority_adjustment.priority_adjustment_operator + str(
- volumes[vol].priority_adjustment.priority_adjustment_value
- )
+ ].requested_promotion_state
+ volume_info[name]["subtype"] = volumes[vol].subtype
+ if SAFE_MODE_VERSION in api_version:
+ volume_info[name]["priority"] = volumes[vol].priority
+ volume_info[name]["priority_adjustment"] = volumes[
+ vol
+ ].priority_adjustment.priority_adjustment_operator + str(
+ volumes[vol].priority_adjustment.priority_adjustment_value
+ )
cvols = array.list_volumes(connect=True)
for cvol in range(0, len(cvols)):
volume = cvols[cvol]["name"]
@@ -1223,6 +1448,9 @@ def generate_vol_dict(module, array):
def generate_host_dict(module, array):
api_version = array._list_available_rest_versions()
host_info = {}
+ if FC_REPL_API_VERSION in api_version:
+ arrayv6 = get_array(module)
+ hostsv6 = list(arrayv6.get_hosts().items)
hosts = array.list_hosts()
for host in range(0, len(hosts)):
hostname = hosts[host]["name"]
@@ -1246,15 +1474,31 @@ def generate_host_dict(module, array):
"personality": array.get_host(hostname, personality=True)["personality"],
"target_port": all_tports,
"volumes": [],
+ "performance_balance": [],
}
- host_connections = array.list_host_connections(hostname)
- for connection in range(0, len(host_connections)):
- connection_dict = {
- "hostgroup": host_connections[connection]["hgroup"],
- "volume": host_connections[connection]["vol"],
- "lun": host_connections[connection]["lun"],
- }
- host_info[hostname]["volumes"].append(connection_dict)
+ if FC_REPL_API_VERSION in api_version:
+ host_connections = list(
+ arrayv6.get_connections(host_names=[hostname]).items
+ )
+ for connection in range(0, len(host_connections)):
+ connection_dict = {
+ "hostgroup": getattr(
+ host_connections[connection].host_group, "name", ""
+ ),
+ "volume": host_connections[connection].volume.name,
+ "lun": getattr(host_connections[connection], "lun", ""),
+ "nsid": getattr(host_connections[connection], "nsid", ""),
+ }
+ host_info[hostname]["volumes"].append(connection_dict)
+ else:
+ host_connections = array.list_host_connections(hostname)
+ for connection in range(0, len(host_connections)):
+ connection_dict = {
+ "hostgroup": host_connections[connection]["hgroup"],
+ "volume": host_connections[connection]["vol"],
+ "lun": host_connections[connection]["lun"],
+ }
+ host_info[hostname]["volumes"].append(connection_dict)
if host_info[hostname]["iqn"]:
chap_data = array.get_host(hostname, chap=True)
host_info[hostname]["target_user"] = chap_data["target_user"]
@@ -1266,6 +1510,35 @@ def generate_host_dict(module, array):
for host in range(0, len(hosts)):
hostname = hosts[host]["name"]
host_info[hostname]["preferred_array"] = hosts[host]["preferred_array"]
+ if FC_REPL_API_VERSION in api_version:
+ hosts_balance = list(arrayv6.get_hosts_performance_balance().items)
+ for host in range(0, len(hostsv6)):
+ if hostsv6[host].is_local:
+ host_info[hostsv6[host].name]["port_connectivity"] = hostsv6[
+ host
+ ].port_connectivity.details
+ host_perf_balance = []
+ for balance in range(0, len(hosts_balance)):
+ if hosts[host]["name"] == hosts_balance[balance].name:
+ host_balance = {
+ "fraction_relative_to_max": getattr(
+ hosts_balance[balance],
+ "fraction_relative_to_max",
+ None,
+ ),
+ "op_count": getattr(hosts_balance[balance], "op_count", 0),
+ "target": getattr(
+ hosts_balance[balance].target, "name", None
+ ),
+ "failed": bool(
+ getattr(hosts_balance[balance].target, "failover", 0)
+ ),
+ }
+ if host_balance["target"]:
+ host_perf_balance.append(host_balance)
+ host_info[hosts[host]["name"]]["performance_balance"].append(
+ host_perf_balance
+ )
if VLAN_VERSION in api_version:
arrayv6 = get_array(module)
hosts = list(arrayv6.get_hosts().items)
@@ -1276,6 +1549,131 @@ def generate_host_dict(module, array):
return host_info
+def generate_del_pgroups_dict(module, array):
+ pgroups_info = {}
+ api_version = array._list_available_rest_versions()
+ pgroups = array.list_pgroups(pending_only=True)
+ if SHARED_CAP_API_VERSION in api_version:
+ array_v6 = get_array(module)
+ deleted_enabled = True
+ else:
+ deleted_enabled = False
+ for pgroup in range(0, len(pgroups)):
+ protgroup = pgroups[pgroup]["name"]
+ pgroups_info[protgroup] = {
+ "hgroups": pgroups[pgroup]["hgroups"],
+ "hosts": pgroups[pgroup]["hosts"],
+ "source": pgroups[pgroup]["source"],
+ "targets": pgroups[pgroup]["targets"],
+ "volumes": pgroups[pgroup]["volumes"],
+ "time_remaining": pgroups[pgroup]["time_remaining"],
+ }
+ try:
+ prot_sched = array.get_pgroup(protgroup, schedule=True, pending=True)
+ prot_reten = array.get_pgroup(protgroup, retention=True, pending=True)
+ snap_transfers = array.get_pgroup(
+ protgroup, snap=True, transfer=True, pending=True
+ )
+ except purestorage.PureHTTPError as err:
+ if err.code == 400:
+ continue
+ if prot_sched["snap_enabled"] or prot_sched["replicate_enabled"]:
+ pgroups_info[protgroup]["snap_frequency"] = prot_sched["snap_frequency"]
+ pgroups_info[protgroup]["replicate_frequency"] = prot_sched[
+ "replicate_frequency"
+ ]
+ pgroups_info[protgroup]["snap_enabled"] = prot_sched["snap_enabled"]
+ pgroups_info[protgroup]["replicate_enabled"] = prot_sched[
+ "replicate_enabled"
+ ]
+ pgroups_info[protgroup]["snap_at"] = prot_sched["snap_at"]
+ pgroups_info[protgroup]["replicate_at"] = prot_sched["replicate_at"]
+ pgroups_info[protgroup]["replicate_blackout"] = prot_sched[
+ "replicate_blackout"
+ ]
+ pgroups_info[protgroup]["per_day"] = prot_reten["per_day"]
+ pgroups_info[protgroup]["target_per_day"] = prot_reten["target_per_day"]
+ pgroups_info[protgroup]["target_days"] = prot_reten["target_days"]
+ pgroups_info[protgroup]["days"] = prot_reten["days"]
+ pgroups_info[protgroup]["all_for"] = prot_reten["all_for"]
+ pgroups_info[protgroup]["target_all_for"] = prot_reten["target_all_for"]
+ pgroups_info[protgroup]["snaps"] = {}
+ for snap_transfer in range(0, len(snap_transfers)):
+ snap = snap_transfers[snap_transfer]["name"]
+ pgroups_info[protgroup]["snaps"][snap] = {
+ "time_remaining": snap_transfers[snap_transfer]["time_remaining"],
+ "created": snap_transfers[snap_transfer]["created"],
+ "started": snap_transfers[snap_transfer]["started"],
+ "completed": snap_transfers[snap_transfer]["completed"],
+ "physical_bytes_written": snap_transfers[snap_transfer][
+ "physical_bytes_written"
+ ],
+ "data_transferred": snap_transfers[snap_transfer]["data_transferred"],
+ "progress": snap_transfers[snap_transfer]["progress"],
+ }
+ if deleted_enabled:
+ pgroups_info[protgroup]["deleted_volumes"] = []
+ volumes = list(
+ array_v6.get_protection_groups_volumes(group_names=[protgroup]).items
+ )
+ if volumes:
+ for volume in range(0, len(volumes)):
+ if volumes[volume].member["destroyed"]:
+ pgroups_info[protgroup]["deleted_volumes"].append(
+ volumes[volume].member["name"]
+ )
+ else:
+ pgroups_info[protgroup]["deleted_volumes"] = None
+ if PER_PG_VERSION in api_version:
+ try:
+ pgroups_info[protgroup]["retention_lock"] = list(
+ array_v6.get_protection_groups(names=[protgroup]).items
+ )[0].retention_lock
+ pgroups_info[protgroup]["manual_eradication"] = list(
+ array_v6.get_protection_groups(names=[protgroup]).items
+ )[0].eradication_config.manual_eradication
+ except Exception:
+ pass
+ if V6_MINIMUM_API_VERSION in api_version:
+ pgroups = list(array_v6.get_protection_groups(destroyed=True).items)
+ for pgroup in range(0, len(pgroups)):
+ name = pgroups[pgroup].name
+ pgroups_info[name]["snapshots"] = getattr(
+ pgroups[pgroup].space, "snapshots", None
+ )
+ pgroups_info[name]["shared"] = getattr(
+ pgroups[pgroup].space, "shared", None
+ )
+ pgroups_info[name]["data_reduction"] = getattr(
+ pgroups[pgroup].space, "data_reduction", None
+ )
+ pgroups_info[name]["thin_provisioning"] = getattr(
+ pgroups[pgroup].space, "thin_provisioning", None
+ )
+ pgroups_info[name]["total_physical"] = getattr(
+ pgroups[pgroup].space, "total_physical", None
+ )
+ pgroups_info[name]["total_provisioned"] = getattr(
+ pgroups[pgroup].space, "total_provisioned", None
+ )
+ pgroups_info[name]["total_reduction"] = getattr(
+ pgroups[pgroup].space, "total_reduction", None
+ )
+ pgroups_info[name]["unique"] = getattr(
+ pgroups[pgroup].space, "unique", None
+ )
+ pgroups_info[name]["virtual"] = getattr(
+ pgroups[pgroup].space, "virtual", None
+ )
+ pgroups_info[name]["replication"] = getattr(
+ pgroups[pgroup].space, "replication", None
+ )
+ pgroups_info[name]["used_provisioned"] = getattr(
+ pgroups[pgroup].space, "used_provisioned", None
+ )
+ return pgroups_info
+
+
def generate_pgroups_dict(module, array):
pgroups_info = {}
api_version = array._list_available_rest_versions()
@@ -1361,7 +1759,7 @@ def generate_pgroups_dict(module, array):
except Exception:
pass
if V6_MINIMUM_API_VERSION in api_version:
- pgroups = list(array_v6.get_protection_groups().items)
+ pgroups = list(array_v6.get_protection_groups(destroyed=False).items)
for pgroup in range(0, len(pgroups)):
name = pgroups[pgroup].name
pgroups_info[name]["snapshots"] = getattr(
@@ -1408,14 +1806,19 @@ def generate_rl_dict(module, array):
rlinks = array.list_pod_replica_links()
for rlink in range(0, len(rlinks)):
link_name = rlinks[rlink]["local_pod_name"]
- since_epoch = rlinks[rlink]["recovery_point"] / 1000
- recovery_datatime = time.strftime(
- "%Y-%m-%d %H:%M:%S", time.localtime(since_epoch)
- )
+ if rlinks[rlink]["recovery_point"]:
+ since_epoch = rlinks[rlink]["recovery_point"] / 1000
+ recovery_datatime = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(since_epoch)
+ )
+ else:
+ recovery_datatime = None
+ if rlinks[rlink]["lag"]:
+ lag = str(rlinks[rlink]["lag"] / 1000) + "s"
rl_info[link_name] = {
"status": rlinks[rlink]["status"],
"direction": rlinks[rlink]["direction"],
- "lag": str(rlinks[rlink]["lag"] / 1000) + "s",
+ "lag": lag,
"remote_pod_name": rlinks[rlink]["remote_pod_name"],
"remote_names": rlinks[rlink]["remote_names"],
"recovery_point": recovery_datatime,
@@ -1464,21 +1867,37 @@ def generate_del_pods_dict(module, array):
pods = list(arrayv6.get_pods(destroyed=True).items)
for pod in range(0, len(pods)):
name = pods[pod].name
- pods_info[name]["snapshots"] = pods[pod].space.snapshots
- pods_info[name]["shared"] = pods[pod].space.shared
- pods_info[name]["data_reduction"] = pods[pod].space.data_reduction
- pods_info[name]["thin_provisioning"] = pods[pod].space.thin_provisioning
- pods_info[name]["total_physical"] = pods[pod].space.total_physical
- pods_info[name]["total_provisioned"] = pods[pod].space.total_provisioned
- pods_info[name]["total_reduction"] = pods[pod].space.total_reduction
- pods_info[name]["unique"] = pods[pod].space.unique
- pods_info[name]["virtual"] = pods[pod].space.virtual
+ pods_info[name]["snapshots"] = getattr(
+ pods[pod].space, "snapshots", None
+ )
+ pods_info[name]["shared"] = getattr(pods[pod].space, "shared", None)
+ pods_info[name]["data_reduction"] = getattr(
+ pods[pod].space, "data_reduction", None
+ )
+ pods_info[name]["thin_provisioning"] = getattr(
+ pods[pod].space, "thin_provisioning", None
+ )
+ pods_info[name]["total_physical"] = getattr(
+ pods[pod].space, "total_physical", None
+ )
+ pods_info[name]["total_provisioned"] = getattr(
+ pods[pod].space, "total_provisioned", None
+ )
+ pods_info[name]["total_reduction"] = getattr(
+ pods[pod].space, "total_reduction", None
+ )
+ pods_info[name]["unique"] = getattr(pods[pod].space, "unique", None)
+ pods_info[name]["virtual"] = getattr(pods[pod].space, "virtual", None)
pods_info[name]["replication"] = pods[pod].space.replication
pods_info[name]["used_provisioned"] = getattr(
pods[pod].space, "used_provisioned", None
)
if POD_QUOTA_VERSION in api_version:
- pods_info[name]["quota_limit"] = pods[pod].quota_limit
+ pods_info[name]["quota_limit"] = getattr(
+ pods[pod], "quota_limit", None
+ )
+ if SUBS_API_VERSION in api_version:
+ pods_info[name]["total_used"] = pods[pod].space.total_used
return pods_info
@@ -1547,6 +1966,8 @@ def generate_pods_dict(module, array):
pods_info[name]["used_provisioned"] = getattr(
pods[pod].space, "used_provisioned", None
)
+ if SUBS_API_VERSION in api_version:
+ pods_info[name]["total_used"] = pods[pod].space.total_used
return pods_info
@@ -1605,16 +2026,16 @@ def generate_conn_array_dict(module, array):
pass
try:
if bool(carrays[carray].throttle.default_limit):
- conn_array_info[arrayname]["throttling"][
- "default_limit"
- ] = carrays[carray].throttle.default_limit
+ conn_array_info[arrayname]["throttling"]["default_limit"] = (
+ carrays[carray].throttle.default_limit
+ )
except AttributeError:
pass
try:
if bool(carrays[carray].throttle.window_limit):
- conn_array_info[arrayname]["throttling"][
- "window_limit"
- ] = carrays[carray].throttle.window_limit
+ conn_array_info[arrayname]["throttling"]["window_limit"] = (
+ carrays[carray].throttle.window_limit
+ )
except AttributeError:
pass
else:
@@ -1682,6 +2103,10 @@ def generate_vgroups_dict(module, array):
vgroups_info[name]["iops_limit"] = getattr(
vgroups[vgroup].qos, "iops_limit", ""
)
+ if SUBS_API_VERSION in api_version:
+ vgroups_info[name]["total_used"] = getattr(
+ vgroups[vgroup].space, "total_used", None
+ )
if SAFE_MODE_VERSION in api_version:
for vgroup in range(0, len(vgroups)):
name = vgroups[vgroup].name
@@ -1844,6 +2269,8 @@ def generate_nfs_offload_dict(module, array):
offload_info[name]["used_provisioned"] = getattr(
offloads[offload].space, "used_provisioned", None
)
+ if SUBS_API_VERSION in api_version:
+ offload_info[name]["total_used"] = offloads[offload].space.total_used
return offload_info
@@ -1902,6 +2329,8 @@ def generate_s3_offload_dict(module, array):
offload_info[name]["used_provisioned"] = getattr(
offloads[offload].space, "used_provisioned", None
)
+ if SUBS_API_VERSION in api_version:
+ offload_info[name]["total_used"] = offloads[offload].space.total_used
return offload_info
@@ -1957,6 +2386,8 @@ def generate_azure_offload_dict(module, array):
offload_info[name]["used_provisioned"] = getattr(
offloads[offload].space, "used_provisioned", None
)
+ if SUBS_API_VERSION in api_version:
+ offload_info[name]["total_used"] = offloads[offload].space.total_used
return offload_info
@@ -1989,6 +2420,8 @@ def generate_google_offload_dict(array):
offloads[offload].space, "used_provisioned", None
),
}
+ if LooseVersion(SUBS_API_VERSION) <= LooseVersion(array.get_rest_version()):
+ offload_info[name]["total_used"] = offloads[offload].space.total_used
return offload_info
@@ -2016,24 +2449,37 @@ def generate_hgroups_dict(module, array):
arrayv6 = get_array(module)
hgroups = list(arrayv6.get_host_groups().items)
for hgroup in range(0, len(hgroups)):
- name = hgroups[hgroup].name
- hgroups_info[name]["snapshots"] = hgroups[hgroup].space.snapshots
- hgroups_info[name]["data_reduction"] = hgroups[hgroup].space.data_reduction
- hgroups_info[name]["thin_provisioning"] = hgroups[
- hgroup
- ].space.thin_provisioning
- hgroups_info[name]["total_physical"] = hgroups[hgroup].space.total_physical
- hgroups_info[name]["total_provisioned"] = hgroups[
- hgroup
- ].space.total_provisioned
- hgroups_info[name]["total_reduction"] = hgroups[
- hgroup
- ].space.total_reduction
- hgroups_info[name]["unique"] = hgroups[hgroup].space.unique
- hgroups_info[name]["virtual"] = hgroups[hgroup].space.virtual
- hgroups_info[name]["used_provisioned"] = getattr(
- hgroups[hgroup].space, "used_provisioned", None
- )
+ if hgroups[hgroup].is_local:
+ name = hgroups[hgroup].name
+ hgroups_info[name]["snapshots"] = getattr(
+ hgroups[hgroup].space, "snapshots", None
+ )
+ hgroups_info[name]["data_reduction"] = getattr(
+ hgroups[hgroup].space, "data_reduction", None
+ )
+ hgroups_info[name]["thin_provisioning"] = getattr(
+ hgroups[hgroup].space, "thin_provisioning", None
+ )
+ hgroups_info[name]["total_physical"] = getattr(
+ hgroups[hgroup].space, "total_physical", None
+ )
+ hgroups_info[name]["total_provisioned"] = getattr(
+ hgroups[hgroup].space, "total_provisioned", None
+ )
+ hgroups_info[name]["total_reduction"] = getattr(
+ hgroups[hgroup].space, "total_reduction", None
+ )
+ hgroups_info[name]["unique"] = getattr(
+ hgroups[hgroup].space, "unique", None
+ )
+ hgroups_info[name]["virtual"] = getattr(
+ hgroups[hgroup].space, "virtual", None
+ )
+ hgroups_info[name]["used_provisioned"] = getattr(
+ hgroups[hgroup].space, "used_provisioned", None
+ )
+ if SUBS_API_VERSION in api_version:
+ hgroups_info[name]["total_used"] = hgroups[hgroup].space.total_used
return hgroups_info
@@ -2150,6 +2596,17 @@ def generate_vmsnap_dict(array):
return vmsnap_info
+def generate_subs_dict(array):
+ subs_info = {}
+ subs = list(array.get_subscription_assets().items)
+ for sub in range(0, len(subs)):
+ name = subs[sub].name
+ subs_info[name] = {
+ "subscription_id": subs[sub].subscription.id,
+ }
+ return subs_info
+
+
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(
@@ -2188,7 +2645,9 @@ def main():
"policies",
"dir_snaps",
"filesystems",
+ "alerts",
"virtual_machines",
+ "subscriptions",
)
subset_test = (test in valid_subsets for test in subset)
if not all(subset_test):
@@ -2225,6 +2684,7 @@ def main():
info["hgroups"] = generate_hgroups_dict(module, array)
if "pgroups" in subset or "all" in subset:
info["pgroups"] = generate_pgroups_dict(module, array)
+ info["deleted_pgroups"] = generate_del_pgroups_dict(module, array)
if "pods" in subset or "all" in subset or "replication" in subset:
info["replica_links"] = generate_rl_dict(module, array)
info["pods"] = generate_pods_dict(module, array)
@@ -2264,7 +2724,13 @@ def main():
quota = True
else:
quota = False
- info["policies"] = generate_policies_dict(array_v6, quota, user_map)
+ if AUTODIR_API_VERSION in api_version:
+ autodir = True
+ else:
+ autodir = False
+ info["policies"] = generate_policies_dict(
+ array_v6, quota, autodir, user_map
+ )
if "clients" in subset or "all" in subset:
info["clients"] = generate_clients_dict(array_v6)
if "dir_snaps" in subset or "all" in subset:
@@ -2273,6 +2739,10 @@ def main():
info["pg_snapshots"] = generate_pgsnaps_dict(array_v6)
if "alerts" in subset or "all" in subset:
info["alerts"] = generate_alerts_dict(array_v6)
+ if SUBS_API_VERSION in api_version and (
+ "subscriptions" in subset or "all" in subset
+ ):
+ info["subscriptions"] = generate_subs_dict(array_v6)
if VM_VERSION in api_version and (
"virtual_machines" in subset or "all" in subset
):
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py
index 8e65ee07e..396699b58 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_inventory.py
@@ -48,17 +48,18 @@ purefa_inventory:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
-NEW_API_VERSION = "2.2"
SFP_API_VERSION = "2.16"
-def generate_new_hardware_dict(array, versions):
+def generate_new_hardware_dict(array):
hw_info = {
"fans": {},
"controllers": {},
@@ -137,216 +138,137 @@ def generate_new_hardware_dict(array, versions):
"protocol": getattr(drives[drive], "protocol", None),
"type": drives[drive].type,
}
- if SFP_API_VERSION in versions:
- port_details = list(array.get_network_interfaces_port_details().items)
- for port_detail in range(0, len(port_details)):
- port_name = port_details[port_detail].name
- hw_info["interfaces"][port_name]["interface_type"] = port_details[
- port_detail
- ].interface_type
- hw_info["interfaces"][port_name]["rx_los"] = (
- port_details[port_detail].rx_los[0].flag
- )
- hw_info["interfaces"][port_name]["rx_power"] = (
- port_details[port_detail].rx_power[0].measurement
- )
- hw_info["interfaces"][port_name]["static"] = {
- "connector_type": port_details[port_detail].static.connector_type,
- "vendor_name": port_details[port_detail].static.vendor_name,
- "vendor_oui": port_details[port_detail].static.vendor_oui,
- "vendor_serial_number": port_details[
- port_detail
- ].static.vendor_serial_number,
- "vendor_part_number": port_details[
+ api_version = array.get_rest_version()
+ if LooseVersion(SFP_API_VERSION) <= LooseVersion(api_version):
+ try:
+ port_details = list(array.get_network_interfaces_port_details().items)
+ for port_detail in range(0, len(port_details)):
+ port_name = port_details[port_detail].name
+ hw_info["interfaces"][port_name]["interface_type"] = port_details[
port_detail
- ].static.vendor_part_number,
- "vendor_date_code": port_details[port_detail].static.vendor_date_code,
- "signaling_rate": port_details[port_detail].static.signaling_rate,
- "wavelength": port_details[port_detail].static.wavelength,
- "rate_identifier": port_details[port_detail].static.rate_identifier,
- "identifier": port_details[port_detail].static.identifier,
- "link_length": port_details[port_detail].static.link_length,
- "voltage_thresholds": {
- "alarm_high": port_details[
- port_detail
- ].static.voltage_thresholds.alarm_high,
- "alarm_low": port_details[
- port_detail
- ].static.voltage_thresholds.alarm_low,
- "warn_high": port_details[
- port_detail
- ].static.voltage_thresholds.warn_high,
- "warn_low": port_details[
- port_detail
- ].static.voltage_thresholds.warn_low,
- },
- "tx_power_thresholds": {
- "alarm_high": port_details[
- port_detail
- ].static.tx_power_thresholds.alarm_high,
- "alarm_low": port_details[
- port_detail
- ].static.tx_power_thresholds.alarm_low,
- "warn_high": port_details[
- port_detail
- ].static.tx_power_thresholds.warn_high,
- "warn_low": port_details[
- port_detail
- ].static.tx_power_thresholds.warn_low,
- },
- "rx_power_thresholds": {
- "alarm_high": port_details[
- port_detail
- ].static.rx_power_thresholds.alarm_high,
- "alarm_low": port_details[
- port_detail
- ].static.rx_power_thresholds.alarm_low,
- "warn_high": port_details[
+ ].interface_type
+ hw_info["interfaces"][port_name]["rx_los"] = (
+ port_details[port_detail].rx_los[0].flag
+ )
+ hw_info["interfaces"][port_name]["rx_power"] = (
+ port_details[port_detail].rx_power[0].measurement
+ )
+ hw_info["interfaces"][port_name]["static"] = {
+ "connector_type": port_details[port_detail].static.connector_type,
+ "vendor_name": port_details[port_detail].static.vendor_name,
+ "vendor_oui": port_details[port_detail].static.vendor_oui,
+ "vendor_serial_number": port_details[
port_detail
- ].static.rx_power_thresholds.warn_high,
- "warn_low": port_details[
+ ].static.vendor_serial_number,
+ "vendor_part_number": port_details[
port_detail
- ].static.rx_power_thresholds.warn_low,
- },
- "tx_bias_thresholds": {
- "alarm_high": port_details[
+ ].static.vendor_part_number,
+ "vendor_date_code": port_details[
port_detail
- ].static.tx_bias_thresholds.alarm_high,
- "alarm_low": port_details[
+ ].static.vendor_date_code,
+ "signaling_rate": port_details[port_detail].static.signaling_rate,
+ "wavelength": port_details[port_detail].static.wavelength,
+ "rate_identifier": port_details[port_detail].static.rate_identifier,
+ "identifier": port_details[port_detail].static.identifier,
+ "link_length": port_details[port_detail].static.link_length,
+ "voltage_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.voltage_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.voltage_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.voltage_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.voltage_thresholds.warn_low,
+ },
+ "tx_power_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.tx_power_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.tx_power_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.tx_power_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.tx_power_thresholds.warn_low,
+ },
+ "rx_power_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.rx_power_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.rx_power_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.rx_power_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.rx_power_thresholds.warn_low,
+ },
+ "tx_bias_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.tx_bias_thresholds.warn_low,
+ },
+ "temperature_thresholds": {
+ "alarm_high": port_details[
+ port_detail
+ ].static.temperature_thresholds.alarm_high,
+ "alarm_low": port_details[
+ port_detail
+ ].static.temperature_thresholds.alarm_low,
+ "warn_high": port_details[
+ port_detail
+ ].static.temperature_thresholds.warn_high,
+ "warn_low": port_details[
+ port_detail
+ ].static.temperature_thresholds.warn_low,
+ },
+ "fc_speeds": port_details[port_detail].static.fc_speeds,
+ "fc_technology": port_details[port_detail].static.fc_technology,
+ "encoding": port_details[port_detail].static.encoding,
+ "fc_link_lengths": port_details[port_detail].static.fc_link_lengths,
+ "fc_transmission_media": port_details[
port_detail
- ].static.tx_bias_thresholds.alarm_low,
- "warn_high": port_details[
+ ].static.fc_transmission_media,
+ "extended_identifier": port_details[
port_detail
- ].static.tx_bias_thresholds.warn_high,
- "warn_low": port_details[
- port_detail
- ].static.tx_bias_thresholds.warn_low,
- },
- "temperature_thresholds": {
- "alarm_high": port_details[
- port_detail
- ].static.temperature_thresholds.alarm_high,
- "alarm_low": port_details[
- port_detail
- ].static.temperature_thresholds.alarm_low,
- "warn_high": port_details[
- port_detail
- ].static.temperature_thresholds.warn_high,
- "warn_low": port_details[
- port_detail
- ].static.temperature_thresholds.warn_low,
- },
- "fc_speeds": port_details[port_detail].static.fc_speeds,
- "fc_technology": port_details[port_detail].static.fc_technology,
- "encoding": port_details[port_detail].static.encoding,
- "fc_link_lengths": port_details[port_detail].static.fc_link_lengths,
- "fc_transmission_media": port_details[
- port_detail
- ].static.fc_transmission_media,
- "extended_identifier": port_details[
- port_detail
- ].static.extended_identifier,
- }
- hw_info["interfaces"][port_name]["temperature"] = (
- port_details[port_detail].temperature[0].measurement
- )
- hw_info["interfaces"][port_name]["tx_bias"] = (
- port_details[port_detail].tx_bias[0].measurement
- )
- hw_info["interfaces"][port_name]["tx_fault"] = (
- port_details[port_detail].tx_fault[0].flag
- )
- hw_info["interfaces"][port_name]["tx_power"] = (
- port_details[port_detail].tx_power[0].measurement
- )
- hw_info["interfaces"][port_name]["voltage"] = (
- port_details[port_detail].voltage[0].measurement
- )
- return hw_info
-
-
-def generate_hardware_dict(array):
- hw_info = {
- "fans": {},
- "controllers": {},
- "temps": {},
- "drives": {},
- "interfaces": {},
- "power": {},
- "chassis": {},
- }
- components = array.list_hardware()
- for component in range(0, len(components)):
- component_name = components[component]["name"]
- if "FAN" in component_name:
- fan_name = component_name
- hw_info["fans"][fan_name] = {"status": components[component]["status"]}
- if "PWR" in component_name:
- pwr_name = component_name
- hw_info["power"][pwr_name] = {
- "status": components[component]["status"],
- "voltage": components[component]["voltage"],
- "serial": components[component]["serial"],
- "model": components[component]["model"],
- }
- if "IB" in component_name:
- ib_name = component_name
- hw_info["interfaces"][ib_name] = {
- "status": components[component]["status"],
- "speed": components[component]["speed"],
- }
- if "SAS" in component_name:
- sas_name = component_name
- hw_info["interfaces"][sas_name] = {
- "status": components[component]["status"],
- "speed": components[component]["speed"],
- }
- if "ETH" in component_name:
- eth_name = component_name
- hw_info["interfaces"][eth_name] = {
- "status": components[component]["status"],
- "speed": components[component]["speed"],
- }
- if "FC" in component_name:
- eth_name = component_name
- hw_info["interfaces"][eth_name] = {
- "status": components[component]["status"],
- "speed": components[component]["speed"],
- }
- if "TMP" in component_name:
- tmp_name = component_name
- hw_info["temps"][tmp_name] = {
- "status": components[component]["status"],
- "temperature": components[component]["temperature"],
- }
- if component_name in ["CT0", "CT1"]:
- cont_name = component_name
- hw_info["controllers"][cont_name] = {
- "status": components[component]["status"],
- "serial": components[component]["serial"],
- "model": components[component]["model"],
- }
- if component_name in ["CH0"]:
- cont_name = component_name
- hw_info["chassis"][cont_name] = {
- "status": components[component]["status"],
- "serial": components[component]["serial"],
- "model": components[component]["model"],
- }
-
- drives = array.list_drives()
- for drive in range(0, len(drives)):
- drive_name = drives[drive]["name"]
- hw_info["drives"][drive_name] = {
- "capacity": drives[drive]["capacity"],
- "status": drives[drive]["status"],
- "protocol": drives[drive]["protocol"],
- "type": drives[drive]["type"],
- }
- for disk in range(0, len(components)):
- if components[disk]["name"] == drive_name:
- hw_info["drives"][drive_name]["serial"] = components[disk]["serial"]
-
+ ].static.extended_identifier,
+ }
+ hw_info["interfaces"][port_name]["temperature"] = (
+ port_details[port_detail].temperature[0].measurement
+ )
+ hw_info["interfaces"][port_name]["tx_bias"] = (
+ port_details[port_detail].tx_bias[0].measurement
+ )
+ hw_info["interfaces"][port_name]["tx_fault"] = (
+ port_details[port_detail].tx_fault[0].flag
+ )
+ hw_info["interfaces"][port_name]["tx_power"] = (
+ port_details[port_detail].tx_power[0].measurement
+ )
+ hw_info["interfaces"][port_name]["voltage"] = (
+ port_details[port_detail].voltage[0].measurement
+ )
+ except AttributeError:
+ pass
return hw_info
@@ -354,13 +276,8 @@ def main():
argument_spec = purefa_argument_spec()
inv_info = {}
module = AnsibleModule(argument_spec, supports_check_mode=True)
- array = get_system(module)
- api_version = array._list_available_rest_versions()
- if NEW_API_VERSION in api_version:
- arrayv6 = get_array(module)
- inv_info = generate_new_hardware_dict(arrayv6, api_version)
- else:
- inv_info = generate_hardware_dict(array)
+ array = get_array(module)
+ inv_info = generate_new_hardware_dict(array)
module.exit_json(changed=False, purefa_inv=inv_info)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py
index 8774abe87..b422f6f1e 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_kmip.py
@@ -97,10 +97,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.2"
@@ -222,16 +224,15 @@ def main():
if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
)
- array = get_array(module)
state = module.params["state"]
exists = bool(array.get_kmip(names=[module.params["name"]]).status_code == 200)
if module.params["certificate"] and len(module.params["certificate"]) > 3000:
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py
index a2f8e136d..c16818498 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_logging.py
@@ -66,10 +66,12 @@ import time
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
AUDIT_API_VERSION = "2.2"
@@ -87,13 +89,12 @@ def main():
if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
audits = []
changed = False
- if AUDIT_API_VERSION in api_version:
+ if LooseVersion(AUDIT_API_VERSION) <= LooseVersion(api_version):
changed = True
- array = get_array(module)
if not module.check_mode:
if module.params["log_type"] == "audit":
all_audits = list(
@@ -151,7 +152,7 @@ def main():
"command": all_audits[audit].command,
"subcommand": all_audits[audit].subcommand,
"user": all_audits[audit].user,
- "origin": all_audits[audit].origin.name,
+ "origin": getattr(all_audits[audit].origin, "name", None),
}
audits.append(data)
else:
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py
index a28bd56b2..131b7971a 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_messages.py
@@ -27,15 +27,14 @@ options:
severity:
description:
- severity of the alerts to show
- type: list
- elements: str
- choices: [ all, critical, warning, info ]
- default: [ all ]
+ type: str
+ choices: [ critical, warning, info ]
+ default: info
state:
description:
- State of alerts to show
default: open
- choices: [ all, open, closed ]
+ choices: [ open, closed ]
type: str
flagged:
description:
@@ -57,8 +56,7 @@ EXAMPLES = r"""
purefa_messages:
history: 4w
flagged : false
- severity:
- - critical
+ severity: critical
fa_url: 10.10.10.2
api_token: 89a9356f-c203-d263-8a89-c229486a13ba
"""
@@ -70,10 +68,12 @@ import time
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.2"
ALLOWED_PERIODS = ["h", "d", "w", "y"]
@@ -101,28 +101,26 @@ def main():
argument_spec = purefa_argument_spec()
argument_spec.update(
dict(
- state=dict(type="str", default="open", choices=["all", "open", "closed"]),
+ state=dict(type="str", default="open", choices=["open", "closed"]),
history=dict(type="str", default="1w"),
flagged=dict(type="bool", default=False),
severity=dict(
- type="list",
- elements="str",
- default=["all"],
- choices=["all", "critical", "warning", "info"],
+ type="str",
+ default="info",
+ choices=["critical", "warning", "info"],
),
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
time_now = int(time.time() * 1000)
- array = get_system(module)
- api_version = array._list_available_rest_versions()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ array = get_array(module)
+ api_version = array.get_rest_version()
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
)
- array_v6 = get_array(module)
if module.params["history"][-1].lower() not in ALLOWED_PERIODS:
module.fail_json(msg="historical window value is not an allowsd time period")
since_time = str(time_now - _create_time_window(module.params["history"].lower()))
@@ -131,30 +129,12 @@ def main():
else:
flagged = " and flagged='False'"
- multi_sev = False
- if len(module.params["severity"]) > 1:
- if "all" in module.params["severity"]:
- module.params["severity"] = ["*"]
- else:
- multi_sev = True
- if multi_sev:
- severity = " and ("
- for level in range(0, len(module.params["severity"])):
- severity += "severity='" + str(module.params["severity"][level]) + "' or "
- severity = severity[0:-4] + ")"
- else:
- if module.params["severity"] == ["all"]:
- severity = " and severity='*'"
- else:
- severity = " and severity='" + str(module.params["severity"][0]) + "'"
messages = {}
- if module.params["state"] == "all":
- state = " and state='*'"
- else:
- state = " and state='" + module.params["state"] + "'"
+ severity = " and severity='" + module.params["severity"] + "'"
+ state = " and state='" + module.params["state"] + "'"
filter_string = "notified>" + since_time + state + flagged + severity
try:
- res = array_v6.get_alerts(filter=filter_string)
+ res = array.get_alerts(filter=filter_string)
alerts = list(res.items)
except Exception:
module.fail_json(
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py
index e5004568a..c296707d0 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_network.py
@@ -67,6 +67,39 @@ options:
type: list
choices: [ "replication", "management", "ds", "file", "iscsi", "scsi-fc", "nvme-fc", "nvme-tcp", "nvme-roce", "system"]
version_added: '1.15.0'
+ interface:
+ description:
+ - Type of interface to create if subinterfaces is supplied
+ type: str
+ choices: [ "vif", "lacp" ]
+ version_added: '1.22.0'
+ subordinates:
+ description:
+ - List of one or more child devices to be added to a LACP interface
+ - Subordinates must be on the same controller, therefore the full device needs
+ to be provided.
+ type: list
+ elements: str
+ version_added: '1.22.0'
+ subinterfaces:
+ description:
+ - List of one or more child devices to be added to a VIF interface
+ - Only the 'eth' name needs to be provided, such as 'eth6'. This interface on
+ all controllers will be assigned to the interface.
+ type: list
+ elements: str
+ version_added: '1.22.0'
+ subnet:
+ description:
+ - Name of the subnet which interface is to be attached
+ type: str
+ version_added: '1.22.0'
+ enabled:
+ description:
+ - State of the network interface
+ type: bool
+ default: true
+ version_added: '1.22.0'
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
@@ -117,14 +150,21 @@ RETURN = """
"""
try:
- from netaddr import IPAddress, IPNetwork
+ from netaddr import IPAddress, IPNetwork, valid_ipv4, valid_ipv6
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
try:
- from pypureclient.flasharray import NetworkInterfacePatch
+ from pypureclient.flasharray import (
+ NetworkInterfacePatch,
+ NetworkInterfacePost,
+ NetworkinterfacepostEth,
+ NetworkinterfacepatchEth,
+ FixedReferenceNoId,
+ ReferenceNoId,
+ )
HAS_PYPURECLIENT = True
except ImportError:
@@ -154,8 +194,7 @@ def _get_fc_interface(module, array):
if interface_list.status_code == 200:
interface = list(interface_list.items)[0]
return interface
- else:
- return None
+ return None
def _get_interface(module, array):
@@ -227,35 +266,107 @@ def update_fc_interface(module, array, interface, api_version):
module.exit_json(changed=changed)
+def _create_subordinates(module, array):
+ subordinates_v1 = []
+ subordinates_v2 = []
+ all_children = True
+ if module.params["subordinates"]:
+ for inter in sorted(module.params["subordinates"]):
+ if array.get_network_interfaces(names=[inter]).status_code != 200:
+ all_children = False
+ if not all_children:
+ module.fail_json(
+ msg="Subordinate {0} does not exist. Ensure you have specified the controller.".format(
+ inter
+ )
+ )
+ subordinates_v2.append(FixedReferenceNoId(name=inter))
+ subordinates_v1.append(inter)
+ return subordinates_v1, subordinates_v2
+
+
+def _create_subinterfaces(module, array):
+ subinterfaces_v1 = []
+ subinterfaces_v2 = []
+ all_children = True
+ purity_vm = bool(len(array.get_controllers().items) == 1)
+ if module.params["subinterfaces"]:
+ for inter in sorted(module.params["subinterfaces"]):
+ # As we may be on a single controller device, only check for the ct0 version of the interface
+ if array.get_network_interfaces(names=["ct0." + inter]).status_code != 200:
+ all_children = False
+ if not all_children:
+ module.fail_json(
+ msg="Child subinterface {0} does not exist".format(inter)
+ )
+ subinterfaces_v2.append(FixedReferenceNoId(name="ct0." + inter))
+ subinterfaces_v1.append("ct0." + inter)
+ if not purity_vm:
+ subinterfaces_v2.append(FixedReferenceNoId(name="ct1." + inter))
+ subinterfaces_v1.append("ct1." + inter)
+ return subinterfaces_v1, subinterfaces_v2
+
+
def update_interface(module, array, interface):
"""Modify Interface settings"""
changed = False
current_state = {
+ "enabled": interface["enabled"],
"mtu": interface["mtu"],
"gateway": interface["gateway"],
"address": interface["address"],
"netmask": interface["netmask"],
"services": sorted(interface["services"]),
+ "slaves": sorted(interface["slaves"]),
}
+ array6 = get_array(module)
+ subinterfaces = sorted(current_state["slaves"])
+ if module.params["subinterfaces"]:
+ new_subinterfaces, dummy = _create_subinterfaces(module, array6)
+ if new_subinterfaces != subinterfaces:
+ subinterfaces = new_subinterfaces
+ else:
+ subinterfaces = current_state["slaves"]
+ if module.params["subordinates"]:
+ new_subordinates, dummy = _create_subordinates(module, array6)
+ if new_subordinates != subinterfaces:
+ subinterfaces = new_subordinates
+ else:
+ subinterfaces = current_state["slaves"]
+ if module.params["enabled"] != current_state["enabled"]:
+ enabled = module.params["enabled"]
+ else:
+ enabled = current_state["enabled"]
+ if not current_state["gateway"]:
+ try:
+ if valid_ipv4(interface["address"]):
+ current_state["gateway"] = None
+ elif valid_ipv6(interface["address"]):
+ current_state["gateway"] = None
+ except AttributeError:
+ current_state["gateway"] = None
if not module.params["servicelist"]:
services = sorted(interface["services"])
else:
services = sorted(module.params["servicelist"])
if not module.params["address"]:
address = interface["address"]
+ netmask = interface["netmask"]
else:
- if module.params["gateway"]:
- if module.params["gateway"] and module.params["gateway"] not in IPNetwork(
- module.params["address"]
- ):
- module.fail_json(msg="Gateway and subnet are not compatible.")
- elif not module.params["gateway"] and interface["gateway"] not in [
- None,
- IPNetwork(module.params["address"]),
- ]:
+ if module.params["gateway"] and module.params["gateway"] not in [
+ "0.0.0.0",
+ "::",
+ ]:
+ if module.params["gateway"] not in IPNetwork(module.params["address"]):
module.fail_json(msg="Gateway and subnet are not compatible.")
+ if not module.params["gateway"] and interface["gateway"] not in [
+ None,
+ IPNetwork(module.params["address"]),
+ ]:
+ module.fail_json(msg="Gateway and subnet are not compatible.")
address = str(module.params["address"].split("/", 1)[0])
- ip_version = str(IPAddress(address).version)
+ if address in ["0.0.0.0", "::"]:
+ address = None
if not module.params["mtu"]:
mtu = interface["mtu"]
else:
@@ -268,36 +379,87 @@ def update_interface(module, array, interface):
else:
mtu = module.params["mtu"]
if module.params["address"]:
- netmask = str(IPNetwork(module.params["address"]).netmask)
+ if valid_ipv4(address):
+ netmask = str(IPNetwork(module.params["address"]).netmask)
+ else:
+ netmask = str(module.params["address"].split("/", 1)[1])
+ if netmask in ["0.0.0.0", "0"]:
+ netmask = None
else:
netmask = interface["netmask"]
if not module.params["gateway"]:
gateway = interface["gateway"]
- else:
+ elif module.params["gateway"] in ["0.0.0.0", "::"]:
+ gateway = None
+ elif valid_ipv4(address):
cidr = str(IPAddress(netmask).netmask_bits())
full_addr = address + "/" + cidr
if module.params["gateway"] not in IPNetwork(full_addr):
module.fail_json(msg="Gateway and subnet are not compatible.")
gateway = module.params["gateway"]
- if ip_version == "6":
- netmask = str(IPAddress(netmask).netmask_bits())
+ else:
+ gateway = module.params["gateway"]
+
new_state = {
+ "enabled": enabled,
"address": address,
"mtu": mtu,
"gateway": gateway,
"netmask": netmask,
- "services": services,
+ "services": sorted(services),
+ "slaves": sorted(subinterfaces),
}
+ if new_state["address"]:
+ if (
+ current_state["address"]
+ and IPAddress(new_state["address"]).version
+ != IPAddress(current_state["address"]).version
+ ):
+ if new_state["gateway"]:
+ if (
+ IPAddress(new_state["gateway"]).version
+ != IPAddress(new_state["address"]).version
+ ):
+ module.fail_json(
+ msg="Changing IP protocol requires gateway to change as well."
+ )
if new_state != current_state:
changed = True
if (
+ module.params["servicelist"]
+ and sorted(module.params["servicelist"]) != interface["services"]
+ ):
+ api_version = array._list_available_rest_versions()
+ if FC_ENABLE_API in api_version:
+ if HAS_PYPURECLIENT:
+ if not module.check_mode:
+ network = NetworkInterfacePatch(
+ services=module.params["servicelist"]
+ )
+ res = array6.patch_network_interfaces(
+ names=[module.params["name"]], network=network
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update interface service list {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+                    module.warn(
+ "Servicelist not updated as pypureclient module is required"
+ )
+ if (
"management" in interface["services"] or "app" in interface["services"]
- ) and address == "0.0.0.0/0":
+ ) and address in ["0.0.0.0/0", "::/0"]:
module.fail_json(
msg="Removing IP address from a management or app port is not supported"
)
if not module.check_mode:
try:
+ array.set_network_interface(
+ interface["name"], enabled=new_state["enabled"]
+ )
if new_state["gateway"] is not None:
array.set_network_interface(
interface["name"],
@@ -306,67 +468,200 @@ def update_interface(module, array, interface):
netmask=new_state["netmask"],
gateway=new_state["gateway"],
)
+ if (
+ current_state["slaves"] != new_state["slaves"]
+ and new_state["slaves"] != []
+ ):
+ array.set_network_interface(
+ interface["name"],
+ subinterfacelist=new_state["slaves"],
+ )
else:
+ if valid_ipv4(new_state["address"]):
+ empty_gateway = "0.0.0.0"
+ else:
+ empty_gateway = "::"
array.set_network_interface(
interface["name"],
address=new_state["address"],
mtu=new_state["mtu"],
netmask=new_state["netmask"],
+ gateway=empty_gateway,
)
+ if (
+ current_state["slaves"] != new_state["slaves"]
+ and new_state["slaves"] != []
+ ):
+ array.set_network_interface(
+ interface["name"],
+ subinterfacelist=new_state["slaves"],
+ )
except Exception:
module.fail_json(
msg="Failed to change settings for interface {0}.".format(
interface["name"]
)
)
- if not interface["enabled"] and module.params["state"] == "present":
- changed = True
- if not module.check_mode:
- try:
- array.enable_network_interface(interface["name"])
- except Exception:
+ module.exit_json(changed=changed)
+
+
+def create_interface(module, array):
+ changed = True
+ subnet_exists = bool(
+ array.get_subnets(names=[module.params["subnet"]]).status_code == 200
+ )
+ if module.params["subnet"] and not subnet_exists:
+ module.fail_json(
+ msg="Subnet {0} does not exist".format(module.params["subnet"])
+ )
+
+ if module.params["interface"] == "vif":
+ dummy, subinterfaces = _create_subinterfaces(module, array)
+ else:
+ dummy, subinterfaces = _create_subordinates(module, array)
+
+ if not module.check_mode:
+ if module.params["address"]:
+ address = str(module.params["address"].strip("[]").split("/", 1)[0])
+ if valid_ipv4(address):
+ netmask = str(IPNetwork(module.params["address"]).netmask)
+ else:
+ netmask = str(module.params["address"].strip("[]").split("/", 1)[1])
+ else:
+ netmask = None
+ address = None
+ if module.params["gateway"]:
+ gateway = str(module.params["gateway"].strip("[]"))
+ if gateway not in ["0.0.0.0", "::"]:
+ if address and gateway not in IPNetwork(module.params["address"]):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ else:
+ gateway = None
+ if module.params["interface"] == "vif":
+ res = array.post_network_interfaces(
+ names=[module.params["name"]],
+ network=NetworkInterfacePost(
+ eth=NetworkinterfacepostEth(subtype="vif")
+ ),
+ )
+ else:
+ res = array.post_network_interfaces(
+ names=[module.params["name"]],
+ network=NetworkInterfacePost(
+ eth=NetworkinterfacepostEth(
+ subtype="lacpbond", subinterfaces=subinterfaces
+ ),
+ ),
+ )
+
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create interface {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+
+ if module.params["subinterfaces"] and module.params["subnet"]:
+ res = array.patch_network_interfaces(
+ names=[module.params["name"]],
+ network=NetworkInterfacePatch(
+ enabled=module.params["enabled"],
+ eth=NetworkinterfacepatchEth(
+ subinterfaces=subinterfaces,
+ address=address,
+ gateway=gateway,
+ mtu=module.params["mtu"],
+ netmask=netmask,
+ subnet=ReferenceNoId(name=module.params["subnet"]),
+ ),
+ ),
+ )
+ if res.status_code != 200:
+ array.delete_network_interfaces(names=[module.params["name"]])
module.fail_json(
- msg="Failed to enable interface {0}.".format(interface["name"])
+ msg="Failed to create interface {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
)
- if interface["enabled"] and module.params["state"] == "absent":
- changed = True
- if not module.check_mode:
- try:
- array.disable_network_interface(interface["name"])
- except Exception:
+ elif module.params["subinterfaces"] and not module.params["subnet"]:
+ res = array.patch_network_interfaces(
+ names=[module.params["name"]],
+ network=NetworkInterfacePatch(
+ enabled=module.params["enabled"],
+ eth=NetworkinterfacepatchEth(
+ subinterfaces=subinterfaces,
+ address=address,
+ gateway=gateway,
+ mtu=module.params["mtu"],
+ netmask=netmask,
+ ),
+ ),
+ )
+ if res.status_code != 200:
+ array.delete_network_interfaces(names=[module.params["name"]])
module.fail_json(
- msg="Failed to disable interface {0}.".format(interface["name"])
+ msg="Failed to create interface {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
)
- if (
- module.params["servicelist"]
- and sorted(module.params["servicelist"]) != interface["services"]
- ):
- api_version = array._list_available_rest_versions()
- if FC_ENABLE_API in api_version:
- if HAS_PYPURECLIENT:
- array = get_array(module)
- changed = True
- if not module.check_mode:
- network = NetworkInterfacePatch(
- services=module.params["servicelist"]
+ elif not module.params["subinterfaces"] and module.params["subnet"]:
+ res = array.patch_network_interfaces(
+ names=[module.params["name"]],
+ network=NetworkInterfacePatch(
+ enabled=module.params["enabled"],
+ eth=NetworkinterfacepatchEth(
+ address=address,
+ gateway=gateway,
+ mtu=module.params["mtu"],
+ netmask=netmask,
+ subnet=ReferenceNoId(name=module.params["subnet"]),
+ ),
+ ),
+ )
+ if res.status_code != 200:
+ array.delete_network_interfaces(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to create interface {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
)
- res = array.patch_network_interfaces(
- names=[module.params["name"]], network=network
+ )
+ else:
+ res = array.patch_network_interfaces(
+ names=[module.params["name"]],
+ network=NetworkInterfacePatch(
+ enabled=module.params["enabled"],
+ eth=NetworkinterfacepatchEth(
+ address=address,
+ gateway=gateway,
+ mtu=module.params["mtu"],
+ netmask=netmask,
+ ),
+ ),
+ )
+ if res.status_code != 200:
+ array.delete_network_interfaces(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to create interface {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
)
- if res.status_code != 200:
- module.fail_json(
- msg="Failed to update interface service list {0}. Error: {1}".format(
- module.params["name"], res.errors[0].message
- )
- )
- else:
- module.warn_json(
- "Servicelist not update as pypureclient module is required"
)
module.exit_json(changed=changed)
+def delete_interface(module, array):
+ changed = True
+ if not module.check_mode:
+ res = array.delete_network_interfaces(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete network interface {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(
@@ -392,15 +687,34 @@ def main():
"system",
],
),
+ interface=dict(type="str", choices=["vif", "lacp"]),
+ subinterfaces=dict(type="list", elements="str"),
+ subordinates=dict(type="list", elements="str"),
+ subnet=dict(type="str"),
+ enabled=dict(type="bool", default=True),
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
+ if module.params["state"] == "present":
+ if module.params["interface"] == "lacp" and not module.params["subordinates"]:
+ module.fail_json(
+ msg="interface is lacp but all of the following are missing: subordinates"
+ )
+
+ creating_new_if = bool(module.params["interface"])
+
if not HAS_NETADDR:
module.fail_json(msg="netaddr module is required")
array = get_system(module)
+ if module.params["address"]:
+ module.params["address"] = module.params["address"].strip("[]")
+ if module.params["gateway"]:
+ module.params["gateway"] = module.params["gateway"].strip("[]")
+ if "/" not in module.params["address"]:
+ module.fail_json(msg="address must include valid netmask bits")
api_version = array._list_available_rest_versions()
if not _is_cbs(array):
if module.params["servicelist"] and "system" in module.params["servicelist"]:
@@ -424,11 +738,29 @@ def main():
else:
update_interface(module, array, interface)
else:
+ if (module.params["interface"] == "vif" and module.params["subordinates"]) or (
+ module.params["interface"] == "lacp" and module.params["subinterfaces"]
+ ):
+ module.fail_json(
+                msg="interface type not compatible with provided subinterfaces | subordinates"
+ )
interface = _get_interface(module, array)
- if not interface:
- module.fail_json(msg="Invalid network interface specified.")
- else:
+ array6 = get_array(module)
+ if not creating_new_if:
+ if not interface:
+ module.fail_json(msg="Invalid network interface specified.")
+ elif module.params["state"] == "present":
+ update_interface(module, array, interface)
+ else:
+ delete_interface(module, array6)
+ elif not interface and module.params["state"] == "present":
+ create_interface(module, array6)
+ elif interface and module.params["state"] == "absent":
+ delete_interface(module, array6)
+ elif module.params["state"] == "present":
update_interface(module, array, interface)
+ else:
+ module.exit_json(changed=False)
module.exit_json(changed=False)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py
index e2a5c8f18..348d1fed8 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ntp.py
@@ -40,6 +40,15 @@ options:
- If more than 4 servers are provided, only the first 4 unique
nameservers will be used.
- if no servers are given a default of I(0.pool.ntp.org) will be used.
+ ntp_key:
+ type: str
+ description:
+ - The NTP symmetric key to be used for NTP authentication.
+ - If it is an ASCII string, it cannot contain the character "#"
+ and cannot be longer than 20 characters.
+ - If it is a hex-encoded string, it cannot be longer than 64 characters.
+ - Setting this parameter is not idempotent.
+ version_added: "1.22.0"
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
@@ -68,14 +77,26 @@ RETURN = r"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
+ get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import Arrays
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
+KEY_API_VERSION = "2.26"
def _is_cbs(array, is_cbs=False):
"""Is the selected array a Cloud Block Store"""
- model = array.get(controllers=True)[0]["model"]
+ model = list(array.get_controllers().items)[0].model
is_cbs = bool("CBS" in model)
return is_cbs
@@ -94,7 +115,7 @@ def delete_ntp(module, array):
changed = True
if not module.check_mode:
try:
- array.set(ntpserver=[])
+ array.patch_arrays(array=Arrays(ntp_servers=[]))
except Exception:
module.fail_json(msg="Deletion of NTP servers failed")
else:
@@ -109,41 +130,85 @@ def create_ntp(module, array):
if not module.params["ntp_servers"]:
module.params["ntp_servers"] = ["0.pool.ntp.org"]
try:
- array.set(ntpserver=module.params["ntp_servers"][0:4])
+ array.patch_arrays(
+ array=Arrays(ntp_servers=module.params["ntp_servers"][0:4])
+ )
except Exception:
module.fail_json(msg="Update of NTP servers failed")
module.exit_json(changed=changed)
+def update_ntp_key(module, array):
+ """Update NTP Symmetric Key"""
+ if module.params["ntp_key"] == "" and not getattr(
+ list(array.get_arrays().items)[0], "ntp_symmetric_key", None
+ ):
+ changed = False
+ else:
+ try:
+ int(module.params["ntp_key"], 16)
+ if len(module.params["ntp_key"]) > 64:
+ module.fail_json(msg="HEX string cannot be longer than 64 characters")
+ except ValueError:
+ if len(module.params["ntp_key"]) > 20:
+ module.fail_json(msg="ASCII string cannot be longer than 20 characters")
+ if "#" in module.params["ntp_key"]:
+ module.fail_json(msg="ASCII string cannot contain # character")
+ if not all(ord(c) < 128 for c in module.params["ntp_key"]):
+ module.fail_json(msg="NTP key is non-ASCII")
+ changed = True
+ res = array.patch_arrays(
+ array=Arrays(ntp_symmetric_key=module.params["ntp_key"])
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update NTP Symmetric Key. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+ if len(module.params["ntp_key"]) > 20:
+        # Must be HEX string if greater than 20 characters
+ try:
+ int(module.params["ntp_key"], 16)
+ except ValueError:
+ module.fail_json(msg="NTP key is not HEX")
+
+
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(
dict(
ntp_servers=dict(type="list", elements="str"),
+ ntp_key=dict(type="str", no_log=True),
state=dict(type="str", default="present", choices=["absent", "present"]),
)
)
- required_if = [["state", "present", ["ntp_servers"]]]
-
- module = AnsibleModule(
- argument_spec, required_if=required_if, supports_check_mode=True
- )
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
+ array = get_array(module)
+ api_version = array.get_rest_version()
if _is_cbs(array):
module.warn("NTP settings are not necessary for a CBS array - ignoring...")
module.exit_json(changed=False)
if module.params["state"] == "absent":
delete_ntp(module, array)
- else:
+ elif module.params["ntp_servers"]:
module.params["ntp_servers"] = remove(module.params["ntp_servers"])
- if sorted(array.get(ntpserver=True)["ntpserver"]) != sorted(
+ if sorted(list(array.get_arrays().items)[0].ntp_servers) != sorted(
module.params["ntp_servers"][0:4]
):
create_ntp(module, array)
-
+ if module.params["ntp_key"] or module.params["ntp_key"] == "":
+ if LooseVersion(KEY_API_VERSION) > LooseVersion(api_version):
+ module.fail_json(msg="REST API does not support setting NTP Symmetric Key")
+ else:
+ update_ntp_key(module, array)
module.exit_json(changed=False)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py
index 1265911fe..5b911a24d 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_offload.py
@@ -40,6 +40,7 @@ options:
protocol:
description:
- Define which protocol the offload engine uses
+    - NFS is not a supported protocol from Purity//FA 6.6.0 and higher
default: nfs
choices: [ nfs, s3, azure, gcp ]
type: str
@@ -91,7 +92,14 @@ options:
type: str
choices: ['retention-based', 'aws-standard-class']
default: retention-based
-
+ profile:
+ description:
+ - The Offload target profile that will be selected for this target.
+ - This option allows more granular configuration for the target on top
+ of the protocol parameter
+ type: str
+ version_added: '1.21.0'
+ choices: ['azure', 'gcp', 'nfs', 'nfs-flashblade', 's3-aws', 's3-flashblade', 's3-scality-ring', 's3-wasabi-pay-as-you-go', 's3-wasabi-rcs', 's3-other']
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
@@ -143,137 +151,129 @@ RETURN = r"""
HAS_PURESTORAGE = True
try:
- from pypureclient import flasharray
+ from pypureclient.flasharray import (
+ OffloadAzure,
+ OffloadGoogleCloud,
+ OffloadNfs,
+ OffloadPost,
+ OffloadS3,
+ )
except ImportError:
HAS_PURESTORAGE = False
-HAS_PACKAGING = True
-try:
- from packaging import version
-except ImportError:
- HAS_PACKAGING = False
-
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
get_array,
- get_system,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
-MIN_REQUIRED_API_VERSION = "1.16"
REGEX_TARGET_NAME = re.compile(r"^[a-zA-Z0-9\-]*$")
-P53_API_VERSION = "1.17"
-GCP_API_VERSION = "2.3"
-MULTIOFFLOAD_API_VERSION = "2.11"
-MULTIOFFLOAD_LIMIT = 5
+MULTIOFFLOAD_LIMIT = 1
+PROFILE_API_VERSION = "2.25"
+NO_SNAP2NFS_VERSION = "2.27"
def get_target(module, array):
"""Return target or None"""
- try:
- return array.get_offload(module.params["name"])
- except Exception:
+ res = array.get_offloads(names=[module.params["name"]])
+ if res.status_code == 200:
+ return list(res.items)[0]
+ else:
return None
def create_offload(module, array):
"""Create offload target"""
changed = True
- api_version = array._list_available_rest_versions()
+ api_version = array.get_rest_version()
# First check if the offload network inteface is there and enabled
- try:
- if not array.get_network_interface("@offload.data")["enabled"]:
- module.fail_json(
- msg="Offload Network interface not enabled. Please resolve."
- )
- except Exception:
+ res = array.get_network_interfaces(names=["@offload.data0"])
+ if res.status != 200:
+ module.fail_json(msg="Offload Network interface doesn't exist. Please resolve.")
+ if not list(res.items)[0].enabled:
module.fail_json(
msg="Offload Network interface not correctly configured. Please resolve."
)
if not module.check_mode:
- if module.params["protocol"] == "nfs":
- try:
- array.connect_nfs_offload(
- module.params["name"],
- mount_point=module.params["share"],
- address=module.params["address"],
- mount_options=module.params["options"],
+ if module.params["protocol"] == "gcp":
+ if PROFILE_API_VERSION in api_version and module.params["profile"]:
+ bucket = OffloadGoogleCloud(
+ access_key_id=module.params["access_key"],
+ bucket=module.params["bucket"],
+ secret_access_key=module.params["secret"],
+ profile=module.params["profile"],
)
- except Exception:
- module.fail_json(
- msg="Failed to create NFS offload {0}. "
- "Please perform diagnostic checks.".format(module.params["name"])
+ else:
+ bucket = OffloadGoogleCloud(
+ access_key_id=module.params["access_key"],
+ bucket=module.params["bucket"],
+ secret_access_key=module.params["secret"],
+ )
+ offload = OffloadPost(google_cloud=bucket)
+ if module.params["protocol"] == "azure" and module.params["profile"]:
+ if PROFILE_API_VERSION in api_version:
+ bucket = OffloadAzure(
+ container_name=module.params["container"],
+ secret_access_key=module.params["secret"],
+ account_name=module.params[".bucket"],
+ profile=module.params["profile"],
)
- if module.params["protocol"] == "s3":
- if P53_API_VERSION in api_version:
- try:
- array.connect_s3_offload(
- module.params["name"],
- access_key_id=module.params["access_key"],
- secret_access_key=module.params["secret"],
- bucket=module.params["bucket"],
- placement_strategy=module.params["placement"],
- initialize=module.params["initialize"],
- )
- except Exception:
- module.fail_json(
- msg="Failed to create S3 offload {0}. "
- "Please perform diagnostic checks.".format(
- module.params["name"]
- )
- )
else:
- try:
- array.connect_s3_offload(
- module.params["name"],
- access_key_id=module.params["access_key"],
- secret_access_key=module.params["secret"],
- bucket=module.params["bucket"],
- initialize=module.params["initialize"],
- )
- except Exception:
- module.fail_json(
- msg="Failed to create S3 offload {0}. "
- "Please perform diagnostic checks.".format(
- module.params["name"]
- )
- )
- if module.params["protocol"] == "azure" and P53_API_VERSION in api_version:
- try:
- array.connect_azure_offload(
- module.params["name"],
+ bucket = OffloadAzure(
container_name=module.params["container"],
secret_access_key=module.params["secret"],
account_name=module.params[".bucket"],
- initialize=module.params["initialize"],
)
- except Exception:
- module.fail_json(
- msg="Failed to create Azure offload {0}. "
- "Please perform diagnostic checks.".format(module.params["name"])
+ offload = OffloadPost(azure=bucket)
+ if module.params["protocol"] == "s3" and module.params["profile"]:
+ if PROFILE_API_VERSION in api_version:
+ bucket = OffloadS3(
+ access_key_id=module.params["access_key"],
+ bucket=module.params["bucket"],
+ secret_access_key=module.params["secret"],
+ profile=module.params["profile"],
)
- if module.params["protocol"] == "gcp" and GCP_API_VERSION in api_version:
- arrayv6 = get_array(module)
- bucket = flasharray.OffloadGoogleCloud(
- access_key_id=module.params["access_key"],
- bucket=module.params["bucket"],
- secret_access_key=module.params["secret"],
- )
- offload = flasharray.OffloadPost(google_cloud=bucket)
- res = arrayv6.post_offloads(
- offload=offload,
- initialize=module.params["initialize"],
- names=[module.params["name"]],
- )
- if res.status_code != 200:
- module.fail_json(
- msg="Failed to create GCP offload {0}. Error: {1}"
- "Please perform diagnostic checks.".format(
- module.params["name"], res.errors[0].message
- )
+ else:
+ bucket = OffloadS3(
+ access_key_id=module.params["access_key"],
+ bucket=module.params["bucket"],
+ secret_access_key=module.params["secret"],
)
+ offload = OffloadPost(s3=bucket)
+ if module.params["protocol"] == "nfs" and module.params["profile"]:
+ if PROFILE_API_VERSION in api_version:
+ bucket = OffloadNfs(
+ mount_point=module.params["share"],
+ address=module.params["address"],
+ mount_options=module.params["options"],
+ profile=module.params["profile"],
+ )
+ else:
+ bucket = OffloadNfs(
+ mount_point=module.params["share"],
+ address=module.params["address"],
+ mount_options=module.params["options"],
+ )
+ offload = OffloadPost(nfs=bucket)
+ res = array.post_offloads(
+ offload=offload,
+ initialize=module.params["initialize"],
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create {0} offload {1}. Error: {2}"
+ "Please perform diagnostic checks.".format(
+ module.params["protocol"].upper(),
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
module.exit_json(changed=changed)
@@ -286,33 +286,14 @@ def update_offload(module, array):
def delete_offload(module, array):
"""Delete offload target"""
changed = True
- api_version = array._list_available_rest_versions()
if not module.check_mode:
- if module.params["protocol"] == "nfs":
- try:
- array.disconnect_nfs_offload(module.params["name"])
- except Exception:
- module.fail_json(
- msg="Failed to delete NFS offload {0}.".format(
- module.params["name"]
- )
- )
- if module.params["protocol"] == "s3":
- try:
- array.disconnect_s3_offload(module.params["name"])
- except Exception:
- module.fail_json(
- msg="Failed to delete S3 offload {0}.".format(module.params["name"])
- )
- if module.params["protocol"] == "azure" and P53_API_VERSION in api_version:
- try:
- array.disconnect_azure_offload(module.params["name"])
- except Exception:
- module.fail_json(
- msg="Failed to delete Azure offload {0}.".format(
- module.params["name"]
- )
+ res = array.delete_offloads(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete offload {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
)
+ )
module.exit_json(changed=changed)
@@ -329,6 +310,21 @@ def main():
default="retention-based",
choices=["retention-based", "aws-standard-class"],
),
+ profile=dict(
+ type="str",
+ choices=[
+ "azure",
+ "gcp",
+ "nfs",
+ "nfs-flashblade",
+ "s3-aws",
+ "s3-flashblade",
+ "s3-scality-ring",
+ "s3-wasabi-pay-as-you-go",
+ "s3-wasabi-rcs",
+ "s3-other",
+ ],
+ ),
name=dict(type="str", required=True),
initialize=dict(default=True, type="bool"),
access_key=dict(type="str", no_log=False),
@@ -356,19 +352,47 @@ def main():
argument_spec, required_if=required_if, supports_check_mode=True
)
- if not HAS_PACKAGING:
- module.fail_json(msg="packagingsdk is required for this module")
- if not HAS_PURESTORAGE and module.params["protocol"] == "gcp":
+ if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ if (
+ LooseVersion(NO_SNAP2NFS_VERSION) <= LooseVersion(api_version)
+ and module.params["protocol"] == "nfs"
+ ):
module.fail_json(
- msg="FlashArray REST version not supported. "
- "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ msg="NFS offload target is not supported from Purity//FA 6.6.0 and higher"
+ )
+ if (
+ (
+ module.params["protocol"].lower() == "azure"
+ and module.params["profile"] != "azure"
+ )
+ or (
+ module.params["protocol"].lower() == "gcp"
+ and module.params["profile"] != "gcp"
)
+ or (
+ module.params["protocol"].lower() == "nfs"
+ and module.params["profile"] not in ["nfs", "nfs-flashblade"]
+ )
+ or (
+ module.params["protocol"].lower() == "s3"
+ and module.params["profile"]
+ not in [
+ "s3-aws",
+ "s3-flashblade",
+ "s3-scality-ring",
+ "s3-wasabi-pay-as-you-go",
+ "s3-wasabi-rcs",
+ "s3-other",
+ ]
+ )
+ ):
+ module.warn("Specified profile not valid, ignoring...")
+ module.params["profile"] = None
if (
not re.match(r"^[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9]$", module.params["name"])
@@ -391,45 +415,29 @@ def main():
"and begin and end with a letter or number."
)
- apps = array.list_apps()
- app_version = 0
- all_good = False
- for app in range(0, len(apps)):
- if apps[app]["name"] == "offload":
- if (
- apps[app]["enabled"]
- and apps[app]["status"] == "healthy"
- and version.parse(apps[app]["version"]) >= version.parse("5.2.0")
- ):
- all_good = True
- app_version = apps[app]["version"]
- break
-
- if not all_good:
+ res = array.get_apps(names=["offload"])
+ if res.status_code != 200:
module.fail_json(
msg="Correct Offload app not installed or incorrectly configured"
)
else:
- if version.parse(array.get()["version"]) != version.parse(app_version):
+ app_state = list(res.items)[0]
+ if LooseVersion(app_state.version) != LooseVersion(array.get_rest_version()):
module.fail_json(
msg="Offload app version must match Purity version. Please upgrade."
)
target = get_target(module, array)
if module.params["state"] == "present" and not target:
- offloads = array.list_offload()
- target_count = len(offloads)
- if MIN_REQUIRED_API_VERSION not in api_version:
- MULTIOFFLOAD_LIMIT = 1
- if target_count >= MULTIOFFLOAD_LIMIT:
+ offloads = list(array.get_offloads().items)
+ if len(offloads) >= MULTIOFFLOAD_LIMIT:
module.fail_json(
msg="Cannot add offload target {0}. Offload Target Limit of {1} would be exceeded.".format(
module.params["name"], MULTIOFFLOAD_LIMIT
)
)
- # TODO: (SD) Remove this check when multi-protocol offloads are supported
- if offloads[0].protocol != module.params["protocol"]:
- module.fail_json(msg="Currently all offloads must be of the same type.")
+ if offloads[0].protocol != module.params["protocol"]:
+ module.fail_json(msg="Currently all offloads must be of the same type.")
create_offload(module, array)
elif module.params["state"] == "present" and target:
update_offload(module, array)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
index 3fa51ebbb..3344c0895 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pg.py
@@ -374,68 +374,65 @@ def make_pgroup(module, array):
module.fail_json(
msg="Creation of pgroup {0} failed.".format(module.params["name"])
)
+ if not module.check_mode:
+ try:
+ if module.params["target"]:
+ array.set_pgroup(
+ module.params["name"],
+ replicate_enabled=module.params["enabled"],
+ )
+ else:
+ array.set_pgroup(
+ module.params["name"], snap_enabled=module.params["enabled"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Enabling pgroup {0} failed.".format(module.params["name"])
+ )
+ if module.params["volume"]:
try:
- if module.params["target"]:
- array.set_pgroup(
- module.params["name"],
- replicate_enabled=module.params["enabled"],
- )
- else:
- array.set_pgroup(
- module.params["name"], snap_enabled=module.params["enabled"]
- )
+ array.set_pgroup(module.params["name"], vollist=module.params["volume"])
except Exception:
module.fail_json(
- msg="Enabling pgroup {0} failed.".format(module.params["name"])
- )
- if module.params["volume"]:
- try:
- array.set_pgroup(
- module.params["name"], vollist=module.params["volume"]
- )
- except Exception:
- module.fail_json(
- msg="Adding volumes to pgroup {0} failed.".format(
- module.params["name"]
- )
- )
- if module.params["host"]:
- try:
- array.set_pgroup(
- module.params["name"], hostlist=module.params["host"]
- )
- except Exception:
- module.fail_json(
- msg="Adding hosts to pgroup {0} failed.".format(
- module.params["name"]
- )
- )
- if module.params["hostgroup"]:
- try:
- array.set_pgroup(
- module.params["name"], hgrouplist=module.params["hostgroup"]
+ msg="Adding volumes to pgroup {0} failed.".format(
+ module.params["name"]
)
- except Exception:
- module.fail_json(
- msg="Adding hostgroups to pgroup {0} failed.".format(
- module.params["name"]
- )
+ )
+ if module.params["host"]:
+ try:
+ array.set_pgroup(module.params["name"], hostlist=module.params["host"])
+ except Exception:
+ module.fail_json(
+ msg="Adding hosts to pgroup {0} failed.".format(
+ module.params["name"]
)
- if module.params["safe_mode"]:
- arrayv6 = get_array(module)
- try:
- arrayv6.patch_protection_groups(
- names=[module.params["name"]],
- protection_group=flasharray.ProtectionGroup(
- retention_lock="ratcheted"
- ),
+ )
+ if module.params["hostgroup"]:
+ try:
+ array.set_pgroup(
+ module.params["name"], hgrouplist=module.params["hostgroup"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Adding hostgroups to pgroup {0} failed.".format(
+ module.params["name"]
)
- except Exception:
- module.fail_json(
- msg="Failed to set SafeMode on pgroup {0}".format(
- module.params["name"]
- )
+ )
+ if module.params["safe_mode"]:
+ arrayv6 = get_array(module)
+ try:
+ arrayv6.patch_protection_groups(
+ names=[module.params["name"]],
+ protection_group=flasharray.ProtectionGroup(
+ retention_lock="ratcheted"
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to set SafeMode on pgroup {0}".format(
+ module.params["name"]
)
+ )
module.exit_json(changed=changed)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py
index dc0a488d4..c3ebcb09f 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsched.py
@@ -48,8 +48,9 @@ options:
default: true
replicate_at:
description:
- - Specifies the preferred time as HH:MM:SS, using 24-hour clock, at which to generate snapshots.
- type: int
+ - Provide a time in 12-hour AM/PM format, e.g. 11AM
+ - Only valid if I(replicate_frequency) is an exact multiple of 86400, i.e. 1 day.
+ type: str
blackout_start:
description:
- Specifies the time at which to suspend replication.
@@ -68,9 +69,9 @@ options:
type: int
snap_at:
description:
- - Specifies the preferred time as HH:MM:SS, using 24-hour clock, at which to generate snapshots.
+ - Provide a time in 12-hour AM/PM format, e.g. 11AM
- Only valid if I(snap_frequency) is an exact multiple of 86400, ie 1 day.
- type: int
+ type: str
snap_frequency:
description:
- Specifies the snapshot frequency in seconds.
@@ -120,7 +121,7 @@ EXAMPLES = r"""
schedule: snapshot
enabled: true
snap_frequency: 86400
- snap_at: 15:30:00
+ snap_at: 3PM
per_day: 5
all_for: 5
fa_url: 10.10.10.2
@@ -132,7 +133,7 @@ EXAMPLES = r"""
schedule: replication
enabled: true
replicate_frequency: 86400
- replicate_at: 15:30:00
+ replicate_at: 3PM
target_per_day: 5
target_all_for: 5
blackout_start: 2AM
@@ -217,7 +218,7 @@ def _convert_to_minutes(hour):
return (int(hour[:-2]) + 12) * 3600
-def update_schedule(module, array):
+def update_schedule(module, array, snap_time, repl_time):
"""Update Protection Group Schedule"""
changed = False
try:
@@ -260,10 +261,15 @@ def update_schedule(module, array):
else:
snap_frequency = module.params["snap_frequency"]
+ if module.params["enabled"] is None:
+ snap_enabled = current_snap["snap_enabled"]
+ else:
+ snap_enabled = module.params["enabled"]
+
if not module.params["snap_at"]:
snap_at = current_snap["snap_at"]
else:
- snap_at = module.params["snap_at"]
+ snap_at = _convert_to_minutes(module.params["snap_at"].upper())
if not module.params["days"]:
if isinstance(module.params["days"], int):
@@ -294,11 +300,12 @@ def update_schedule(module, array):
new_snap = {
"days": days,
"snap_frequency": snap_frequency,
- "snap_enabled": module.params["enabled"],
+ "snap_enabled": snap_enabled,
"snap_at": snap_at,
"per_day": per_day,
"all_for": all_for,
}
+ module.warn("current {0}; new: {1}".format(current_snap, new_snap))
if current_snap != new_snap:
changed = True
if not module.check_mode:
@@ -306,11 +313,17 @@ def update_schedule(module, array):
array.set_pgroup(
module.params["name"], snap_enabled=module.params["enabled"]
)
- array.set_pgroup(
- module.params["name"],
- snap_frequency=snap_frequency,
- snap_at=snap_at,
- )
+ if snap_time:
+ array.set_pgroup(
+ module.params["name"],
+ snap_frequency=snap_frequency,
+ snap_at=snap_at,
+ )
+ else:
+ array.set_pgroup(
+ module.params["name"],
+ snap_frequency=snap_frequency,
+ )
array.set_pgroup(
module.params["name"],
days=days,
@@ -343,10 +356,15 @@ def update_schedule(module, array):
else:
replicate_frequency = module.params["replicate_frequency"]
+ if module.params["enabled"] is None:
+ replicate_enabled = current_repl["replicate_enabled"]
+ else:
+ replicate_enabled = module.params["enabled"]
+
if not module.params["replicate_at"]:
replicate_at = current_repl["replicate_at"]
else:
- replicate_at = module.params["replicate_at"]
+ replicate_at = _convert_to_minutes(module.params["replicate_at"].upper())
if not module.params["target_days"]:
if isinstance(module.params["target_days"], int):
@@ -380,15 +398,17 @@ def update_schedule(module, array):
if not module.params["blackout_end"]:
blackout_end = current_repl["blackout_start"]
else:
- blackout_end = _convert_to_minutes(module.params["blackout_end"])
+ blackout_end = _convert_to_minutes(module.params["blackout_end"].upper())
if not module.params["blackout_start"]:
blackout_start = current_repl["blackout_start"]
else:
- blackout_start = _convert_to_minutes(module.params["blackout_start"])
+ blackout_start = _convert_to_minutes(
+ module.params["blackout_start"].upper()
+ )
new_repl = {
"replicate_frequency": replicate_frequency,
- "replicate_enabled": module.params["enabled"],
+ "replicate_enabled": replicate_enabled,
"target_days": target_days,
"replicate_at": replicate_at,
"target_per_day": target_per_day,
@@ -405,11 +425,17 @@ def update_schedule(module, array):
module.params["name"],
replicate_enabled=module.params["enabled"],
)
- array.set_pgroup(
- module.params["name"],
- replicate_frequency=replicate_frequency,
- replicate_at=replicate_at,
- )
+ if repl_time:
+ array.set_pgroup(
+ module.params["name"],
+ replicate_frequency=replicate_frequency,
+ replicate_at=replicate_at,
+ )
+ else:
+ array.set_pgroup(
+ module.params["name"],
+ replicate_frequency=replicate_frequency,
+ )
if blackout_start == 0:
array.set_pgroup(module.params["name"], replicate_blackout=None)
else:
@@ -482,8 +508,8 @@ def main():
),
blackout_start=dict(type="str"),
blackout_end=dict(type="str"),
- snap_at=dict(type="int"),
- replicate_at=dict(type="int"),
+ snap_at=dict(type="str"),
+ replicate_at=dict(type="str"),
replicate_frequency=dict(type="int"),
snap_frequency=dict(type="int"),
all_for=dict(type="int"),
@@ -506,13 +532,22 @@ def main():
array = get_system(module)
pgroup = get_pgroup(module, array)
+ repl_time = False
+ if module.params["replicate_at"] and module.params["replicate_frequency"]:
+ if not module.params["replicate_frequency"] % 86400 == 0:
+ module.fail_json(
+ msg="replicate_at not valid unless replicate frequency is measured in days, ie. a multiple of 86400"
+ )
+ repl_time = True
+ snap_time = False
if module.params["snap_at"] and module.params["snap_frequency"]:
if not module.params["snap_frequency"] % 86400 == 0:
module.fail_json(
msg="snap_at not valid unless snapshot frequency is measured in days, ie. a multiple of 86400"
)
+ snap_time = True
if pgroup and state == "present":
- update_schedule(module, array)
+ update_schedule(module, array, snap_time, repl_time)
elif pgroup and state == "absent":
delete_schedule(module, array)
elif pgroup is None:
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py
index 822b0491f..4f3f6f16c 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pgsnap.py
@@ -42,7 +42,7 @@ options:
Copy (added in 2.7) will create a full read/write clone of the
snapshot.
type: str
- choices: [ absent, present, copy ]
+ choices: [ absent, present, copy, rename ]
default: present
eradicate:
description:
@@ -64,6 +64,7 @@ options:
description:
- Volume to restore a specified volume to.
- If not supplied this will default to the volume defined in I(restore)
+ - Name of new snapshot suffix if renaming a snapshot
type: str
offload:
description:
@@ -85,6 +86,27 @@ options:
- Force immeadiate snapshot to remote targets
type: bool
default: false
+ throttle:
+ description:
+ - If set to true, allows snapshot to fail if array health is not optimal.
+ type: bool
+ default: false
+ version_added: '1.21.0'
+ with_default_protection:
+ description:
+ - Whether to add the default container protection groups to
+ those specified in I(add_to_pgs) as the initial protection
+ of a volume created from a snapshot.
+ type: bool
+ default: true
+ version_added: '1.27.0'
+ add_to_pgs:
+ description:
+ - A volume created from a snapshot will be added to the specified
+ protection groups.
+ type: list
+ elements: str
+ version_added: '1.27.0'
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
@@ -114,6 +136,7 @@ EXAMPLES = r"""
restore: data
target: data2
overwrite: true
+ with_default_protection: false
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: copy
@@ -127,7 +150,7 @@ EXAMPLES = r"""
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: copy
-- name: Restore AC pod protection group snapshot pod1::pgname.snap.data to pdo1::data2
+- name: Restore AC pod protection group snapshot pod1::pgname.snap.data to pod1::data2
purestorage.flasharray.purefa_pgsnap:
name: pod1::pgname
suffix: snap
@@ -156,28 +179,53 @@ EXAMPLES = r"""
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: absent
+
+- name: Rename protection group snapshot foo.fred to foo.dave
+ purestorage.flasharray.purefa_pgsnap:
+ name: foo
+ suffix: fred
+ target: dave
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+ state: rename
"""
RETURN = r"""
"""
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import (
+ ProtectionGroupSnapshot,
+ ProtectionGroupSnapshotPatch,
+ VolumePost,
+ Reference,
+ FixedReference,
+ DestroyedPatchPost,
+ )
+except ImportError:
+ HAS_PURESTORAGE = False
+
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
+ get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
from datetime import datetime
-OFFLOAD_API = "1.16"
-POD_SNAPSHOT = "2.4"
+THROTTLE_API = "2.25"
+DEFAULT_API = "2.16"
def _check_offload(module, array):
try:
- offload = array.get_offload(module.params["offload"])
- if offload["status"] == "connected":
+ offload = list(array.get_offloads(names=[module.params["offload"]]).items)[0]
+ if offload.status == "connected":
return True
return False
except Exception:
@@ -187,7 +235,7 @@ def _check_offload(module, array):
def get_pgroup(module, array):
"""Return Protection Group or None"""
try:
- return array.get_pgroup(module.params["name"])
+ return list(array.get_protection_groups(names=[module.params["name"]]).items)[0]
except Exception:
return None
@@ -195,14 +243,71 @@ def get_pgroup(module, array):
def get_pgroupvolume(module, array):
"""Return Protection Group Volume or None"""
try:
- pgroup = array.get_pgroup(module.params["name"])
+ volumes = []
+ pgroup = list(array.get_protection_groups(names=[module.params["name"]]).items)[
+ 0
+ ]
+ if pgroup.host_count > 0: # We have a host PG
+ host_dict = list(
+ array.get_protection_groups_hosts(
+ group_names=[module.params["name"]]
+ ).items
+ )
+ for host in range(0, len(host_dict)):
+ hostvols = list(
+ array.get_connections(
+ host_names=[host_dict[host].member["name"]]
+ ).items
+ )
+ for hvol in range(0, len(hostvols)):
+ volumes.append(hostvols[hvol].volume["name"])
+ elif pgroup.host_group_count > 0: # We have a hostgroup PG
+ hgroup_dict = list(
+ array.get_protection_groups_host_groups(
+ group_names=[module.params["name"]]
+ ).items
+ )
+ hgroups = []
+ # First check if there are any volumes in the host groups
+ for hgentry in range(0, len(hgroup_dict)):
+ hgvols = list(
+ array.get_connections(
+ host_group_names=[hgroup_dict[hgentry].member["name"]]
+ ).items
+ )
+ for hgvol in range(0, len(hgvols)):
+ volumes.append(hgvols[hgvol].volume["name"])
+ # Second check for host specific volumes
+ for hgroup in range(0, len(hgroup_dict)):
+ hg_hosts = list(
+ array.get_host_groups_hosts(
+ group_names=[hgroup_dict[hgroup].member["name"]]
+ ).items
+ )
+ for hg_host in range(0, len(hg_hosts)):
+ host_vols = list(
+ array.get_connections(
+ host_names=[hg_hosts[hg_host].member["name"]]
+ ).items
+ )
+ for host_vol in range(0, len(host_vols)):
+ volumes.append(host_vols[host_vol].volume["name"])
+ else: # We have a volume PG
+ vol_dict = list(
+ array.get_protection_groups_volumes(
+ group_names=[module.params["name"]]
+ ).items
+ )
+ for entry in range(0, len(vol_dict)):
+ volumes.append(vol_dict[entry].member["name"])
+ volumes = list(set(volumes))
if "::" in module.params["name"]:
restore_volume = (
module.params["name"].split("::")[0] + "::" + module.params["restore"]
)
else:
restore_volume = module.params["restore"]
- for volume in pgroup["volumes"]:
+ for volume in volumes:
if volume == restore_volume:
return volume
except Exception:
@@ -210,7 +315,7 @@ def get_pgroupvolume(module, array):
def get_rpgsnapshot(module, array):
- """Return iReplicated Snapshot or None"""
+ """Return Replicated Snapshot or None"""
try:
snapname = (
module.params["name"]
@@ -219,83 +324,103 @@ def get_rpgsnapshot(module, array):
+ "."
+ module.params["restore"]
)
- for snap in array.list_volumes(snap=True):
- if snap["name"] == snapname:
- return snapname
- except Exception:
- return None
-
-
-def get_offload_snapshot(module, array):
- """Return Snapshot (active or deleted) or None"""
- try:
- snapname = module.params["name"] + "." + module.params["suffix"]
- for snap in array.get_pgroup(
- module.params["name"], snap=True, on=module.params["offload"]
- ):
- if snap["name"] == snapname:
- return snapname
- except Exception:
+ array.get_volume_snapshots(names=[snapname])
+ return snapname
+ except AttributeError:
return None
def get_pgsnapshot(module, array):
"""Return Snapshot (active or deleted) or None"""
- try:
- snapname = module.params["name"] + "." + module.params["suffix"]
- for snap in array.get_pgroup(module.params["name"], pending=True, snap=True):
- if snap["name"] == snapname:
- return snapname
- except Exception:
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ res = array.get_protection_group_snapshots(names=[snapname])
+ if res.status_code == 200:
+ return list(res.items)[0]
+ else:
return None
def create_pgsnapshot(module, array):
"""Create Protection Group Snapshot"""
+ api_version = array.get_rest_version()
changed = True
if not module.check_mode:
- try:
+ suffix = ProtectionGroupSnapshot(suffix=module.params["suffix"])
+ if LooseVersion(THROTTLE_API) >= LooseVersion(api_version):
if (
- module.params["now"]
- and array.get_pgroup(module.params["name"])["targets"] is not None
+ list(array.get_protection_groups(names=[module.params["name"]]).items)[
+ 0
+ ].target_count
+ > 0
):
- array.create_pgroup_snapshot(
- source=module.params["name"],
- suffix=module.params["suffix"],
- snap=True,
+ if module.params["now"]:
+ res = array.post_protection_group_snapshots(
+ source_names=[module.params["name"]],
+ apply_retention=module.params["apply_retention"],
+ replicate_now=True,
+ protection_group_snapshot=suffix,
+ )
+ else:
+ res = array.post_protection_group_snapshots(
+ source_names=[module.params["name"]],
+ apply_retention=module.params["apply_retention"],
+ protection_group_snapshot=suffix,
+ replicate=module.params["remote"],
+ )
+ else:
+ res = array.post_protection_group_snapshots(
+ source_names=[module.params["name"]],
apply_retention=module.params["apply_retention"],
- replicate_now=module.params["remote"],
+ protection_group_snapshot=suffix,
)
+ else:
+ if (
+ list(array.get_protection_groups(names=[module.params["name"]]).items)[
+ 0
+ ].target_count
+ > 0
+ ):
+ if module.params["now"]:
+ res = array.post_protection_group_snapshots(
+ source_names=[module.params["name"]],
+ apply_retention=module.params["apply_retention"],
+ replicate_now=True,
+ allow_throttle=module.params["throttle"],
+ protection_group_snapshot=suffix,
+ )
+ else:
+ res = array.post_protection_group_snapshots(
+ source_names=[module.params["name"]],
+ apply_retention=module.params["apply_retention"],
+ allow_throttle=module.params["throttle"],
+ protection_group_snapshot=suffix,
+ replicate=module.params["remote"],
+ )
else:
- array.create_pgroup_snapshot(
- source=module.params["name"],
- suffix=module.params["suffix"],
- snap=True,
+ res = array.post_protection_group_snapshots(
+ source_names=[module.params["name"]],
apply_retention=module.params["apply_retention"],
+ allow_throttle=module.params["throttle"],
+ protection_group_snapshot=suffix,
)
- except Exception:
+
+ if res.status_code != 200:
module.fail_json(
- msg="Snapshot of pgroup {0} failed.".format(module.params["name"])
+ msg="Snapshot of pgroup {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
)
module.exit_json(changed=changed)
def restore_pgsnapvolume(module, array):
"""Restore a Protection Group Snapshot Volume"""
- api_version = array._list_available_rest_versions()
changed = True
if module.params["suffix"] == "latest":
- all_snaps = array.get_pgroup(
- module.params["name"], snap=True, transfer=True
- ).reverse()
- for snap in all_snaps:
- if not snap["completed"]:
- latest_snap = snap["name"]
- break
- try:
- module.params["suffix"] = latest_snap.split(".")[1]
- except NameError:
- module.fail_json(msg="There is no completed snapshot available.")
+ latest_snapshot = list(
+ array.get_protection_group_snapshots(names=[module.params["name"]]).items
+ )[-1].suffix
+ module.params["suffix"] = latest_snapshot
if ":" in module.params["name"] and "::" not in module.params["name"]:
if get_rpgsnapshot(module, array) is None:
module.fail_json(
@@ -310,7 +435,7 @@ def restore_pgsnapvolume(module, array):
module.params["restore"]
)
)
- volume = (
+ source_volume = (
module.params["name"]
+ "."
+ module.params["suffix"]
@@ -324,20 +449,49 @@ def restore_pgsnapvolume(module, array):
else:
source_pod_name = ""
if source_pod_name != target_pod_name:
- if (
- len(array.get_pod(target_pod_name, mediator=True)["arrays"]) > 1
- and POD_SNAPSHOT not in api_version
- ):
+ if list(array.get_pods(names=[target_pod_name]).items)[0].array_count > 1:
module.fail_json(msg="Volume cannot be restored to a stretched pod")
if not module.check_mode:
- try:
- array.copy_volume(
- volume, module.params["target"], overwrite=module.params["overwrite"]
+ if LooseVersion(DEFAULT_API) <= LooseVersion(array.get_rest_version()):
+ if module.params["add_to_pgs"]:
+ add_to_pgs = []
+ for add_pg in range(0, len(module.params["add_to_pgs"])):
+ add_to_pgs.append(
+ FixedReference(name=module.params["add_to_pgs"][add_pg])
+ )
+ res = array.post_volumes(
+ names=[module.params["target"]],
+ volume=VolumePost(source=Reference(name=source_volume)),
+ with_default_protection=module.params["with_default_protection"],
+ add_to_protection_group_names=add_to_pgs,
+ )
+ else:
+ if module.params["overwrite"]:
+ res = array.post_volumes(
+ names=[module.params["target"]],
+ volume=VolumePost(source=Reference(name=source_volume)),
+ overwrite=module.params["overwrite"],
+ )
+ else:
+ res = array.post_volumes(
+ names=[module.params["target"]],
+ volume=VolumePost(source=Reference(name=source_volume)),
+ with_default_protection=module.params[
+ "with_default_protection"
+ ],
+ )
+ else:
+ res = array.post_volumes(
+ names=[module.params["target"]],
+ overwrite=module.params["overwrite"],
+ volume=VolumePost(source=Reference(name=source_volume)),
)
- except Exception:
+ if res.status_code != 200:
module.fail_json(
- msg="Failed to restore {0} from pgroup {1}".format(
- volume, module.params["name"]
+ msg="Failed to restore {0} from pgroup {1}. Error: {2}".format(
+ module.params["restore"],
+ module.params["name"],
+ res.errors[0].message,
)
)
module.exit_json(changed=changed)
@@ -349,23 +503,61 @@ def delete_offload_snapshot(module, array):
snapname = module.params["name"] + "." + module.params["suffix"]
if ":" in module.params["name"] and module.params["offload"]:
if _check_offload(module, array):
- changed = True
+ res = array.get_remote_protection_group_snapshots(
+ names=[snapname], on=module.params["offload"]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Offload snapshot {0} does not exist on {1}".format(
+ snapname, module.params["offload"]
+ )
+ )
+
+ rpg_destroyed = list(res.items)[0].destroyed
if not module.check_mode:
- try:
- array.destroy_pgroup(snapname, on=module.params["offload"])
+ if not rpg_destroyed:
+ changed = True
+ res = array.patch_remote_protection_group_snapshots(
+ names=[snapname],
+ on=module.params["offload"],
+ remote_protection_group_snapshot=DestroyedPatchPost(
+ destroyed=True
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete offloaded snapshot {0} on target {1}. Error: {2}".format(
+ snapname,
+ module.params["offload"],
+ res.errors[0].message,
+ )
+ )
if module.params["eradicate"]:
- try:
- array.eradicate_pgroup(
- snapname, on=module.params["offload"]
+ res = array.delete_remote_protection_group_snapshots(
+ names=[snapname], on=module.params["offload"]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to eradicate offloaded snapshot {0} on target {1}. Error: {2}".format(
+ snapname,
+ module.params["offload"],
+ res.errors[0].message,
+ )
)
- except Exception:
+ else:
+ if module.params["eradicate"]:
+ changed = True
+ res = array.delete_remote_protection_group_snapshots(
+ names=[snapname], on=module.params["offload"]
+ )
+ if res.status_code != 200:
module.fail_json(
- msg="Failed to eradicate offloaded snapshot {0} on target {1}".format(
- snapname, module.params["offload"]
+ msg="Failed to eradicate offloaded snapshot {0} on target {1}. Error: {2}".format(
+ snapname,
+ module.params["offload"],
+ res.errors[0].message,
)
)
- except Exception:
- pass
else:
module.fail_json(
msg="Offload target {0} does not exist or not connected".format(
@@ -383,17 +575,58 @@ def delete_pgsnapshot(module, array):
changed = True
if not module.check_mode:
snapname = module.params["name"] + "." + module.params["suffix"]
- try:
- array.destroy_pgroup(snapname)
- if module.params["eradicate"]:
- try:
- array.eradicate_pgroup(snapname)
- except Exception:
- module.fail_json(
- msg="Failed to eradicate pgroup {0}".format(snapname)
+ res = array.patch_protection_group_snapshots(
+ names=[snapname],
+ protection_group_snapshot=ProtectionGroupSnapshotPatch(destroyed=True),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete pgroup {0}. Error {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ if module.params["eradicate"]:
+ res = array.delete_protection_group_snapshots(names=[snapname])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete pgroup {0}. Error {1}".format(
+ snapname, res.errors[0].message
)
- except Exception:
- module.fail_json(msg="Failed to delete pgroup {0}".format(snapname))
+ )
+ module.exit_json(changed=changed)
+
+
+def eradicate_pgsnapshot(module, array):
+ """Eradicate Protection Group Snapshot"""
+ changed = True
+ if not module.check_mode:
+ snapname = module.params["name"] + "." + module.params["suffix"]
+ res = array.delete_protection_group_snapshots(names=[snapname])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete pgroup {0}. Error {1}".format(
+ snapname, res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_pgsnapshot(module, array):
+ """Update Protection Group Snapshot - basically just rename..."""
+ changed = True
+ if not module.check_mode:
+ current_name = module.params["name"] + "." + module.params["suffix"]
+ new_name = module.params["name"] + "." + module.params["target"]
+ res = array.patch_protection_group_snapshots(
+ names=[current_name],
+ protection_group_snapshot=ProtectionGroupSnapshotPatch(name=new_name),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename {0} to {1}. Error: {2}".format(
+ current_name, new_name, res.errors[0].message
+ )
+ )
module.exit_json(changed=changed)
@@ -405,6 +638,7 @@ def main():
suffix=dict(type="str"),
restore=dict(type="str"),
offload=dict(type="str"),
+ throttle=dict(type="bool", default=False),
overwrite=dict(type="bool", default=False),
target=dict(type="str"),
eradicate=dict(type="bool", default=False),
@@ -412,18 +646,26 @@ def main():
apply_retention=dict(type="bool", default=False),
remote=dict(type="bool", default=False),
state=dict(
- type="str", default="present", choices=["absent", "present", "copy"]
+ type="str",
+ default="present",
+ choices=["absent", "present", "copy", "rename"],
),
+ with_default_protection=dict(type="bool", default=True),
+ add_to_pgs=dict(type="list", elements="str"),
)
)
required_if = [("state", "copy", ["suffix", "restore"])]
+ mutually_exclusive = [["now", "remote"]]
module = AnsibleModule(
- argument_spec, required_if=required_if, supports_check_mode=True
+ argument_spec,
+ required_if=required_if,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
)
- pattern = re.compile("^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,63}[a-zA-Z0-9])?$")
state = module.params["state"]
+ pattern = re.compile("^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,63}[a-zA-Z0-9])?$")
if state == "present":
if module.params["suffix"] is None:
suffix = "snap-" + str(
@@ -431,6 +673,10 @@ def main():
)
module.params["suffix"] = suffix.replace(".", "")
else:
+ if module.params["restore"]:
+ pattern = re.compile(
+ "^[0-9]{0,63}$|^(?=.*[a-zA-Z-])[a-zA-Z0-9]([a-zA-Z0-9-]{0,63}[a-zA-Z0-9])?$"
+ )
if not pattern.match(module.params["suffix"]):
module.fail_json(
msg="Suffix name {0} does not conform to suffix name rules".format(
@@ -441,11 +687,18 @@ def main():
if not module.params["target"] and module.params["restore"]:
module.params["target"] = module.params["restore"]
- array = get_system(module)
- api_version = array._list_available_rest_versions()
- if OFFLOAD_API not in api_version and module.params["offload"]:
- module.fail_json(
- msg="Minimum version {0} required for offload support".format(OFFLOAD_API)
+ if state == "rename" and module.params["target"] is not None:
+ if not pattern.match(module.params["target"]):
+ module.fail_json(
+ msg="Suffix target {0} does not conform to suffix name rules".format(
+ module.params["target"]
+ )
+ )
+ array = get_array(module)
+ api_version = array.get_rest_version()
+ if not HAS_PURESTORAGE and module.params["throttle"]:
+ module.warn(
+ "Throttle capability disable as py-pure-client sdk is not installed"
)
pgroup = get_pgroup(module, array)
if pgroup is None:
@@ -453,24 +706,34 @@ def main():
msg="Protection Group {0} does not exist.".format(module.params["name"])
)
pgsnap = get_pgsnapshot(module, array)
+ if pgsnap:
+ pgsnap_deleted = pgsnap.destroyed
if state != "absent" and module.params["offload"]:
module.fail_json(
msg="offload parameter not supported for state {0}".format(state)
)
elif state == "copy":
+ if module.params["overwrite"] and (
+ module.params["add_to_pgs"] or module.params["with_default_protection"]
+ ):
+ module.fail_json(
+ msg="overwrite and add_to_pgs or with_default_protection are incompatable"
+ )
restore_pgsnapvolume(module, array)
elif state == "present" and not pgsnap:
create_pgsnapshot(module, array)
elif state == "present" and pgsnap:
module.exit_json(changed=False)
elif (
- state == "absent"
- and module.params["offload"]
- and get_offload_snapshot(module, array)
+ state == "absent" and module.params["offload"] and get_pgsnapshot(module, array)
):
delete_offload_snapshot(module, array)
- elif state == "absent" and pgsnap:
+ elif state == "rename" and pgsnap:
+ update_pgsnapshot(module, array)
+ elif state == "absent" and pgsnap and not pgsnap_deleted:
delete_pgsnapshot(module, array)
+ elif state == "absent" and pgsnap and pgsnap_deleted and module.params["eradicate"]:
+ eradicate_pgsnapshot(module, array)
elif state == "absent" and not pgsnap:
module.exit_json(changed=False)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py
index 75c4eb6c9..a41e346eb 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_pod.py
@@ -179,38 +179,15 @@ from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa impo
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.common import (
+ human_to_bytes,
+)
POD_API_VERSION = "1.13"
POD_QUOTA_VERSION = "2.23"
-def human_to_bytes(size):
- """Given a human-readable byte string (e.g. 2G, 30M),
- return the number of bytes. Will return 0 if the argument has
- unexpected form.
- """
- bytes = size[:-1]
- unit = size[-1].upper()
- if bytes.isdigit():
- bytes = int(bytes)
- if unit == "P":
- bytes *= 1125899906842624
- elif unit == "T":
- bytes *= 1099511627776
- elif unit == "G":
- bytes *= 1073741824
- elif unit == "M":
- bytes *= 1048576
- elif unit == "K":
- bytes *= 1024
- else:
- bytes = 0
- else:
- bytes = 0
- return bytes
-
-
def get_pod(module, array):
"""Return Pod or None"""
try:
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py
index 37017e4df..7247d376f 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_policy.py
@@ -38,7 +38,7 @@ options:
policy:
description:
- The type of policy to use
- choices: [ nfs, smb, snapshot, quota ]
+ choices: [ nfs, smb, snapshot, quota, autodir ]
required: true
type: str
enabled:
@@ -73,10 +73,18 @@ options:
choices: [ ro, rw ]
default: rw
type: str
+ nfs_version:
+ description:
+ - NFS protocol version allowed for the export
+ type: list
+ elements: str
+ choices: [ nfsv3, nfsv4 ]
+ version_added: "1.22.0"
user_mapping:
description:
- Defines if user mapping is enabled
type: bool
+ default: true
version_added: 1.14.0
snap_at:
description:
@@ -174,6 +182,21 @@ options:
type: str
default: "65534"
version_added: 1.14.0
+ security:
+ description:
+ - The security flavors to use for accessing files on a mount point.
+ - If the server does not support the requested flavor, the mount operation fails.
+ - This operation updates all rules of the specified policy.
+ type: list
+ elements: str
+ choices: [ auth_sys, krb5, krb5i, krb5p ]
+ version_added: 1.25.0
+ access_based_enumeration:
+ description:
+ - Defines if access based enumeration for SMB is enabled
+ type: bool
+ default: false
+ version_added: 1.26.0
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
@@ -332,60 +355,29 @@ try:
except ImportError:
HAS_PURESTORAGE = False
-HAS_PACKAGING = True
-try:
- from packaging import version
-except ImportError:
- HAS_PACKAGING = False
-
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.common import (
+ human_to_bytes,
+ convert_to_millisecs,
+)
MIN_REQUIRED_API_VERSION = "2.3"
MIN_QUOTA_API_VERSION = "2.7"
MIN_SUFFIX_API_VERSION = "2.9"
USER_MAP_VERSION = "2.15"
ALL_SQUASH_VERSION = "2.16"
-
-
-def _human_to_bytes(size):
- """Given a human-readable byte string (e.g. 2G, 30M),
- return the number of bytes. Will return 0 if the argument has
- unexpected form.
- """
- bytes = size[:-1]
- unit = size[-1].upper()
- if bytes.isdigit():
- bytes = int(bytes)
- if unit == "P":
- bytes *= 1125899906842624
- elif unit == "T":
- bytes *= 1099511627776
- elif unit == "G":
- bytes *= 1073741824
- elif unit == "M":
- bytes *= 1048576
- elif unit == "K":
- bytes *= 1024
- else:
- bytes = 0
- else:
- bytes = 0
- return bytes
-
-
-def _convert_to_millisecs(hour):
- if hour[-2:].upper() == "AM" and hour[:2] == "12":
- return 0
- elif hour[-2:].upper() == "AM":
- return int(hour[:-2]) * 3600000
- elif hour[-2:].upper() == "PM" and hour[:2] == "12":
- return 43200000
- return (int(hour[:-2]) + 12) * 3600000
+AUTODIR_VERSION = "2.24"
+NFS_VERSION = "2.26"
+SECURITY_VERSION = "2.29"
+ABE_VERSION = "2.4"
def rename_policy(module, array):
@@ -560,7 +552,7 @@ def delete_policy(module, array):
if module.params["directory"][old_dir] in dirs:
old_dirs.append(module.params["directory"][old_dir])
else:
- old_dirs = module.params["directory"]
+ pass
if old_dirs:
changed = True
for rem_dir in range(0, len(old_dirs)):
@@ -607,9 +599,53 @@ def delete_policy(module, array):
deleted.errors[0].message,
)
)
- else:
+ elif module.params["policy"] == "autodir":
+ if not module.params["directory"]:
+ res = array.delete_policies_autodir(names=[module.params["name"]])
+ if res.status_code == 200:
+ changed = True
+ else:
+ module.fail_json(
+ msg="Deletion of Autodir policy {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if module.params["directory"]:
+ dirs = []
+ old_dirs = []
+ current_dirs = list(
+ array.get_directories_policies_autodir(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if current_dirs:
+ for current_dir in range(0, len(current_dirs)):
+ dirs.append(current_dirs[current_dir].member.name)
+ for old_dir in range(0, len(module.params["directory"])):
+ if module.params["directory"][old_dir] in dirs:
+ old_dirs.append(module.params["directory"][old_dir])
+ else:
+ pass
+ if old_dirs:
+ changed = True
+ for rem_dir in range(0, len(old_dirs)):
+ if not module.check_mode:
+ directory_removed = (
+ array.delete_directories_policies_autodir(
+ member_names=[old_dirs[rem_dir]],
+ policy_names=module.params["name"],
+ )
+ )
+ if directory_removed.status_code != 200:
+ module.fail_json(
+ msg="Failed to remove directory from Autodir policy {0}. Error: {1}".format(
+ module.params["name"],
+ directory_removed.errors[0].message,
+ )
+ )
+ else: # quota
if module.params["quota_limit"]:
- quota_limit = _human_to_bytes(module.params["quota_limit"])
+ quota_limit = human_to_bytes(module.params["quota_limit"])
rules = list(
array.get_policies_quota_rules(
policy_names=[module.params["name"]]
@@ -704,18 +740,7 @@ def create_policy(module, array, all_squash):
)
if created.status_code == 200:
- policy = flasharray.PolicyNfsPost(
- user_mapping_enabled=module.params["user_mapping"],
- )
- res = array.patch_policies_nfs(
- names=[module.params["name"]], policy=policy
- )
- if res.status_code != 200:
- module.fail_json(
- msg="Failed to set NFS policy {0}. Error: {1}".format(
- module.params["name"], res.errors[0].message
- )
- )
+ changed = True
if module.params["client"]:
if all_squash:
rules = flasharray.PolicyrulenfsclientpostRules(
@@ -741,7 +766,52 @@ def create_policy(module, array, all_squash):
module.params["name"], rule_created.errors[0].message
)
)
- changed = True
+ policy = flasharray.PolicyNfsPatch(
+ user_mapping_enabled=module.params["user_mapping"],
+ )
+ res = array.patch_policies_nfs(
+ names=[module.params["name"]], policy=policy
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set NFS policy user_mapping {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if (
+ LooseVersion(array.get_rest_version()) >= LooseVersion(NFS_VERSION)
+ and module.params["client"]
+ and module.params["nfs_version"]
+ ):
+ policy = flasharray.PolicyNfsPatch(
+ nfs_version=module.params["nfs_version"],
+ )
+ res = array.patch_policies_nfs(
+ names=[module.params["name"]], policy=policy
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set NFS policy version {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if (
+ LooseVersion(array.get_rest_version())
+ >= LooseVersion(SECURITY_VERSION)
+ and module.params["security"]
+ ):
+ policy = flasharray.PolicyNfsPatch(
+ security=module.params["security"],
+ )
+ res = array.patch_policies_nfs(
+ names=[module.params["name"]], policy=policy
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set NFS policy security {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
else:
module.fail_json(
msg="Failed to create NFS policy {0}. Error: {1}".format(
@@ -754,7 +824,21 @@ def create_policy(module, array, all_squash):
policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
)
if created.status_code == 200:
- changed = True
+ if LooseVersion(ABE_VERSION) <= LooseVersion(array.get_rest_version()):
+ res = array.patch_policies_smb(
+ names=[module.params["name"]],
+ policy=flasharray.PolicySmbPatch(
+ access_based_enumeration_enabled=module.params[
+ "access_based_enumeration"
+ ]
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set SMB policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
if module.params["client"]:
rules = flasharray.PolicyrulesmbclientpostRules(
anonymous_access_allowed=module.params["smb_anon_allowed"],
@@ -771,6 +855,7 @@ def create_policy(module, array, all_squash):
module.params["name"], rule_created.errors[0].message
)
)
+ changed = True
else:
module.fail_json(
msg="Failed to create SMB policy {0}. Error: {1}".format(
@@ -778,12 +863,10 @@ def create_policy(module, array, all_squash):
)
)
elif module.params["policy"] == "snapshot":
- if HAS_PACKAGING:
- suffix_enabled = version.parse(
- array.get_rest_version()
- ) >= version.parse(MIN_SUFFIX_API_VERSION)
- else:
- suffix_enabled = False
+ suffix_enabled = bool(
+ LooseVersion(array.get_rest_version())
+ >= LooseVersion(MIN_SUFFIX_API_VERSION)
+ )
created = array.post_policies_snapshot(
names=[module.params["name"]],
policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
@@ -802,7 +885,7 @@ def create_policy(module, array, all_squash):
)
if suffix_enabled:
rules = flasharray.PolicyrulesnapshotpostRules(
- at=_convert_to_millisecs(module.params["snap_at"]),
+ at=convert_to_millisecs(module.params["snap_at"]),
client_name=module.params["snap_client_name"],
every=module.params["snap_every"] * 60000,
keep_for=module.params["snap_keep_for"] * 60000,
@@ -810,7 +893,7 @@ def create_policy(module, array, all_squash):
)
else:
rules = flasharray.PolicyrulesnapshotpostRules(
- at=_convert_to_millisecs(module.params["snap_at"]),
+ at=convert_to_millisecs(module.params["snap_at"]),
client_name=module.params["snap_client_name"],
every=module.params["snap_every"] * 60000,
keep_for=module.params["snap_keep_for"] * 60000,
@@ -863,7 +946,38 @@ def create_policy(module, array, all_squash):
module.params["name"], created.errors[0].message
)
)
- else:
+ elif module.params["policy"] == "autodir":
+ created = array.post_policies_autodir(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
+ )
+ if created.status_code == 200:
+ changed = True
+ if module.params["directory"]:
+ policies = flasharray.DirectoryPolicyPost(
+ policies=[
+ flasharray.DirectorypolicypostPolicies(
+ policy=flasharray.Reference(name=module.params["name"])
+ )
+ ]
+ )
+ directory_added = array.post_directories_policies_autodir(
+ member_names=module.params["directory"], policies=policies
+ )
+ if directory_added.status_code != 200:
+ module.fail_json(
+ msg="Failed to add directory for Autodir policy {0}. Error: {1}".format(
+ module.params["name"],
+ directory_added.errors[0].message,
+ )
+ )
+ else:
+ module.fail_json(
+ msg="Failed to create Autodir policy {0}. Error: {1}".format(
+ module.params["name"], created.errors[0].message
+ )
+ )
+ else: # quota
created = array.post_policies_quota(
names=[module.params["name"]],
policy=flasharray.PolicyPost(enabled=module.params["enabled"]),
@@ -871,7 +985,7 @@ def create_policy(module, array, all_squash):
if created.status_code == 200:
changed = True
if module.params["quota_limit"]:
- quota = _human_to_bytes(module.params["quota_limit"])
+ quota = human_to_bytes(module.params["quota_limit"])
rules = flasharray.PolicyrulequotapostRules(
enforced=module.params["quota_enforced"],
quota_limit=quota,
@@ -924,16 +1038,15 @@ def create_policy(module, array, all_squash):
def update_policy(module, array, api_version, all_squash):
"""Update an existing policy including add/remove rules"""
- changed = (
- changed_dir
- ) = (
- changed_rule
- ) = changed_enable = changed_quota = changed_member = changed_user_map = False
+ changed = changed_dir = changed_rule = changed_enable = changed_quota = (
+ changed_member
+ ) = changed_user_map = changed_abe = changed_nfs = False
if module.params["policy"] == "nfs":
+ current_policy = list(
+ array.get_policies_nfs(names=[module.params["name"]]).items
+ )[0]
try:
- current_enabled = list(
- array.get_policies_nfs(names=[module.params["name"]]).items
- )[0].enabled
+ current_enabled = current_policy.enabled
if USER_MAP_VERSION in api_version:
current_user_map = list(
array.get_policies_nfs(names=[module.params["name"]]).items
@@ -944,6 +1057,23 @@ def update_policy(module, array, api_version, all_squash):
module.params["name"]
)
)
+ if module.params["nfs_version"] and sorted(
+ module.params["nfs_version"]
+ ) != sorted(getattr(current_policy, "nfs_version", [])):
+ changed_nfs = True
+ if not module.check_mode:
+ res = array.patch_policies_nfs(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyNfsPatch(
+ nfs_version=module.params["nfs_version"]
+ ),
+ )
+ if res.status_code != 200:
+                    module.fail_json(
+ msg="Failed to change NFS version for NFS policy {0}".format(
+ module.params["name"]
+ )
+ )
if (
module.params["user_mapping"]
and current_user_map != module.params["user_mapping"]
@@ -988,20 +1118,79 @@ def update_policy(module, array, api_version, all_squash):
rule_name = rules[rule].name
break
if not rule_name:
- if all_squash:
- rules = flasharray.PolicyrulenfsclientpostRules(
- permission=module.params["nfs_permission"],
- client=module.params["client"],
- anongid=module.params["anongid"],
- anonuid=module.params["anonuid"],
- access=module.params["nfs_access"],
- )
+ if LooseVersion(NFS_VERSION) > LooseVersion(
+ array.get_rest_version()
+ ):
+ if all_squash:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ anongid=module.params["anongid"],
+ anonuid=module.params["anonuid"],
+ access=module.params["nfs_access"],
+ )
+ else:
+                            rules = flasharray.PolicyrulenfsclientpostRules(
+                                permission=module.params["nfs_permission"],
+                                client=module.params["client"],
+                                access=module.params["nfs_access"],
+                            )
+ elif (
+                    LooseVersion(NFS_VERSION)
+                    <= LooseVersion(array.get_rest_version())
+                    < LooseVersion(SECURITY_VERSION)
+ ):
+ if all_squash:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ anongid=module.params["anongid"],
+ anonuid=module.params["anonuid"],
+ access=module.params["nfs_access"],
+ nfs_version=module.params["nfs_version"],
+ )
+ else:
+                            rules = flasharray.PolicyrulenfsclientpostRules(
+                                permission=module.params["nfs_permission"],
+                                client=module.params["client"],
+                                access=module.params["nfs_access"],
+                                nfs_version=module.params["nfs_version"],
+                            )
else:
- rules = flasharray.PolicyrulenfsclientpostRules(
- permission=module.params["nfs_permission"],
- client=module.params["client"],
- access=module.params["nfs_access"],
- )
+ if module.params["security"]:
+ if all_squash:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ anongid=module.params["anongid"],
+ anonuid=module.params["anonuid"],
+ access=module.params["nfs_access"],
+ nfs_version=module.params["nfs_version"],
+ security=module.params["security"],
+ )
+ else:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ access=module.params["nfs_access"],
+ security=module.params["security"],
+ )
+ else:
+ if all_squash:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ anongid=module.params["anongid"],
+ anonuid=module.params["anonuid"],
+ access=module.params["nfs_access"],
+ nfs_version=module.params["nfs_version"],
+ )
+ else:
+ rules = flasharray.PolicyrulenfsclientpostRules(
+ permission=module.params["nfs_permission"],
+ client=module.params["client"],
+ access=module.params["nfs_access"],
+ )
rule = flasharray.PolicyRuleNfsClientPost(rules=[rules])
changed_rule = True
if not module.check_mode:
@@ -1044,15 +1233,38 @@ def update_policy(module, array, api_version, all_squash):
)
elif module.params["policy"] == "smb":
try:
- current_enabled = list(
- array.get_policies_smb(names=[module.params["name"]]).items
- )[0].enabled
+ current = list(array.get_policies_smb(names=[module.params["name"]]).items)[
+ 0
+ ]
+ current_enabled = current.enabled
+ current_access_based_enumeration = current.access_based_enumeration_enabled
except Exception:
module.fail_json(
msg="Incorrect policy type specified for existing policy {0}".format(
module.params["name"]
)
)
+ if (
+ "access_based_enumeration" in module.params
+ and current_access_based_enumeration
+ != module.params["access_based_enumeration"]
+ ):
+ changed_abe = True
+ if not module.check_mode:
+ res = array.patch_policies_smb(
+ names=[module.params["name"]],
+ policy=flasharray.PolicySmbPatch(
+ access_based_enumeration_enabled=module.params[
+ "access_based_enumeration"
+ ]
+ ),
+ )
+ if res.status_code != 200:
+                    module.fail_json(
+                        msg="Failed to enable/disable Access based enumeration for SMB policy {0}".format(
+ module.params["name"]
+ )
+ )
if current_enabled != module.params["enabled"]:
changed_enable = True
if not module.check_mode:
@@ -1116,12 +1328,10 @@ def update_policy(module, array, api_version, all_squash):
)
)
elif module.params["policy"] == "snapshot":
- if HAS_PACKAGING:
- suffix_enabled = version.parse(array.get_rest_version()) >= version.parse(
- MIN_SUFFIX_API_VERSION
- )
- else:
- suffix_enabled = False
+ suffix_enabled = bool(
+ LooseVersion(array.get_rest_version())
+ >= LooseVersion(MIN_SUFFIX_API_VERSION)
+ )
try:
current_enabled = list(
array.get_policies_snapshot(names=[module.params["name"]]).items
@@ -1223,7 +1433,7 @@ def update_policy(module, array, api_version, all_squash):
)
if suffix_enabled:
rules = flasharray.PolicyrulesnapshotpostRules(
- at=_convert_to_millisecs(module.params["snap_at"]),
+ at=convert_to_millisecs(module.params["snap_at"]),
client_name=module.params["snap_client_name"],
every=module.params["snap_every"] * 60000,
keep_for=module.params["snap_keep_for"] * 60000,
@@ -1231,7 +1441,7 @@ def update_policy(module, array, api_version, all_squash):
)
else:
rules = flasharray.PolicyrulesnapshotpostRules(
- at=_convert_to_millisecs(module.params["snap_at"]),
+ at=convert_to_millisecs(module.params["snap_at"]),
client_name=module.params["snap_client_name"],
every=module.params["snap_every"] * 60000,
keep_for=module.params["snap_keep_for"] * 60000,
@@ -1276,7 +1486,7 @@ def update_policy(module, array, api_version, all_squash):
)
if suffix_enabled:
rules = flasharray.PolicyrulesnapshotpostRules(
- at=_convert_to_millisecs(module.params["snap_at"]),
+ at=convert_to_millisecs(module.params["snap_at"]),
client_name=module.params["snap_client_name"],
every=module.params["snap_every"] * 60000,
keep_for=module.params["snap_keep_for"] * 60000,
@@ -1284,7 +1494,7 @@ def update_policy(module, array, api_version, all_squash):
)
else:
rules = flasharray.PolicyrulesnapshotpostRules(
- at=_convert_to_millisecs(module.params["snap_at"]),
+ at=convert_to_millisecs(module.params["snap_at"]),
client_name=module.params["snap_client_name"],
every=module.params["snap_every"] * 60000,
keep_for=module.params["snap_keep_for"] * 60000,
@@ -1317,7 +1527,69 @@ def update_policy(module, array, api_version, all_squash):
rule_created.errors[err_no].message,
)
)
- else:
+ elif module.params["policy"] == "autodir":
+ try:
+ current_enabled = list(
+ array.get_policies_autodir(names=[module.params["name"]]).items
+ )[0].enabled
+ except Exception:
+ module.fail_json(
+ msg="Incorrect policy type specified for existing policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if current_enabled != module.params["enabled"]:
+ changed_enable = True
+ if not module.check_mode:
+ res = array.patch_policies_autodir(
+ names=[module.params["name"]],
+ policy=flasharray.PolicyPatch(enabled=module.params["enabled"]),
+ )
+ if res.status_code != 200:
+                    module.fail_json(
+ msg="Failed to enable/disable autodir policy {0}".format(
+ module.params["name"]
+ )
+ )
+ if module.params["directory"]:
+ dirs = []
+ new_dirs = []
+ current_dirs = list(
+ array.get_directories_policies_autodir(
+ policy_names=[module.params["name"]]
+ ).items
+ )
+ if current_dirs:
+ for current_dir in range(0, len(current_dirs)):
+ dirs.append(current_dirs[current_dir].member.name)
+ for new_dir in range(0, len(module.params["directory"])):
+ if module.params["directory"][new_dir] not in dirs:
+ changed_dir = True
+ new_dirs.append(module.params["directory"][new_dir])
+ else:
+ new_dirs = module.params["directory"]
+ if new_dirs:
+ policies = flasharray.DirectoryPolicyPost(
+ policies=[
+ flasharray.DirectorypolicypostPolicies(
+ policy=flasharray.Reference(name=module.params["name"])
+ )
+ ]
+ )
+ changed_dir = True
+ for add_dir in range(0, len(new_dirs)):
+ if not module.check_mode:
+ directory_added = array.post_directories_policies_autodir(
+ member_names=[new_dirs[add_dir]], policies=policies
+ )
+ if directory_added.status_code != 200:
+ module.fail_json(
+ msg="Failed to add new directory to Autodir policy {0}. Error: {1}".format(
+ module.params["name"],
+ directory_added.errors[0].message,
+ )
+ )
+ else: # quota
current_enabled = list(
array.get_policies_quota(names=[module.params["name"]]).items
)[0].enabled
@@ -1419,7 +1691,7 @@ def update_policy(module, array, api_version, all_squash):
)
)
if module.params["quota_limit"]:
- quota = _human_to_bytes(module.params["quota_limit"])
+ quota = human_to_bytes(module.params["quota_limit"])
current_rules = list(
array.get_policies_quota_rules(
policy_names=[module.params["name"]]
@@ -1502,6 +1774,8 @@ def update_policy(module, array, api_version, all_squash):
or changed_member
or changed_dir
or changed_user_map
+ or changed_abe
+ or changed_nfs
):
changed = True
module.exit_json(changed=changed)
@@ -1519,7 +1793,9 @@ def main():
),
nfs_permission=dict(type="str", default="rw", choices=["rw", "ro"]),
policy=dict(
- type="str", required=True, choices=["nfs", "smb", "snapshot", "quota"]
+ type="str",
+ required=True,
+ choices=["nfs", "smb", "snapshot", "quota", "autodir"],
),
name=dict(type="str", required=True),
rename=dict(type="str"),
@@ -1540,8 +1816,19 @@ def main():
quota_notifications=dict(
type="list", elements="str", choices=["user", "group"]
),
- user_mapping=dict(type="bool"),
+ user_mapping=dict(type="bool", default=True),
directory=dict(type="list", elements="str"),
+ nfs_version=dict(
+ type="list",
+ elements="str",
+ choices=["nfsv3", "nfsv4"],
+ ),
+ security=dict(
+ type="list",
+ elements="str",
+ choices=["auth_sys", "krb5", "krb5i", "krb5p"],
+ ),
+ access_based_enumeration=dict(type="bool", default=False),
)
)
@@ -1565,6 +1852,11 @@ def main():
msg="FlashArray REST version not supported for directory quotas. "
"Minimum version required: {0}".format(MIN_QUOTA_API_VERSION)
)
+ if module.params["policy"] == "autodir" and AUTODIR_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashArray REST version not supported for autodir policies. "
+ "Minimum version required: {0}".format(AUTODIR_VERSION)
+ )
array = get_array(module)
state = module.params["state"]
if module.params["quota_notifications"]:
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py
index 37dd7ac6a..c97615ced 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_proxy.py
@@ -30,6 +30,13 @@ options:
default: present
type: str
choices: [ absent, present ]
+ protocol:
+ description:
+ - The proxy protocol.
+ choices: [ http, https ]
+ default: https
+ type: str
+ version_added: '1.20.0'
host:
description:
- The proxy host name.
@@ -87,7 +94,11 @@ def create_proxy(module, array):
current_proxy = array.get(proxy=True)
if current_proxy is not None:
new_proxy = (
- "https://" + module.params["host"] + ":" + str(module.params["port"])
+ module.params["protocol"]
+ + "://"
+ + module.params["host"]
+ + ":"
+ + str(module.params["port"])
)
if new_proxy != current_proxy["proxy"]:
changed = True
@@ -105,6 +116,7 @@ def main():
argument_spec.update(
dict(
state=dict(type="str", default="present", choices=["absent", "present"]),
+ protocol=dict(type="str", default="https", choices=["http", "https"]),
host=dict(type="str"),
port=dict(type="int"),
)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py
index 4899b0797..19c192828 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py
@@ -30,8 +30,8 @@ options:
- When set to I(enable) the RA port can be exposed using the
I(debug) module.
type: str
- default: enable
- choices: [ enable, disable ]
+ default: present
+ choices: [ enable, disable, absent, present ]
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
@@ -58,43 +58,75 @@ RETURN = r"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
+ get_array,
purefa_argument_spec,
)
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import SupportPatch
+except ImportError:
+ HAS_PURESTORAGE = False
+
def enable_ra(module, array):
"""Enable Remote Assist"""
changed = False
ra_facts = {}
- if not array.get_remote_assist_status()["status"] in ["connected", "enabled"]:
+ if not list(array.get_support().items)[0].remote_assist_status in [
+ "connected",
+ "connecting",
+ "enabled",
+ ]:
changed = True
if not module.check_mode:
- try:
- ra_data = array.enable_remote_assist()
- ra_facts["fa_ra"] = {"name": ra_data["name"], "port": ra_data["port"]}
- except Exception:
- module.fail_json(msg="Enabling Remote Assist failed")
+ res = array.patch_support(support=SupportPatch(remote_assist_active=True))
+ if res.status_code == 200:
+ ra_data = list(res.items)[0]
+ ra_facts["fa_ra"] = {
+ "name": ra_data.remote_assist_paths[0].component_name,
+ "port": None,
+ }
+ else:
+ module.fail_json(
+ msg="Enabling Remote Assist failed. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
else:
- if not module.check_mode:
- try:
- ra_data = array.get_remote_assist_status()
- ra_facts["fa_ra"] = {"name": ra_data["name"], "port": ra_data["port"]}
- except Exception:
- module.fail_json(msg="Getting Remote Assist failed")
+ res = array.get_support()
+ if res.status_code == 200:
+ ra_data = list(res.items)[0]
+ ra_facts["fa_ra"] = {
+ "name": ra_data.remote_assist_paths[0].component_name,
+ "port": None,
+ }
+ else:
+ module.fail_json(
+ msg="Getting Remote Assist failed. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
module.exit_json(changed=changed, ra_info=ra_facts)
def disable_ra(module, array):
"""Disable Remote Assist"""
changed = False
- if array.get_remote_assist_status()["status"] in ["connected", "enabled"]:
+ if list(array.get_support().items)[0].remote_assist_status in [
+ "connected",
+ "connecting",
+ "enabled",
+ ]:
changed = True
if not module.check_mode:
- try:
- array.disable_remote_assist()
- except Exception:
- module.fail_json(msg="Disabling Remote Assist failed")
+ res = array.patch_support(support=SupportPatch(remote_assist_active=False))
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Disabling Remote Assist failed. Error: {0}".format(
+ res.errors[0].message
+ )
+ )
module.exit_json(changed=changed)
@@ -102,15 +134,21 @@ def main():
argument_spec = purefa_argument_spec()
argument_spec.update(
dict(
- state=dict(type="str", default="enable", choices=["enable", "disable"]),
+ state=dict(
+ type="str",
+ default="present",
+ choices=["enable", "disable", "absent", "present"],
+ ),
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
+ array = get_array(module)
- if module.params["state"] == "enable":
+ if module.params["state"] in ["enable", "present"]:
enable_ra(module, array)
else:
disable_ra(module, array)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py
index 9d5fc7443..3acf3f748 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_saml.py
@@ -121,10 +121,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.11"
@@ -310,15 +312,14 @@ def main():
if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
)
- array = get_array(module)
state = module.params["state"]
try:
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py
index f752cb950..b92c65bcb 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_smis.py
@@ -65,10 +65,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.2"
@@ -115,15 +117,14 @@ def main():
if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
)
- array = get_array(module)
update_smis(module, array)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py
index db567a398..1b3207878 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snap.py
@@ -32,7 +32,6 @@ options:
suffix:
description:
- Suffix of snapshot name.
- - Not used during creation if I(offload) is provided.
type: str
target:
description:
@@ -51,7 +50,6 @@ options:
- Target can be either another FlashArray or an Offload Target
- This is only applicable for creation, deletion and eradication of snapshots
- I(state) of I(copy) is not supported.
- - I(suffix) is not supported for offload snapshots.
type: str
state:
description:
@@ -71,6 +69,12 @@ options:
- If set to false, allow destruction/eradication of snapshots not in use by replication
type: bool
default: false
+ throttle:
+ description:
+ - If set to true, allows snapshot to fail if array health is not optimal.
+ type: bool
+ default: false
+ version_added: '1.21.0'
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
@@ -173,6 +177,8 @@ from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa impo
from datetime import datetime
GET_SEND_API = "2.4"
+THROTTLE_API = "2.25"
+SNAPSHOT_SUFFIX_API = "2.28"
def _check_offload(module, array):
@@ -258,10 +264,14 @@ def get_deleted_snapshot(module, array, arrayv6):
def get_snapshot(module, array):
- """Return Snapshot or None"""
+ """Return True if snapshot exists, False otherwise"""
try:
snapname = module.params["name"] + "." + module.params["suffix"]
- for snaps in array.get_volume(module.params["name"], snap=True, pending=False):
+ name = module.params["name"]
+ if len(name.split(":")) == 2:
+ # API 1.x raises exception if name is a remote snap
+ name = module.params["name"] + "*"
+ for snaps in array.get_volume(name, snap=True, pending=False):
if snaps["name"] == snapname:
return True
except Exception:
@@ -271,12 +281,18 @@ def get_snapshot(module, array):
def create_snapshot(module, array, arrayv6):
"""Create Snapshot"""
changed = False
+ api_version = array._list_available_rest_versions()
if module.params["offload"]:
- module.params["suffix"] = None
+ if SNAPSHOT_SUFFIX_API not in api_version:
+ module.params["suffix"] = None
changed = True
if not module.check_mode:
res = arrayv6.post_remote_volume_snapshots(
- source_names=[module.params["name"]], on=module.params["offload"]
+ source_names=[module.params["name"]],
+ on=module.params["offload"],
+ remote_volume_snapshot=flasharray.RemoteVolumeSnapshotPost(
+ suffix=module.params["suffix"]
+ ),
)
if res.status_code != 200:
module.fail_json(
@@ -285,21 +301,37 @@ def create_snapshot(module, array, arrayv6):
)
)
else:
- remote_snap = list(res.items)[0].name
- module.params["suffix"] = remote_snap.split(".")[1]
+ if SNAPSHOT_SUFFIX_API not in api_version:
+ remote_snap = list(res.items)[0].name
+ module.params["suffix"] = remote_snap.split(".")[1]
else:
changed = True
if not module.check_mode:
- try:
- array.create_snapshot(
- module.params["name"], suffix=module.params["suffix"]
+ if THROTTLE_API in api_version:
+ res = arrayv6.post_volume_snapshots(
+ allow_throttle=module.params["throttle"],
+ volume_snapshot=flasharray.VolumeSnapshotPost(
+ suffix=module.params["suffix"]
+ ),
+ source_names=[module.params["name"]],
)
- except Exception:
- module.fail_json(
- msg="Failed to create snapshot for volume {0}".format(
- module.params["name"]
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create snapshot for volume {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ else:
+ try:
+ array.create_snapshot(
+ module.params["name"], suffix=module.params["suffix"]
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create snapshot for volume {0}".format(
+ module.params["name"]
+ )
)
- )
module.exit_json(changed=changed, suffix=module.params["suffix"])
@@ -518,6 +550,7 @@ def main():
suffix=dict(type="str"),
target=dict(type="str"),
offload=dict(type="str"),
+ throttle=dict(type="bool", default=False),
ignore_repl=dict(type="bool", default=False),
overwrite=dict(type="bool", default=False),
eradicate=dict(type="bool", default=False),
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py
index b9dc8ca94..c3ecb2e64 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_snmp_agent.py
@@ -120,10 +120,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.1"
@@ -238,10 +240,10 @@ def main():
supports_check_mode=True,
)
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="FlashArray REST version not supported. "
"Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py
index c1199215f..404a2c044 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_sso.py
@@ -66,10 +66,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
SSO_API_VERSION = "2.2"
@@ -88,11 +90,10 @@ def main():
module.fail_json(msg="py-pure-client sdk is required for this module")
state = module.params["state"]
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
changed = False
- if SSO_API_VERSION in api_version:
- array = get_array(module)
+ if LooseVersion(SSO_API_VERSION) <= LooseVersion(api_version):
current_sso = list(array.get_admins_settings().items)[0].single_sign_on_enabled
if (state == "present" and not current_sso) or (
state == "absent" and current_sso
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py
index efce8db9e..84083c522 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_subnet.py
@@ -97,7 +97,7 @@ RETURN = """
"""
try:
- from netaddr import IPNetwork
+ from netaddr import IPNetwork, valid_ipv4, valid_ipv6
HAS_NETADDR = True
except ImportError:
@@ -130,22 +130,38 @@ def update_subnet(module, array, subnet):
"prefix": subnet["prefix"],
"gateway": subnet["gateway"],
}
+ address = str(subnet["prefix"].split("/", 1)[0])
+ if not current_state["vlan"]:
+ current_state["vlan"] = 0
+ if not current_state["gateway"]:
+ if valid_ipv4(address):
+ current_state["gateway"] = "0.0.0.0"
+ elif valid_ipv6(address):
+ current_state["gateway"] = "::"
+ else:
+ module.fail_json(msg="Prefix address is not valid IPv4 or IPv6")
+
if not module.params["prefix"]:
prefix = subnet["prefix"]
else:
- if module.params["gateway"] and module.params["gateway"] not in IPNetwork(
- module.params["prefix"]
- ):
- module.fail_json(msg="Gateway and subnet are not compatible.")
- elif (
- not module.params["gateway"]
- and subnet["gateway"]
- and subnet["gateway"] not in IPNetwork(module.params["prefix"])
+ if module.params["gateway"] and not (
+ module.params["gateway"] in ["0.0.0.0", "::"]
):
- module.fail_json(msg="Gateway and subnet are not compatible.")
+ if module.params["gateway"] and module.params["gateway"] not in IPNetwork(
+ module.params["prefix"]
+ ):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
+ elif (
+ not module.params["gateway"]
+ and subnet["gateway"]
+ and subnet["gateway"] not in IPNetwork(module.params["prefix"])
+ ):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
prefix = module.params["prefix"]
if not module.params["vlan"]:
vlan = subnet["vlan"]
+ if not vlan:
+ vlan = 0
else:
if not 0 <= module.params["vlan"] <= 4094:
module.fail_json(
@@ -165,8 +181,11 @@ def update_subnet(module, array, subnet):
if not module.params["gateway"]:
gateway = subnet["gateway"]
else:
- if module.params["gateway"] not in IPNetwork(prefix):
- module.fail_json(msg="Gateway and subnet are not compatible.")
+ if module.params["gateway"] and not (
+ module.params["gateway"] in ["0.0.0.0", "::"]
+ ):
+ if module.params["gateway"] not in IPNetwork(prefix):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
gateway = module.params["gateway"]
new_state = {"prefix": prefix, "mtu": mtu, "gateway": gateway, "vlan": vlan}
if new_state != current_state:
@@ -214,10 +233,13 @@ def create_subnet(module, array):
if not module.params["prefix"]:
module.fail_json(msg="Prefix required when creating subnet.")
else:
- if module.params["gateway"] and module.params["gateway"] not in IPNetwork(
- module.params["prefix"]
+ if module.params["gateway"] and not (
+ module.params["gateway"] in ["0.0.0.0", "::"]
):
- module.fail_json(msg="Gateway and subnet are not compatible.")
+ if module.params["gateway"] and module.params["gateway"] not in IPNetwork(
+ module.params["prefix"]
+ ):
+ module.fail_json(msg="Gateway and subnet are not compatible.")
prefix = module.params["prefix"]
if module.params["vlan"]:
if not 0 <= module.params["vlan"] <= 4094:
@@ -235,7 +257,7 @@ def create_subnet(module, array):
)
else:
mtu = module.params["mtu"]
- if module.params["gateway"]:
+ if module.params["gateway"] and not (module.params["gateway"] in ["0.0.0.0", "::"]):
if module.params["gateway"] not in IPNetwork(prefix):
module.fail_json(msg="Gateway and subnet are not compatible.")
gateway = module.params["gateway"]
@@ -313,6 +335,10 @@ def main():
state = module.params["state"]
array = get_system(module)
subnet = _get_subnet(module, array)
+ if module.params["prefix"]:
+ module.params["prefix"] = module.params["prefix"].strip("[]")
+ if module.params["gateway"]:
+ module.params["gateway"] = module.params["gateway"].strip("[]")
if state == "present" and not subnet:
create_subnet(module, array)
if state == "present" and subnet:
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py
index adb385ca4..20b3104fe 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog.py
@@ -21,14 +21,13 @@ version_added: '1.0.0'
short_description: Configure Pure Storage FlashArray syslog settings
description:
- Configure syslog configuration for Pure Storage FlashArrays.
-- Add or delete an individual syslog server to the existing
- list of serves.
+- Manage individual syslog servers.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
state:
description:
- - Create or delete syslog servers configuration
+ - Create, update or delete syslog servers configuration
default: present
type: str
choices: [ absent, present ]
@@ -55,14 +54,14 @@ options:
description:
- A user-specified name.
The name must be locally unique and cannot be changed.
- - Only applicable with FlashArrays running Purity//FA 6.0 or higher.
type: str
+ required: true
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
EXAMPLES = r"""
-- name: Delete exisitng syslog server entries
+- name: Delete existing syslog server entry
purestorage.flasharray.purefa_syslog:
address: syslog1.com
protocol: tcp
@@ -70,13 +69,23 @@ EXAMPLES = r"""
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
-- name: Set array syslog servers
+- name: Add syslog server entry
purestorage.flasharray.purefa_syslog:
state: present
address: syslog1.com
+ port: 8081
protocol: udp
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Update syslog server entry
+ purestorage.flasharray.purefa_syslog:
+ state: present
+ address: syslog1.com
+ port: 8081
+ protocol: tcp
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
"""
RETURN = r"""
@@ -85,7 +94,7 @@ RETURN = r"""
HAS_PURESTORAGE = True
try:
- from pypureclient import flasharray
+ from pypureclient.flasharray import SyslogServer
except ImportError:
HAS_PURESTORAGE = False
@@ -93,90 +102,71 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
get_array,
- get_system,
purefa_argument_spec,
)
-SYSLOG_NAME_API = "2.4"
-
-
def delete_syslog(module, array):
"""Delete Syslog Server"""
- changed = False
+ changed = True
+ if not module.check_mode:
+ res = array.delete_syslog_servers(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to remove syslog server {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def add_syslog(module, array):
+ """Add Syslog Server"""
+ changed = True
noport_address = module.params["protocol"] + "://" + module.params["address"]
if module.params["port"]:
full_address = noport_address + ":" + module.params["port"]
else:
full_address = noport_address
-
- address_list = array.get(syslogserver=True)["syslogserver"]
-
- if address_list:
- for address in range(0, len(address_list)):
- if address_list[address] == full_address:
- del address_list[address]
- changed = True
- if not module.check_mode:
- try:
- array.set(syslogserver=address_list)
- break
- except Exception:
- module.fail_json(
- msg="Failed to remove syslog server: {0}".format(
- full_address
- )
- )
-
+ if not module.check_mode:
+ res = array.post_syslog_servers(
+ names=[module.params["name"]],
+ syslog_server=SyslogServer(name=module.params["name"], uri=full_address),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Adding syslog server {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
module.exit_json(changed=changed)
-def add_syslog(module, array, arrayv6):
- """Add Syslog Server"""
+def update_syslog(module, array):
+ """Update Syslog Server"""
changed = False
+ syslog_config = list(array.get_syslog_servers(names=[module.params["name"]]).items)[
+ 0
+ ]
noport_address = module.params["protocol"] + "://" + module.params["address"]
if module.params["port"]:
full_address = noport_address + ":" + module.params["port"]
else:
full_address = noport_address
-
- address_list = array.get(syslogserver=True)["syslogserver"]
- exists = False
-
- if address_list:
- for address in range(0, len(address_list)):
- if address_list[address] == full_address:
- exists = True
- break
- if not exists:
- if arrayv6 and module.params["name"]:
- changed = True
- if not module.check_mode:
- res = arrayv6.post_syslog_servers(
- names=[module.params["name"]],
- syslog_server=flasharray.SyslogServer(
- name=module.params["name"], uri=full_address
- ),
+ if full_address != syslog_config.uri:
+ changed = True
+ res = array.patch_syslog_servers(
+ names=[module.params["name"]],
+ syslog_server=SyslogServer(uri=full_address),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Updating syslog server {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
)
- if res.status_code != 200:
- module.fail_json(
- msg="Adding syslog server {0} failed. Error: {1}".format(
- module.params["name"], res.errors[0].message
- )
- )
- else:
- changed = True
- if not module.check_mode:
- try:
- address_list.append(full_address)
- array.set(syslogserver=address_list)
- except Exception:
- module.fail_json(
- msg="Failed to add syslog server: {0}".format(full_address)
- )
-
+ )
module.exit_json(changed=changed)
@@ -187,29 +177,30 @@ def main():
address=dict(type="str", required=True),
protocol=dict(type="str", choices=["tcp", "tls", "udp"], required=True),
port=dict(type="str"),
- name=dict(type="str"),
+ name=dict(type="str", required=True),
state=dict(type="str", default="present", choices=["absent", "present"]),
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
- array = get_system(module)
+ array = get_array(module)
- if module.params["name"] and not HAS_PURESTORAGE:
+ if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- api_version = array._list_available_rest_versions()
-
- if SYSLOG_NAME_API in api_version and module.params["name"]:
- arrayv6 = get_array(module)
+ res = array.get_syslog_servers(names=[module.params["name"]])
+ if res.status_code == 200:
+ exists = True
else:
- arrayv6 = None
+ exists = False
- if module.params["state"] == "absent":
+ if module.params["state"] == "absent" and exists:
delete_syslog(module, array)
- else:
- add_syslog(module, array, arrayv6)
+ elif module.params["state"] == "present" and not exists:
+ add_syslog(module, array)
+ elif module.params["state"] == "present" and exists:
+ update_syslog(module, array)
module.exit_json(changed=False)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py
index fce6dffa3..735930e08 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_syslog_settings.py
@@ -76,10 +76,12 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
get_array,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.version import (
+ LooseVersion,
+)
MIN_REQUIRED_API_VERSION = "2.9"
@@ -106,15 +108,14 @@ def main():
if not HAS_PURESTORAGE:
module.fail_json(msg="py-pure-client sdk is required for this module")
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ array = get_array(module)
+ api_version = array.get_rest_version()
- if MIN_REQUIRED_API_VERSION not in api_version:
+ if LooseVersion(MIN_REQUIRED_API_VERSION) > LooseVersion(api_version):
module.fail_json(
msg="Purity//FA version not supported. Minimum version required: 6.2.0"
)
- array = get_array(module)
changed = cert_change = False
if module.params["ca_certificate"] and len(module.params["ca_certificate"]) > 3000:
module.fail_json(msg="Certificate exceeds 3000 characters")
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py
index fa66fe308..f7acb8889 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_token.py
@@ -89,13 +89,22 @@ from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
get_array,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.common import (
+ convert_time_to_millisecs,
+)
from os import environ
import platform
-VERSION = 1.0
+VERSION = 1.5
USER_AGENT_BASE = "Ansible_token"
TIMEOUT_API_VERSION = "2.2"
+HAS_DISTRO = True
+try:
+ import distro
+except ImportError:
+ HAS_DISTRO = False
+
HAS_PURESTORAGE = True
try:
from purestorage import purestorage
@@ -103,30 +112,22 @@ except ImportError:
HAS_PURESTORAGE = False
-def _convert_time_to_millisecs(timeout):
- if timeout[-1:].lower() not in ["w", "d", "h", "m", "s"]:
- return 0
- try:
- if timeout[-1:].lower() == "w":
- return int(timeout[:-1]) * 7 * 86400000
- elif timeout[-1:].lower() == "d":
- return int(timeout[:-1]) * 86400000
- elif timeout[-1:].lower() == "h":
- return int(timeout[:-1]) * 3600000
- elif timeout[-1:].lower() == "m":
- return int(timeout[:-1]) * 60000
- except Exception:
- return 0
-
-
def get_session(module):
"""Return System Object or Fail"""
- user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
- "base": USER_AGENT_BASE,
- "class": __name__,
- "version": VERSION,
- "platform": platform.platform(),
- }
+ if HAS_DISTRO:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": distro.name(pretty=True),
+ }
+ else:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
array_name = module.params["fa_url"]
username = module.params["username"]
@@ -205,7 +206,7 @@ def main():
):
module.params["api_token"] = api_token
array6 = get_array(module)
- ttl = _convert_time_to_millisecs(module.params["timeout"])
+ ttl = convert_time_to_millisecs(module.params["timeout"])
if ttl != 0:
changed = True
array6.delete_admins_api_tokens(names=[username])
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py
index febb0d5a2..3720ee7cd 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vg.py
@@ -99,6 +99,11 @@ options:
choices: [ 0, 10 ]
default: 0
version_added: '1.13.0'
+ rename:
+ description:
+ - Value to rename the specified volume group to
+ type: str
+ version_added: '1.22.0'
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
@@ -160,6 +165,13 @@ EXAMPLES = r"""
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: absent
+
+- name: Rename volume group foo to bar
+ purestorage.flasharray.purefa_vg:
+ name: foo
+ rename: bar
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
"""
RETURN = r"""
@@ -177,6 +189,10 @@ from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa impo
get_system,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.common import (
+ human_to_bytes,
+ human_to_real,
+)
VGROUP_API_VERSION = "1.13"
@@ -185,52 +201,15 @@ MULTI_VG_VERSION = "2.2"
PRIORITY_API_VERSION = "2.11"
-def human_to_bytes(size):
- """Given a human-readable byte string (e.g. 2G, 30M),
- return the number of bytes. Will return 0 if the argument has
- unexpected form.
- """
- bytes = size[:-1]
- unit = size[-1].upper()
- if bytes.isdigit():
- bytes = int(bytes)
- if unit == "P":
- bytes *= 1125899906842624
- elif unit == "T":
- bytes *= 1099511627776
- elif unit == "G":
- bytes *= 1073741824
- elif unit == "M":
- bytes *= 1048576
- elif unit == "K":
- bytes *= 1024
- else:
- bytes = 0
- else:
- bytes = 0
- return bytes
-
-
-def human_to_real(iops):
- """Given a human-readable IOPs string (e.g. 2K, 30M),
- return the real number. Will return 0 if the argument has
- unexpected form.
- """
- digit = iops[:-1]
- unit = iops[-1].upper()
- if unit.isdigit():
- digit = iops
- elif digit.isdigit():
- digit = int(digit)
- if unit == "M":
- digit *= 1000000
- elif unit == "K":
- digit *= 1000
- else:
- digit = 0
- else:
- digit = 0
- return digit
+def rename_exists(module, array):
+ """Determine if rename target already exists"""
+ exists = False
+ new_name = module.params["rename"]
+ for vgroup in array.list_vgroups():
+ if vgroup["name"].casefold() == new_name.casefold():
+ exists = True
+ break
+ return exists
def get_multi_vgroups(module, destroyed=False):
@@ -272,6 +251,25 @@ def get_vgroup(module, array):
return vgroup
+def rename_vgroup(module, array):
+ changed = True
+ if not rename_exists(module, array):
+ try:
+ if not module.check_mode:
+ array.rename_vgroup(module.params["name"], module.params["rename"])
+ except Exception:
+ module.fail_json(
+ msg="Rename to {0} failed.".format(module.params["rename"])
+ )
+ else:
+ module.warn(
+ "Rename failed. Volume Group {0} already exists.".format(
+ module.params["rename"]
+ )
+ )
+ module.exit_json(changed=changed)
+
+
def make_vgroup(module, array):
"""Create Volume Group"""
changed = True
@@ -630,6 +628,7 @@ def main():
priority_operator=dict(type="str", choices=["+", "-"], default="+"),
priority_value=dict(type="int", choices=[0, 10], default=0),
eradicate=dict(type="bool", default=False),
+ rename=dict(type="str"),
)
)
@@ -673,6 +672,8 @@ def main():
eradicate_vgroup(module, array)
elif not vgroup and not xvgroup and state == "present":
make_vgroup(module, array)
+ elif state == "present" and vgroup and module.params["rename"] and not xvgroup:
+ rename_vgroup(module, array)
elif vgroup and state == "present":
update_vgroup(module, array)
elif vgroup is None and state == "absent":
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py
index 48e154c77..f9dd627a3 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_vnc.py
@@ -32,7 +32,7 @@ options:
choices: [ present, absent ]
name:
description:
- - Name od app
+ - Name of app
type: str
required: true
extends_documentation_fragment:
@@ -80,44 +80,65 @@ vnc:
type: str
"""
+HAS_PURESTORAGE = True
+try:
+ from pypureclient.flasharray import App
+except ImportError:
+ HAS_PURESTORAGE = False
+
+
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
- get_system,
+ get_array,
purefa_argument_spec,
)
-MIN_REQUIRED_API_VERSION = "1.17"
-
def enable_vnc(module, array, app):
"""Enable VNC port"""
changed = False
vnc_fact = []
- if not app["vnc_enabled"]:
- try:
- if not module.check_mode:
- array.enable_app_vnc(module.params["name"])
- vnc_fact = array.get_app_node(module.params["name"])
- changed = True
- except Exception:
- module.fail_json(
- msg="Enabling VNC for {0} failed".format(module.params["name"])
+ if not app.vnc_enabled:
+ changed = True
+ if not module.check_mode:
+ res = array.patch_apps(
+ names=[module.params["name"]], app=App(vnc_enabled=True)
)
+ if res.status_code == 200:
+ vnc_nodes = list(
+ array.get_apps_nodes(app_names=[module.params["name"]]).items
+ )[0]
+ vnc_fact = {
+ "status": vnc_nodes.status,
+ "index": vnc_nodes.index,
+ "version": vnc_nodes.version,
+ "vnc": vnc_nodes.vnc,
+ "name": module.params["name"],
+ }
+ else:
+ module.fail_json(
+ msg="Enabling VNC for {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
module.exit_json(changed=changed, vnc=vnc_fact)
def disable_vnc(module, array, app):
"""Disable VNC port"""
changed = False
- if app["vnc_enabled"]:
- try:
- if not module.check_mode:
- array.disable_app_vnc(module.params["name"])
- changed = True
- except Exception:
- module.fail_json(
- msg="Disabling VNC for {0} failed".format(module.params["name"])
+ if app.vnc_enabled:
+ changed = True
+ if not module.check_mode:
+ res = array.patch_apps(
+ names=[module.params["name"]], app=App(vnc_enabled=False)
)
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Disabling VNC for {0} failed. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
module.exit_json(changed=changed)
@@ -132,21 +153,18 @@ def main():
module = AnsibleModule(argument_spec, supports_check_mode=True)
- array = get_system(module)
- api_version = array._list_available_rest_versions()
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
- if MIN_REQUIRED_API_VERSION not in api_version:
- module.fail_json(
- msg="FlashArray REST version not supported. "
- "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
- )
- try:
- app = array.get_app(module.params["name"])
- except Exception:
+ array = get_array(module)
+
+ res = array.get_apps(names=[module.params["name"]])
+ if res.status_code != 200:
module.fail_json(
msg="Selected application {0} does not exist".format(module.params["name"])
)
- if not app["enabled"]:
+ app = list(res.items)[0]
+ if not app.enabled:
module.fail_json(
msg="Application {0} is not enabled".format(module.params["name"])
)
diff --git a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
index c3c92f6d4..877af7f74 100644
--- a/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
+++ b/ansible_collections/purestorage/flasharray/plugins/modules/purefa_volume.py
@@ -86,6 +86,7 @@ options:
See associated descriptions
- Only supported from Purity//FA v6.0.0 and higher
type: str
+ default: ""
bw_qos:
description:
- Bandwidth limit for volume in M or G units.
@@ -326,6 +327,10 @@ from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa impo
get_system,
purefa_argument_spec,
)
+from ansible_collections.purestorage.flasharray.plugins.module_utils.common import (
+ human_to_bytes,
+ human_to_real,
+)
QOS_API_VERSION = "1.14"
@@ -402,54 +407,6 @@ def get_pgroup(module, array):
return pgroup
-def human_to_bytes(size):
- """Given a human-readable byte string (e.g. 2G, 30M),
- return the number of bytes. Will return 0 if the argument has
- unexpected form.
- """
- bytes = size[:-1]
- unit = size[-1].upper()
- if bytes.isdigit():
- bytes = int(bytes)
- if unit == "P":
- bytes *= 1125899906842624
- elif unit == "T":
- bytes *= 1099511627776
- elif unit == "G":
- bytes *= 1073741824
- elif unit == "M":
- bytes *= 1048576
- elif unit == "K":
- bytes *= 1024
- else:
- bytes = 0
- else:
- bytes = 0
- return bytes
-
-
-def human_to_real(iops):
- """Given a human-readable IOPs string (e.g. 2K, 30M),
- return the real number. Will return 0 if the argument has
- unexpected form.
- """
- digit = iops[:-1]
- unit = iops[-1].upper()
- if unit.isdigit():
- digit = iops
- elif digit.isdigit():
- digit = int(digit)
- if unit == "M":
- digit *= 1000000
- elif unit == "K":
- digit *= 1000
- else:
- digit = 0
- else:
- digit = 0
- return digit
-
-
def get_multi_volumes(module, destroyed=False):
"""Return True is all volumes exist or None"""
names = []
@@ -1553,7 +1510,7 @@ def main():
count=dict(type="int"),
start=dict(type="int", default=0),
digits=dict(type="int", default=1),
- suffix=dict(type="str"),
+ suffix=dict(type="str", default=""),
priority_operator=dict(type="str", choices=["+", "-", "="]),
priority_value=dict(type="int", choices=[-10, 0, 10]),
size=dict(type="str"),
diff --git a/ansible_collections/purestorage/flasharray/requirements.txt b/ansible_collections/purestorage/flasharray/requirements.txt
index 3cf5d0672..0d2366d54 100644
--- a/ansible_collections/purestorage/flasharray/requirements.txt
+++ b/ansible_collections/purestorage/flasharray/requirements.txt
@@ -4,4 +4,3 @@ python >= 3.6
netaddr
requests
pycountry
-packaging
diff --git a/ansible_collections/purestorage/flasharray/tests/config.yaml b/ansible_collections/purestorage/flasharray/tests/config.yaml
new file mode 100644
index 000000000..9e402bda7
--- /dev/null
+++ b/ansible_collections/purestorage/flasharray/tests/config.yaml
@@ -0,0 +1,2 @@
+modules:
+ python_requires: ">=3.6"
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml b/ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml
index 0b2102184..384c5ac93 100644
--- a/ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/ansible-lint.yml
@@ -1,5 +1,5 @@
-name: Ansible Lint # feel free to pick your own name
-on: [push, pull_request]
+name: Ansible Lint # feel free to pick your own name
+"on": [push, pull_request]
jobs:
build:
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/black.yaml b/ansible_collections/purestorage/flashblade/.github/workflows/black.yaml
index e5f9711f6..19b2b01d3 100644
--- a/ansible_collections/purestorage/flashblade/.github/workflows/black.yaml
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/black.yaml
@@ -1,6 +1,6 @@
name: Lint
-on: [push, pull_request]
+"on": [push, pull_request]
jobs:
lint:
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/main.yml b/ansible_collections/purestorage/flashblade/.github/workflows/main.yml
index e66ce2991..4368a1708 100644
--- a/ansible_collections/purestorage/flashblade/.github/workflows/main.yml
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/main.yml
@@ -1,6 +1,6 @@
name: Pure Storage Ansible CI
-on:
+"on":
pull_request:
push:
schedule:
@@ -13,32 +13,20 @@ jobs:
strategy:
matrix:
ansible:
- - stable-2.11
- - stable-2.12
- - stable-2.13
- stable-2.14
- stable-2.15
+ - stable-2.16
- devel
python-version:
- - 3.8
- 3.9
- "3.10"
- "3.11"
exclude:
- - python-version: "3.11"
- ansible: stable-2.11
- - python-version: "3.11"
- ansible: stable-2.12
- - python-version: "3.11"
- ansible: stable-2.13
- - python-version: "3.10"
- ansible: stable-2.11
- - python-version: 3.8
- ansible: stable-2.14
- - python-version: 3.8
- ansible: stable-2.15
- - python-version: 3.8
+ - python-version: 3.9
+ ansible: stable-2.16
+ - python-version: 3.9
ansible: devel
+
steps:
- name: Check out code
uses: actions/checkout@v3
diff --git a/ansible_collections/purestorage/flashblade/.github/workflows/stale.yml b/ansible_collections/purestorage/flashblade/.github/workflows/stale.yml
index 7bbc0505b..ee7c9796e 100644
--- a/ansible_collections/purestorage/flashblade/.github/workflows/stale.yml
+++ b/ansible_collections/purestorage/flashblade/.github/workflows/stale.yml
@@ -1,6 +1,6 @@
name: Mark stale issues and pull requests
-on:
+"on":
schedule:
- cron: "0 0 * * *"
diff --git a/ansible_collections/purestorage/flashblade/.pylintrc b/ansible_collections/purestorage/flashblade/.pylintrc
deleted file mode 100644
index 9570a2b59..000000000
--- a/ansible_collections/purestorage/flashblade/.pylintrc
+++ /dev/null
@@ -1,587 +0,0 @@
-[MASTER]
-
-# A comma-separated list of package or module names from where C extensions may
-# be loaded. Extensions are loading into the active Python interpreter and may
-# run arbitrary code
-extension-pkg-whitelist=
-
-# Add files or directories to the blacklist. They should be base names, not
-# paths.
-ignore=CVS
-
-# Add files or directories matching the regex patterns to the blacklist. The
-# regex matches against base names, not paths.
-ignore-patterns=
-
-# Python code to execute, usually for sys.path manipulation such as
-# pygtk.require().
-#init-hook=
-
-# Use multiple processes to speed up Pylint.
-jobs=1
-
-# List of plugins (as comma separated values of python modules names) to load,
-# usually to register additional checkers.
-load-plugins=
-
-# Pickle collected data for later comparisons.
-persistent=yes
-
-# Specify a configuration file.
-#rcfile=
-
-# When enabled, pylint would attempt to guess common misconfiguration and emit
-# user-friendly hints instead of false-positive error messages
-suggestion-mode=yes
-
-# Allow loading of arbitrary C extensions. Extensions are imported into the
-# active Python interpreter and may run arbitrary code.
-unsafe-load-any-extension=no
-
-
-[MESSAGES CONTROL]
-
-# Only show warnings with the listed confidence levels. Leave empty to show
-# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
-confidence=
-
-# Disable the message, report, category or checker with the given id(s). You
-# can either give multiple identifiers separated by comma (,) or put this
-# option multiple times (only on the command line, not in the configuration
-# file where it should appear only once).You can also use "--disable=all" to
-# disable everything first and then reenable specific checks. For example, if
-# you want to run only the similarities checker, you can use "--disable=all
-# --enable=similarities". If you want to run only the classes checker, but have
-# no Warning level messages displayed, use"--disable=all --enable=classes
-# --disable=W"
-disable=
- abstract-method,
- access-member-before-definition,
- ansible-deprecated-version,
- arguments-differ,
- assignment-from-no-return,
- assignment-from-none,
- attribute-defined-outside-init,
- bad-continuation,
- bad-indentation,
- bad-mcs-classmethod-argument,
- broad-except,
- c-extension-no-member,
- cell-var-from-loop,
- chained-comparison,
- comparison-with-callable,
- consider-iterating-dictionary,
- consider-merging-isinstance,
- consider-using-dict-comprehension,
- consider-using-enumerate,
- consider-using-get,
- consider-using-in,
- consider-using-set-comprehension,
- consider-using-ternary,
- deprecated-lambda,
- deprecated-method,
- deprecated-module,
- eval-used,
- exec-used,
- expression-not-assigned,
- fixme,
- function-redefined,
- global-statement,
- global-variable-undefined,
- import-error,
- import-self,
- inconsistent-return-statements,
- invalid-envvar-default,
- invalid-name,
- invalid-sequence-index,
- keyword-arg-before-vararg,
- len-as-condition,
- line-too-long,
- literal-comparison,
- locally-disabled,
- method-hidden,
- misplaced-comparison-constant,
- missing-docstring,
- no-else-raise,
- no-else-return,
- no-init,
- no-member,
- no-name-in-module,
- no-self-use,
- no-value-for-parameter,
- non-iterator-returned,
- not-a-mapping,
- not-an-iterable,
- not-callable,
- old-style-class,
- pointless-statement,
- pointless-string-statement,
- possibly-unused-variable,
- protected-access,
- redefined-argument-from-local,
- redefined-builtin,
- redefined-outer-name,
- redefined-variable-type,
- reimported,
- relative-import,
- signature-differs,
- simplifiable-if-expression,
- simplifiable-if-statement,
- subprocess-popen-preexec-fn,
- super-init-not-called,
- superfluous-parens,
- too-few-public-methods,
- too-many-ancestors,
- too-many-arguments,
- too-many-boolean-expressions,
- too-many-branches,
- too-many-function-args,
- too-many-instance-attributes,
- too-many-lines,
- too-many-locals,
- too-many-nested-blocks,
- too-many-public-methods,
- too-many-return-statements,
- too-many-statements,
- trailing-comma-tuple,
- trailing-comma-tuple,
- try-except-raise,
- unbalanced-tuple-unpacking,
- undefined-loop-variable,
- unexpected-keyword-arg,
- ungrouped-imports,
- unidiomatic-typecheck,
- unnecessary-pass,
- unsubscriptable-object,
- unsupported-assignment-operation,
- unsupported-delete-operation,
- unsupported-membership-test,
- unused-argument,
- unused-import,
- unused-variable,
- used-before-assignment,
- useless-object-inheritance,
- useless-return,
- useless-super-delegation,
- wrong-import-order,
- wrong-import-position,
-
-# Enable the message, report, category or checker with the given id(s). You can
-# either give multiple identifier separated by comma (,) or put this option
-# multiple time (only on the command line, not in the configuration file where
-# it should appear only once). See also the "--disable" option for examples.
-enable=c-extension-no-member
-
-
-[REPORTS]
-
-# Python expression which should return a note less than 10 (10 is the highest
-# note). You have access to the variables errors warning, statement which
-# respectively contain the number of errors / warnings messages and the total
-# number of statements analyzed. This is used by the global evaluation report
-# (RP0004).
-evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-
-# Template used to display messages. This is a python new-style format string
-# used to format the message information. See doc for all details
-#msg-template=
-
-# Set the output format. Available formats are text, parseable, colorized, json
-# and msvs (visual studio).You can also give a reporter class, eg
-# mypackage.mymodule.MyReporterClass.
-output-format=text
-
-# Tells whether to display a full report or only the messages
-reports=no
-
-# Activate the evaluation score.
-score=yes
-
-
-[REFACTORING]
-
-# Maximum number of nested blocks for function / method body
-max-nested-blocks=5
-
-# Complete name of functions that never returns. When checking for
-# inconsistent-return-statements if a never returning function is called then
-# it will be considered as an explicit return statement and no message will be
-# printed.
-never-returning-functions=optparse.Values,sys.exit
-
-
-[VARIABLES]
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-additional-builtins=
-
-# Tells whether unused global variables should be treated as a violation.
-allow-global-unused-variables=yes
-
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,
- _cb
-
-# A regular expression matching the name of dummy variables (i.e. expectedly
-# not used).
-dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
-
-# Argument names that match this expression will be ignored. Default to name
-# with leading underscore
-ignored-argument-names=_.*|^ignored_|^unused_
-
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-
-# List of qualified module names which can have objects that can redefine
-# builtins.
-redefining-builtins-modules=six.moves,past.builtins,future.builtins,io,builtins
-
-
-[BASIC]
-
-# Naming style matching correct argument names
-argument-naming-style=snake_case
-
-# Regular expression matching correct argument names. Overrides argument-
-# naming-style
-#argument-rgx=
-
-# Naming style matching correct attribute names
-attr-naming-style=snake_case
-
-# Regular expression matching correct attribute names. Overrides attr-naming-
-# style
-#attr-rgx=
-
-# Bad variable names which should always be refused, separated by a comma
-bad-names=foo,
- bar,
- baz,
- toto,
- tutu,
- tata,
- _,
-
-# Naming style matching correct class attribute names
-class-attribute-naming-style=any
-
-# Regular expression matching correct class attribute names. Overrides class-
-# attribute-naming-style
-#class-attribute-rgx=
-
-# Naming style matching correct class names
-class-naming-style=PascalCase
-
-# Regular expression matching correct class names. Overrides class-naming-style
-#class-rgx=
-
-# Naming style matching correct constant names
-const-naming-style=UPPER_CASE
-
-# Regular expression matching correct constant names. Overrides const-naming-
-# style
-#const-rgx=
-
-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-docstring-min-length=-1
-
-# Naming style matching correct function names
-function-naming-style=snake_case
-
-# Regular expression matching correct function names. Overrides function-
-# naming-style
-#function-rgx=
-
-# Good variable names which should always be accepted, separated by a comma
-good-names=i,
- j,
- k,
- f,
- e,
- ex,
- Run,
- C,
- __metaclass__,
-
-# Include a hint for the correct naming format with invalid-name
-include-naming-hint=no
-
-# Naming style matching correct inline iteration names
-inlinevar-naming-style=any
-
-# Regular expression matching correct inline iteration names. Overrides
-# inlinevar-naming-style
-#inlinevar-rgx=
-
-# Naming style matching correct method names
-method-naming-style=snake_case
-
-# Regular expression matching correct method names. Overrides method-naming-
-# style
-#method-rgx=
-
-# Naming style matching correct module names
-module-naming-style=snake_case
-
-# Regular expression matching correct module names. Overrides module-naming-
-# style
-#module-rgx=
-module-rgx=[a-z_][a-z0-9_-]{2,40}$
-method-rgx=[a-z_][a-z0-9_]{2,40}$
-function-rgx=[a-z_][a-z0-9_]{2,40}$
-
-# Colon-delimited sets of names that determine each other's naming style when
-# the name regexes allow several styles.
-name-group=
-
-# Regular expression which should only match function or class names that do
-# not require a docstring.
-no-docstring-rgx=^_
-
-# List of decorators that produce properties, such as abc.abstractproperty. Add
-# to this list to register other decorators that produce valid properties.
-property-classes=abc.abstractproperty
-
-# Naming style matching correct variable names
-variable-naming-style=snake_case
-
-# Regular expression matching correct variable names. Overrides variable-
-# naming-style
-#variable-rgx=
-
-
-[SPELLING]
-
-# Limits count of emitted suggestions for spelling mistakes
-max-spelling-suggestions=4
-
-# Spelling dictionary name. Available dictionaries: none. To make it working
-# install python-enchant package.
-spelling-dict=
-
-# List of comma separated words that should not be checked.
-spelling-ignore-words=
-
-# A path to a file that contains private dictionary; one word per line.
-spelling-private-dict-file=
-
-# Tells whether to store unknown words to indicated private dictionary in
-# --spelling-private-dict-file option instead of raising a message.
-spelling-store-unknown-words=no
-
-
-[FORMAT]
-
-# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
-expected-line-ending-format=
-
-# Regexp for a line that is allowed to be longer than the limit.
-ignore-long-lines=^\s*(# )?<?https?://\S+>?$
-
-# Number of spaces of indent required inside a hanging or continued line.
-indent-after-paren=4
-
-# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
-# tab).
-indent-string=' '
-
-# Maximum number of characters on a single line.
-max-line-length=160
-
-# Maximum number of lines in a module
-max-module-lines=1000
-
-# List of optional constructs for which whitespace checking is disabled. `dict-
-# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
-# `trailing-comma` allows a space between comma and closing bracket: (a, ).
-# `empty-line` allows space-only lines.
-no-space-check=trailing-comma,
- dict-separator
-
-# Allow the body of a class to be on the same line as the declaration if body
-# contains single statement.
-single-line-class-stmt=no
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-
-[TYPECHECK]
-
-# List of decorators that produce context managers, such as
-# contextlib.contextmanager. Add to this list to register other decorators that
-# produce valid context managers.
-contextmanager-decorators=contextlib.contextmanager
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E1101 when accessed. Python regular
-# expressions are accepted.
-#generated-members=PurityFb.*
-
-# Tells whether missing members accessed in mixin class should be ignored. A
-# mixin class is detected if its name ends with "mixin" (case insensitive).
-ignore-mixin-members=yes
-
-# This flag controls whether pylint should warn about no-member and similar
-# checks whenever an opaque object is returned when inferring. The inference
-# can return multiple potential results while evaluating a Python object, but
-# some branches might not be evaluated, which results in partial inference. In
-# that case, it might be useful to still emit no-member and other checks for
-# the rest of the inferred objects.
-ignore-on-opaque-inference=yes
-
-# List of class names for which member attributes should not be checked (useful
-# for classes with dynamically set attributes). This supports the use of
-# qualified names.
-ignored-classes=optparse.Values,thread._local,_thread._local
-
-# List of module names for which member attributes should not be checked
-# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis. It
-# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=
- _MovedItems,
-# Show a hint with possible names when a member name was not found. The aspect
-# of finding the hint is based on edit distance.
-missing-member-hint=yes
-
-# The minimum edit distance a name should have in order to be considered a
-# similar match for a missing member name.
-missing-member-hint-distance=1
-
-# The total number of similar names that should be taken in consideration when
-# showing a hint for a missing member.
-missing-member-max-choices=1
-
-
-[SIMILARITIES]
-
-# Ignore comments when computing similarities.
-ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-ignore-imports=no
-
-# Minimum lines number of a similarity.
-min-similarity-lines=4
-
-
-[LOGGING]
-
-# Logging modules to check that the string format arguments are in logging
-# function parameter format
-logging-modules=logging
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,
- XXX,
- TODO
-
-
-[CLASSES]
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,
- __new__,
- setUp
-
-# List of member names, which should be excluded from the protected access
-# warning.
-exclude-protected=_asdict,
- _fields,
- _replace,
- _source,
- _make
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=mcs
-
-
-[DESIGN]
-
-# Maximum number of arguments for function / method
-max-args=5
-
-# Maximum number of attributes for a class (see R0902).
-max-attributes=7
-
-# Maximum number of boolean expressions in a if statement
-max-bool-expr=5
-
-# Maximum number of branch for function / method body
-max-branches=12
-
-# Maximum number of locals for function / method body
-max-locals=15
-
-# Maximum number of parents for a class (see R0901).
-max-parents=7
-
-# Maximum number of public methods for a class (see R0904).
-max-public-methods=20
-
-# Maximum number of return / yield for function / method body
-max-returns=6
-
-# Maximum number of statements in function / method body
-max-statements=50
-
-# Minimum number of public methods for a class (see R0903).
-min-public-methods=2
-
-
-[IMPORTS]
-
-# Allow wildcard imports from modules that define __all__.
-allow-wildcard-with-all=no
-
-# Analyse import fallback blocks. This can be used to support both Python 2 and
-# 3 compatible code, which means that the block might have code that exists
-# only in one or another interpreter, leading to false positives when analysed.
-analyse-fallback-blocks=no
-
-# Deprecated modules which should not be used, separated by a comma
-deprecated-modules=regsub,
- TERMIOS,
- Bastion,
- rexec
-
-# Create a graph of external dependencies in the given file (report RP0402 must
-# not be disabled)
-ext-import-graph=
-
-# Create a graph of every (i.e. internal and external) dependencies in the
-# given file (report RP0402 must not be disabled)
-import-graph=
-
-# Create a graph of internal dependencies in the given file (report RP0402 must
-# not be disabled)
-int-import-graph=
-
-# Force import order to recognize a module as part of the standard
-# compatibility libraries.
-known-standard-library=
-
-# Force import order to recognize a module as part of a third party library.
-known-third-party=enchant
-
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when being caught. Defaults to
-# "Exception"
-overgeneral-exceptions=Exception
diff --git a/ansible_collections/purestorage/flashblade/CHANGELOG.rst b/ansible_collections/purestorage/flashblade/CHANGELOG.rst
index c252af127..13b845abd 100644
--- a/ansible_collections/purestorage/flashblade/CHANGELOG.rst
+++ b/ansible_collections/purestorage/flashblade/CHANGELOG.rst
@@ -5,6 +5,98 @@ Purestorage.Flashblade Release Notes
.. contents:: Topics
+v1.16.0
+=======
+
+Minor Changes
+-------------
+
+- purefb_ds - Add `force_bind_password` parameter to allow module to be idempotent.
+
+Bugfixes
+--------
+
+- purefb_bucket - Changed logic to allow complex buckets to be created in a single call, rather than having to split into two tasks.
+- purefb_lag - Enable LAG port configuration with multi-chassis
+- purefb_timeout - Fixed arithmetic error that resulted in module incorrectly reporting changed when no change was required.
+
+v1.15.0
+=======
+
+Minor Changes
+-------------
+
+- purefb_bucket - Add support for public buckets
+- purefb_bucket - From REST 2.12 the `mode` parameter default changes to `multi-site-writable`.
+- purefb_fs - Added SMB Continuous Availability parameter. Requires REST 2.12 or higher.
+- purefb_info - Added enhanced information for buckets, filesystems and snapshots, based on new features in REST 2.12
+- purefb_s3acc - Add support for public buckets
+- purefb_s3acc - Remove default requirements for ``hard_limit`` and ``default_hard_limit``
+
+Bugfixes
+--------
+
+- purefb_info - Added missing object lock retention details if enabled
+
+New Modules
+-----------
+
+- purestorage.flashblade.purefb_hardware - Manage FlashBlade Hardware
+
+v1.14.0
+=======
+
+Minor Changes
+-------------
+
+- purefb_bucket_replica - Added support for cascading replica links
+- purefb_info - New fields to display free space (remaining quota) for Accounts and Buckets. Space used by destroyed buckets is split out from virtual field to new destroyed_virtual field
+- purefb_info - Report encryption state in SMB client policy rules
+- purefb_info - Report more detailed space data from Purity//FB 4.3.0
+- purefb_policy - Add deny effect for object store policy rules. Requires Purity//FB 4.3.0+
+- purefb_policy - Added parameter to define object store policy description
+
+Bugfixes
+--------
+
+- purefb_userpolicy - Fixed `show` state for all user policies
+
+v1.13.1
+=======
+
+Minor Changes
+-------------
+
+- purefb_policy - Add new and updated policy access rights
+
+Bugfixes
+--------
+
+- purefb_info - Fixed missing attributes for SMB client policy rules
+
+v1.13.0
+=======
+
+v1.12.0
+=======
+
+Minor Changes
+-------------
+
+- purefb_fs - Added support for SMB client and share policies
+- purefb_fs_replica - Added support to delete filesystem replica links from REST 2.10
+- purefb_info - Add drive type in drives subset for //S and //E platforms. Only available from REST 2.9.
+- purefb_info - Added support for SMB client and share policies
+- purefb_policy - Added support for SMB client and share policies
+- purefb_s3acc - Allow human readable quota sizes; eg. 1T, 230K, etc
+- purefb_s3user - Add new boolean parameter I(multiple_keys) to limit access keys for a user to a single key.
+
+Bugfixes
+--------
+
+- purefb_bucket - Fixed bucket type mode name typo
+- purefb_fs - Fixed issue with incorrect promotion state setting
+
v1.11.0
=======
@@ -24,7 +116,7 @@ Minor Changes
Bugfixes
--------
-- purefa_info - Fixed issue when more than 10 buckets have lifecycle rules.
+- purefb_info - Fixed issue when more than 10 buckets have lifecycle rules.
- purefb_s3user - Fix incorrect response when bad key/secret pair provided for new user
New Modules
@@ -133,7 +225,7 @@ v1.6.0
Minor Changes
-------------
-- purefa_virtualhost - New module to manage API Clients
+- purefb_virtualhost - New module to manage API Clients
- purefb_ad - New module to manage Active Directory Account
- purefb_eula - New module to sign EULA
- purefb_info - Add Active Directory, Kerberos and Object Store Account information
@@ -200,7 +292,7 @@ Minor Changes
Bugfixes
--------
-- purefa_policy - Resolve multiple issues related to incorrect use of timezones
+- purefb_policy - Resolve multiple issues related to incorrect use of timezones
- purefb_connect - Ensure changing encryption status on array connection is performed correctly
- purefb_connect - Fix breaking change created in purity_fb SDK 1.9.2 for deletion of array connections
- purefb_connect - Hide target array API token
diff --git a/ansible_collections/purestorage/flashblade/FILES.json b/ansible_collections/purestorage/flashblade/FILES.json
index 5cfa68659..d7b39dbda 100644
--- a/ansible_collections/purestorage/flashblade/FILES.json
+++ b/ansible_collections/purestorage/flashblade/FILES.json
@@ -8,402 +8,388 @@
"format": 1
},
{
- "name": "roles",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/.keep",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests",
+ "name": "changelogs",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/sanity",
+ "name": "changelogs/fragments",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/sanity/ignore-2.9.txt",
+ "name": "changelogs/fragments/140_more_32_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "chksum_sha256": "4e57a10a71ab3dd1c151a6867c0da118a21e13df2ef8b9d2fbb799108ddebcd4",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.13.txt",
+ "name": "changelogs/fragments/169_pypureclient_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "chksum_sha256": "fb6e7bfc1c816ec77dadf6bd4ab040a8089b98a1c9c75ec15603d407c27ce3f2",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.16.txt",
+ "name": "changelogs/fragments/96_fix_update_connection.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "chksum_sha256": "828cc0c94acf44d1d373402a0cc657527d9fce8ac744319fbe0d8035684932b4",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.14.txt",
+ "name": "changelogs/fragments/129-virtualhost.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "chksum_sha256": "0af56f02e1b7ad1ea585b3bbce897022faf28b448b69ea755951be3b5da40f7e",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.11.txt",
+ "name": "changelogs/fragments/161_add_lifecycle_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "chksum_sha256": "b8c87e250274f2b5007ce0898c9bb6d79129faedaa8427a52377f86c24c6e90f",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.12.txt",
+ "name": "changelogs/fragments/105_max_access_key.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "chksum_sha256": "fb9f5707e7466fe7c94479891f218bacd04ae45a37c2f207dcf51ac756fb7259",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.15.txt",
+ "name": "changelogs/fragments/153_add_quota.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "chksum_sha256": "2b2517ea362d7128333d6fab7f99f6b70c4253d2807eae3ec417aa4451b3ae6c",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.10.txt",
+ "name": "changelogs/fragments/76_default_fs_size.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83175635ac646a5cfc169ec6316e0ea90071bf4e8e9a79212a413f326a3049bd",
+ "chksum_sha256": "6d8689e8f46ab7d3286b7d3ee46dfa13a8bf0585cc9b197a5ca8271c9dd9590e",
"format": 1
},
{
- "name": ".github",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/185_nfs_export_rule.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f53ac3485ed3849ca99fee6015e2767f636c1186a368b3d4e91ba6076afd7d4",
"format": 1
},
{
- "name": ".github/CONTRIBUTING.md",
+ "name": "changelogs/fragments/200_proxy.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7c429527b799623f57e6363e14ff8a319844c9120f4dfa18bcea3849cdc07128",
+ "chksum_sha256": "26631d7434c86b739bcd75c8905f8f668555217610cafb47f11a6e24937c7eb8",
"format": 1
},
{
- "name": ".github/workflows",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/211_change_booleans.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f04fd18a42e321cb3818a579e14cc50a6d27935196ff04632e2db44f7b807322",
"format": 1
},
{
- "name": ".github/workflows/black.yaml",
+ "name": "changelogs/fragments/135_add_user_policies.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6fb3e0af2e41fb0618586a2990e6645fb9b29d1a7b64b7168c5d27af320569c8",
+ "chksum_sha256": "a0b78f5b1a5be3bfb87a00a4e638fad67600b0bab4cfddd72b3bfa4d2e217e3f",
"format": 1
},
{
- "name": ".github/workflows/ansible-lint.yml",
+ "name": "changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4c85688d98b71e3a6594530a362cd5d2cf83842ceaccd0e0fc76e233777c1cef",
+ "chksum_sha256": "e140fbfc3ac4eaab3dd9c482e3beb37efd98ad4c3892b36f93ffb00d89c9283f",
"format": 1
},
{
- "name": ".github/workflows/stale.yml",
+ "name": "changelogs/fragments/230_prom_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0bdef4889afabcd627fc30711a0809c7468b8c9e64cbcebe1334f794a41e7bd9",
+ "chksum_sha256": "9059ca5e0ead1a93ffc503a86539d0b9ee64f0fc2526ba63ba76a0366192e178",
"format": 1
},
{
- "name": ".github/workflows/main.yml",
+ "name": "changelogs/fragments/162_new_lifecycle.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7f7d9b7fc9ac71a4ff36243422b04f4cf163a254c52e8ab647fb84807bc3ea21",
+ "chksum_sha256": "bd6214f7380736e34ed7a21396f1842c6796afba6c3b7413536522d4b6d0b531",
"format": 1
},
{
- "name": ".github/ISSUE_TEMPLATE",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/225_delete_rl.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9c0ed90d651ea4b4b83a2cb05cd9435e9707323207efc042f056eaa616a629c",
"format": 1
},
{
- "name": ".github/ISSUE_TEMPLATE/feature_request.md",
+ "name": "changelogs/fragments/243_policy_desc.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1f48c52f209a971b8e7eae4120144d28fcf8ee38a7778a7b4d8cf1ab356617d2",
+ "chksum_sha256": "7612dd96fa7e9be64784224d1fb2579eb57d28751a397fffe4f55213f507f78a",
"format": 1
},
{
- "name": ".github/ISSUE_TEMPLATE/bug_report.md",
+ "name": "changelogs/fragments/92_fix_ds_update.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0c8d64f29fb4536513653bf8c97da30f3340e2041b91c8952db1515d6b23a7b3",
+ "chksum_sha256": "8befcbbddf6fc2db62ff48b4f3a1030fe115fb7ababfc9b03c8e693628087337",
"format": 1
},
{
- "name": ".github/pull_request_template.md",
+ "name": "changelogs/fragments/132_add_timeout.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "565ead1b588caaa10cd6f2ed1bb6c809eb2ad93bf75da3a198690cac778432d6",
+ "chksum_sha256": "8aea8125471f4717c0efa211756fb2086542362d9bee50295686dbce9ba86db7",
"format": 1
},
{
- "name": "meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/202_multiple_snap_rules.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ed9e6c99d409df00b7cd2cb4a60bee536b9e0608c107a0944fb3a738ec0bd9f",
"format": 1
},
{
- "name": "meta/runtime.yml",
+ "name": "changelogs/fragments/84_add_cert.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "df18179bb2f5447a56ac92261a911649b96821c0b2c08eea62d5cc6b0195203f",
+ "chksum_sha256": "1d286bf0fe3301a898bcdcad0bf70955732608eb51468097ca6d70ae269654d8",
"format": 1
},
{
- "name": ".yamllint",
+ "name": "changelogs/fragments/121_replication_perf.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2970fa4875092f99825ac0da3c82d2413ce973087b9945e68fdfa7b3b1e2012e",
+ "chksum_sha256": "372e2b49c1b2fb2f637e01023dd3a5146ee61171adbf619062ceb5e53a5d3e96",
"format": 1
},
{
- "name": "README.md",
+ "name": "changelogs/fragments/217_inventory.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3c4d2257a4a25daf934a2b149aaa3397371d32f99f0b7042ca51a1a5fe981917",
+ "chksum_sha256": "4832bed915e1a18327ab9d7c15c65f55094f08215a26028d426ca694a90c2ae7",
"format": 1
},
{
- "name": "requirements.txt",
+ "name": "changelogs/fragments/v1.3.0_summary.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "87cb6471722fa1096099f228091480939c5b7c3ac39c2819543324a7701e66a3",
+ "chksum_sha256": "64bd3d32085373ce61a414518c2ed87bfd003d163d3002d087f41f4a54b0b1a0",
"format": 1
},
{
- "name": "playbooks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/216_extra_bucket_info.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf88a27b9c51eefd78e80b587012be110c967d0185597cac22cf5de86b73b053",
"format": 1
},
{
- "name": "playbooks/roles",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/154_add_snap_now.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bde815114a219fd03941a080c2e6acebd5ef748e7f67503e8c3ef5f81decd54",
"format": 1
},
{
- "name": "playbooks/.keep",
+ "name": "changelogs/fragments/127_add_eula.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "9b092f3766cf4309ac60ab77c2e51142ffbc81eb4bfa4da581d531ee2de633ac",
"format": 1
},
{
- "name": "playbooks/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/179_fqcn.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4c60f377dd4cd40de9c777a7d54f6d185afa785fdc45a751d67f2baccf9efdf",
"format": 1
},
{
- "name": "playbooks/templates/.keep",
+ "name": "changelogs/fragments/115_multiprotocol.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "51375d2aac996039ee4d338cbb7cc8f9d77f423f8f519ab6f84012ff021812ae",
"format": 1
},
{
- "name": "playbooks/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/176_nfs_export_policies.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36fc1c990afd6fb48068d113d6e4a6846368ad32523554acc9b9d9e5ba861161",
"format": 1
},
{
- "name": "playbooks/files/.keep",
+ "name": "changelogs/fragments/187_rename_nfs_policy.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "d8b9f4112fea72954805eca3c01cf04524d5bd02a5b2559cdfef68c09d616e49",
"format": 1
},
{
- "name": "playbooks/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/152_s3acc_lowercase.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a02995d6eeb1ac3968e952c61a552e5fc2feeef62ef7642d5f8714157da7d41",
"format": 1
},
{
- "name": "playbooks/vars/.keep",
+ "name": "changelogs/fragments/83_add_certgrp.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "b7513178564ee1707090e4b3df65af56f28a71119e0ebf73b074dc9d2c0e1d65",
"format": 1
},
{
- "name": "playbooks/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/237_info_policy.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3fccb2162770f16d8a32e93a24795388bad88a6fdad1eac209ebe132f4394b63",
"format": 1
},
{
- "name": "playbooks/tasks/.keep",
+ "name": "changelogs/fragments/244_add_deny.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "823eed6a5affd85e6c8c0d8a313520caebb86ddf32908ce3ee62efa76d8f9cd2",
"format": 1
},
{
- "name": "CHANGELOG.rst",
+ "name": "changelogs/fragments/78_update_filesystem_replica_link.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9f92bbcfdf90122b0ffdbe430cd0ff9b2a3b1e3cd1c099e0436b251de8674d74",
+ "chksum_sha256": "57a7b5ed892c4ea2f5149023b2bdde9481eb8c0a7593e4e76a4603e706971100",
"format": 1
},
{
- "name": "settings.json",
+ "name": "changelogs/fragments/150_fix_joint_nfs_version_change.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f64528ffd800423e1d49a3c79cdd3892548a57177ea1a1caacbbcd275390b792",
+ "chksum_sha256": "0e1a7b9242317cf785fa07608c5a661bad07fc79e8fd187264d9263dc0609479",
"format": 1
},
{
- "name": ".gitignore",
+ "name": "changelogs/fragments/239_access_rights.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f3e019033a4ff6d651103704d47629e6d911cb949652bd5e6121d7a918dbc480",
+ "chksum_sha256": "c050dd606a488ff604f085e772c5dbb7425a11afb40a3b42bc000473fe213150",
"format": 1
},
{
- "name": "changelogs",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/238_user_policy.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0870b9801eb51578bf27f52c0af7acdb599d92ccdcf82ca9391246e50058a62",
"format": 1
},
{
- "name": "changelogs/config.yaml",
+ "name": "changelogs/fragments/163_admin_key.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d1b77eeb2d9f7242075e746537713be29e397fe6954f13a1caf8b10695434b9b",
+ "chksum_sha256": "bd290345ed66c0809e6be94cabb6f1823b7e0b3f61d6a88a13f16ae849ce4399",
"format": 1
},
{
- "name": "changelogs/.plugin-cache.yaml",
+ "name": "changelogs/fragments/164_add_admin.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b528379cbf853914f8e8192b15e34bad21ea8c2b4de7faaab4f045fe1921fa4b",
+ "chksum_sha256": "53b89a2de09c79fcb3fdbdf82917985124d53f793046f1164c04a8578adb7df9",
"format": 1
},
{
- "name": "changelogs/changelog.yaml",
+ "name": "changelogs/fragments/246_smb_encrypt.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "728d1a92a9effec8bd73c032a3bd53fc8eb4d9029c824a2b6e1179b6922bf488",
+ "chksum_sha256": "340f37b3d71019cf3ea50de4c91b45570ff493ac3630940ec3b1c85c6fcc9cc5",
"format": 1
},
{
- "name": "changelogs/fragments",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "changelogs/fragments/80_support_reverse_replica_link.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3504f5e1acadaf52bd9d420373b7edce2015435232e5fa53282455361bcd440",
"format": 1
},
{
- "name": "changelogs/fragments/186_add_tz.yaml",
+ "name": "changelogs/fragments/220_s3user_key_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "44209d75080c5e4f437f409bb37e0f16c662658a6243fa890339fc076dfa7cd3",
+ "chksum_sha256": "ae00607f47b12b62456cb037b31474be8b7de0820b46ced24fc4a96b43f0eb76",
"format": 1
},
{
- "name": "changelogs/fragments/81_purefb_fs_new_options.yaml",
+ "name": "changelogs/fragments/114_certificate_update.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "abb817b52fdfa70b538ca9efce8d642282383b6961c47bde20ce0a023d2b941d",
+ "chksum_sha256": "ce77387c64b0714a4abe011d4eabc7b1a803058c1e7b407646ceb8249545e8aa",
"format": 1
},
{
- "name": "changelogs/fragments/101_fix_policy_and_timezone_error.yaml",
+ "name": "changelogs/fragments/257_mode_change.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2e9c5c95b8333fee22646f4e83e9034172182b1e99c084725f08df48e45d3d47",
+ "chksum_sha256": "29463aaf4d93f7d77361c6f2c7b00eba1c228feacf152b78a0249c27c32f0b11",
"format": 1
},
{
- "name": "changelogs/fragments/163_admin_key.yaml",
+ "name": "changelogs/fragments/224_smb_policies.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bd290345ed66c0809e6be94cabb6f1823b7e0b3f61d6a88a13f16ae849ce4399",
+ "chksum_sha256": "92a42a7a700f4cab0c2d0e7e9a7de6fd65813784e14284d866a99e4e5e3ec289",
"format": 1
},
{
- "name": "changelogs/fragments/220_s3user_key_fix.yaml",
+ "name": "changelogs/fragments/139_add_keytabs.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ae00607f47b12b62456cb037b31474be8b7de0820b46ced24fc4a96b43f0eb76",
+ "chksum_sha256": "c4d64b50797e36e3861e530b3e7c080277ebceb17ac5f58d4a08b8ac59c14d10",
"format": 1
},
{
- "name": "changelogs/fragments/211_change_booleans.yaml",
+ "name": "changelogs/fragments/107_add_remove_s3user_key.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f04fd18a42e321cb3818a579e14cc50a6d27935196ff04632e2db44f7b807322",
+ "chksum_sha256": "8a2bb28b43962c08ea8916db02a401f8bd7b4989bd1aa04f201ed8c602d94124",
"format": 1
},
{
- "name": "changelogs/fragments/217_inventory.yaml",
+ "name": "changelogs/fragments/232_multiple_keys.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4832bed915e1a18327ab9d7c15c65f55094f08215a26028d426ca694a90c2ae7",
+ "chksum_sha256": "899bef3076d8d2952069177a3c8de917b6ecdaa622ccbfd00933a4756adb4314",
"format": 1
},
{
- "name": "changelogs/fragments/174_access_policies.yaml",
+ "name": "changelogs/fragments/175_throttle_support.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "25f5a86a2a977555359c8088fab65902f1ee2b0cc3bc417a7383d5d5176d4802",
+ "chksum_sha256": "738e0e9c2f7789b1c931b5563416ca436fd0e04401232a502e6ce59fd03da28f",
"format": 1
},
{
- "name": "changelogs/fragments/152_s3acc_lowercase.yaml",
+ "name": "changelogs/fragments/123_lifecycle_rule_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0a02995d6eeb1ac3968e952c61a552e5fc2feeef62ef7642d5f8714157da7d41",
+ "chksum_sha256": "87a3f72b0ac11e72103dfb4766faecdd2b0c1fe5fad379e322c910c5134f7025",
"format": 1
},
{
- "name": "changelogs/fragments/76_default_fs_size.yaml",
+ "name": "changelogs/fragments/188_bucket_type.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6d8689e8f46ab7d3286b7d3ee46dfa13a8bf0585cc9b197a5ca8271c9dd9590e",
+ "chksum_sha256": "3c8485b792ba73283807489b10a7b6df8298c5f932aaeec7b6b841b2f504464a",
"format": 1
},
{
@@ -414,507 +400,563 @@
"format": 1
},
{
- "name": "changelogs/fragments/90_imported_keys.yaml",
+ "name": "changelogs/fragments/247_space_consistency.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ad1078e90875745edce8071846183eed39c3878156d14f96b5db78ab1c5be973",
+ "chksum_sha256": "1c6d43da77f2242c6f783856bff87b35ac3936fa2339feb38a6cc1640d46f341",
"format": 1
},
{
- "name": "changelogs/fragments/150_fix_joint_nfs_version_change.yaml",
+ "name": "changelogs/fragments/205_fix_multi_lifecycle.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0e1a7b9242317cf785fa07608c5a661bad07fc79e8fd187264d9263dc0609479",
+ "chksum_sha256": "909b52d292f6b41efd85760990e8ff59b58fab416ba2c24c4b409878cd724543",
"format": 1
},
{
- "name": "changelogs/fragments/164_add_admin.yaml",
+ "name": "changelogs/fragments/183_v2_connections.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "53b89a2de09c79fcb3fdbdf82917985124d53f793046f1164c04a8578adb7df9",
+ "chksum_sha256": "700e1509315604807c70d5b186542e74e058e4f912b1fe796df41c3d8a125d57",
"format": 1
},
{
- "name": "changelogs/fragments/129-virtualhost.yaml",
+ "name": "changelogs/fragments/242_cascade.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0af56f02e1b7ad1ea585b3bbce897022faf28b448b69ea755951be3b5da40f7e",
+ "chksum_sha256": "3795c4541a2ac413e40079ad215a431c79974a2e5f8aedb1019e729e6af9d1fb",
"format": 1
},
{
- "name": "changelogs/fragments/105_max_access_key.yaml",
+ "name": "changelogs/fragments/215_encrypt_sec_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fb9f5707e7466fe7c94479891f218bacd04ae45a37c2f207dcf51ac756fb7259",
+ "chksum_sha256": "6915aa0ddabb1f73dbced52d0114b84317958f29a2ef7ea4dcd72a10952f8017",
"format": 1
},
{
- "name": "changelogs/fragments/169_pypureclient_fix.yaml",
+ "name": "changelogs/fragments/109_update_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fb6e7bfc1c816ec77dadf6bd4ab040a8089b98a1c9c75ec15603d407c27ce3f2",
+ "chksum_sha256": "857bb23faa48e2d894f432cca4219681d7b3dab68473b3502dfe9319d751a3e1",
"format": 1
},
{
- "name": "changelogs/fragments/183_v2_connections.yaml",
+ "name": "changelogs/fragments/227_s3acc_human_quota.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "700e1509315604807c70d5b186542e74e058e4f912b1fe796df41c3d8a125d57",
+ "chksum_sha256": "4a6a471aeb6120e67aab7ada3f3c1736dff5bcd43311fef2bd90d846e510b0c1",
"format": 1
},
{
- "name": "changelogs/fragments/96_fix_update_connection.yaml",
+ "name": "changelogs/fragments/128_add_32_to_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "828cc0c94acf44d1d373402a0cc657527d9fce8ac744319fbe0d8035684932b4",
+ "chksum_sha256": "b18c7cf868d5699e4ad67e2d924c7a6323353147f8850757f7f2c4c7dda877c8",
"format": 1
},
{
- "name": "changelogs/fragments/200_proxy.yaml",
+ "name": "changelogs/fragments/88_add_lifecycle.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "26631d7434c86b739bcd75c8905f8f668555217610cafb47f11a6e24937c7eb8",
+ "chksum_sha256": "fdc6c425f03ffc0b4a008230f290f6ef37874a270909cb2ee311843dc08909f6",
"format": 1
},
{
- "name": "changelogs/fragments/159_add_lag.yaml",
+ "name": "changelogs/fragments/136_add_s3user_policy.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5b1d95e41e550ed7b8bdda62f09e9ae883915afd1b547d5f5bb863b21b803df3",
+ "chksum_sha256": "b97c8a102be108e8d74c9ec6d9aa73ec151fe7a77c676452d7b96cf75a4ecf6b",
"format": 1
},
{
- "name": "changelogs/fragments/115_multiprotocol.yaml",
+ "name": "changelogs/fragments/268_multi-chassis-lag.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "51375d2aac996039ee4d338cbb7cc8f9d77f423f8f519ab6f84012ff021812ae",
+ "chksum_sha256": "dc873b8118b78049c297f0bb54508ae3c5df169373c1a8810e5034b9bab75e3d",
"format": 1
},
{
- "name": "changelogs/fragments/85_add_banner.yaml",
+ "name": "changelogs/fragments/158_support_lags.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ee600c3bcae632d7450ff3447192f8ca2d1622eecd67bc87c59fdd3dd8326bc6",
+ "chksum_sha256": "68b3e104addfa10fb7f2f974bff2e5dad2c950e261c603f37409f42ab7afed02",
"format": 1
},
{
- "name": "changelogs/fragments/185_nfs_export_rule.yaml",
+ "name": "changelogs/fragments/194_lists_for_service.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8f53ac3485ed3849ca99fee6015e2767f636c1186a368b3d4e91ba6076afd7d4",
+ "chksum_sha256": "9e139b9ea88f7700071e57500cff497a6be300d8425b4a4ddaba77c36a8dc128",
"format": 1
},
{
- "name": "changelogs/fragments/128_add_32_to_info.yaml",
+ "name": "changelogs/fragments/186_add_tz.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b18c7cf868d5699e4ad67e2d924c7a6323353147f8850757f7f2c4c7dda877c8",
+ "chksum_sha256": "44209d75080c5e4f437f409bb37e0f16c662658a6243fa890339fc076dfa7cd3",
"format": 1
},
{
- "name": "changelogs/fragments/161_add_lifecycle_info.yaml",
+ "name": "changelogs/fragments/255_smb_ca.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b8c87e250274f2b5007ce0898c9bb6d79129faedaa8427a52377f86c24c6e90f",
+ "chksum_sha256": "70852998288bfd96ffe5999e13409b9f8645ba86743e8ca197f16fd4433f10c8",
"format": 1
},
{
- "name": "changelogs/fragments/138_add_ad_module.yaml",
+ "name": "changelogs/fragments/222_bucket_type_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "972d7c56c40a909882eeb3c199f4b7dfd05b080d8b159d2f4915c3d86beb055d",
+ "chksum_sha256": "68e1b5898249bac3068d141454779600b5682070bae9590d7335f9a2fff5d4fb",
"format": 1
},
{
- "name": "changelogs/fragments/202_multiple_snap_rules.yaml",
+ "name": "changelogs/fragments/167_fix_logins.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4ed9e6c99d409df00b7cd2cb4a60bee536b9e0608c107a0944fb3a738ec0bd9f",
+ "chksum_sha256": "426451dd9cb0925943b74eae2fe37702574efc7974f630a049737bfa74991ff3",
"format": 1
},
{
- "name": "changelogs/fragments/213_sec_update.yaml",
+ "name": "changelogs/fragments/81_purefb_fs_new_options.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6b71174c00e982cada0d051fae5e28c853207ec6d0f42a783db35a9519733769",
+ "chksum_sha256": "abb817b52fdfa70b538ca9efce8d642282383b6961c47bde20ce0a023d2b941d",
"format": 1
},
{
- "name": "changelogs/fragments/218_object_account_info.yaml",
+ "name": "changelogs/fragments/138_add_ad_module.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ef0f569461747bfcb2f294a8317d113b829323f9e6994e652d4344b2590099fa",
+ "chksum_sha256": "972d7c56c40a909882eeb3c199f4b7dfd05b080d8b159d2f4915c3d86beb055d",
"format": 1
},
{
- "name": "changelogs/fragments/80_support_reverse_replica_link.yaml",
+ "name": "changelogs/fragments/174_access_policies.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f3504f5e1acadaf52bd9d420373b7edce2015435232e5fa53282455361bcd440",
+ "chksum_sha256": "25f5a86a2a977555359c8088fab65902f1ee2b0cc3bc417a7383d5d5176d4802",
"format": 1
},
{
- "name": "changelogs/fragments/84_add_cert.yaml",
+ "name": "changelogs/fragments/86_add_syslog.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1d286bf0fe3301a898bcdcad0bf70955732608eb51468097ca6d70ae269654d8",
+ "chksum_sha256": "e42ee9ea2a2bffa465347a52a3fcf4bfaa51f377e7f33bf4a405eb46ae507442",
"format": 1
},
{
- "name": "changelogs/fragments/123_lifecycle_rule_fix.yaml",
+ "name": "changelogs/fragments/90_imported_keys.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "87a3f72b0ac11e72103dfb4766faecdd2b0c1fe5fad379e322c910c5134f7025",
+ "chksum_sha256": "ad1078e90875745edce8071846183eed39c3878156d14f96b5db78ab1c5be973",
"format": 1
},
{
- "name": "changelogs/fragments/83_add_certgrp.yml",
+ "name": "changelogs/fragments/245_quota_plus.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b7513178564ee1707090e4b3df65af56f28a71119e0ebf73b074dc9d2c0e1d65",
+ "chksum_sha256": "8379355479a2da9937127e1af246827353a15d4ec169da72c6090007f18760fb",
"format": 1
},
{
- "name": "changelogs/fragments/97_fix_encrpyted_array_connection_info.yaml",
+ "name": "changelogs/fragments/79_hide_connect_api.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e140fbfc3ac4eaab3dd9c482e3beb37efd98ad4c3892b36f93ffb00d89c9283f",
+ "chksum_sha256": "b4cd3cbdb65de6b71cfbe179d56a42be2afbf6486e1ce0df9fdd3a7042bd57b0",
"format": 1
},
{
- "name": "changelogs/fragments/136_add_s3user_policy.yaml",
+ "name": "changelogs/fragments/166_lag_mac_note.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b97c8a102be108e8d74c9ec6d9aa73ec151fe7a77c676452d7b96cf75a4ecf6b",
+ "chksum_sha256": "b639987ccd53708ee210a1812bd8c6af30292a3a1b6b42c7b839dd7120967e13",
"format": 1
},
{
- "name": "changelogs/fragments/114_certificate_update.yaml",
+ "name": "changelogs/fragments/223_add_drive_type.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ce77387c64b0714a4abe011d4eabc7b1a803058c1e7b407646ceb8249545e8aa",
+ "chksum_sha256": "f63992a09eef7139800eddd09d4a094b36d150e0e0074c552078045f27b1cf3a",
"format": 1
},
{
- "name": "changelogs/fragments/162_new_lifecycle.yaml",
+ "name": "changelogs/fragments/218_object_account_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bd6214f7380736e34ed7a21396f1842c6796afba6c3b7413536522d4b6d0b531",
+ "chksum_sha256": "ef0f569461747bfcb2f294a8317d113b829323f9e6994e652d4344b2590099fa",
"format": 1
},
{
- "name": "changelogs/fragments/132_add_timeout.yaml",
+ "name": "changelogs/fragments/77_filesystem_policies_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8aea8125471f4717c0efa211756fb2086542362d9bee50295686dbce9ba86db7",
+ "chksum_sha256": "8c7090d551cb59c49622a89c0ed25f12ad89104a9e2ab6708a01fc01fce9e049",
"format": 1
},
{
- "name": "changelogs/fragments/154_add_snap_now.yaml",
+ "name": "changelogs/fragments/85_add_banner.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6bde815114a219fd03941a080c2e6acebd5ef748e7f67503e8c3ef5f81decd54",
+ "chksum_sha256": "ee600c3bcae632d7450ff3447192f8ca2d1622eecd67bc87c59fdd3dd8326bc6",
"format": 1
},
{
- "name": "changelogs/fragments/158_support_lags.yaml",
+ "name": "changelogs/fragments/254_update_212_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "68b3e104addfa10fb7f2f974bff2e5dad2c950e261c603f37409f42ab7afed02",
+ "chksum_sha256": "7dfe31757a7c234c82a6175df2c608c4657b23ac5c029671a083cbdfc4846960",
"format": 1
},
{
- "name": "changelogs/fragments/92_fix_ds_update.yaml",
+ "name": "changelogs/fragments/191_add_quota_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8befcbbddf6fc2db62ff48b4f3a1030fe115fb7ababfc9b03c8e693628087337",
+ "chksum_sha256": "58ae5507364e9af847ac1806d27d6497bd36967ef3bdb34e3716cc294c178440",
"format": 1
},
{
- "name": "changelogs/fragments/86_add_syslog.yaml",
+ "name": "changelogs/fragments/131-apiclient.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e42ee9ea2a2bffa465347a52a3fcf4bfaa51f377e7f33bf4a405eb46ae507442",
+ "chksum_sha256": "04e3fdc25643fb469342e82df9213e49d2e4eb3e5411035f49f825d19542721c",
"format": 1
},
{
- "name": "changelogs/fragments/191_add_quota_info.yaml",
+ "name": "changelogs/fragments/159_add_lag.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "58ae5507364e9af847ac1806d27d6497bd36967ef3bdb34e3716cc294c178440",
+ "chksum_sha256": "5b1d95e41e550ed7b8bdda62f09e9ae883915afd1b547d5f5bb863b21b803df3",
"format": 1
},
{
- "name": "changelogs/fragments/188_bucket_type.yaml",
+ "name": "changelogs/fragments/266_bucket_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3c8485b792ba73283807489b10a7b6df8298c5f932aaeec7b6b841b2f504464a",
+ "chksum_sha256": "5dca2f8134265d79896952375deb436840338d883d644dea4d0ce7037c052eff",
"format": 1
},
{
- "name": "changelogs/fragments/109_update_info.yaml",
+ "name": "changelogs/fragments/213_sec_update.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "857bb23faa48e2d894f432cca4219681d7b3dab68473b3502dfe9319d751a3e1",
+ "chksum_sha256": "6b71174c00e982cada0d051fae5e28c853207ec6d0f42a783db35a9519733769",
"format": 1
},
{
- "name": "changelogs/fragments/121_replication_perf.yaml",
+ "name": "changelogs/fragments/147_no_gateway.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "372e2b49c1b2fb2f637e01023dd3a5146ee61171adbf619062ceb5e53a5d3e96",
+ "chksum_sha256": "0ca2ad2e1c1d60b110b87b2b37013bae6ee9daff64056f1dea691f2376cb8448",
"format": 1
},
{
- "name": "changelogs/fragments/153_add_quota.yaml",
+ "name": "changelogs/fragments/184_certificate_typos.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2b2517ea362d7128333d6fab7f99f6b70c4253d2807eae3ec417aa4451b3ae6c",
+ "chksum_sha256": "827c27fb0d7c31d13e89e829db35890c97a16cf437149264074c1c6fa52be9be",
"format": 1
},
{
- "name": "changelogs/fragments/v1.3.0_summary.yaml",
+ "name": "changelogs/fragments/212_object_account_quota.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "64bd3d32085373ce61a414518c2ed87bfd003d163d3002d087f41f4a54b0b1a0",
+ "chksum_sha256": "2d9dd6bbb0f690de495ad9416117baf213d1d60f164fbcaedafa5f941ebeba28",
"format": 1
},
{
- "name": "changelogs/fragments/113_policy_cleanup.yaml",
+ "name": "changelogs/fragments/101_fix_policy_and_timezone_error.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "11023f4d159bc146016fe9e9f40d18edb659518cb9dbc733750146e00de2b05c",
+ "chksum_sha256": "ca31175fc5d17623bd2988bdd67901cf45b209a2bae6e1edbac128489cbee6cd",
"format": 1
},
{
- "name": "changelogs/fragments/216_extra_bucket_info.yaml",
+ "name": "changelogs/fragments/90_delete_conn_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cf88a27b9c51eefd78e80b587012be110c967d0185597cac22cf5de86b73b053",
+ "chksum_sha256": "787138033d123fa59a9d3cdb424dc093183a020eebf1e76b46cbf059006e18e5",
"format": 1
},
{
- "name": "changelogs/fragments/212_object_account_quota.yaml",
+ "name": "changelogs/fragments/263_fix_multiple_modules_idempotency.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2d9dd6bbb0f690de495ad9416117baf213d1d60f164fbcaedafa5f941ebeba28",
+ "chksum_sha256": "9ece785a9cccd881deb20eaa8b83c2d23e799ce239e7897709b9a6b4436ad239",
"format": 1
},
{
- "name": "changelogs/fragments/215_encrypt_sec_info.yaml",
+ "name": "changelogs/fragments/252_object_lock_info.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6915aa0ddabb1f73dbced52d0114b84317958f29a2ef7ea4dcd72a10952f8017",
+ "chksum_sha256": "57a27b67a14a08762e081ac9facaec3004e366385cfdd272972e906a69467f9b",
"format": 1
},
{
- "name": "changelogs/fragments/131-apiclient.yaml",
+ "name": "changelogs/fragments/112_fix_check_mode.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "92dd9507a2a0476d24f0c1e7a5342925be49b4a84142fe8e33f4a76f422283c3",
+ "chksum_sha256": "11f8266ad857ed327ddbe8ef65f810a54e6c57df7ef24d1ec1d4c132abaa23a7",
"format": 1
},
{
- "name": "changelogs/fragments/107_add_remove_s3user_key.yaml",
+ "name": "changelogs/fragments/108_dns_search_fix.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8a2bb28b43962c08ea8916db02a401f8bd7b4989bd1aa04f201ed8c602d94124",
+ "chksum_sha256": "056e8181176826dc43b62100b6c50c8770680f0fcc37cf73737848233382b2e8",
"format": 1
},
{
- "name": "changelogs/fragments/88_add_lifecycle.yml",
+ "name": "changelogs/fragments/258_add_public_buckets.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fdc6c425f03ffc0b4a008230f290f6ef37874a270909cb2ee311843dc08909f6",
+ "chksum_sha256": "7c921911e2d432aae93be67fa251495af3bea63abf874f4837a59b6e7b61f85b",
"format": 1
},
{
- "name": "changelogs/fragments/147_no_gateway.yaml",
+ "name": "changelogs/fragments/113_policy_cleanup.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0ca2ad2e1c1d60b110b87b2b37013bae6ee9daff64056f1dea691f2376cb8448",
+ "chksum_sha256": "11023f4d159bc146016fe9e9f40d18edb659518cb9dbc733750146e00de2b05c",
"format": 1
},
{
- "name": "changelogs/fragments/78_update_filesystem_replica_link.yaml",
+ "name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "57a7b5ed892c4ea2f5149023b2bdde9481eb8c0a7593e4e76a4603e706971100",
+ "chksum_sha256": "58ab80ddfd28321e4c9f245810097a8efbcd09898e013b3a83e650d2dd8440ed",
"format": 1
},
{
- "name": "changelogs/fragments/176_nfs_export_policies.yaml",
+ "name": "changelogs/.plugin-cache.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "36fc1c990afd6fb48068d113d6e4a6846368ad32523554acc9b9d9e5ba861161",
+ "chksum_sha256": "74f8ee5c9b2c27b9b655d822e47443fc68975023c10a6e58c08dc4b925c61bb3",
"format": 1
},
{
- "name": "changelogs/fragments/112_fix_check_mode.yaml",
+ "name": "changelogs/config.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "11f8266ad857ed327ddbe8ef65f810a54e6c57df7ef24d1ec1d4c132abaa23a7",
+ "chksum_sha256": "d1b77eeb2d9f7242075e746537713be29e397fe6954f13a1caf8b10695434b9b",
"format": 1
},
{
- "name": "changelogs/fragments/79_hide_connect_api.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b4cd3cbdb65de6b71cfbe179d56a42be2afbf6486e1ce0df9fdd3a7042bd57b0",
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
"format": 1
},
{
- "name": "changelogs/fragments/167_fix_logins.yaml",
+ "name": "tests/config.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "426451dd9cb0925943b74eae2fe37702574efc7974f630a049737bfa74991ff3",
+ "chksum_sha256": "9a009a349eaaf78c93ff56072d2ef171937bdb884e4976592ab5aaa9c68e1044",
"format": 1
},
{
- "name": "changelogs/fragments/194_lists_for_service.yaml",
+ "name": "COPYING.GPLv3",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9e139b9ea88f7700071e57500cff497a6be300d8425b4a4ddaba77c36a8dc128",
+ "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903",
"format": 1
},
{
- "name": "changelogs/fragments/175_throttle_support.yaml",
+ "name": "playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/tasks/.keep",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "738e0e9c2f7789b1c931b5563416ca436fd0e04401232a502e6ce59fd03da28f",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
- "name": "changelogs/fragments/139_add_keytabs.yaml",
+ "name": "playbooks/.keep",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c4d64b50797e36e3861e530b3e7c080277ebceb17ac5f58d4a08b8ac59c14d10",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
- "name": "changelogs/fragments/135_add_user_policies.yaml",
+ "name": "playbooks/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/templates/.keep",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a0b78f5b1a5be3bfb87a00a4e638fad67600b0bab4cfddd72b3bfa4d2e217e3f",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
- "name": "changelogs/fragments/77_filesystem_policies_info.yaml",
+ "name": "playbooks/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/vars/.keep",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8c7090d551cb59c49622a89c0ed25f12ad89104a9e2ab6708a01fc01fce9e049",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
- "name": "changelogs/fragments/127_add_eula.yaml",
+ "name": "playbooks/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/files/.keep",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9b092f3766cf4309ac60ab77c2e51142ffbc81eb4bfa4da581d531ee2de633ac",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
- "name": "changelogs/fragments/187_rename_nfs_policy.yaml",
+ "name": "playbooks/roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d8b9f4112fea72954805eca3c01cf04524d5bd02a5b2559cdfef68c09d616e49",
+ "chksum_sha256": "87cb6471722fa1096099f228091480939c5b7c3ac39c2819543324a7701e66a3",
"format": 1
},
{
- "name": "changelogs/fragments/140_more_32_info.yaml",
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_ds.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4e57a10a71ab3dd1c151a6867c0da118a21e13df2ef8b9d2fbb799108ddebcd4",
+ "chksum_sha256": "d1f931875f4c3db92d3ceae0503fdf65635155ddba3a73ebb5a1bf1d2fde2d13",
"format": 1
},
{
- "name": "changelogs/fragments/205_fix_multi_lifecycle.yaml",
+ "name": "plugins/modules/purefb_alert.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c4080535eeb4ad5e56715dc1dd7683679072d027a65bce93a49adb4b56b68618",
+ "chksum_sha256": "80d6d4747cf607c7f73ac70a70a7c5f71c527c628f928e49b21de377f5cdbc25",
"format": 1
},
{
- "name": "changelogs/fragments/166_lag_mac_note.yaml",
+ "name": "plugins/modules/purefb_target.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b639987ccd53708ee210a1812bd8c6af30292a3a1b6b42c7b839dd7120967e13",
+ "chksum_sha256": "47eea0605e82c442152c801f95a3f55e31f816720bde09b7153caa4d9c58228f",
"format": 1
},
{
- "name": "changelogs/fragments/90_delete_conn_fix.yaml",
+ "name": "plugins/modules/purefb_inventory.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "787138033d123fa59a9d3cdb424dc093183a020eebf1e76b46cbf059006e18e5",
+ "chksum_sha256": "fccc8b5171f8a6252437a24c7c27829f0c41a7f13f3d058fc6dc80f69b820e3c",
"format": 1
},
{
- "name": "changelogs/fragments/108_dns_search_fix.yaml",
+ "name": "plugins/modules/purefb_ra.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "056e8181176826dc43b62100b6c50c8770680f0fcc37cf73737848233382b2e8",
+ "chksum_sha256": "3a9172183c8afdd07d3eb854f466a6c687ea881f6978053909ad9908f76db71b",
"format": 1
},
{
- "name": "changelogs/fragments/184_certificate_typos.yaml",
+ "name": "plugins/modules/purefb_timeout.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "827c27fb0d7c31d13e89e829db35890c97a16cf437149264074c1c6fa52be9be",
+ "chksum_sha256": "1a109f31c4f6aa429394238674140d2e38f36aaba2c007522f6749ee2c0bf31b",
"format": 1
},
{
- "name": "changelogs/fragments/179_fqcn.yaml",
+ "name": "plugins/modules/purefb_bladename.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d4c60f377dd4cd40de9c777a7d54f6d185afa785fdc45a751d67f2baccf9efdf",
+ "chksum_sha256": "1b21f650ae77744ba23b47de5b5bcf220ee68c77b127f569908c48eba08a8f24",
"format": 1
},
{
- "name": "LICENSE",
+ "name": "plugins/modules/purefb_remote_cred.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "chksum_sha256": "51baa72db5641ac2a00f98b07cc626fc65d11412ae11c24e7c5f2a381d2b63df",
"format": 1
},
{
- "name": "COPYING.GPLv3",
+ "name": "plugins/modules/purefb_smtp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903",
+ "chksum_sha256": "76d37be7050f2e57b7fa09cae4b7555fe8b644c031ae7b93a3de5af2cbe19781",
"format": 1
},
{
- "name": "plugins",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "plugins/modules/purefb_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c6abe4b8cf5db61cd7a27db057f8d2f28cf0d2ec2bf9b398cf3f9eba68bb0e1",
"format": 1
},
{
- "name": "plugins/modules",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "plugins/modules/purefb_ad.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40baf6272707344af09ee6c329457532462df5fedf087fc58662e295847444df",
"format": 1
},
{
- "name": "plugins/modules/purefb_bladename.py",
+ "name": "plugins/modules/purefb_apiclient.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1b21f650ae77744ba23b47de5b5bcf220ee68c77b127f569908c48eba08a8f24",
+ "chksum_sha256": "2cc1381512d001748885bd41104f8215397c74f464b696c216368de7598e47bb",
"format": 1
},
{
- "name": "plugins/modules/purefb_proxy.py",
+ "name": "plugins/modules/purefb_fs_replica.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "42514c4241a3e3f254d0cd0fd8a27f394a417990aed0dcc4888efc93fb2a2b7c",
+ "chksum_sha256": "cecd86a22b1111751fd5aa60cddf16e974d5511ceff825a0e878b51b88ae3ef8",
"format": 1
},
{
- "name": "plugins/modules/purefb_admin.py",
+ "name": "plugins/modules/purefb_eula.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "76c2ce2781241b7338e05f4d443090cb5fd5e7cb6fc1845ae5f78e9a0f9f5002",
+ "chksum_sha256": "1d06a41aeae5febbc2d1fecd64b888e5947f14b0944f473c3c5d1d46e50acfc4",
"format": 1
},
{
@@ -925,66 +967,66 @@
"format": 1
},
{
- "name": "plugins/modules/purefb_dns.py",
+ "name": "plugins/modules/purefb_dsrole.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9ebd127691bb88001865cba5e1813f0895111b8806c3c5fbfef5a21c24954bdb",
+ "chksum_sha256": "d625a7248695e857cc0eaf32beb340de4772c406278de8b3c81b1ce2740854c3",
"format": 1
},
{
- "name": "plugins/modules/purefb_tz.py",
+ "name": "plugins/modules/purefb_messages.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4d08c8115e92f613d74e1bbf53a59a379f95513e3a7d231a9f745a9dfe1d23d5",
+ "chksum_sha256": "0a0bcd83ebb86063ed9fb3db1bacbda9a89d4d82f11590b1d2cbfd978cd1c198",
"format": 1
},
{
- "name": "plugins/modules/purefb_alert.py",
+ "name": "plugins/modules/purefb_connect.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "80d6d4747cf607c7f73ac70a70a7c5f71c527c628f928e49b21de377f5cdbc25",
+ "chksum_sha256": "820c57b48e107ef852e6b2665c65ef76d67ffcde916cb21a368dcdae8e1e23e4",
"format": 1
},
{
- "name": "plugins/modules/purefb_remote_cred.py",
+ "name": "plugins/modules/purefb_proxy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "51baa72db5641ac2a00f98b07cc626fc65d11412ae11c24e7c5f2a381d2b63df",
+ "chksum_sha256": "42514c4241a3e3f254d0cd0fd8a27f394a417990aed0dcc4888efc93fb2a2b7c",
"format": 1
},
{
- "name": "plugins/modules/purefb_ad.py",
+ "name": "plugins/modules/purefb_admin.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "40baf6272707344af09ee6c329457532462df5fedf087fc58662e295847444df",
+ "chksum_sha256": "76c2ce2781241b7338e05f4d443090cb5fd5e7cb6fc1845ae5f78e9a0f9f5002",
"format": 1
},
{
- "name": "plugins/modules/purefb_connect.py",
+ "name": "plugins/modules/purefb_userpolicy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "78d93cd41217bfcca2d6cc900b560fb0a03d16e502162e52eb89c0e432b08820",
+ "chksum_sha256": "387f3f81064bcde26ef875b63c0fdb71472206a4c41ccc1db4a20eae0b0422eb",
"format": 1
},
{
- "name": "plugins/modules/purefb_fs.py",
+ "name": "plugins/modules/purefb_userquota.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dfee64d096d76c62d7b9081845b29b4f924bc2d6e6e699c3ff2b0ceb1b3c5714",
+ "chksum_sha256": "cf1a39e2b307e395b54c2a6ced7335971cf127f03ca6f1bd8af17a2aff28b9c2",
"format": 1
},
{
- "name": "plugins/modules/purefb_network.py",
+ "name": "plugins/modules/purefb_snmp_mgr.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "21398dfcfc59ad0c094ea608027bd44c121ecffc8fbff9ae96fde4f61ba65774",
+ "chksum_sha256": "2ff095c16f369a129dff76ab9c2660ba2f45d0bc62b2c07bcbf58d62067addfd",
"format": 1
},
{
- "name": "plugins/modules/purefb_target.py",
+ "name": "plugins/modules/purefb_banner.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "47eea0605e82c442152c801f95a3f55e31f816720bde09b7153caa4d9c58228f",
+ "chksum_sha256": "5daf1a121e8086c3ce3b510c9a52119ba256e49591932f4a575484fc7230b1f9",
"format": 1
},
{
@@ -995,283 +1037,353 @@
"format": 1
},
{
- "name": "plugins/modules/purefb_ds.py",
+ "name": "plugins/modules/purefb_certs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "756950f76e59b5099a8a331bb9afa80976cd7e37c605791f517af6442b9040b7",
+ "chksum_sha256": "b79151ea9333e6bde34361ab8a8e18b8d961ed6ed18c601c0b574d12020fa35f",
"format": 1
},
{
- "name": "plugins/modules/purefb_userpolicy.py",
+ "name": "plugins/modules/purefb_s3user.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8e9fe1856db864f057d4eb3bafb1107dce0d7c429acc4deeb25dfba991e510f0",
+ "chksum_sha256": "cb785aa8af88dc04e7e8d89a564855b928e34af99099cfd2d3774663212c5a93",
"format": 1
},
{
- "name": "plugins/modules/purefb_certs.py",
+ "name": "plugins/modules/purefb_groupquota.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b79151ea9333e6bde34361ab8a8e18b8d961ed6ed18c601c0b574d12020fa35f",
+ "chksum_sha256": "fb933221f221bc66e49534594bd0ed6c06f3d83fe57b1ec45bfda80ec593becd",
"format": 1
},
{
- "name": "plugins/modules/purefb_snmp_agent.py",
+ "name": "plugins/modules/purefb_tz.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2da4ecae583c8c94c55046e4a72a9437ac1f01aefa83e77d315e02792edf4a2c",
+ "chksum_sha256": "4d08c8115e92f613d74e1bbf53a59a379f95513e3a7d231a9f745a9dfe1d23d5",
"format": 1
},
{
- "name": "plugins/modules/purefb_subnet.py",
+ "name": "plugins/modules/purefb_dns.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2ff34ed58891cf1dcca1757f2d2a2d79a21f40e61195cc2d509fc56108560409",
+ "chksum_sha256": "9ebd127691bb88001865cba5e1813f0895111b8806c3c5fbfef5a21c24954bdb",
"format": 1
},
{
- "name": "plugins/modules/purefb_apiclient.py",
+ "name": "plugins/modules/purefb_phonehome.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2cc1381512d001748885bd41104f8215397c74f464b696c216368de7598e47bb",
+ "chksum_sha256": "53bcb5901f85f1938f06ef36f36ed37537b5ec2997b596c3906971ee016a3b9f",
"format": 1
},
{
- "name": "plugins/modules/purefb_inventory.py",
+ "name": "plugins/modules/purefb_syslog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe39dc9131937befc223fd3efd96a369238fa320618e77323fedaa8c7f2e7621",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_snap.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "391dedb1a7265a3f57b2193ee5efa254e981d3f4be1c6425adb036c6ddb7cf6b",
+ "chksum_sha256": "c0ccbd3a590ee10c35445717c2f0378abb36078d3fbb5908e195e40022eaa802",
"format": 1
},
{
- "name": "plugins/modules/purefb_syslog.py",
+ "name": "plugins/modules/purefb_keytabs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fe39dc9131937befc223fd3efd96a369238fa320618e77323fedaa8c7f2e7621",
+ "chksum_sha256": "9e68ef5023904b2b70f95567ef69356b43ed4324ab18fd080cc054c217326445",
"format": 1
},
{
"name": "plugins/modules/purefb_lag.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "911181fd37fedbb616cb2d2cc6b94c070a04ca56f4a69b97299ccff40be2c803",
+ "chksum_sha256": "aa70ba13b897ebb5b2d3059e253f173410259d98e72089632025b9d83b63927c",
"format": 1
},
{
- "name": "plugins/modules/purefb_messages.py",
+ "name": "plugins/modules/purefb_ntp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0a0bcd83ebb86063ed9fb3db1bacbda9a89d4d82f11590b1d2cbfd978cd1c198",
+ "chksum_sha256": "3df2990a95399fb343b3d9733534ffe3cef10b5546b939924aa17d04fb10fdd2",
"format": 1
},
{
- "name": "plugins/modules/purefb_banner.py",
+ "name": "plugins/modules/purefb_fs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5daf1a121e8086c3ce3b510c9a52119ba256e49591932f4a575484fc7230b1f9",
+ "chksum_sha256": "2438642ad2a6ce605587eb84e0573010449ce0710d601cbf337dfa4690d7b736",
"format": 1
},
{
- "name": "plugins/modules/purefb_snmp_mgr.py",
+ "name": "plugins/modules/purefb_snmp_agent.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2ff095c16f369a129dff76ab9c2660ba2f45d0bc62b2c07bcbf58d62067addfd",
+ "chksum_sha256": "2da4ecae583c8c94c55046e4a72a9437ac1f01aefa83e77d315e02792edf4a2c",
"format": 1
},
{
- "name": "plugins/modules/purefb_ra.py",
+ "name": "plugins/modules/purefb_network.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3a9172183c8afdd07d3eb854f466a6c687ea881f6978053909ad9908f76db71b",
+ "chksum_sha256": "21398dfcfc59ad0c094ea608027bd44c121ecffc8fbff9ae96fde4f61ba65774",
"format": 1
},
{
- "name": "plugins/modules/purefb_groupquota.py",
+ "name": "plugins/modules/purefb_hardware.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fb933221f221bc66e49534594bd0ed6c06f3d83fe57b1ec45bfda80ec593becd",
+ "chksum_sha256": "ec8d5321dfbb3825a06ae8332c1755a8befd5edd56b8b9064b05f70d065a2f1f",
"format": 1
},
{
- "name": "plugins/modules/purefb_s3acc.py",
+ "name": "plugins/modules/purefb_lifecycle.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ff4391301e7e1a21329460afd11d73b60fec6dbab050bea8ab0d8c740f571218",
+ "chksum_sha256": "beff3e20624460b82775e554a8c27cfd6b345d3a5a787f96df582a7026e23449",
"format": 1
},
{
- "name": "plugins/modules/purefb_info.py",
+ "name": "plugins/modules/purefb_virtualhost.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "45900eaeaafc923ea85e88c1dc87d2948e5a07f3ccb3aa2a4767c69fb2da3ac9",
+ "chksum_sha256": "37d614801411069d3c3aab20c018daf17496832bc73e59976b5bc25f8f5cddc2",
"format": 1
},
{
- "name": "plugins/modules/purefb_s3user.py",
+ "name": "plugins/modules/purefb_subnet.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4e3221ed572489da65f749e185123f662047918a8f9b8b9391f665d343e6acf4",
+ "chksum_sha256": "2ff34ed58891cf1dcca1757f2d2a2d79a21f40e61195cc2d509fc56108560409",
"format": 1
},
{
- "name": "plugins/modules/purefb_pingtrace.py",
+ "name": "plugins/modules/purefb_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "facfd9bbb4ec84cca4c6dc3608da73a2ab8af7a9b5b1f139fbcf6f91b4f83612",
+ "chksum_sha256": "b6dc24aac2c4733f7f37f0901a70fc3a9679cb06994d1407ba85f92bcc110d53",
"format": 1
},
{
- "name": "plugins/modules/purefb_snap.py",
+ "name": "plugins/modules/purefb_s3acc.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c0ccbd3a590ee10c35445717c2f0378abb36078d3fbb5908e195e40022eaa802",
+ "chksum_sha256": "0173d7180a53b8b4c1b74e39eb48cc85089834375c8c2055688b5e533782be3d",
"format": 1
},
{
- "name": "plugins/modules/purefb_smtp.py",
+ "name": "plugins/modules/purefb_bucket_replica.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "76d37be7050f2e57b7fa09cae4b7555fe8b644c031ae7b93a3de5af2cbe19781",
+ "chksum_sha256": "3a838e4dd90a4bf16368994e1214340362abf4f3338c8c6197783c147ed19c43",
"format": 1
},
{
- "name": "plugins/modules/purefb_keytabs.py",
+ "name": "plugins/modules/purefb_bucket.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9e68ef5023904b2b70f95567ef69356b43ed4324ab18fd080cc054c217326445",
+ "chksum_sha256": "11e6a2e0aa40ab4f7e50a4c2be3dfd17363e094b8ac126b5ad042c4d65c16055",
"format": 1
},
{
- "name": "plugins/modules/purefb_dsrole.py",
+ "name": "plugins/modules/purefb_pingtrace.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d625a7248695e857cc0eaf32beb340de4772c406278de8b3c81b1ce2740854c3",
+ "chksum_sha256": "facfd9bbb4ec84cca4c6dc3608da73a2ab8af7a9b5b1f139fbcf6f91b4f83612",
"format": 1
},
{
- "name": "plugins/modules/purefb_bucket.py",
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/purefb.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2b7e76b4c8be29be79485ec99cf01ce365e725801f7467931d6eb656c5f64120",
+ "chksum_sha256": "3f821bf25d0ecc7d686e0fe39b96b01fbdd87ebbf3047b5ae5a720a9fac47e30",
"format": 1
},
{
- "name": "plugins/modules/purefb_ntp.py",
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/purestorage.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3df2990a95399fb343b3d9733534ffe3cef10b5546b939924aa17d04fb10fdd2",
+ "chksum_sha256": "cb96797756b79883247778bbf7c9ed0c9a34e3e6f14d97b753e3d6401ec25f0f",
"format": 1
},
{
- "name": "plugins/modules/purefb_policy.py",
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/pull_request_template.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6cbb2b5f7a2bbbebefc28ab19d06344fdf43f316a31839a440f2f29b652d130b",
+ "chksum_sha256": "565ead1b588caaa10cd6f2ed1bb6c809eb2ad93bf75da3a198690cac778432d6",
"format": 1
},
{
- "name": "plugins/modules/purefb_bucket_replica.py",
+ "name": ".github/ISSUE_TEMPLATE",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/bug_report.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a8ad0c4a4506527009dbb28920c81b8cef6dddde65382af33e47c22522d27332",
+ "chksum_sha256": "0c8d64f29fb4536513653bf8c97da30f3340e2041b91c8952db1515d6b23a7b3",
"format": 1
},
{
- "name": "plugins/modules/purefb_virtualhost.py",
+ "name": ".github/ISSUE_TEMPLATE/feature_request.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "37d614801411069d3c3aab20c018daf17496832bc73e59976b5bc25f8f5cddc2",
+ "chksum_sha256": "1f48c52f209a971b8e7eae4120144d28fcf8ee38a7778a7b4d8cf1ab356617d2",
"format": 1
},
{
- "name": "plugins/modules/purefb_userquota.py",
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/ansible-lint.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cf1a39e2b307e395b54c2a6ced7335971cf127f03ca6f1bd8af17a2aff28b9c2",
+ "chksum_sha256": "62dbc43cafdab8da066ba0d86a08924e433f8b2919cdef935c116c5962d3a572",
"format": 1
},
{
- "name": "plugins/modules/purefb_fs_replica.py",
+ "name": ".github/workflows/stale.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6ef60aaaa8d397ecbef11da23f16d707829db7613811a3142f426076b2e8d577",
+ "chksum_sha256": "544ccc9f17e16d9087802e3dcec69741e6ff79e31cf7302947ce2c08126ce1d4",
"format": 1
},
{
- "name": "plugins/modules/purefb_timeout.py",
+ "name": ".github/workflows/black.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2c25d12eff522c44580b77e457c0496368e877bfe72cb41f1a9402a96ad18418",
+ "chksum_sha256": "803b5d6a6d7448701e1b7eb09595f783cb7ca83bd4d298f91c60ce7143c3607b",
"format": 1
},
{
- "name": "plugins/modules/purefb_phonehome.py",
+ "name": ".github/workflows/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "53bcb5901f85f1938f06ef36f36ed37537b5ec2997b596c3906971ee016a3b9f",
+ "chksum_sha256": "2be488cb3b6926e2f859ec951616fcfe2d50df7cdf98a978f2929c2b247633b4",
"format": 1
},
{
- "name": "plugins/modules/purefb_eula.py",
+ "name": ".github/CONTRIBUTING.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1d06a41aeae5febbc2d1fecd64b888e5947f14b0944f473c3c5d1d46e50acfc4",
+ "chksum_sha256": "7c429527b799623f57e6363e14ff8a319844c9120f4dfa18bcea3849cdc07128",
"format": 1
},
{
- "name": "plugins/modules/purefb_lifecycle.py",
+ "name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "beff3e20624460b82775e554a8c27cfd6b345d3a5a787f96df582a7026e23449",
+ "chksum_sha256": "33409fed9bd21cc282415c711d33a6a5170c7565354c40696ee532f0bc46c2a6",
"format": 1
},
{
- "name": "plugins/doc_fragments",
+ "name": "meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "plugins/doc_fragments/purestorage.py",
+ "name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cb96797756b79883247778bbf7c9ed0c9a34e3e6f14d97b753e3d6401ec25f0f",
+ "chksum_sha256": "4833e2900333e7a035d7e0f63f6d55777c2697476ee0a2f9bfcf250167c7571d",
"format": 1
},
{
- "name": "plugins/module_utils",
+ "name": "README.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9eca16f5db9ebc48387f94f50a9762c57fcb6a6eb4cd6c258f13b0a9a371be8e",
+ "format": 1
+ },
+ {
+ "name": "LICENSE",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3e019033a4ff6d651103704d47629e6d911cb949652bd5e6121d7a918dbc480",
+ "format": 1
+ },
+ {
+ "name": "roles",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "plugins/module_utils/purefb.py",
+ "name": "roles/.keep",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5a7a9657951dec2667ad720e965452a0003924cd36fe260527c01f83948d0473",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
- "name": "README.rst",
+ "name": "settings.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9eca16f5db9ebc48387f94f50a9762c57fcb6a6eb4cd6c258f13b0a9a371be8e",
+ "chksum_sha256": "f64528ffd800423e1d49a3c79cdd3892548a57177ea1a1caacbbcd275390b792",
"format": 1
},
{
- "name": ".pylintrc",
+ "name": ".git-blame-ignore-revs",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "75d8dc97586bc956a906be2aa0b86ec465eb78ce48d3d651ea1ddad3935d27cf",
+ "chksum_sha256": "272d9a8e8654881cd42bb4108716e720bc634065d74064fb09f29d0e6e817e21",
"format": 1
},
{
- "name": ".git-blame-ignore-revs",
+ "name": ".yamllint",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "272d9a8e8654881cd42bb4108716e720bc634065d74064fb09f29d0e6e817e21",
+ "chksum_sha256": "2970fa4875092f99825ac0da3c82d2413ce973087b9945e68fdfa7b3b1e2012e",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de7d63a6d1d411e66f64971129b4630faaca228eb0a8348f261034aab83faa04",
"format": 1
}
],
diff --git a/ansible_collections/purestorage/flashblade/MANIFEST.json b/ansible_collections/purestorage/flashblade/MANIFEST.json
index c111f1bf6..2af712175 100644
--- a/ansible_collections/purestorage/flashblade/MANIFEST.json
+++ b/ansible_collections/purestorage/flashblade/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "purestorage",
"name": "flashblade",
- "version": "1.11.0",
+ "version": "1.16.0",
"authors": [
"Pure Storage Ansible Team <pure-ansible-team@purestorage.com>"
],
@@ -14,7 +14,7 @@
"object",
"nfs"
],
- "description": "Collection of modules to manage Pure Storage FlashBlades",
+ "description": "Collection of modules to manage Pure Storage FlashBlade",
"license": [
"GPL-3.0-or-later",
"BSD-2-Clause"
@@ -30,7 +30,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1bb9f78982cdd6334e7f063927e0a32f11b5d6c6940b0cd253d3311be4717cda",
+ "chksum_sha256": "14b510daea00c6bbcbf0a5bcfff2c45937740e569c86fedeb64970968dc4eecc",
"format": 1
},
"format": 1
diff --git a/ansible_collections/purestorage/flashblade/README.md b/ansible_collections/purestorage/flashblade/README.md
index 7972158bc..6f288d63f 100644
--- a/ansible_collections/purestorage/flashblade/README.md
+++ b/ansible_collections/purestorage/flashblade/README.md
@@ -15,15 +15,16 @@ The Pure Storage FlashBlade collection consists of the latest versions of the Fl
## Prerequisites
-- Ansible 2.9 or later
+- Ansible 2.14 or later
- Pure Storage FlashBlade system running Purity//FB 2.1.2 or later
- some modules require higher versions of Purity//FB
-- purity_fb >=v1.12.2
+- purity-fb >=v1.12.2
- py-pure-client >=v1.27.0
-- python >=3.6
+- python >=3.9
- netaddr
- datetime
- pytz
+- distro
## Idempotency
@@ -48,6 +49,7 @@ All modules are idempotent with the exception of modules that change or set pass
- purefb_fs - manage filesystems on a FlashBlade
- purefb_fs_replica - manage filesystem replica links on a FlashBlade
- purefb_groupquota - manage individual group quotas on FlashBlade filesystems
+- purefb_hardware - manage hardware LED identifiers and hardware connectors
- purefb_info - get information about the configuration of a FlashBlade
- purefb_inventory - get information about the hardware inventory of a FlashBlade
- purefb_keytabs - manage FlashBlade Kerberos keytabs
diff --git a/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml b/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml
index 9834bdfed..c99d4477d 100644
--- a/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml
+++ b/ansible_collections/purestorage/flashblade/changelogs/.plugin-cache.yaml
@@ -101,6 +101,11 @@ plugins:
name: purefb_groupquota
namespace: ''
version_added: 1.7.0
+ purefb_hardware:
+ description: Manage FlashBlade Hardware
+ name: purefb_hardware
+ namespace: ''
+ version_added: 1.15.0
purefb_info:
description: Collect information from Pure Storage FlashBlade
name: purefb_info
@@ -251,4 +256,4 @@ plugins:
strategy: {}
test: {}
vars: {}
-version: 1.11.0
+version: 1.16.0
diff --git a/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml b/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml
index 9995182fa..eaeb07ed3 100644
--- a/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml
+++ b/ansible_collections/purestorage/flashblade/changelogs/changelog.yaml
@@ -38,7 +38,7 @@ releases:
1.11.0:
changes:
bugfixes:
- - purefa_info - Fixed issue when more than 10 buckets have lifecycle rules.
+ - purefb_info - Fixed issue when more than 10 buckets have lifecycle rules.
- purefb_s3user - Fix incorrect response when bad key/secret pair provided for
new user
minor_changes:
@@ -69,6 +69,105 @@ releases:
name: purefb_pingtrace
namespace: ''
release_date: '2023-04-13'
+ 1.12.0:
+ changes:
+ bugfixes:
+ - purefb_bucket - Fixed bucket type mode name typo
+ - purefb_fs - Fixed issue with incorrect promotion state setting
+ minor_changes:
+ - purefb_fs - Added support for SMB client and share policies
+ - purefb_fs_replica - Added support to delete filesystem replica links from
+ REST 2.10
+ - purefb_info - Add drive type in drives subset for //S and //E platforms. Only
+ available from REST 2.9.
+ - purefb_info - Added support for SMB client and share policies
+ - purefb_policy - Added support for SMB client and share policies
+ - purefb_s3acc - Allow human readable quota sizes; eg. 1T, 230K, etc
+ - purefb_s3user - Add new boolean parameter I(multiple_keys) to limit access
+ keys for a user to a single key.
+ fragments:
+ - 222_bucket_type_fix.yaml
+ - 223_add_drive_type.yaml
+ - 224_smb_policies.yaml
+ - 225_delete_rl.yaml
+ - 227_s3acc_human_quota.yaml
+ - 230_prom_fix.yaml
+ - 232_multiple_keys.yaml
+ release_date: '2023-07-10'
+ 1.13.0:
+ release_date: '2023-09-07'
+ 1.13.1:
+ changes:
+ bugfixes:
+ - purefb_info - Fixed missing attributes for SMB client policy rules
+ minor_changes:
+ - purefb_policy - Add new and updated policy access rights
+ fragments:
+ - 237_info_policy.yaml
+ - 239_access_rights.yaml
+ release_date: '2023-09-07'
+ 1.14.0:
+ changes:
+ bugfixes:
+ - purefb_userpolicy - Fixed `show` state for all user policies
+ minor_changes:
+ - purefb_bucket_replica - Added support for cascading replica links
+ - purefb_info - New fields to display free space (remaining quota) for Accounts
+ and Buckets. Space used by destroyed buckets is split out from virtual field
+ to new destroyed_virtual field
+ - purefb_info - Report encryption state in SMB client policy rules
+ - purefb_info - Report more detailed space data from Purity//FB 4.3.0
+ - purefb_policy - Add deny effect for object store policy rules. Requires Purity//FB
+ 4.3.0+
+ - purefb_policy - Added parameter to define object store policy description
+ fragments:
+ - 238_user_policy.yaml
+ - 242_cascade.yaml
+ - 243_policy_desc.yaml
+ - 244_add_deny.yaml
+ - 245_quota_plus.yaml
+ - 246_smb_encrypt.yaml
+ - 247_space_consistency.yaml
+ release_date: '2023-10-04'
+ 1.15.0:
+ changes:
+ bugfixes:
+ - purefb_info - Added missing object lock retention details if enabled
+ minor_changes:
+ - purefb_bucket - Add support for public buckets
+ - purefb_bucket - From REST 2.12 the `mode` parameter default changes to `multi-site-writable`.
+ - purefb_fs - Added SMB Continuous Availability parameter. Requires REST 2.12
+ or higher.
+ - purefb_info - Added enhanced information for buckets, filesystems and snapshots,
+ based on new features in REST 2.12
+ - purefb_s3acc - Add support for public buckets
+ - purefb_s3acc - Remove default requirements for ``hard_limit`` and ``default_hard_limit``
+ fragments:
+ - 252_object_lock_info.yaml
+ - 254_update_212_info.yaml
+ - 255_smb_ca.yaml
+ - 257_mode_change.yaml
+ - 258_add_public_buckets.yaml
+ modules:
+ - description: Manage FlashBlade Hardware
+ name: purefb_hardware
+ namespace: ''
+ release_date: '2024-01-12'
+ 1.16.0:
+ changes:
+ bugfixes:
+ - purefb_bucket - Changed logic to allow complex buckets to be created in a
+ single call, rather than having to split into two tasks.
+ - purefb_lag - Enable LAG port configuration with multi-chassis
+ - purefb_timeout - Fixed arithmetic error that resulted in module incorrectly
+ reporting changed when no change was required.
+ minor_changes:
+ - purefb_ds - Add `force_bind_password` parameter to allow module to be idempotent.
+ fragments:
+ - 263_fix_multiple_modules_idempotency.yaml
+ - 266_bucket_fix.yaml
+ - 268_multi-chassis-lag.yaml
+ release_date: '2024-02-27'
1.3.0:
changes:
bugfixes:
@@ -111,7 +210,7 @@ releases:
1.4.0:
changes:
bugfixes:
- - purefa_policy - Resolve multiple issues related to incorrect use of timezones
+ - purefb_policy - Resolve multiple issues related to incorrect use of timezones
- purefb_connect - Ensure changing encryption status on array connection is
performed correctly
- purefb_connect - Fix breaking change created in purity_fb SDK 1.9.2 for deletion
@@ -201,7 +300,7 @@ releases:
1.6.0:
changes:
minor_changes:
- - purefa_virtualhost - New module to manage API Clients
+ - purefb_virtualhost - New module to manage API Clients
- purefb_ad - New module to manage Active Directory Account
- purefb_eula - New module to sign EULA
- purefb_info - Add Active Directory, Kerberos and Object Store Account information
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml
index e6c1ea64d..4db1d1d7f 100644
--- a/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/101_fix_policy_and_timezone_error.yaml
@@ -1,2 +1,2 @@
bugfixes:
- - purefa_policy - Resolve multiple issues related to incorrect use of timezones
+ - purefb_policy - Resolve multiple issues related to incorrect use of timezones
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml
index 7a3f021b5..ec8c5ec64 100644
--- a/ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/131-apiclient.yaml
@@ -1,2 +1,2 @@
minor_changes:
- - purefa_virtualhost - New module to manage API Clients
+ - purefb_virtualhost - New module to manage API Clients
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml
index b6810884b..ec892b23c 100644
--- a/ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/205_fix_multi_lifecycle.yaml
@@ -1,2 +1,2 @@
bugfixes:
- - purefa_info - Fixed issue when more than 10 buckets have lifecycle rules.
+ - purefb_info - Fixed issue when more than 10 buckets have lifecycle rules.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/222_bucket_type_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/222_bucket_type_fix.yaml
new file mode 100644
index 000000000..d6cda4cb9
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/222_bucket_type_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_bucket - Fixed bucket type mode name typo
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/223_add_drive_type.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/223_add_drive_type.yaml
new file mode 100644
index 000000000..7fb4f3e80
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/223_add_drive_type.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Add drive type in drives subset for //S and //E platforms. Only available from REST 2.9.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/224_smb_policies.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/224_smb_policies.yaml
new file mode 100644
index 000000000..351711dfe
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/224_smb_policies.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - purefb_info - Added support for SMB client and share policies
+ - purefb_fs - Added support for SMB client and share policies
+ - purefb_policy - Added support for SMB client and share policies
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/225_delete_rl.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/225_delete_rl.yaml
new file mode 100644
index 000000000..3ccdea050
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/225_delete_rl.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_fs_replica - Added support to delete filesystem replica links from REST 2.10
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/227_s3acc_human_quota.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/227_s3acc_human_quota.yaml
new file mode 100644
index 000000000..677cfded7
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/227_s3acc_human_quota.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_s3acc - Allow human readable quota sizes; eg. 1T, 230K, etc
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/230_prom_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/230_prom_fix.yaml
new file mode 100644
index 000000000..0c3611993
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/230_prom_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_fs - Fixed issue with incorrect promotion state setting
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/232_multiple_keys.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/232_multiple_keys.yaml
new file mode 100644
index 000000000..b46b238e3
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/232_multiple_keys.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_s3user - Add new boolean parameter I(multiple_keys) to limit access keys for a user to a single key.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/237_info_policy.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/237_info_policy.yaml
new file mode 100644
index 000000000..fa4662b69
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/237_info_policy.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_info - Fixed missing attributes for SMB client policy rules
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/238_user_policy.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/238_user_policy.yaml
new file mode 100644
index 000000000..b49f6ebc0
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/238_user_policy.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_userpolicy - Fixed `show` state for all user policies
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/239_access_rights.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/239_access_rights.yaml
new file mode 100644
index 000000000..c0c59fd42
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/239_access_rights.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_policy - Add new and updated policy access rights
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/242_cascade.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/242_cascade.yaml
new file mode 100644
index 000000000..d0c00a484
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/242_cascade.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_bucket_replica - Added support for cascading replica links
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/243_policy_desc.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/243_policy_desc.yaml
new file mode 100644
index 000000000..a3ce1be18
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/243_policy_desc.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_policy - Added parameter to define object store policy description
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/244_add_deny.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/244_add_deny.yaml
new file mode 100644
index 000000000..ce741d778
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/244_add_deny.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_policy - Add deny effect for object store policy rules. Requires Purity//FB 4.3.0+
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/245_quota_plus.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/245_quota_plus.yaml
new file mode 100644
index 000000000..607d8a637
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/245_quota_plus.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - New fields to display free space (remaining quota) for Accounts and Buckets. Space used by destroyed buckets is split out from virtual field to new destroyed_virtual field
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/246_smb_encrypt.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/246_smb_encrypt.yaml
new file mode 100644
index 000000000..de6b9ae19
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/246_smb_encrypt.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Report encryption state in SMB client policy rules
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/247_space_consistency.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/247_space_consistency.yaml
new file mode 100644
index 000000000..692bae5d7
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/247_space_consistency.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Report more detailed space data from Purity//FB 4.3.0
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/252_object_lock_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/252_object_lock_info.yaml
new file mode 100644
index 000000000..49fdeaa8c
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/252_object_lock_info.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_info - Added missing object lock retention details if enabled
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/254_update_212_info.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/254_update_212_info.yaml
new file mode 100644
index 000000000..2a7d90be9
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/254_update_212_info.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_info - Added enhanced information for buckets, filesystems and snapshots, based on new features in REST 2.12
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/255_smb_ca.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/255_smb_ca.yaml
new file mode 100644
index 000000000..8517e3b45
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/255_smb_ca.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_fs - Added SMB Continuous Availability parameter. Requires REST 2.12 or higher.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/257_mode_change.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/257_mode_change.yaml
new file mode 100644
index 000000000..e00c10643
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/257_mode_change.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - purefb_bucket - From REST 2.12 the `mode` parameter default changes to `multi-site-writable`.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/258_add_public_buckets.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/258_add_public_buckets.yaml
new file mode 100644
index 000000000..5600e7e84
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/258_add_public_buckets.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - purefb_s3acc - Add support for public buckets
+ - purefb_s3acc - Remove default requirements for ``hard_limit`` and ``default_hard_limit``
+ - purefb_bucket - Add support for public buckets
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/263_fix_multiple_modules_idempotency.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/263_fix_multiple_modules_idempotency.yaml
new file mode 100644
index 000000000..693e846d3
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/263_fix_multiple_modules_idempotency.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - purefb_ds - Add `force_bind_password` parameter to allow module to be idempotent.
+bugfixes:
+ - purefb_timeout - Fixed arithmetic error that resulted in module incorrectly reporting changed when no change was required.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/266_bucket_fix.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/266_bucket_fix.yaml
new file mode 100644
index 000000000..f7d66e894
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/266_bucket_fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_bucket - Changed logic to allow complex buckets to be created in a single call, rather than having to split into two tasks.
diff --git a/ansible_collections/purestorage/flashblade/changelogs/fragments/268_multi-chassis-lag.yaml b/ansible_collections/purestorage/flashblade/changelogs/fragments/268_multi-chassis-lag.yaml
new file mode 100644
index 000000000..e9146f642
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/changelogs/fragments/268_multi-chassis-lag.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - purefb_lag - Enable LAG port configuration with multi-chassis
diff --git a/ansible_collections/purestorage/flashblade/meta/runtime.yml b/ansible_collections/purestorage/flashblade/meta/runtime.yml
index 2ee3c9fa9..be99ccf4b 100644
--- a/ansible_collections/purestorage/flashblade/meta/runtime.yml
+++ b/ansible_collections/purestorage/flashblade/meta/runtime.yml
@@ -1,2 +1,2 @@
---
-requires_ansible: '>=2.9.10'
+requires_ansible: '>=2.14.0'
diff --git a/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py b/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
index cf987a3e5..87b27a821 100644
--- a/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
+++ b/ansible_collections/purestorage/flashblade/plugins/module_utils/purefb.py
@@ -32,6 +32,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
+HAS_DISTRO = True
+try:
+ import distro
+except ImportError:
+ HAS_DISTRO = False
+
HAS_PURITY_FB = True
try:
from purity_fb import PurityFb
@@ -47,19 +53,27 @@ except ImportError:
from os import environ
import platform
-VERSION = "1.4"
+VERSION = "1.5"
USER_AGENT_BASE = "Ansible"
API_AGENT_VERSION = "1.5"
def get_blade(module):
"""Return System Object or Fail"""
- user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
- "base": USER_AGENT_BASE,
- "class": __name__,
- "version": VERSION,
- "platform": platform.platform(),
- }
+ if HAS_DISTRO:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": distro.name(pretty=True),
+ }
+ else:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
blade_name = module.params["fb_url"]
api = module.params["api_token"]
@@ -100,12 +114,20 @@ def get_blade(module):
def get_system(module):
"""Return System Object or Fail"""
- user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
- "base": USER_AGENT_BASE,
- "class": __name__,
- "version": VERSION,
- "platform": platform.platform(),
- }
+ if HAS_DISTRO:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": distro.name(pretty=True),
+ }
+ else:
+ user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % {
+ "base": USER_AGENT_BASE,
+ "class": __name__,
+ "version": VERSION,
+ "platform": platform.platform(),
+ }
blade_name = module.params["fb_url"]
api = module.params["api_token"]
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
index 67b6b1545..27cd7e317 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket.py
@@ -59,11 +59,72 @@ options:
description:
- The type of bucket to be created. Also referred to a VSO Mode.
- Requires Purity//FB 3.3.3 or higher
- - I(multi-site) type can only be used after feature is
+ - I(multi-site-writable) type can only be used after feature is
enabled by Pure Technical Support
type: str
- choices: [ "classic", "multi-site" ]
+ choices: [ "classic", "multi-site-writable" ]
version_added: '1.10.0'
+ quota:
+ description:
+ - User quota in M, G, T or P units. This cannot be 0.
+ - This value will override the object store account's default bucket quota.
+ type: str
+ version_added: '1.12.0'
+ hard_limit:
+ description:
+ - Whether the I(quota) value is enforced or not.
+ - If not provided the object store account default value will be used.
+ type: bool
+ version_added: '1.12.0'
+ retention_lock:
+ description:
+ - Set retention lock level for the bucket
+ - Once set to I(ratcheted) can only be lowered by Pure Technical Services
+ type: str
+ choices: [ "ratcheted", "unlocked" ]
+ default: unlocked
+ version_added: '1.12.0'
+ retention_mode:
+ description:
+ - The retention mode used to apply locks on new objects if none is specified by the S3 client
+ - Use "" to clear
+ - Once set to I(compliance) this can only be changed by contacting Pure Technical Services
+ type: str
+ choices: [ "compliance", "governance", "" ]
+ version_added: '1.12.0'
+ object_lock_enabled:
+ description:
+ - If set to true, then S3 APIs relating to object lock may be used
+ type: bool
+ default: false
+ version_added: '1.12.0'
+ freeze_locked_objects:
+ description:
+ - If set to true, a locked object will be read-only and no new versions of
+ the object may be created due to modifications
+ - After enabling, can be disabled only by contacting Pure Technical Services
+ type: bool
+ default: false
+ version_added: '1.12.0'
+ default_retention:
+ description:
+ - The retention period, in days, used to apply locks on new objects if
+ none is specified by the S3 client
+ - Valid values between 1 and 365000
+ - Use "" to clear
+ type: str
+ version_added: '1.12.0'
+ block_new_public_policies:
+ description:
+ - If set to true, adding bucket policies that grant public access to a bucket is not allowed.
+ type: bool
+ version_added: 1.15.0
+ block_public_access:
+ description:
+ - If set to true, access to a bucket with a public policy is restricted to only authenticated
+ users within the account that bucket belongs to.
+ type: bool
+ version_added: 1.15.0
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
@@ -125,7 +186,7 @@ try:
except ImportError:
HAS_PYPURECLIENT = False
-from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
get_blade,
get_system,
@@ -135,7 +196,9 @@ from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb impo
MIN_REQUIRED_API_VERSION = "1.5"
VERSIONING_VERSION = "1.9"
-VSO_VERSION = "2.4"
+VSO_VERSION = "2.5"
+QUOTA_VERSION = "2.8"
+MODE_VERSION = "2.12"
def get_s3acc(module, blade):
@@ -161,18 +224,56 @@ def get_bucket(module, blade):
def create_bucket(module, blade):
"""Create bucket"""
changed = True
+ bladev2 = get_system(module)
if not module.check_mode:
try:
api_version = blade.api_version.list_versions().versions
- if VSO_VERSION in api_version and module.params["mode"]:
- bladev2 = get_system(module)
- res = bladev2.post_buckets(
- names=[module.params["name"]],
- bucket=flashblade.BucketPost(
+ if VSO_VERSION in api_version:
+ account_defaults = list(
+ bladev2.get_object_store_accounts(
+ names=[module.params["account"]]
+ ).items
+ )[0]
+ if QUOTA_VERSION in api_version:
+ if not module.params["hard_limit"]:
+ module.params["hard_limit"] = (
+ account_defaults.hard_limit_enabled
+ )
+ if module.params["quota"]:
+ quota = str(human_to_bytes(module.params["quota"]))
+ else:
+ if not account_defaults.quota_limit:
+ quota = ""
+ else:
+ quota = str(account_defaults.quota_limit)
+ if not module.params["retention_mode"]:
+ module.params["retention_mode"] = ""
+ if not module.params["default_retention"]:
+ module.params["default_retention"] = ""
+ else:
+ module.params["default_retention"] = str(
+ int(module.params["default_retention"]) * 86400000
+ )
+ if module.params["object_lock_enabled"]:
+ bucket = flashblade.BucketPost(
+ account=flashblade.Reference(name=module.params["account"]),
+ bucket_type=module.params["mode"],
+ hard_limit_enabled=module.params["hard_limit"],
+ quota_limit=quota,
+ )
+ else:
+ bucket = flashblade.BucketPost(
+ account=flashblade.Reference(name=module.params["account"]),
+ bucket_type=module.params["mode"],
+ hard_limit_enabled=module.params["hard_limit"],
+ quota_limit=quota,
+ )
+ else:
+ bucket = flashblade.BucketPost(
account=flashblade.Reference(name=module.params["account"]),
bucket_type=module.params["mode"],
- ),
- )
+ )
+ res = bladev2.post_buckets(names=[module.params["name"]], bucket=bucket)
if res.status_code != 200:
module.fail_json(
msg="Object Store Bucket {0} creation failed. Error: {1}".format(
@@ -180,37 +281,79 @@ def create_bucket(module, blade):
res.errors[0].message,
)
)
- elif VERSIONING_VERSION in api_version:
- attr = BucketPost()
- attr.account = Reference(name=module.params["account"])
- blade.buckets.create_buckets(names=[module.params["name"]], bucket=attr)
- else:
- attr = Bucket()
- attr.account = Reference(name=module.params["account"])
- blade.buckets.create_buckets(
- names=[module.params["name"]], account=attr
- )
- if (
- module.params["versioning"] != "absent"
- and VERSIONING_VERSION in api_version
- ):
- try:
- blade.buckets.update_buckets(
- names=[module.params["name"]],
- bucket=BucketPatch(versioning=module.params["versioning"]),
+ if QUOTA_VERSION in api_version:
+ bucket = flashblade.BucketPatch(
+ retention_lock=module.params["retention_lock"],
+ object_lock_config=flashblade.ObjectLockConfigRequestBody(
+ default_retention_mode=module.params["retention_mode"],
+ enabled=module.params["object_lock_enabled"],
+ freeze_locked_objects=module.params[
+ "freeze_locked_objects"
+ ],
+ default_retention=module.params["default_retention"],
+ ),
+ versioning=module.params["versioning"],
)
- except Exception:
+ else:
+ bucket = flashblade.BucketPatch(
+ retention_lock=module.params["retention_lock"],
+ versioning=module.params["versioning"],
+ )
+ res = bladev2.patch_buckets(
+ names=[module.params["name"]], bucket=bucket
+ )
+ if res.status_code != 200:
module.fail_json(
- msg="Object Store Bucket {0} Created but versioning state failed".format(
- module.params["name"]
+ msg="Object Store Bucket {0} creation update failed. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
)
)
+ else:
+ attr = BucketPost()
+ attr.account = Reference(name=module.params["account"])
+ blade.buckets.create_buckets(names=[module.params["name"]], bucket=attr)
+ if module.params["versioning"] != "absent":
+ try:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]],
+ bucket=BucketPatch(versioning=module.params["versioning"]),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Object Store Bucket {0} Created but versioning state failed".format(
+ module.params["name"]
+ )
+ )
except Exception:
+ blade.buckets.update_buckets(
+ names=[module.params["name"]], bucket=BucketPatch(destroyed=True)
+ )
+ blade.buckets.delete_buckets(names=[module.params["name"]])
module.fail_json(
msg="Object Store Bucket {0}: Creation failed".format(
module.params["name"]
)
)
+ if MODE_VERSION in api_version:
+ if not module.params["block_new_public_policies"]:
+ module.params["block_new_public_policies"] = False
+ if not module.params["block_public_access"]:
+ module.params["block_public_access"] = False
+ pac = BucketPatch(
+ public_access_config=flashblade.PublicAccessConfig(
+ block_new_public_policies=module.params[
+ "block_new_public_policies"
+ ],
+ block_public_access=module.params["block_public_access"],
+ )
+ )
+ res = bladev2.patch_buckets(bucket=pac, names=[module.params["name"]])
+ if res.status_code != 200:
+ module.warn(
+ msg="Failed to set Public Access config correctly for bucket {0}. "
+ "Error: {1}".format(module.params["name"], res.errors[0].message)
+ )
module.exit_json(changed=changed)
@@ -272,13 +415,42 @@ def recover_bucket(module, blade):
def update_bucket(module, blade, bucket):
"""Update Bucket"""
changed = False
+ change_pac = False
+ bladev2 = get_system(module)
+ bucket_detail = list(bladev2.get_buckets(names=[module.params["name"]]).items)[0]
api_version = blade.api_version.list_versions().versions
if VSO_VERSION in api_version:
- if module.params["mode"]:
- bladev2 = get_system(module)
- bucket_detail = bladev2.get_buckets(names=[module.params["name"]])
- if list(bucket_detail.items)[0].bucket_type != module.params["mode"]:
- module.warn("Changing bucket type is not permitted.")
+ if module.params["mode"] and bucket_detail.bucket_type != module.params["mode"]:
+ module.warn("Changing bucket type is not permitted.")
+ if QUOTA_VERSION in api_version:
+ if (
+ bucket_detail.retention_lock == "ratcheted"
+ and getattr(
+ bucket_detail.object_lock_config, "default_retention_mode", None
+ )
+ == "compliance"
+ and module.params["retention_mode"] != "compliance"
+ ):
+ module.warn(
+ "Changing retention_mode can only be performed by Pure Technical Support."
+ )
+ if not module.params["object_lock_enabled"] and getattr(
+ bucket_detail.object_lock_config, "enabled", False
+ ):
+ module.warn("Object lock cannot be disabled.")
+ if not module.params["freeze_locked_objects"] and getattr(
+ bucket_detail.object_lock_config, "freeze_locked_objects", False
+ ):
+ module.warn("Freeze locked objects cannot be disabled.")
+ if getattr(bucket_detail.object_lock_config, "default_retention", 0) > 1:
+ if (
+ bucket_detail.object_lock_config.default_retention / 86400000
+ > int(module.params["default_retention"])
+ and bucket_detail.retention_lock == "ratcheted"
+ ):
+ module.warn(
+ "Default retention can only be reduced by Pure Technical Support."
+ )
if VERSIONING_VERSION in api_version:
if bucket.versioning != "none":
@@ -316,7 +488,39 @@ def update_bucket(module, blade, bucket):
module.params["name"]
)
)
- module.exit_json(changed=changed)
+ if MODE_VERSION in api_version:
+ current_pac = {
+ "block_new_public_policies": bucket_detail.public_access_config.block_new_public_policies,
+ "block_public_access": bucket_detail.public_access_config.block_public_access,
+ }
+ if module.params["block_new_public_policies"] is None:
+ new_public_policies = current_pac["block_new_public_policies"]
+ else:
+ new_public_policies = module.params["block_new_public_policies"]
+ if module.params["block_public_access"] is None:
+ new_public_access = current_pac["block_public_access"]
+ else:
+ new_public_access = module.params["block_public_access"]
+ new_pac = {
+ "block_new_public_policies": new_public_policies,
+ "block_public_access": new_public_access,
+ }
+ if current_pac != new_pac:
+ change_pac = True
+            pac = BucketPatch(
+                public_access_config=flashblade.PublicAccessConfig(
+                    block_new_public_policies=new_pac["block_new_public_policies"],
+                    block_public_access=new_pac["block_public_access"],
+                )
+            )
+ if change_pac and not module.check_mode:
+ res = bladev2.patch_buckets(bucket=pac, names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update Public Access config correctly for bucket {0}. "
+ "Error: {1}".format(module.params["name"], res.errors[0].message)
+ )
+ module.exit_json(changed=(changed or change_pac))
def eradicate_bucket(module, blade):
@@ -341,7 +545,21 @@ def main():
name=dict(required=True),
account=dict(required=True),
eradicate=dict(default="false", type="bool"),
- mode=dict(type="str", choices=["classic", "multi-site"]),
+ mode=dict(
+ type="str",
+ choices=["classic", "multi-site-writable"],
+ ),
+ retention_mode=dict(type="str", choices=["compliance", "governance", ""]),
+ default_retention=dict(type="str"),
+ retention_lock=dict(
+ type="str", choices=["ratcheted", "unlocked"], default="unlocked"
+ ),
+ hard_limit=dict(type="bool"),
+ block_new_public_policies=dict(type="bool"),
+ block_public_access=dict(type="bool"),
+ object_lock_enabled=dict(type="bool", default=False),
+ freeze_locked_objects=dict(type="bool", default=False),
+ quota=dict(type="str"),
versioning=dict(
default="absent", choices=["enabled", "suspended", "absent"]
),
@@ -362,9 +580,13 @@ def main():
api_version = blade.api_version.list_versions().versions
if MIN_REQUIRED_API_VERSION not in api_version:
module.fail_json(msg="Purity//FB must be upgraded to support this module.")
- if module.params["mode"] and VSO_VERSION not in api_version:
- module.fail_json(msg="VSO mode requires Purity//FB 3.3.3 or higher.")
+ # From REST 2.12 classic is no longer the default mode
+ if MODE_VERSION in api_version:
+ if not module.params["mode"]:
+ module.params["mode"] = "multi-site-writable"
+ elif not module.params["mode"]:
+ module.params["mode"] = "classic"
bucket = get_bucket(module, blade)
if not get_s3acc(module, blade):
module.fail_json(
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
index 6ac3775ae..265fd5481 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_bucket_replica.py
@@ -58,6 +58,14 @@ options:
- Name of remote credential name to use.
required: false
type: str
+ cascading:
+ description:
+ - Objects replicated to this bucket via a replica link from
+ another array will also be replicated by this link to the
+ remote bucket
+ type: bool
+ default: false
+ version_added: "1.14.0"
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
@@ -96,11 +104,19 @@ try:
except ImportError:
HAS_PURITY_FB = False
+HAS_PYPURECLIENT = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PYPURECLIENT = False
+
MIN_REQUIRED_API_VERSION = "1.9"
+CASCADE_API_VERSION = "2.2"
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
get_blade,
+ get_system,
purefb_argument_spec,
)
@@ -167,24 +183,46 @@ def get_connected(module, blade):
def create_rl(module, blade, remote_cred):
"""Create Bucket Replica Link"""
changed = True
+ api_version = blade.api_version.list_versions().versions
if not module.check_mode:
- try:
- if not module.params["target_bucket"]:
- module.params["target_bucket"] = module.params["name"]
- else:
- module.params["target_bucket"] = module.params["target_bucket"].lower()
- blade.bucket_replica_links.create_bucket_replica_links(
+ if not module.params["target_bucket"]:
+ module.params["target_bucket"] = module.params["name"]
+ else:
+ module.params["target_bucket"] = module.params["target_bucket"].lower()
+ if CASCADE_API_VERSION in api_version:
+ bladev2 = get_system(module)
+ new_rl = flashblade.BucketReplicaLinkPost(
+ cascading_enabled=module.params["cascading"],
+ paused=module.params["paused"],
+ )
+ res = bladev2.post_bucket_replica_links(
local_bucket_names=[module.params["name"]],
remote_bucket_names=[module.params["target_bucket"]],
remote_credentials_names=[remote_cred.name],
- bucket_replica_link=BucketReplicaLink(paused=module.params["paused"]),
+ bucket_replica_link=new_rl,
)
- except Exception:
- module.fail_json(
- msg="Failed to create bucket replica link {0}.".format(
- module.params["name"]
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create bucket replica link {0}.".format(
+ module.params["name"]
+ )
+ )
+ else:
+ try:
+ blade.bucket_replica_links.create_bucket_replica_links(
+ local_bucket_names=[module.params["name"]],
+ remote_bucket_names=[module.params["target_bucket"]],
+ remote_credentials_names=[remote_cred.name],
+ bucket_replica_link=BucketReplicaLink(
+ paused=module.params["paused"]
+ ),
+ )
+ except Exception:
+ module.fail_json(
+ msg="Failed to create bucket replica link {0}.".format(
+ module.params["name"]
+ )
)
- )
module.exit_json(changed=changed)
@@ -245,6 +283,7 @@ def main():
target=dict(type="str"),
target_bucket=dict(type="str"),
paused=dict(type="bool", default=False),
+ cascading=dict(type="bool", default=False),
credential=dict(type="str"),
state=dict(default="present", choices=["present", "absent"]),
)
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
index 508c6a322..846351453 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_connect.py
@@ -117,7 +117,9 @@ RETURN = r"""
HAS_PURITYFB = True
try:
- from purity_fb import PurityFb, ArrayConnection, ArrayConnectionPost
+ from purity_fb import PurityFb
+ from purity_fb import ArrayConnection as ArrayConnectionv1
+ from purity_fb import ArrayConnectionPost as ArrayConnectionPostv1
except ImportError:
HAS_PURITYFB = False
@@ -224,7 +226,7 @@ def create_connection(module, blade):
.items[0]
.connection_key
)
- connection_info = ArrayConnectionPost(
+ connection_info = ArrayConnectionPostv1(
management_address=module.params["target_url"],
encrypted=module.params["encrypted"],
connection_key=connection_key,
@@ -346,7 +348,7 @@ def update_connection(module, blade, target_blade):
module.fail_json(
msg="Cannot turn array connection encryption on if file system replica links exist"
)
- new_attr = ArrayConnection(encrypted=module.params["encrypted"])
+ new_attr = ArrayConnectionv1(encrypted=module.params["encrypted"])
changed = True
if not module.check_mode:
try:
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
index 6433d3d9d..2a81648e5 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_ds.py
@@ -67,6 +67,15 @@ options:
description:
- Sets the password of the bind_user user name account.
type: str
+ force_bind_password:
+ type: bool
+ default: true
+ description:
+ - Will force the bind password to be reset even if the bind user password
+ is unchanged.
+ - If set to I(false) and I(bind_user) is unchanged the password will not
+ be reset.
+ version_added: 1.16.0
bind_user:
description:
- Sets the user name that can be used to bind to and query the directory.
@@ -257,6 +266,8 @@ def delete_ds(module, blade):
def update_ds(module, blade):
"""Update Directory Service"""
mod_ds = False
+ changed = False
+ password_required = False
attr = {}
try:
ds_now = blade.directory_services.list_directory_services(
@@ -278,21 +289,31 @@ def update_ds(module, blade):
if sorted(module.params["uri"][0:30]) != sorted(ds_now.uris):
attr["uris"] = module.params["uri"][0:30]
mod_ds = True
+ password_required = True
if module.params["base_dn"]:
if module.params["base_dn"] != ds_now.base_dn:
attr["base_dn"] = module.params["base_dn"]
mod_ds = True
if module.params["bind_user"]:
if module.params["bind_user"] != ds_now.bind_user:
+ password_required = True
attr["bind_user"] = module.params["bind_user"]
mod_ds = True
+ elif module.params["force_bind_password"]:
+ password_required = True
+ mod_ds = True
if module.params["enable"]:
if module.params["enable"] != ds_now.enabled:
attr["enabled"] = module.params["enable"]
mod_ds = True
- if module.params["bind_password"]:
- attr["bind_password"] = module.params["bind_password"]
- mod_ds = True
+ if password_required:
+ if module.params["bind_password"]:
+ attr["bind_password"] = module.params["bind_password"]
+ mod_ds = True
+ else:
+ module.fail_json(
+ msg="'bind_password' must be provided for this task"
+ )
if module.params["dstype"] == "smb":
if module.params["join_ou"] != ds_now.smb.join_ou:
attr["smb"] = {"join_ou": module.params["join_ou"]}
@@ -397,6 +418,7 @@ def main():
state=dict(type="str", default="present", choices=["absent", "present"]),
enable=dict(type="bool", default=False),
bind_password=dict(type="str", no_log=True),
+ force_bind_password=dict(type="bool", default=True),
bind_user=dict(type="str"),
base_dn=dict(type="str"),
join_ou=dict(type="str"),
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
index a07180793..8d332e8b7 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py
@@ -179,6 +179,27 @@ options:
- Only valid for Purity//FB 3.3.0 or higher
type: str
version_added: "1.9.0"
+ share_policy:
+ description:
+ - Name of SMB share policy to assign to filesystem
+ - Only valid with REST 2.10 or higher
+ - Remove policy with empty string
+ type: str
+ version_added: "1.12.0"
+ client_policy:
+ description:
+ - Name of SMB client policy to assign to filesystem
+ - Only valid with REST 2.10 or higher
+ - Remove policy with empty string
+ type: str
+ version_added: "1.12.0"
+ continuous_availability:
+ description:
+      - Defines if the file system will be continuously available during
+ disruptive scenarios such as network disruption, blades failover, etc
+ type: bool
+ default: true
+ version_added: "1.15.0"
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
@@ -267,6 +288,7 @@ try:
FileSystemPatch,
NfsPatch,
Reference,
+ Smb,
)
except ImportError:
HAS_PYPURECLIENT = False
@@ -290,6 +312,8 @@ NFSV4_API_VERSION = "1.6"
REPLICATION_API_VERSION = "1.9"
MULTIPROTOCOL_API_VERSION = "1.11"
EXPORT_POLICY_API_VERSION = "2.3"
+SMB_POLICY_API_VERSION = "2.10"
+CA_API_VERSION = "2.12"
def get_fs(module, blade):
@@ -488,12 +512,71 @@ def create_fs(module, blade):
res.errors[0].message,
)
)
+ if SMB_POLICY_API_VERSION in api_version:
+ system = get_system(module)
+ if module.params["client_policy"]:
+ export_attr = FileSystemPatch(
+ smb=Smb(
+ client_policy=Reference(name=module.params["client_policy"])
+ )
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=export_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Filesystem {0} created, but failed to assign client "
+ "policy {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["client_policy"],
+ res.errors[0].message,
+ )
+ )
+ if module.params["share_policy"]:
+ export_attr = FileSystemPatch(
+ smb=Smb(share_policy=Reference(name=module.params["share_policy"]))
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=export_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Filesystem {0} created, but failed to assign share "
+ "policy {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["share_policy"],
+ res.errors[0].message,
+ )
+ )
+ if CA_API_VERSION in api_version:
+ ca_attr = FileSystemPatch(
+ smb=Smb(
+ continuous_availability_enabled=module.params[
+ "continuous_availability"
+ ]
+ )
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=ca_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Filesystem {0} created, but failed to set continuous availability"
+ "Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+
module.exit_json(changed=changed)
def modify_fs(module, blade):
"""Modify Filesystem"""
changed = False
+ change_export = False
+ change_share = False
+ change_ca = False
mod_fs = False
attr = {}
if module.params["policy"] and module.params["policy_state"] == "present":
@@ -689,7 +772,7 @@ def modify_fs(module, blade):
module.params["name"]
)
)
- attr["requested_promotion_state"] = module.params["promote"]
+ attr["requested_promotion_state"] = "demoted"
mod_fs = True
if mod_fs:
changed = True
@@ -721,12 +804,12 @@ def modify_fs(module, blade):
module.params["name"], message
)
)
+ system = get_system(module)
+ current_fs = list(
+ system.get_file_systems(filter="name='" + module.params["name"] + "'").items
+ )[0]
if EXPORT_POLICY_API_VERSION in api_version and module.params["export_policy"]:
- system = get_system(module)
change_export = False
- current_fs = list(
- system.get_file_systems(filter="name='" + module.params["name"] + "'").items
- )[0]
if (
current_fs.nfs.export_policy.name
and current_fs.nfs.export_policy.name != module.params["export_policy"]
@@ -752,8 +835,84 @@ def modify_fs(module, blade):
res.errors[0].message,
)
)
+ if SMB_POLICY_API_VERSION in api_version and module.params["client_policy"]:
+ change_client = False
+ if (
+ current_fs.smb.client_policy.name
+ and current_fs.smb.client_policy.name != module.params["client_policy"]
+ ):
+ change_client = True
+ if not current_fs.smb.client_policy.name and module.params["client_policy"]:
+ change_client = True
+ if change_client and not module.check_mode:
+ client_attr = FileSystemPatch(
+ smb=Smb(client_policy=Reference(name=module.params["client_policy"]))
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=client_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to modify client policy {1} for "
+ "filesystem {0}. Error: {2}".format(
+ module.params["name"],
+ module.params["client_policy"],
+ res.errors[0].message,
+ )
+ )
+ if SMB_POLICY_API_VERSION in api_version and module.params["share_policy"]:
+ change_share = False
+ if (
+ current_fs.smb.share_policy.name
+ and current_fs.smb.share_policy.name != module.params["share_policy"]
+ ):
+ change_share = True
+ if not current_fs.smb.share_policy.name and module.params["share_policy"]:
+ change_share = True
+ if change_share and not module.check_mode:
+ share_attr = FileSystemPatch(
+ smb=Smb(share_policy=Reference(name=module.params["share_policy"]))
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=share_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to modify share policy {1} for "
+ "filesystem {0}. Error: {2}".format(
+ module.params["name"],
+ module.params["share_policy"],
+ res.errors[0].message,
+ )
+ )
+ if CA_API_VERSION in api_version:
+ change_ca = False
+ if (
+ module.params["continuous_availability"]
+ != current_fs.continuous_availability_enabled
+ ):
+ change_ca = True
+            if change_ca and not module.check_mode:
+ ca_attr = FileSystemPatch(
+ smb=Smb(
+ continuous_availability_enabled=module.params[
+ "continuous_availability"
+ ]
+ )
+ )
+ res = system.patch_file_systems(
+ names=[module.params["name"]], file_system=ca_attr
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to modify continuous availability for "
+ "filesystem {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
- module.exit_json(changed=changed)
+ module.exit_json(changed=(changed or change_export or change_share or change_ca))
def _delete_fs(module, blade):
@@ -910,6 +1069,9 @@ def main():
),
size=dict(type="str"),
export_policy=dict(type="str"),
+ share_policy=dict(type="str"),
+ client_policy=dict(type="str"),
+ continuous_availability=dict(type="bool", default="true"),
)
)
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
index f96903788..ca52a64bd 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs_replica.py
@@ -53,6 +53,12 @@ options:
- Name of filesystem snapshot policy to apply to the replica link.
required: false
type: str
+ in_progress:
+ description:
+ - Confirmation that you wish to delete a filesystem replica link
+    - This may cancel any in-progress replication transfers
+ type: bool
+ default: false
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
@@ -97,9 +103,12 @@ MIN_REQUIRED_API_VERSION = "1.9"
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
get_blade,
+ get_system,
purefb_argument_spec,
)
+DELETE_RL_API_VERSION = "2.10"
+
def get_local_fs(module, blade):
"""Return Filesystem or None"""
@@ -241,6 +250,30 @@ def delete_rl_policy(module, blade):
module.exit_json(changed=changed)
+def delete_rl(module, blade):
+ """Delete filesystem replica link"""
+ changed = True
+ if not module.check_mode:
+        res = blade.delete_file_system_replica_links(
+            local_file_system_names=[module.params["name"]],
+            remote_file_system_names=[module.params["target_fs"]],
+            remote_names=[module.params["target_array"]],
+            cancel_in_progress_transfers=module.params["in_progress"],
+        )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete replica link from {0} to {1}:{2}. Error: {3}".format(
+ module.params["name"],
+ module.params["target_array"],
+ module.params["target_fs"],
+ res.errors[0].message,
+ )
+ )
+    module.exit_json(changed=changed)
+
+
def main():
argument_spec = purefb_argument_spec()
argument_spec.update(
@@ -249,6 +282,7 @@ def main():
target_fs=dict(type="str"),
target_array=dict(type="str"),
policy=dict(type="str"),
+ in_progress=dict(type="bool", default=False),
state=dict(default="present", choices=["present", "absent"]),
)
)
@@ -296,6 +330,12 @@ def main():
policy = None
if state == "present" and not local_replica_link:
create_rl(module, blade)
+ elif state == "absent" and local_replica_link:
+ if DELETE_RL_API_VERSION not in versions:
+ module.fail_json("Deleting a replica link requires REST 2.10 or higher")
+ else:
+ bladev6 = get_system(module)
+ delete_rl(module, bladev6)
elif state == "present" and local_replica_link and policy:
add_rl_policy(module, blade)
elif state == "absent" and policy:
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_hardware.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_hardware.py
new file mode 100644
index 000000000..49849156b
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_hardware.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = r"""
+---
+module: purefb_hardware
+version_added: '1.15.0'
+short_description: Manage FlashBlade Hardware
+description:
+- Enable or disable FlashBlade visual identification lights and set connector parameters
+author:
+- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ name:
+ description:
+ - Name of hardware component
+ type: str
+ required: true
+ enabled:
+ description:
+ - State of the component identification LED
+ type: bool
+ speed:
+ description:
+ - If the component specified is a connector, set the configured speed
+ of each lane in the connector in gigabits-per-second
+ type: int
+ choices: [ 10, 25, 40 ]
+ ports:
+ description:
+      - If the component specified is a connector, the number of configured
+ ports in the connector
+ type: int
+ choices: [ 1, 4 ]
+extends_documentation_fragment:
+- purestorage.flashblade.purestorage.fb
+"""
+
+EXAMPLES = r"""
+- name: Set connector to be 4 x 40Gb ports
+ purestorage.flashblade.purefb_hardware:
+ name: "CH1.FM1.ETH1"
+ speed: 40
+ ports: 4
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Enable identification LED
+ purestorage.flashblade.purefb_hardware:
+ name: "CH1.FB1"
+ enabled: True
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+
+- name: Disable identification LED
+ purestorage.flashblade.purefb_hardware:
+ name: "CH1.FB1"
+ enabled: False
+ fb_url: 10.10.10.2
+ api_token: T-68618f31-0c9e-4e57-aa44-5306a2cf10e3
+"""
+
+RETURN = r"""
+"""
+
+HAS_PURESTORAGE = True
+try:
+ from pypureclient import flashblade
+except ImportError:
+ HAS_PURESTORAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
+ get_system,
+ purefb_argument_spec,
+)
+
+MIN_REQUIRED_API_VERSION = "2.2"
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(
+ dict(
+ enabled=dict(type="bool"),
+ name=dict(type="str", required=True),
+ speed=dict(
+ type="int",
+ choices=[10, 25, 40],
+ ),
+ ports=dict(
+ type="int",
+ choices=[1, 4],
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+ if not HAS_PURESTORAGE:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+
+ blade = get_system(module)
+ api_version = list(blade.get_versions().items)
+
+ if MIN_REQUIRED_API_VERSION not in api_version:
+ module.fail_json(
+ msg="FlashBlade REST version not supported. "
+ "Minimum version required: {0}".format(MIN_REQUIRED_API_VERSION)
+ )
+ if module.params["speed"]:
+ speed = module.params["speed"] * 1000000000
+ changed = False
+ change_connector = False
+ hardware = None
+ res = blade.get_hardware(names=[module.params["name"]])
+ if res.status_code == 200:
+ hardware = list(res.items)[0]
+ if hardware.identify_enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_hardware(
+ names=[module.params["name"]],
+ hardware=flashblade.Hardware(
+ identify_enabled=module.params["enabled"]
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to set identification LED for {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ res = blade.get_hardware_connectors(names=[module.params["name"]])
+ if res.status_code == 200:
+ if res.status_code == 200:
+ connector = list(res.items)[0]
+ if connector.port_count != module.params["ports"]:
+ new_port = module.params["ports"]
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_hardware_connectors(
+ names=[module.params["name"]],
+ hardware_connector=flashblade.HardwareConnector(
+ port_count=module.params["ports"]
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change connector port count {0}. Error: Invalid port count".format(
+ module.params["name"]
+ )
+ )
+            if module.params["speed"] and connector.lane_speed != speed:
+ new_speed = speed
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_hardware_connectors(
+ names=[module.params["name"]],
+ hardware_connector=flashblade.HardwareConnector(
+ lane_speed=speed
+ ),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change connector lane speed {0}. Error: Invalid lane speed".format(
+ module.params["name"]
+ )
+ )
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
index 8525bd8e3..033312e82 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_info.py
@@ -81,357 +81,7 @@ RETURN = r"""
purefb_info:
description: Returns the information collected from the FlashBlade
returned: always
- type: complex
- sample: {
- "admins": {
- "pureuser": {
- "api_token_timeout": null,
- "local": true,
- "public_key": null
- },
- "another_user": {
- "api_token_timeout": null,
- "local": false,
- "public_key": null
- },
- },
- "buckets": {
- "central": {
- "account_name": "jake",
- "bucket_type": "classic",
- "created": 1628900154000,
- "data_reduction": null,
- "destroyed": false,
- "id": "43758f09-9e71-7bf7-5757-2028a95a2b65",
- "lifecycle_rules": {},
- "object_count": 0,
- "snapshot_space": 0,
- "time_remaining": null,
- "total_physical_space": 0,
- "unique_space": 0,
- "versioning": "none",
- "virtual_space": 0
- },
- "test": {
- "account_name": "acme",
- "bucket_type": "classic",
- "created": 1630591952000,
- "data_reduction": 3.6,
- "destroyed": false,
- "id": "d5f6149c-fbef-f3c5-58b6-8fd143110ba9",
- "lifecycle_rules": {
- "test": {
- "abort_incomplete_multipart_uploads_after (days)": 1,
- "cleanup_expired_object_delete_marker": true,
- "enabled": true,
- "keep_current_version_for (days)": null,
- "keep_current_version_until": "2023-12-21",
- "keep_previous_version_for (days)": null,
- "prefix": "foo"
- }
- },
- },
- },
- "capacity": {
- "aggregate": {
- "data_reduction": 1.1179228,
- "snapshots": 0,
- "total_physical": 17519748439,
- "unique": 17519748439,
- "virtual": 19585726464
- },
- "file-system": {
- "data_reduction": 1.3642412,
- "snapshots": 0,
- "total_physical": 4748219708,
- "unique": 4748219708,
- "virtual": 6477716992
- },
- "object-store": {
- "data_reduction": 1.0263462,
- "snapshots": 0,
- "total_physical": 12771528731,
- "unique": 12771528731,
- "virtual": 6477716992
- },
- "total": 83359896948925
- },
- "config": {
- "alert_watchers": {
- "enabled": true,
- "name": "notify@acmestorage.com"
- },
- "array_management": {
- "base_dn": null,
- "bind_password": null,
- "bind_user": null,
- "enabled": false,
- "name": "management",
- "services": [
- "management"
- ],
- "uris": []
- },
- "directory_service_roles": {
- "array_admin": {
- "group": null,
- "group_base": null
- },
- "ops_admin": {
- "group": null,
- "group_base": null
- },
- "readonly": {
- "group": null,
- "group_base": null
- },
- "storage_admin": {
- "group": null,
- "group_base": null
- }
- },
- "dns": {
- "domain": "demo.acmestorage.com",
- "name": "demo-fb-1",
- "nameservers": [
- "8.8.8.8"
- ],
- "search": [
- "demo.acmestorage.com"
- ]
- },
- "nfs_directory_service": {
- "base_dn": null,
- "bind_password": null,
- "bind_user": null,
- "enabled": false,
- "name": "nfs",
- "services": [
- "nfs"
- ],
- "uris": []
- },
- "ntp": [
- "0.ntp.pool.org"
- ],
- "smb_directory_service": {
- "base_dn": null,
- "bind_password": null,
- "bind_user": null,
- "enabled": false,
- "name": "smb",
- "services": [
- "smb"
- ],
- "uris": []
- },
- "smtp": {
- "name": "demo-fb-1",
- "relay_host": null,
- "sender_domain": "acmestorage.com"
- },
- "ssl_certs": {
- "certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
- "common_name": "Acme Storage",
- "country": "US",
- "email": null,
- "intermediate_certificate": null,
- "issued_by": "Acme Storage",
- "issued_to": "Acme Storage",
- "key_size": 4096,
- "locality": null,
- "name": "global",
- "organization": "Acme Storage",
- "organizational_unit": "Acme Storage",
- "passphrase": null,
- "private_key": null,
- "state": null,
- "status": "self-signed",
- "valid_from": "1508433967000",
- "valid_to": "2458833967000"
- }
- },
- "default": {
- "blades": 15,
- "buckets": 7,
- "filesystems": 2,
- "flashblade_name": "demo-fb-1",
- "object_store_accounts": 1,
- "object_store_users": 1,
- "purity_version": "2.2.0",
- "snapshots": 1,
- "total_capacity": 83359896948925,
- "smb_mode": "native"
- },
- "filesystems": {
- "k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
- "default_group_quota": 0,
- "default_user_quota": 0,
- "destroyed": false,
- "fast_remove": false,
- "hard_limit": true,
- "nfs_rules": "10.21.255.0/24(rw,no_root_squash)",
- "provisioned": 21474836480,
- "snapshot_enabled": false
- },
- "z": {
- "default_group_quota": 0,
- "default_user_quota": 0,
- "destroyed": false,
- "fast_remove": false,
- "hard_limit": false,
- "provisioned": 1073741824,
- "snapshot_enabled": false
- }
- },
- "lag": {
- "uplink": {
- "lag_speed": 0,
- "port_speed": 40000000000,
- "ports": [
- {
- "name": "CH1.FM1.ETH1.1"
- },
- {
- "name": "CH1.FM1.ETH1.2"
- },
- ],
- "status": "healthy"
- }
- },
- "network": {
- "fm1.admin0": {
- "address": "10.10.100.6",
- "gateway": "10.10.100.1",
- "mtu": 1500,
- "netmask": "255.255.255.0",
- "services": [
- "support"
- ],
- "type": "vip",
- "vlan": 2200
- },
- "fm2.admin0": {
- "address": "10.10.100.7",
- "gateway": "10.10.100.1",
- "mtu": 1500,
- "netmask": "255.255.255.0",
- "services": [
- "support"
- ],
- "type": "vip",
- "vlan": 2200
- },
- "nfs1": {
- "address": "10.10.100.4",
- "gateway": "10.10.100.1",
- "mtu": 1500,
- "netmask": "255.255.255.0",
- "services": [
- "data"
- ],
- "type": "vip",
- "vlan": 2200
- },
- "vir0": {
- "address": "10.10.100.5",
- "gateway": "10.10.100.1",
- "mtu": 1500,
- "netmask": "255.255.255.0",
- "services": [
- "management"
- ],
- "type": "vip",
- "vlan": 2200
- }
- },
- "performance": {
- "aggregate": {
- "bytes_per_op": 0,
- "bytes_per_read": 0,
- "bytes_per_write": 0,
- "read_bytes_per_sec": 0,
- "reads_per_sec": 0,
- "usec_per_other_op": 0,
- "usec_per_read_op": 0,
- "usec_per_write_op": 0,
- "write_bytes_per_sec": 0,
- "writes_per_sec": 0
- },
- "http": {
- "bytes_per_op": 0,
- "bytes_per_read": 0,
- "bytes_per_write": 0,
- "read_bytes_per_sec": 0,
- "reads_per_sec": 0,
- "usec_per_other_op": 0,
- "usec_per_read_op": 0,
- "usec_per_write_op": 0,
- "write_bytes_per_sec": 0,
- "writes_per_sec": 0
- },
- "nfs": {
- "bytes_per_op": 0,
- "bytes_per_read": 0,
- "bytes_per_write": 0,
- "read_bytes_per_sec": 0,
- "reads_per_sec": 0,
- "usec_per_other_op": 0,
- "usec_per_read_op": 0,
- "usec_per_write_op": 0,
- "write_bytes_per_sec": 0,
- "writes_per_sec": 0
- },
- "s3": {
- "bytes_per_op": 0,
- "bytes_per_read": 0,
- "bytes_per_write": 0,
- "read_bytes_per_sec": 0,
- "reads_per_sec": 0,
- "usec_per_other_op": 0,
- "usec_per_read_op": 0,
- "usec_per_write_op": 0,
- "write_bytes_per_sec": 0,
- "writes_per_sec": 0
- }
- },
- "snapshots": {
- "z.188": {
- "destroyed": false,
- "source": "z",
- "source_destroyed": false,
- "suffix": "188"
- }
- },
- "subnet": {
- "new-mgmt": {
- "gateway": "10.10.100.1",
- "interfaces": [
- {
- "name": "fm1.admin0"
- },
- {
- "name": "fm2.admin0"
- },
- {
- "name": "nfs1"
- },
- {
- "name": "vir0"
- }
- ],
- "lag": "uplink",
- "mtu": 1500,
- "prefix": "10.10.100.0/24",
- "services": [
- "data",
- "management",
- "support"
- ],
- "vlan": 2200
- }
- }
- }
+ type: dict
"""
@@ -458,6 +108,9 @@ VSO_VERSION = "2.4"
DRIVES_API_VERSION = "2.5"
SECURITY_API_VERSION = "2.7"
BUCKET_API_VERSION = "2.8"
+SMB_CLIENT_API_VERSION = "2.10"
+SPACE_API_VERSION = "2.11"
+PUBLIC_API_VERSION = "2.12"
def _millisecs_to_time(millisecs):
@@ -711,12 +364,12 @@ def generate_config_dict(blade):
"engine_id": snmp_agents.items[agent].engine_id,
}
if config_info["snmp_agents"][agent_name]["version"] == "v3":
- config_info["snmp_agents"][agent_name][
- "auth_protocol"
- ] = snmp_agents.items[agent].v3.auth_protocol
- config_info["snmp_agents"][agent_name][
- "privacy_protocol"
- ] = snmp_agents.items[agent].v3.privacy_protocol
+ config_info["snmp_agents"][agent_name]["auth_protocol"] = (
+ snmp_agents.items[agent].v3.auth_protocol
+ )
+ config_info["snmp_agents"][agent_name]["privacy_protocol"] = (
+ snmp_agents.items[agent].v3.privacy_protocol
+ )
config_info["snmp_agents"][agent_name]["user"] = snmp_agents.items[
agent
].v3.user
@@ -730,12 +383,12 @@ def generate_config_dict(blade):
"notification": snmp_managers.items[manager].notification,
}
if config_info["snmp_managers"][mgr_name]["version"] == "v3":
- config_info["snmp_managers"][mgr_name][
- "auth_protocol"
- ] = snmp_managers.items[manager].v3.auth_protocol
- config_info["snmp_managers"][mgr_name][
- "privacy_protocol"
- ] = snmp_managers.items[manager].v3.privacy_protocol
+ config_info["snmp_managers"][mgr_name]["auth_protocol"] = (
+ snmp_managers.items[manager].v3.auth_protocol
+ )
+ config_info["snmp_managers"][mgr_name]["privacy_protocol"] = (
+ snmp_managers.items[manager].v3.privacy_protocol
+ )
config_info["snmp_managers"][mgr_name]["user"] = snmp_managers.items[
manager
].v3.user
@@ -920,33 +573,77 @@ def generate_network_dict(blade):
return net_info
-def generate_capacity_dict(blade):
+def generate_capacity_dict(module, blade):
capacity_info = {}
- total_cap = blade.arrays.list_arrays_space()
- file_cap = blade.arrays.list_arrays_space(type="file-system")
- object_cap = blade.arrays.list_arrays_space(type="object-store")
- capacity_info["total"] = total_cap.items[0].capacity
- capacity_info["aggregate"] = {
- "data_reduction": total_cap.items[0].space.data_reduction,
- "snapshots": total_cap.items[0].space.snapshots,
- "total_physical": total_cap.items[0].space.total_physical,
- "unique": total_cap.items[0].space.unique,
- "virtual": total_cap.items[0].space.virtual,
- }
- capacity_info["file-system"] = {
- "data_reduction": file_cap.items[0].space.data_reduction,
- "snapshots": file_cap.items[0].space.snapshots,
- "total_physical": file_cap.items[0].space.total_physical,
- "unique": file_cap.items[0].space.unique,
- "virtual": file_cap.items[0].space.virtual,
- }
- capacity_info["object-store"] = {
- "data_reduction": object_cap.items[0].space.data_reduction,
- "snapshots": object_cap.items[0].space.snapshots,
- "total_physical": object_cap.items[0].space.total_physical,
- "unique": object_cap.items[0].space.unique,
- "virtual": file_cap.items[0].space.virtual,
- }
+ api_version = blade.api_version.list_versions().versions
+ if SPACE_API_VERSION in api_version:
+ blade2 = get_system(module)
+ total_cap = list(blade2.get_arrays_space().items)[0]
+ file_cap = list(blade2.get_arrays_space(type="file-system").items)[0]
+ object_cap = list(blade2.get_arrays_space(type="object-store").items)[0]
+ capacity_info["total"] = total_cap.space.capacity
+ capacity_info["aggregate"] = {
+ "data_reduction": total_cap.space.data_reduction,
+ "snapshots": total_cap.space.snapshots,
+ "total_physical": total_cap.space.total_physical,
+ "unique": total_cap.space.unique,
+ "virtual": total_cap.space.virtual,
+ "total_provisioned": total_cap.space.total_provisioned,
+ "available_provisioned": total_cap.space.available_provisioned,
+ "available_ratio": total_cap.space.available_ratio,
+ "destroyed": total_cap.space.destroyed,
+ "destroyed_virtual": total_cap.space.destroyed_virtual,
+ }
+ capacity_info["file-system"] = {
+ "data_reduction": file_cap.space.data_reduction,
+ "snapshots": file_cap.space.snapshots,
+ "total_physical": file_cap.space.total_physical,
+ "unique": file_cap.space.unique,
+ "virtual": file_cap.space.virtual,
+ "total_provisioned": total_cap.space.total_provisioned,
+ "available_provisioned": total_cap.space.available_provisioned,
+ "available_ratio": total_cap.space.available_ratio,
+ "destroyed": total_cap.space.destroyed,
+ "destroyed_virtual": total_cap.space.destroyed_virtual,
+ }
+ capacity_info["object-store"] = {
+ "data_reduction": object_cap.space.data_reduction,
+ "snapshots": object_cap.space.snapshots,
+ "total_physical": object_cap.space.total_physical,
+ "unique": object_cap.space.unique,
+ "virtual": file_cap.space.virtual,
+ "total_provisioned": total_cap.space.total_provisioned,
+ "available_provisioned": total_cap.space.available_provisioned,
+ "available_ratio": total_cap.space.available_ratio,
+ "destroyed": total_cap.space.destroyed,
+ "destroyed_virtual": total_cap.space.destroyed_virtual,
+ }
+ else:
+ total_cap = blade.arrays.list_arrays_space()
+ file_cap = blade.arrays.list_arrays_space(type="file-system")
+ object_cap = blade.arrays.list_arrays_space(type="object-store")
+ capacity_info["total"] = total_cap.items[0].capacity
+ capacity_info["aggregate"] = {
+ "data_reduction": total_cap.items[0].space.data_reduction,
+ "snapshots": total_cap.items[0].space.snapshots,
+ "total_physical": total_cap.items[0].space.total_physical,
+ "unique": total_cap.items[0].space.unique,
+ "virtual": total_cap.items[0].space.virtual,
+ }
+ capacity_info["file-system"] = {
+ "data_reduction": file_cap.items[0].space.data_reduction,
+ "snapshots": file_cap.items[0].space.snapshots,
+ "total_physical": file_cap.items[0].space.total_physical,
+ "unique": file_cap.items[0].space.unique,
+ "virtual": file_cap.items[0].space.virtual,
+ }
+ capacity_info["object-store"] = {
+ "data_reduction": object_cap.items[0].space.data_reduction,
+ "snapshots": object_cap.items[0].space.snapshots,
+ "total_physical": object_cap.items[0].space.total_physical,
+ "unique": object_cap.items[0].space.unique,
+ "virtual": file_cap.items[0].space.virtual,
+ }
return capacity_info
@@ -973,6 +670,17 @@ def generate_snap_dict(blade):
snap_info[snapshot]["source_location"] = snaps.items[
snap
].source_location.name
+ snap_info[snapshot]["policies"] = []
+ if PUBLIC_API_VERSION in api_version:
+ for policy in range(0, len(snaps.items[snap].policies)):
+ snap_info[snapshot]["policies"].append(
+ {
+ "name": snaps.items[snap].policies[policy].name,
+ "location": snaps.items[snap]
+ .policies[policy]
+ .location.name,
+ }
+ )
return snap_info
@@ -1065,6 +773,19 @@ def generate_bucket_dict(module, blade):
"total_physical_space": buckets.items[bckt].space.total_physical,
"unique_space": buckets.items[bckt].space.unique,
"virtual_space": buckets.items[bckt].space.virtual,
+ "total_provisioned_space": getattr(
+ buckets.items[bckt].space, "total_provisioned", None
+ ),
+ "available_provisioned_space": getattr(
+ buckets.items[bckt].space, "available_provisioned", None
+ ),
+ "available_ratio": getattr(
+ buckets.items[bckt].space, "available_ratio", None
+ ),
+ "destroyed_space": getattr(buckets.items[bckt].space, "destroyed", None),
+ "destroyed_virtual_space": getattr(
+ buckets.items[bckt].space, "destroyed_virtual", None
+ ),
"created": buckets.items[bckt].created,
"destroyed": buckets.items[bckt].destroyed,
"time_remaining": buckets.items[bckt].time_remaining,
@@ -1139,6 +860,19 @@ def generate_bucket_dict(module, blade):
bucket
].object_lock_config.freeze_locked_objects,
}
+ if buckets[bucket].object_lock_config.enabled:
+ bucket_info[buckets[bucket].name]["object_lock_config"][
+ "default_retention"
+ ] = getattr(
+ buckets[bucket].object_lock_config, "default_retention", ""
+ )
+ bucket_info[buckets[bucket].name]["object_lock_config"][
+ "default_retention_mode"
+ ] = getattr(
+ buckets[bucket].object_lock_config,
+ "default_retention_mode",
+ "",
+ )
bucket_info[buckets[bucket].name]["eradication_config"] = {
"eradication_delay": buckets[
bucket
@@ -1147,6 +881,19 @@ def generate_bucket_dict(module, blade):
bucket
].eradication_config.manual_eradication,
}
+ if PUBLIC_API_VERSION in api_version:
+ bucket_info[buckets[bucket].name]["public_status"] = buckets[
+ bucket
+ ].public_status
+ bucket_info[buckets[bucket].name]["public_access_config"] = {
+ "block_new_public_policies": buckets[
+ bucket
+ ].public_access_config.block_new_public_policies,
+ "block_public_access": buckets[
+ bucket
+ ].public_access_config.block_public_access,
+ }
+
return bucket_info
@@ -1181,10 +928,50 @@ def generate_ad_dict(blade):
"service_principals": ad_account.service_principal_names,
"join_ou": ad_account.join_ou,
"encryption_types": ad_account.encryption_types,
+ "global_catalog_servers": getattr(
+ ad_account, "global_catalog_servers", None
+ ),
}
return ad_info
+def generate_bucket_access_policies_dict(blade):
+ policies_info = {}
+ policies = list(blade.get_buckets_bucket_access_policies().items)
+ for policy in range(0, len(policies)):
+ policy_name = policies[policy].name
+ policies_info[policy_name] = {
+ "description": policies[policy].description,
+ "enabled": policies[policy].enabled,
+ "local": policies[policy].is_local,
+ "rules": [],
+ }
+ for rule in range(0, len(policies[policy].rules)):
+ policies_info[policy_name]["rules"].append(
+ {
+ "actions": policies[policy].rules[rule].actions,
+ "resources": policies[policy].rules[rule].resources,
+ "all_principals": policies[policy].rules[rule].principals.all,
+ "effect": policies[policy].rules[rule].effect,
+ "name": policies[policy].rules[rule].name,
+ }
+ )
+ return policies_info
+
+
+def generate_bucket_cross_object_policies_dict(blade):
+ policies_info = {}
+ policies = list(blade.get_buckets_cross_origin_resource_sharing_policies().items)
+ for policy in range(0, len(policies)):
+ policy_name = policies[policy].name
+ policies_info[policy_name] = {
+ "allowed_headers": policies[policy].allowed_headers,
+ "allowed_methods": policies[policy].allowed_methods,
+ "allowed_origins": policies[policy].allowed_origins,
+ }
+ return policies_info
+
+
def generate_object_store_access_policies_dict(blade):
policies_info = {}
policies = list(blade.get_object_store_access_policies().items)
@@ -1247,6 +1034,45 @@ def generate_nfs_export_policies_dict(blade):
return policies_info
+def generate_smb_client_policies_dict(blade):
+ policies_info = {}
+ policies = list(blade.get_smb_client_policies().items)
+ for policy in range(0, len(policies)):
+ policy_name = policies[policy].name
+ policies_info[policy_name] = {
+ "local": policies[policy].is_local,
+ "enabled": policies[policy].enabled,
+ "version": policies[policy].version,
+ "rules": [],
+ }
+ for rule in range(0, len(policies[policy].rules)):
+ policies_info[policy_name]["rules"].append(
+ {
+ "name": policies[policy].rules[rule].name,
+ "change": getattr(policies[policy].rules[rule], "change", None),
+ "full_control": getattr(
+ policies[policy].rules[rule], "full_control", None
+ ),
+ "principal": getattr(
+ policies[policy].rules[rule], "principal", None
+ ),
+ "read": getattr(policies[policy].rules[rule], "read", None),
+ "client": getattr(policies[policy].rules[rule], "client", None),
+ "index": getattr(policies[policy].rules[rule], "index", None),
+ "policy_version": getattr(
+ policies[policy].rules[rule], "policy_version", None
+ ),
+ "encryption": getattr(
+ policies[policy].rules[rule], "encryption", None
+ ),
+ "permission": getattr(
+ policies[policy].rules[rule], "permission", None
+ ),
+ }
+ )
+ return policies_info
+
+
def generate_object_store_accounts_dict(blade):
account_info = {}
accounts = list(blade.get_object_store_accounts().items)
@@ -1259,6 +1085,19 @@ def generate_object_store_accounts_dict(blade):
"total_physical_space": accounts[account].space.total_physical,
"unique_space": accounts[account].space.unique,
"virtual_space": accounts[account].space.virtual,
+ "total_provisioned_space": getattr(
+ accounts[account].space, "total_provisioned", None
+ ),
+ "available_provisioned_space": getattr(
+ accounts[account].space, "available_provisioned", None
+ ),
+ "available_ratio": getattr(
+ accounts[account].space, "available_ratio", None
+ ),
+ "destroyed_space": getattr(accounts[account].space, "destroyed", None),
+ "destroyed_virtual_space": getattr(
+ accounts[account].space, "destroyed_virtual", None
+ ),
"quota_limit": getattr(accounts[account], "quota_limit", None),
"hard_limit_enabled": getattr(
accounts[account], "hard_limit_enabled", None
@@ -1277,6 +1116,17 @@ def generate_object_store_accounts_dict(blade):
}
except AttributeError:
pass
+ try:
+ account_info[acc_name]["public_access_config"] = {
+ "block_new_public_policies": accounts[
+ account
+ ].public_access_config.block_new_public_policies,
+ "block_public_access": accounts[
+ account
+ ].public_access_config.block_public_access,
+ }
+ except AttributeError:
+ pass
acc_users = list(
blade.get_object_store_users(filter='name="' + acc_name + '/*"').items
)
@@ -1413,6 +1263,24 @@ def generate_fs_dict(module, blade):
"quota": fs_user_quotas[user_quota].quota,
"usage": fs_user_quotas[user_quota].usage,
}
+ if PUBLIC_API_VERSION in api_version:
+ for v2fs in range(0, len(fsys_v2)):
+ if fsys_v2[v2fs].name == share:
+ fs_info[share]["smb_client_policy"] = getattr(
+ fsys_v2[v2fs].smb.client_policy, "name", None
+ )
+ fs_info[share]["smb_share_policy"] = getattr(
+ fsys_v2[v2fs].smb.share_policy, "name", None
+ )
+ fs_info[share]["smb_continuous_availability_enabled"] = fsys_v2[
+ v2fs
+ ].smb.continuous_availability_enabled
+ fs_info[share]["multi_protocol_access_control_style"] = getattr(
+ fsys_v2[v2fs].multi_protocol, "access_control_style", None
+ )
+ fs_info[share]["multi_protocol_safeguard_acls"] = fsys_v2[
+ v2fs
+ ].multi_protocol.safeguard_acls
return fs_info
@@ -1433,6 +1301,7 @@ def generate_drives_dict(blade):
"raw_capacity": getattr(drives[drive], "raw_capacity", None),
"status": getattr(drives[drive], "status", None),
"details": getattr(drives[drive], "details", None),
+ "type": getattr(drives[drive], "type", None),
}
return drives_info
@@ -1495,7 +1364,7 @@ def main():
if "config" in subset or "all" in subset:
info["config"] = generate_config_dict(blade)
if "capacity" in subset or "all" in subset:
- info["capacity"] = generate_capacity_dict(blade)
+ info["capacity"] = generate_capacity_dict(module, blade)
if "lags" in subset or "all" in subset:
info["lag"] = generate_lag_dict(blade)
if "network" in subset or "all" in subset:
@@ -1537,8 +1406,17 @@ def main():
info["access_policies"] = generate_object_store_access_policies_dict(
blade
)
+ if PUBLIC_API_VERSION in api_version:
+ info["bucket_access_policies"] = generate_bucket_access_policies_dict(
+ blade
+ )
+ info["bucket_cross_origin_policies"] = (
+ generate_bucket_cross_object_policies_dict(blade)
+ )
if NFS_POLICY_API_VERSION in api_version:
info["export_policies"] = generate_nfs_export_policies_dict(blade)
+ if SMB_CLIENT_API_VERSION in api_version:
+ info["share_policies"] = generate_smb_client_policies_dict(blade)
if "drives" in subset or "all" in subset and DRIVES_API_VERSION in api_version:
info["drives"] = generate_drives_dict(blade)
module.exit_json(changed=False, purefb_info=info)
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
index b17bc3f9e..1ef96f870 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_inventory.py
@@ -47,65 +47,7 @@ RETURN = r"""
purefb_inventory:
description: Returns the inventory information for the FlashBlade
returned: always
- type: complex
- sample: {
- "blades": {
- "CH1.FB1": {
- "model": "FB-17TB",
- "serial": "PPCXA1942AFF5",
- "slot": 1,
- "status": "healthy"
- }
- },
- "chassis": {
- "CH1": {
- "index": 1,
- "model": null,
- "serial": "PMPAM163402AE",
- "slot": null,
- "status": "healthy"
- }
- },
- "controllers": {},
- "ethernet": {
- "CH1.FM1.ETH1": {
- "model": "624410002",
- "serial": "APF16360021PRV",
- "slot": 1,
- "speed": 40000000000,
- "status": "healthy"
- }
- },
- "fans": {
- "CH1.FM1.FAN1": {
- "slot": 1,
- "status": "healthy"
- }
- },
- "modules": {
- "CH1.FM1": {
- "model": "EFM-110",
- "serial": "PSUFS1640002C",
- "slot": 1,
- "status": "healthy"
- },
- "CH1.FM2": {
- "model": "EFM-110",
- "serial": "PSUFS1640004A",
- "slot": 2,
- "status": "healthy"
- }
- },
- "power": {
- "CH1.PWR1": {
- "model": "DS1600SPE-3",
- "serial": "M0500E00D8AJZ",
- "slot": 1,
- "status": "healthy"
- }
- },
- "switch": {}
- }
+ type: dict
"""
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py
index e5c46e730..8bf3ce48a 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_lag.py
@@ -163,16 +163,19 @@ def update_lag(module, blade):
)
new_ports = []
for port in range(0, len(module.params["ports"])):
- new_ports.append(
- module.params["ports"][port].split(".")[0].upper()
- + ".FM1."
- + module.params["ports"][port].split(".")[1].upper()
- )
- new_ports.append(
- module.params["ports"][port].split(".")[0].upper()
- + ".FM2."
- + module.params["ports"][port].split(".")[1].upper()
- )
+ if module.params["ports"][port].split(".")[0].upper()[0] != "X":
+ new_ports.append(
+ module.params["ports"][port].split(".")[0].upper()
+ + ".FM1."
+ + module.params["ports"][port].split(".")[1].upper()
+ )
+ new_ports.append(
+ module.params["ports"][port].split(".")[0].upper()
+ + ".FM2."
+ + module.params["ports"][port].split(".")[1].upper()
+ )
+ else:
+ new_ports.append(module.params["ports"][port].upper())
ports = []
for final_port in range(0, len(new_ports)):
ports.append(flashblade.FixedReference(name=new_ports[final_port]))
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
index 273166de8..ebe70aa48 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_policy.py
@@ -52,7 +52,7 @@ options:
- Type of policy
default: snapshot
type: str
- choices: [ snapshot, access, nfs ]
+ choices: [ snapshot, access, nfs, smb_share, smb_client ]
version_added: "1.9.0"
account:
description:
@@ -72,7 +72,7 @@ options:
Rules are additive.
type: str
default: allow
- choices: [ allow ]
+ choices: [ allow, deny ]
version_added: "1.9.0"
actions:
description:
@@ -83,6 +83,7 @@ options:
choices:
- s3:*
- s3:AbortMultipartUpload
+ - s3:BypassGovernanceRetention
- s3:CreateBucket
- s3:DeleteBucket
- s3:DeleteObject
@@ -94,7 +95,12 @@ options:
- s3:GetLifecycleConfiguration
- s3:GetObject
- s3:GetObjectAcl
+ - s3:GetObjectLegalHold
+ - s3:GetObjectLockConfiguration
+ - s3:GetObjectRetention
+ - s3:GetObjectTagging
- s3:GetObjectVersion
+ - s3:GetObjectVersionTagging
- s3:ListAllMyBuckets
- s3:ListBucket
- s3:ListBucketMultipartUploads
@@ -103,6 +109,10 @@ options:
- s3:PutBucketVersioning
- s3:PutLifecycleConfiguration
- s3:PutObject
+ - s3:PutObjectLegalHold
+ - s3:PutObjectLockConfiguration
+ - s3:PutObjectRetention
+ - s3:ResolveSafemodeConflicts
version_added: "1.9.0"
object_resources:
description:
@@ -213,7 +223,7 @@ options:
description:
- Any user whose UID is affected by an I(access) of `root_squash` or `all_squash`
will have their UID mapped to anonuid.
- The defaultis null, which means 65534.
+ The default is null, which means 65534.
Use "" to clear.
type: str
version_added: "1.9.0"
@@ -241,7 +251,6 @@ options:
- Accepted notation is a single IP address, subnet in CIDR notation, netgroup, or
anonymous (*).
type: str
- default: "*"
version_added: "1.9.0"
fileid_32bit:
description:
@@ -284,8 +293,8 @@ options:
version_added: "1.9.0"
rename:
description:
- - New name for export policy
- - Only applies to NFS export policies
+ - New name for policy
+ - Only applies to NFS and SMB policies
type: str
version_added: "1.10.0"
destroy_snapshots:
@@ -294,6 +303,47 @@ options:
type: bool
version_added: '1.11.0'
default: false
+ principal:
+ description:
+ - The user or group who is the subject of this rule, and their domain
+ type: str
+ version_added: '1.12.0'
+ change:
+ description:
+ - The state of the SMB share principals Change access permission.
+ - Setting to "" will clear the current setting
+ type: str
+ choices: [ allow, deny, "" ]
+ version_added: '1.12.0'
+ read:
+ description:
+ - The state of the SMB share principals Read access permission.
+ - Setting to "" will clear the current setting
+ type: str
+ choices: [ allow, deny, "" ]
+ version_added: '1.12.0'
+ full_control:
+ description:
+ - The state of the SMB share principals Full Control access permission.
+ - Setting to "" will clear the current setting
+ type: str
+ choices: [ allow, deny, "" ]
+ version_added: '1.12.0'
+ smb_encryption:
+ description:
+ - The status of SMB encryption in a client policy rule
+ type: str
+ choices: [ disabled, optional, required ]
+ default: optional
+ version_added: '1.12.0'
+ desc:
+ description:
+ - A description of an object store policy,
+ optionally specified when the policy is created.
+ - Cannot be modified for an existing policy.
+ type: str
+ default: ""
+ version_added: '1.14.0'
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
@@ -359,6 +409,20 @@ EXAMPLES = r"""
object_resources: "*"
fb_url: 10.10.10.2
api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an empty SMB client policy
+ purestorage.flashblade.purefb_policy:
+ name: test_smb_client
+ policy_type: smb_client
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
+- name: Create an SMB client policy with a client rule
+ purestorage.flashblade.purefb_policy:
+ name: test_smb_client
+ policy_type: smb_client
+ client: "10.0.1.0/24"
+ permission: rw
+ fb_url: 10.10.10.2
+ api_token: T-9f276a18-50ab-446e-8a0c-666a3529a1b6
- name: Create an empty NFS export policy
purestorage.flashblade.purefb_policy:
name: test_nfs_export
@@ -460,7 +524,9 @@ RETURN = r"""
HAS_PURITYFB = True
try:
- from purity_fb import Policy, PolicyRule, PolicyPatch
+ from purity_fb import Policy as Policyv1
+ from purity_fb import PolicyRule as PolicyRulev1
+ from purity_fb import PolicyPatch as PolicyPatchv1
except ImportError:
HAS_PURITYFB = False
@@ -473,7 +539,13 @@ try:
NfsExportPolicy,
NfsExportPolicyRule,
Policy,
+ PolicyPatch,
PolicyRule,
+ SmbSharePolicyRule,
+ SmbSharePolicy,
+ SmbClientPolicyRule,
+ SmbClientPolicy,
+ ObjectStoreAccessPolicyPost,
)
except ImportError:
HAS_PYPURECLIENT = False
@@ -503,6 +575,8 @@ SNAPSHOT_POLICY_API_VERSION = "2.1"
ACCESS_POLICY_API_VERSION = "2.2"
NFS_POLICY_API_VERSION = "2.3"
NFS_RENAME_API_VERSION = "2.4"
+SMB_POLICY_API_VERSION = "2.10"
+SMB_ENCRYPT_API_VERSION = "2.11"
def _convert_to_millisecs(hour):
@@ -596,6 +670,614 @@ def _get_local_tz(module, timezone="UTC"):
return timezone
+def delete_smb_share_policy(module, blade):
+ """Delete SMB Share Policy, or Rule
+
+ If principal is provided then delete the principal rule if it exists.
+ """
+
+ changed = False
+ policy_delete = True
+ if module.params["principal"]:
+ policy_delete = False
+ prin_rule = blade.get_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="principal='" + module.params["principal"] + "'",
+ )
+ if prin_rule.status_code == 200:
+ rule = list(prin_rule.items)[0]
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_smb_share_policies_rules(names=[rule.name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule for principal {0} in policy {1}. "
+ "Error: {2}".format(
+ module.params["principal"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ if policy_delete:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_smb_share_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete SMB share policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def rename_smb_share_policy(module, blade):
+ """Rename SMB Share Policy"""
+
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_smb_share_policies(
+ names=[module.params["name"]],
+ policy=SmbSharePolicy(name=module.params["rename"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename SMB share policy {0} to {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["rename"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_smb_share_policy(module, blade):
+ """Create SMB Share Policy"""
+ changed = True
+ if not module.check_mode:
+ res = blade.post_smb_share_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create SMB share policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["enabled"]:
+ res = blade.patch_smb_share_policies(
+ policy=SmbSharePolicy(enabled=False), names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ blade.delete_smb_share_policies(names=[module.params["name"]])
+ module.fail_json(
+ msg="Failed to create SMB share policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["principal"]:
+ module.fail_json(msg="principal is required to create a new rule")
+ else:
+ rule = SmbSharePolicyRule(
+ principal=module.params["principal"],
+ change=module.params["change"],
+ read=module.params["read"],
+ full_control=module.params["full_control"],
+ )
+ res = blade.post_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for policy {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_smb_share_policy(module, blade):
+ """Update SMB Share Policy Rule"""
+
+ changed = False
+ if module.params["principal"]:
+ current_policy_rule = blade.get_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="principal='" + module.params["principal"] + "'",
+ )
+ if (
+ current_policy_rule.status_code == 200
+ and current_policy_rule.total_item_count == 0
+ ):
+ rule = SmbSharePolicyRule(
+ principal=module.params["principal"],
+ change=module.params["change"],
+ read=module.params["read"],
+ full_control=module.params["full_control"],
+ )
+ changed = True
+ if not module.check_mode:
+ if module.params["before_rule"]:
+ before_name = (
+ module.params["name"] + "." + str(module.params["before_rule"])
+ )
+ res = blade.post_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ before_rule_name=before_name,
+ )
+ else:
+ res = blade.post_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for principal {0} "
+ "in policy {1}. Error: {2}".format(
+ module.params["principal"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ rules = list(current_policy_rule.items)
+ cli_count = None
+ old_policy_rule = rules[0]
+ current_rule = {
+ "principal": sorted(old_policy_rule.principal),
+ "read": sorted(old_policy_rule.read),
+ "change": sorted(old_policy_rule.change),
+ "full_control": sorted(old_policy_rule.full_control),
+ }
+ if module.params["read"]:
+ if module.params["read"] == "":
+ new_read = ""
+ else:
+ new_read = module.params["read"]
+ else:
+ new_read = current_rule["read"]
+ if module.params["full_control"]:
+ if module.params["full_control"] == "":
+ new_full_control = ""
+ else:
+ new_full_control = module.params["full_control"]
+ else:
+ new_full_control = current_rule["full_control"]
+ if module.params["change"]:
+ if module.params["change"] == "":
+ new_change = ""
+ else:
+ new_change = module.params["change"]
+ else:
+ new_change = current_rule["change"]
+ if module.params["principal"]:
+ new_principal = module.params["principal"]
+ else:
+ new_principal = current_rule["principal"]
+ new_rule = {
+ "principal": new_principal,
+ "read": new_read,
+ "change": new_change,
+ "full_control": new_full_control,
+ }
+ if current_rule != new_rule:
+ changed = True
+ if not module.check_mode:
+ rule = SmbSharePolicyRule(
+ principal=module.params["principal"],
+ change=module.params["change"],
+ read=module.params["read"],
+ full_control=module.params["full_control"],
+ )
+ res = blade.patch_smb_share_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update SMB share rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ if (
+ module.params["before_rule"]
+ and module.params["before_rule"] != old_policy_rule.index
+ ):
+ changed = True
+ if not module.check_mode:
+ before_name = (
+ module.params["name"] + "." + str(module.params["before_rule"])
+ )
+ res = blade.patch_smb_share_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=SmbSharePolicyRule(),
+ before_rule_name=before_name,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to move SMB share rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ current_policy = list(
+ blade.get_smb_share_policies(names=[module.params["name"]]).items
+ )[0]
+ if current_policy.enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_smb_share_policies(
+ policy=SmbSharePolicy(enabled=module.params["enabled"]),
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to change state of SMB share policy {0}.Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def delete_smb_client_policy(module, blade):
+ """Delete SMB CLient Policy, or Rule
+
+ If client is provided then delete the client rule if it exists.
+ """
+
+ changed = False
+ policy_delete = True
+ if module.params["client"]:
+ policy_delete = False
+ res = blade.get_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="client='" + module.params["client"] + "'",
+ )
+ if res.status_code == 200:
+ if res.total_item_count == 0:
+ pass
+ elif res.total_item_count == 1:
+ rule = list(res.items)[0]
+ if module.params["client"] == rule.client:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_smb_client_policies_rules(names=[rule.name])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule for client {0} in policy {1}. "
+ "Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ rules = list(res.items)
+ for cli in range(0, len(rules)):
+ if rules[cli].client == "*":
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_smb_client_policies_rules(
+ names=[rules[cli].name]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete rule for client {0} in policy {1}. "
+ "Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ if policy_delete:
+ changed = True
+ if not module.check_mode:
+ res = blade.delete_smb_client_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to delete SMB client policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def rename_smb_client_policy(module, blade):
+ """Rename SMB Client Policy"""
+
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_smb_client_policies(
+ names=[module.params["name"]],
+ policy=SmbClientPolicy(name=module.params["rename"]),
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to rename SMB client policy {0} to {1}. Error: {2}".format(
+ module.params["name"],
+ module.params["rename"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def create_smb_client_policy(module, blade):
+ """Create SMB Client Policy"""
+ changed = True
+ versions = blade.api_version.list_versions().versions
+ if not module.check_mode:
+ res = blade.post_smb_client_policies(names=[module.params["name"]])
+ if res.status_code != 200:
+ module.fail_json(
+                msg="Failed to create SMB client policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["enabled"]:
+ res = blade.patch_smb_client_policies(
+ policy=SmbClientPolicy(enabled=False), names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ blade.delete_smb_client_policies(names=[module.params["name"]])
+ module.fail_json(
+                    msg="Failed to create SMB client policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ if not module.params["client"]:
+ module.fail_json(msg="client is required to create a new rule")
+ else:
+ if SMB_ENCRYPT_API_VERSION in versions:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ encryption=module.params["smb_encryption"],
+ )
+ else:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ access=module.params["access"],
+ permission=module.params["permission"],
+ )
+ res = blade.post_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+                    msg="Failed to create rule for policy {0}. Error: {1}".format(
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ module.exit_json(changed=changed)
+
+
+def update_smb_client_policy(module, blade):
+ """Update SMB Client Policy Rule"""
+
+ changed = False
+ versions = blade.api_version.list_versions().versions
+ if module.params["client"]:
+ current_policy_rule = blade.get_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ filter="client='" + module.params["client"] + "'",
+ )
+ if (
+ current_policy_rule.status_code == 200
+ and current_policy_rule.total_item_count == 0
+ ):
+ if SMB_ENCRYPT_API_VERSION in versions:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ encryption=module.params["smb_encryption"],
+ )
+ else:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ )
+ changed = True
+ if not module.check_mode:
+ if module.params["before_rule"]:
+ before_name = (
+ module.params["name"] + "." + str(module.params["before_rule"])
+ )
+ res = blade.post_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ before_rule_name=before_name,
+ )
+ else:
+ res = blade.post_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for client {0} "
+ "in policy {1}. Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ else:
+ rules = list(current_policy_rule.items)
+ cli_count = None
+ done = False
+ if module.params["client"] == "*":
+ for cli in range(0, len(rules)):
+ if rules[cli].client == "*":
+ cli_count = cli
+ if not cli_count:
+ if SMB_ENCRYPT_API_VERSION in versions:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ encryption=module.params["smb_encryption"],
+ )
+ else:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ access=module.params["access"],
+ )
+ done = True
+ changed = True
+ if not module.check_mode:
+ if module.params["before_rule"]:
+ res = blade.post_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ before_rule_name=(
+ module.params["name"]
+ + "."
+ + str(module.params["before_rule"]),
+ ),
+ )
+ else:
+ res = blade.post_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule for "
+ "client {0} in policy {1}. Error: {2}".format(
+ module.params["client"],
+ module.params["name"],
+ res.errors[0].message,
+ )
+ )
+ if not done:
+ old_policy_rule = rules[0]
+ if SMB_ENCRYPT_API_VERSION in versions:
+ current_rule = {
+ "client": sorted(old_policy_rule.client),
+ "permission": sorted(old_policy_rule.permission),
+ "encryption": old_policy_rule.encryption,
+ }
+ else:
+ current_rule = {
+ "client": sorted(old_policy_rule.client),
+ "permission": sorted(old_policy_rule.permission),
+ }
+ if SMB_ENCRYPT_API_VERSION in versions:
+ if module.params["smb_encryption"]:
+ new_encryption = module.params["smb_encryption"]
+ else:
+ new_encryption = current_rule["encryption"]
+ if module.params["permission"]:
+ new_permission = sorted(module.params["permission"])
+ else:
+ new_permission = sorted(current_rule["permission"])
+ if module.params["client"]:
+ new_client = sorted(module.params["client"])
+ else:
+ new_client = sorted(current_rule["client"])
+ if SMB_ENCRYPT_API_VERSION in versions:
+ new_rule = {
+ "client": new_client,
+ "permission": new_permission,
+ "encryption": new_encryption,
+ }
+ else:
+ new_rule = {
+ "client": new_client,
+ "permission": new_permission,
+ }
+ if current_rule != new_rule:
+ changed = True
+ if not module.check_mode:
+ if SMB_ENCRYPT_API_VERSION in versions:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ encryption=module.params["smb_encryption"],
+ )
+ else:
+ rule = SmbClientPolicyRule(
+ client=module.params["client"],
+ permission=module.params["permission"],
+ )
+ res = blade.patch_smb_client_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to update SMB client rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ if (
+ module.params["before_rule"]
+ and module.params["before_rule"] != old_policy_rule.index
+ ):
+ changed = True
+ if not module.check_mode:
+ before_name = (
+ module.params["name"]
+ + "."
+ + str(module.params["before_rule"])
+ )
+ res = blade.patch_smb_client_policies_rules(
+ names=[
+ module.params["name"] + "." + str(old_policy_rule.index)
+ ],
+ rule=SmbClientPolicyRule(),
+ before_rule_name=before_name,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to move SMB client rule {0}. Error: {1}".format(
+ module.params["name"]
+ + "."
+ + str(old_policy_rule.index),
+ res.errors[0].message,
+ )
+ )
+ current_policy = list(
+ blade.get_smb_client_policies(names=[module.params["name"]]).items
+ )[0]
+ if current_policy.enabled != module.params["enabled"]:
+ changed = True
+ if not module.check_mode:
+ res = blade.patch_smb_client_policies(
+ policy=SmbClientPolicy(enabled=module.params["enabled"]),
+ names=[module.params["name"]],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+                    msg="Failed to change state of SMB client policy {0}. Error: {1}".format(
+ module.params["name"], res.errors[0].message
+ )
+ )
+ module.exit_json(changed=changed)
+
+
def delete_nfs_policy(module, blade):
"""Delete NFS Export Policy, or Rule
@@ -926,9 +1608,7 @@ def create_nfs_policy(module, blade):
module.params["name"], res.errors[0].message
)
)
- if not module.params["client"]:
- module.fail_json(msg="client is required to create a new rule")
- else:
+ if module.params["client"]:
rule = NfsExportPolicyRule(
client=module.params["client"],
permission=module.params["permission"],
@@ -1061,8 +1741,12 @@ def create_os_policy(module, blade):
"""Create Object Store Access Policy"""
changed = True
policy_name = module.params["account"] + "/" + module.params["name"]
+ versions = list(blade.get_versions().items)
if not module.check_mode:
- res = blade.post_object_store_access_policies(names=[policy_name])
+ res = blade.post_object_store_access_policies(
+ names=[policy_name],
+ policy=ObjectStoreAccessPolicyPost(description=module.params["desc"]),
+ )
if res.status_code != 200:
module.fail_json(
msg="Failed to create access policy {0}.".format(policy_name)
@@ -1078,11 +1762,19 @@ def create_os_policy(module, blade):
s3_delimiters=module.params["s3_delimiters"],
s3_prefixes=module.params["s3_prefixes"],
)
- rule = PolicyRuleObjectAccessPost(
- actions=module.params["actions"],
- resources=module.params["object_resources"],
- conditions=conditions,
- )
+ if SMB_ENCRYPT_API_VERSION in versions:
+ rule = PolicyRuleObjectAccessPost(
+ actions=module.params["actions"],
+ resources=module.params["object_resources"],
+ conditions=conditions,
+ effect=module.params["effect"],
+ )
+ else:
+ rule = PolicyRuleObjectAccessPost(
+ actions=module.params["actions"],
+ resources=module.params["object_resources"],
+ conditions=conditions,
+ )
res = blade.post_object_store_access_policies_rules(
policy_names=policy_name,
names=[module.params["rule"]],
@@ -1118,22 +1810,30 @@ def update_os_policy(module, blade):
policy_names=[policy_name], names=[module.params["rule"]]
)
if current_policy_rule.status_code != 200:
- conditions = PolicyRuleObjectAccessCondition(
- source_ips=module.params["source_ips"],
- s3_delimiters=module.params["s3_delimiters"],
- s3_prefixes=module.params["s3_prefixes"],
- )
- rule = PolicyRuleObjectAccessPost(
- actions=module.params["actions"],
- resources=module.params["object_resources"],
- conditions=conditions,
- )
- res = blade.post_object_store_access_policies_rules(
- policy_names=policy_name,
- names=[module.params["rule"]],
- enforce_action_restrictions=module.params["ignore_enforcement"],
- rule=rule,
- )
+ changed = True
+ if not module.check_mode:
+ conditions = PolicyRuleObjectAccessCondition(
+ source_ips=module.params["source_ips"],
+ s3_delimiters=module.params["s3_delimiters"],
+ s3_prefixes=module.params["s3_prefixes"],
+ )
+ rule = PolicyRuleObjectAccessPost(
+ actions=module.params["actions"],
+ resources=module.params["object_resources"],
+ conditions=conditions,
+ )
+ res = blade.post_object_store_access_policies_rules(
+ policy_names=policy_name,
+ names=[module.params["rule"]],
+ enforce_action_restrictions=module.params["ignore_enforcement"],
+ rule=rule,
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Failed to create rule {0} in policy {1}. Error: {2}".format(
+ module.params["rule"], policy_name, res.errors[0].message
+ )
+ )
else:
old_policy_rule = list(current_policy_rule.items)[0]
current_rule = {
@@ -1500,10 +2200,10 @@ def create_policy(module, blade):
msg="every parameter is out of range (300 to 34560000)"
)
if module.params["at"]:
- attr = Policy(
+ attr = Policyv1(
enabled=module.params["enabled"],
rules=[
- PolicyRule(
+ PolicyRulev1(
keep_for=module.params["keep_for"] * 1000,
every=module.params["every"] * 1000,
at=_convert_to_millisecs(module.params["at"]),
@@ -1512,17 +2212,17 @@ def create_policy(module, blade):
],
)
else:
- attr = Policy(
+ attr = Policyv1(
enabled=module.params["enabled"],
rules=[
- PolicyRule(
+ PolicyRulev1(
keep_for=module.params["keep_for"] * 1000,
every=module.params["every"] * 1000,
)
],
)
else:
- attr = Policy(enabled=module.params["enabled"])
+ attr = Policyv1(enabled=module.params["enabled"])
blade.policies.create_policies(names=[module.params["name"]], policy=attr)
except Exception:
module.fail_json(
@@ -1798,11 +2498,11 @@ def update_policy(module, blade, policy):
changed = True
if not module.check_mode:
try:
- attr = PolicyPatch()
+ attr = PolicyPatchv1()
attr.enabled = module.params["enabled"]
if at_time:
attr.add_rules = [
- PolicyRule(
+ PolicyRulev1(
keep_for=module.params["keep_for"] * 1000,
every=module.params["every"] * 1000,
at=at_time,
@@ -1811,13 +2511,13 @@ def update_policy(module, blade, policy):
]
else:
attr.add_rules = [
- PolicyRule(
+ PolicyRulev1(
keep_for=module.params["keep_for"] * 1000,
every=module.params["every"] * 1000,
)
]
attr.remove_rules = [
- PolicyRule(
+ PolicyRulev1(
keep_for=current_policy["keep_for"] * 1000,
every=current_policy["every"] * 1000,
at=current_policy["at"],
@@ -1842,7 +2542,9 @@ def main():
type="str", default="present", choices=["absent", "present", "copy"]
),
policy_type=dict(
- type="str", default="snapshot", choices=["snapshot", "access", "nfs"]
+ type="str",
+ default="snapshot",
+ choices=["snapshot", "access", "nfs", "smb_share", "smb_client"],
),
enabled=dict(type="bool", default=True),
timezone=dict(type="str"),
@@ -1858,13 +2560,14 @@ def main():
rename=dict(type="str"),
rule=dict(type="str"),
user=dict(type="str"),
- effect=dict(type="str", default="allow", choices=["allow"]),
+ effect=dict(type="str", default="allow", choices=["allow", "deny"]),
actions=dict(
type="list",
elements="str",
choices=[
"s3:*",
"s3:AbortMultipartUpload",
+ "s3:BypassGovernanceRetention",
"s3:CreateBucket",
"s3:DeleteBucket",
"s3:DeleteObject",
@@ -1876,7 +2579,12 @@ def main():
"s3:GetLifecycleConfiguration",
"s3:GetObject",
"s3:GetObjectAcl",
+ "s3:GetObjectLegalHold",
+ "s3:GetObjectLockConfiguration",
+ "s3:GetObjectRetention",
+ "s3:GetObjectTagging",
"s3:GetObjectVersion",
+ "s3:GetObjectVersionTagging",
"s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:ListBucketMultipartUploads",
@@ -1885,6 +2593,10 @@ def main():
"s3:PutBucketVersioning",
"s3:PutLifecycleConfiguration",
"s3:PutObject",
+ "s3:PutObjectLegalHold",
+ "s3:PutObjectLockConfiguration",
+ "s3:PutObjectRetention",
+ "s3:ResolveSafemodeConflicts",
],
),
object_resources=dict(type="list", elements="str"),
@@ -1901,7 +2613,7 @@ def main():
anonuid=dict(type="str"),
anongid=dict(type="str"),
atime=dict(type="bool", default=True),
- client=dict(type="str", default="*"),
+ client=dict(type="str"),
fileid_32bit=dict(type="bool", default=False),
permission=dict(type="str", choices=["rw", "ro"], default="ro"),
secure=dict(type="bool", default=False),
@@ -1913,6 +2625,16 @@ def main():
default=["sys"],
),
before_rule=dict(type="int"),
+ principal=dict(type="str"),
+ change=dict(type="str", choices=["deny", "allow", ""]),
+ read=dict(type="str", choices=["deny", "allow", ""]),
+ full_control=dict(type="str", choices=["deny", "allow", ""]),
+ smb_encryption=dict(
+ type="str",
+ default="optional",
+ choices=["disabled", "optional", "required"],
+ ),
+ desc=dict(type="str", default=""),
)
)
@@ -1920,6 +2642,8 @@ def main():
required_if = [
["policy_type", "access", ["account", "name"]],
["policy_type", "nfs", ["name"]],
+ ["policy_type", "smb_client", ["name"]],
+ ["policy_type", "smb_share", ["name"]],
]
module = AnsibleModule(
@@ -2037,6 +2761,102 @@ def main():
create_nfs_policy(module, blade)
elif state == "absent" and policy:
delete_nfs_policy(module, blade)
+ elif module.params["policy_type"] == "smb_client":
+ if SMB_POLICY_API_VERSION not in versions:
+ module.fail_json(
+ msg=(
+ "Minimum FlashBlade REST version required: {0}".format(
+ SMB_POLICY_API_VERSION
+ )
+ )
+ )
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ blade = get_system(module)
+ try:
+ policy = list(
+ blade.get_smb_client_policies(names=[module.params["name"]]).items
+ )[0]
+ except AttributeError:
+ policy = None
+ if module.params["rename"]:
+ try:
+ new_policy = list(
+ blade.get_smb_client_policies(names=[module.params["rename"]]).items
+ )[0]
+ except AttributeError:
+ new_policy = None
+ if policy and state == "present" and not module.params["rename"]:
+ if module.params["before_rule"]:
+ res = blade.get_smb_client_policies_rules(
+ policy_names=[module.params["name"]],
+ names=[
+ module.params["name"] + "." + str(module.params["before_rule"])
+ ],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Rule index {0} does not exist.".format(
+ module.params["before_rule"]
+ )
+ )
+ update_smb_client_policy(module, blade)
+ elif (
+ state == "present" and module.params["rename"] and policy and not new_policy
+ ):
+ rename_smb_client_policy(module, blade)
+ elif state == "present" and not policy and not module.params["rename"]:
+ create_smb_client_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_smb_client_policy(module, blade)
+ elif module.params["policy_type"] == "smb_share":
+ if SMB_POLICY_API_VERSION not in versions:
+ module.fail_json(
+ msg=(
+ "Minimum FlashBlade REST version required: {0}".format(
+ SMB_POLICY_API_VERSION
+ )
+ )
+ )
+ if not HAS_PYPURECLIENT:
+ module.fail_json(msg="py-pure-client sdk is required for this module")
+ blade = get_system(module)
+ try:
+ policy = list(
+ blade.get_smb_share_policies(names=[module.params["name"]]).items
+ )[0]
+ except AttributeError:
+ policy = None
+ if module.params["rename"]:
+ try:
+ new_policy = list(
+ blade.get_smb_share_policies(names=[module.params["rename"]]).items
+ )[0]
+ except AttributeError:
+ new_policy = None
+ if policy and state == "present" and not module.params["rename"]:
+ if module.params["before_rule"]:
+ res = blade.get_smb_share_policies_rules(
+ policy_names=[module.params["name"]],
+ names=[
+ module.params["name"] + "." + str(module.params["before_rule"])
+ ],
+ )
+ if res.status_code != 200:
+ module.fail_json(
+ msg="Rule index {0} does not exist.".format(
+ module.params["before_rule"]
+ )
+ )
+ update_smb_share_policy(module, blade)
+ elif (
+ state == "present" and module.params["rename"] and policy and not new_policy
+ ):
+ rename_smb_share_policy(module, blade)
+ elif state == "present" and not policy and not module.params["rename"]:
+ create_smb_share_policy(module, blade)
+ elif state == "absent" and policy:
+ delete_smb_share_policy(module, blade)
elif SNAPSHOT_POLICY_API_VERSION in versions:
if not HAS_PYPURECLIENT:
module.fail_json(msg="py-pure-client sdk is required for this module")
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
index 034731994..33aa9a30f 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3acc.py
@@ -38,6 +38,7 @@ options:
quota:
description:
- The effective quota limit to be applied against the size of the account in bytes.
+ - Values can be entered as K, M, T or P
- If set to '' (empty string), the account is unlimited in size.
version_added: 1.11.0
type: str
@@ -48,11 +49,11 @@ options:
will still be sent if the account has a value set for I(quota_limit).
version_added: 1.11.0
type: bool
- default: false
default_quota:
description:
- The value of this field will be used to configure the I(quota_limit) field of newly created buckets
associated with this object store account, if the bucket creation does not specify its own value.
+ - Values can be entered as K, M, T or P
- If set to '' (empty string), the bucket default is unlimited in size.
version_added: 1.11.0
type: str
@@ -62,13 +63,23 @@ options:
associated with this object store account, if the bucket creation does not specify its own value.
version_added: 1.11.0
type: bool
- default: false
+ block_new_public_policies:
+ description:
+ - If set to true, adding bucket policies that grant public access to a bucket is not allowed.
+ type: bool
+ version_added: 1.15.0
+ block_public_access:
+ description:
+ - If set to true, access to a bucket with a public policy is restricted to only authenticated
+ users within the account that bucket belongs to.
+ type: bool
+ version_added: 1.15.0
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
EXAMPLES = r"""
-- name: Crrate object store account foo (with no quotas)
+- name: Create object store account foo (with no quotas)
purestorage.flashblade.purefb_s3acc:
name: foo
fb_url: 10.10.10.2
@@ -97,11 +108,15 @@ RETURN = r"""
HAS_PURESTORAGE = True
try:
- from pypureclient.flashblade import ObjectStoreAccountPatch, BucketDefaults
+ from pypureclient.flashblade import (
+ ObjectStoreAccountPatch,
+ BucketDefaults,
+ PublicAccessConfig,
+ )
except ImportError:
HAS_PURESTORAGE = False
-from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
get_blade,
get_system,
@@ -111,6 +126,7 @@ from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb impo
MIN_REQUIRED_API_VERSION = "1.3"
QUOTA_API_VERSION = "2.1"
+PUBLIC_API_VERSION = "2.12"
def get_s3acc(module, blade):
@@ -126,16 +142,28 @@ def get_s3acc(module, blade):
def update_s3acc(module):
"""Update Object Store Account"""
changed = False
+ public = False
blade = get_system(module)
acc_settings = list(
blade.get_object_store_accounts(names=[module.params["name"]]).items
)[0]
- current_account = {
- "hard_limit": acc_settings.hard_limit_enabled,
- "default_hard_limit": acc_settings.bucket_defaults.hard_limit_enabled,
- "quota": str(acc_settings.quota_limit),
- "default_quota": str(acc_settings.bucket_defaults.quota_limit),
- }
+ if getattr(acc_settings, "public_access_config", None):
+ public = True
+ current_account = {
+ "hard_limit": acc_settings.hard_limit_enabled,
+ "default_hard_limit": acc_settings.bucket_defaults.hard_limit_enabled,
+ "quota": str(acc_settings.quota_limit),
+ "default_quota": str(acc_settings.bucket_defaults.quota_limit),
+ "block_new_public_policies": acc_settings.public_access_config.block_new_public_policies,
+ "block_public_access": acc_settings.public_access_config.block_public_access,
+ }
+ else:
+ current_account = {
+ "hard_limit": acc_settings.hard_limit_enabled,
+ "default_hard_limit": acc_settings.bucket_defaults.hard_limit_enabled,
+ "quota": str(acc_settings.quota_limit),
+ "default_quota": str(acc_settings.bucket_defaults.quota_limit),
+ }
if current_account["quota"] == "None":
current_account["quota"] = ""
if current_account["default_quota"] == "None":
@@ -144,12 +172,48 @@ def update_s3acc(module):
module.params["quota"] = current_account["quota"]
if module.params["default_quota"] is None:
module.params["default_quota"] = current_account["default_quota"]
- new_account = {
- "hard_limit": module.params["hard_limit"],
- "default_hard_limit": module.params["default_hard_limit"],
- "quota": module.params["quota"],
- "default_quota": module.params["default_quota"],
- }
+ if not module.params["default_quota"]:
+ module.params["default_quota"] = ""
+ if not module.params["quota"]:
+ quota = ""
+ else:
+ quota = str(human_to_bytes(module.params["quota"]))
+ if not module.params["default_quota"]:
+ default_quota = ""
+ else:
+ default_quota = str(human_to_bytes(module.params["default_quota"]))
+ if module.params["hard_limit"] is None:
+ hard_limit = current_account["hard_limit"]
+ else:
+ hard_limit = module.params["hard_limit"]
+ if module.params["default_hard_limit"] is None:
+ default_hard_limit = current_account["default_hard_limit"]
+ else:
+ default_hard_limit = module.params["default_hard_limit"]
+ if public:
+ if module.params["block_new_public_policies"] is None:
+ new_public_policies = current_account["block_new_public_policies"]
+ else:
+ new_public_policies = module.params["block_new_public_policies"]
+ if module.params["block_public_access"] is None:
+ public_access = current_account["block_public_access"]
+ else:
+ public_access = module.params["block_public_access"]
+ new_account = {
+ "hard_limit": hard_limit,
+ "default_hard_limit": default_hard_limit,
+ "quota": quota,
+ "default_quota": default_quota,
+ "block_new_public_policies": new_public_policies,
+ "block_public_access": public_access,
+ }
+ else:
+ new_account = {
+ "hard_limit": module.params["hard_limit"],
+ "default_hard_limit": module.params["default_hard_limit"],
+ "quota": quota,
+ "default_quota": default_quota,
+ }
if new_account != current_account:
changed = True
if not module.check_mode:
@@ -169,12 +233,14 @@ def update_s3acc(module):
msg="Failed to update account {0}. "
"Error: {1}".format(module.params["name"], res.errors[0].message)
)
+
module.exit_json(changed=changed)
def create_s3acc(module, blade):
"""Create Object Store Account"""
changed = True
+ versions = blade.api_version.list_versions().versions
if not module.check_mode:
try:
blade.object_store_accounts.create_object_store_accounts(
@@ -188,27 +254,26 @@ def create_s3acc(module, blade):
)
if module.params["quota"] or module.params["default_quota"]:
blade2 = get_system(module)
- if module.params["quota"] and not module.params["default_quota"]:
- osa = ObjectStoreAccountPatch(
- hard_limit_enabled=module.params["hard_limit"],
- quota_limit=module.params["quota"],
- )
- if not module.params["quota"] and module.params["default_quota"]:
- osa = ObjectStoreAccountPatch(
- bucket_defaults=BucketDefaults(
- hard_limit_enabled=module.params["default_hard_limit"],
- quota_limit=module.params["default_quota"],
- )
- )
+ if not module.params["default_quota"]:
+ default_quota = ""
else:
- osa = ObjectStoreAccountPatch(
- hard_limit_enabled=module.params["hard_limit"],
- quota_limit=module.params["quota"],
- bucket_defaults=BucketDefaults(
- hard_limit_enabled=module.params["default_hard_limit"],
- quota_limit=module.params["default_quota"],
- ),
- )
+ default_quota = str(human_to_bytes(module.params["default_quota"]))
+ if not module.params["quota"]:
+ quota = ""
+ else:
+ quota = str(human_to_bytes(module.params["quota"]))
+ if not module.params["hard_limit"]:
+ module.params["hard_limit"] = False
+ if not module.params["default_hard_limit"]:
+ module.params["default_hard_limit"] = False
+ osa = ObjectStoreAccountPatch(
+ hard_limit_enabled=module.params["hard_limit"],
+ quota_limit=quota,
+ bucket_defaults=BucketDefaults(
+ hard_limit_enabled=module.params["default_hard_limit"],
+ quota_limit=default_quota,
+ ),
+ )
res = blade2.patch_object_store_accounts(
object_store_account=osa, names=[module.params["name"]]
)
@@ -220,6 +285,28 @@ def create_s3acc(module, blade):
msg="Failed to set quotas correctly for account {0}. "
"Error: {1}".format(module.params["name"], res.errors[0].message)
)
+ if PUBLIC_API_VERSION in versions:
+ if not module.params["block_new_public_policies"]:
+ module.params["block_new_public_policies"] = False
+ if not module.params["block_public_access"]:
+ module.params["block_public_access"] = False
+ osa = ObjectStoreAccountPatch(
+ public_access_config=PublicAccessConfig(
+ block_new_public_policies=module.params[
+ "block_new_public_policies"
+ ],
+ block_public_access=module.params["block_public_access"],
+ )
+ )
+ res = blade2.patch_object_store_accounts(
+ object_store_account=osa, names=[module.params["name"]]
+ )
+ if res.status_code != 200:
+ module.fail_json(
+                    msg="Failed to set Public Access config correctly for account {0}. "
+ "Error: {1}".format(module.params["name"], res.errors[0].message)
+ )
+
module.exit_json(changed=changed)
@@ -258,8 +345,10 @@ def main():
argument_spec.update(
dict(
name=dict(required=True, type="str"),
- hard_limit=dict(type="bool", default=False),
- default_hard_limit=dict(type="bool", default=False),
+ hard_limit=dict(type="bool"),
+ default_hard_limit=dict(type="bool"),
+ block_new_public_policies=dict(type="bool"),
+ block_public_access=dict(type="bool"),
quota=dict(type="str"),
default_quota=dict(type="str"),
state=dict(default="present", choices=["present", "absent"]),
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
index 55bc05c3f..1905184b1 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_s3user.py
@@ -48,6 +48,12 @@ options:
- If enabled this will override I(imported_key)
type: bool
default: false
+ multiple_keys:
+ description:
+ - Allow multiple access keys to be created for the user.
+ type: bool
+ default: false
+ version_added: "1.12.0"
remove_key:
description:
- Access key to be removed from user
@@ -181,27 +187,29 @@ def update_s3user(module, blade):
key_count += 1
if not exists:
if key_count < 2:
- changed = True
- if not module.check_mode:
- try:
- if (
- module.params["access_key"]
- and module.params["imported_key"]
+ try:
+ if module.params["access_key"] and module.params["imported_key"]:
+ module.warn("'access_key: true' overrides imported keys")
+ if module.params["access_key"]:
+ if key_count == 0 or (
+ key_count >= 1 and module.params["multiple_keys"]
):
- module.warn("'access_key: true' overrides imported keys")
- if module.params["access_key"]:
- result = blade.object_store_access_keys.create_object_store_access_keys(
- object_store_access_key=ObjectStoreAccessKey(
- user={"name": user}
+ changed = True
+ if not module.check_mode:
+ result = blade.object_store_access_keys.create_object_store_access_keys(
+ object_store_access_key=ObjectStoreAccessKey(
+ user={"name": user}
+ )
)
- )
- s3user_facts["fb_s3user"] = {
- "user": user,
- "access_key": result.items[0].secret_access_key,
- "access_id": result.items[0].name,
- }
- else:
- if IMPORT_KEY_API_VERSION in versions:
+ s3user_facts["fb_s3user"] = {
+ "user": user,
+ "access_key": result.items[0].secret_access_key,
+ "access_id": result.items[0].name,
+ }
+ else:
+ if IMPORT_KEY_API_VERSION in versions:
+ changed = True
+ if not module.check_mode:
blade.object_store_access_keys.create_object_store_access_keys(
names=[module.params["imported_key"]],
object_store_access_key=ObjectStoreAccessKeyPost(
@@ -211,19 +219,19 @@ def update_s3user(module, blade):
],
),
)
- except Exception:
- if module.params["imported_key"]:
- module.fail_json(
- msg="Object Store User {0}: Access Key import failed".format(
- user
- )
+ except Exception:
+ if module.params["imported_key"]:
+ module.fail_json(
+ msg="Object Store User {0}: Access Key import failed".format(
+ user
)
- else:
- module.fail_json(
- msg="Object Store User {0}: Access Key creation failed".format(
- user
- )
+ )
+ else:
+ module.fail_json(
+ msg="Object Store User {0}: Access Key creation failed".format(
+ user
)
+ )
else:
module.warn(
"Object Store User {0}: Maximum Access Key count reached".format(
@@ -370,6 +378,7 @@ def main():
name=dict(required=True, type="str"),
account=dict(required=True, type="str"),
access_key=dict(default="false", type="bool"),
+ multiple_keys=dict(default="false", type="bool"),
imported_key=dict(type="str", no_log=False),
remove_key=dict(type="str", no_log=False),
imported_secret=dict(type="str", no_log=True),
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py
index 21e83c002..79f53adc2 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_timeout.py
@@ -123,7 +123,7 @@ def main():
if 5 < module.params["timeout"] > 180 and module.params["timeout"] != 0:
module.fail_json(msg="Timeout value must be between 5 and 180 minutes")
blade = get_system(module)
- current_timeout = list(blade.get_arrays().items)[0].idle_timeout * 60000
+ current_timeout = list(blade.get_arrays().items)[0].idle_timeout / 60000
if state == "present" and current_timeout != module.params["timeout"]:
set_timeout(module, blade)
elif state == "absent" and current_timeout != 0:
diff --git a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py
index 6e7dbe49d..be8716454 100644
--- a/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py
+++ b/ansible_collections/purestorage/flashblade/plugins/modules/purefb_userpolicy.py
@@ -249,6 +249,7 @@ def main():
names=[module.params["account"] + "/" + module.params["name"]]
).status_code
!= 200
+ and state != "show"
):
module.fail_json(
msg="Account User {0}/{1} does not exist".format(
diff --git a/ansible_collections/purestorage/flashblade/tests/config.yaml b/ansible_collections/purestorage/flashblade/tests/config.yaml
new file mode 100644
index 000000000..9e402bda7
--- /dev/null
+++ b/ansible_collections/purestorage/flashblade/tests/config.yaml
@@ -0,0 +1,2 @@
+modules:
+ python_requires: ">=3.6"
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt
deleted file mode 100644
index 771db46ec..000000000
--- a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.10.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-plugins/modules/purefb_info.py validate-modules:return-syntax-error
-plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt
deleted file mode 100644
index 771db46ec..000000000
--- a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.11.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-plugins/modules/purefb_info.py validate-modules:return-syntax-error
-plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt
deleted file mode 100644
index 771db46ec..000000000
--- a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.12.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-plugins/modules/purefb_info.py validate-modules:return-syntax-error
-plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt
deleted file mode 100644
index 771db46ec..000000000
--- a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.13.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-plugins/modules/purefb_info.py validate-modules:return-syntax-error
-plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt
deleted file mode 100644
index 771db46ec..000000000
--- a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.14.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-plugins/modules/purefb_info.py validate-modules:return-syntax-error
-plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt
deleted file mode 100644
index 771db46ec..000000000
--- a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.15.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-plugins/modules/purefb_info.py validate-modules:return-syntax-error
-plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt
deleted file mode 100644
index 771db46ec..000000000
--- a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.16.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-plugins/modules/purefb_info.py validate-modules:return-syntax-error
-plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt b/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt
deleted file mode 100644
index 771db46ec..000000000
--- a/ansible_collections/purestorage/flashblade/tests/sanity/ignore-2.9.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-plugins/modules/purefb_info.py validate-modules:return-syntax-error
-plugins/modules/purefb_inventory.py validate-modules:return-syntax-error
diff --git a/ansible_collections/purestorage/fusion/.github/workflows/ansible-lint.yaml b/ansible_collections/purestorage/fusion/.github/workflows/ansible-lint.yaml
index 0b2102184..384c5ac93 100644
--- a/ansible_collections/purestorage/fusion/.github/workflows/ansible-lint.yaml
+++ b/ansible_collections/purestorage/fusion/.github/workflows/ansible-lint.yaml
@@ -1,5 +1,5 @@
-name: Ansible Lint # feel free to pick your own name
-on: [push, pull_request]
+name: Ansible Lint # feel free to pick your own name
+"on": [push, pull_request]
jobs:
build:
diff --git a/ansible_collections/purestorage/fusion/.github/workflows/black.yaml b/ansible_collections/purestorage/fusion/.github/workflows/black.yaml
index 68061652a..10b16296c 100644
--- a/ansible_collections/purestorage/fusion/.github/workflows/black.yaml
+++ b/ansible_collections/purestorage/fusion/.github/workflows/black.yaml
@@ -1,6 +1,6 @@
name: Black
-on: [push, pull_request]
+"on": [push, pull_request]
jobs:
lint:
diff --git a/ansible_collections/purestorage/fusion/.github/workflows/create-release.yaml b/ansible_collections/purestorage/fusion/.github/workflows/create-release.yaml
index 25725c15d..68da05e4d 100644
--- a/ansible_collections/purestorage/fusion/.github/workflows/create-release.yaml
+++ b/ansible_collections/purestorage/fusion/.github/workflows/create-release.yaml
@@ -1,6 +1,6 @@
name: Release Collection
-on: workflow_dispatch
+"on": workflow_dispatch
jobs:
create_github_release:
runs-on: ubuntu-latest
@@ -23,7 +23,7 @@ jobs:
if [[ "$response" == *"$RELEASE_VERSION"* ]]; then
trap "exit 1" EXIT
echo "Error: Tag $RELEASE_VERSION already exists"
- exit 1
+ exit 1
fi
- name: Extract changelog
diff --git a/ansible_collections/purestorage/fusion/.github/workflows/main.yml b/ansible_collections/purestorage/fusion/.github/workflows/main.yml
index da0a69969..5c9a3914b 100644
--- a/ansible_collections/purestorage/fusion/.github/workflows/main.yml
+++ b/ansible_collections/purestorage/fusion/.github/workflows/main.yml
@@ -1,6 +1,6 @@
name: Pure Storage Ansible CI
-on:
+"on":
pull_request:
push:
schedule:
@@ -13,36 +13,23 @@ jobs:
strategy:
matrix:
ansible:
- - stable-2.11
- - stable-2.12
- - stable-2.13
- stable-2.14
- stable-2.15
+ - stable-2.16
- devel
python-version:
- - 3.8
- 3.9
- "3.10"
- "3.11"
exclude:
- - python-version: "3.11"
- ansible: stable-2.11
- - python-version: "3.11"
- ansible: stable-2.12
- - python-version: "3.11"
- ansible: stable-2.13
- - python-version: "3.10"
- ansible: stable-2.11
- - python-version: 3.8
- ansible: stable-2.14
- - python-version: 3.8
- ansible: stable-2.15
- - python-version: 3.8
+ - python-version: 3.9
+ ansible: stable-2.16
+ - python-version: 3.9
ansible: devel
steps:
- name: Check out code
uses: actions/checkout@v3
-
+
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v3
with:
diff --git a/ansible_collections/purestorage/fusion/.github/workflows/rh_automation_hub_token_keep_alive.yml b/ansible_collections/purestorage/fusion/.github/workflows/rh_automation_hub_token_keep_alive.yml
new file mode 100644
index 000000000..f4e0a667b
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/.github/workflows/rh_automation_hub_token_keep_alive.yml
@@ -0,0 +1,19 @@
+---
+name: "Red Hat Automation Hub - Keep token alive"
+# The SSO token to upload content to Automation Hub must be accessed once every 30 days or it will be turned off
+
+"on":
+ schedule:
+ - cron: '0 12 1,15 * *' # run 12pm on the 1st and 15th of the month
+
+jobs:
+ keep_rh_sso_token_alive:
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "Run curl command"
+ run: |
+ curl ${{ secrets.RH_AUTOMATION_HUB_URL }} \
+ -d grant_type=refresh_token \
+ -d client_id="cloud-services" \
+ -d refresh_token="${{ secrets.RH_AUTOMATION_HUB_TOKEN }}" \
+ --fail --silent --show-error --output /dev/null
diff --git a/ansible_collections/purestorage/fusion/.github/workflows/stale.yml b/ansible_collections/purestorage/fusion/.github/workflows/stale.yml
index 7bbc0505b..ee7c9796e 100644
--- a/ansible_collections/purestorage/fusion/.github/workflows/stale.yml
+++ b/ansible_collections/purestorage/fusion/.github/workflows/stale.yml
@@ -1,6 +1,6 @@
name: Mark stale issues and pull requests
-on:
+"on":
schedule:
- cron: "0 0 * * *"
diff --git a/ansible_collections/purestorage/fusion/CHANGELOG.rst b/ansible_collections/purestorage/fusion/CHANGELOG.rst
index b4d9bd6ae..b6a0f071a 100644
--- a/ansible_collections/purestorage/fusion/CHANGELOG.rst
+++ b/ansible_collections/purestorage/fusion/CHANGELOG.rst
@@ -5,6 +5,29 @@ Purestorage.Fusion Release Notes
.. contents:: Topics
+v1.6.1
+======
+
+Minor Changes
+-------------
+
+- fusion_volume - Allow creating a new volume from already existing volume or volume snapshot
+
+v1.6.0
+======
+
+Minor Changes
+-------------
+
+- all modules - return resource's id parameter on update and create.
+- fusion_array - added `apartment_id` argument, which can be used when creating an array.
+- fusion_pg - introduced `destroy_snapshots_on_delete` which, if set to true, ensures that before deleting placement group, snapshots within the placement group will be deleted.
+- fusion_pp - 'local_rpo' duration parsing documented, 'local_retention' minimum value fixed
+- fusion_pp - Allow leading zeros in duration strings
+- fusion_pp - Change the minimum value of the protection policy local retention from 1 to 10
+- fusion_pp - introduced `destroy_snapshots_on_delete` which, if set to true, ensures that before deleting protection policy, snapshots within the protection policy will be deleted.
+- fusion_volume - Allow creating a new volume from already existing volume or volume snapshot
+
v1.5.0
======
diff --git a/ansible_collections/purestorage/fusion/FILES.json b/ansible_collections/purestorage/fusion/FILES.json
index b3f73b7e0..ad0452035 100644
--- a/ansible_collections/purestorage/fusion/FILES.json
+++ b/ansible_collections/purestorage/fusion/FILES.json
@@ -15,17 +15,24 @@
"format": 1
},
{
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
"name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b568495166ca2ef38576e62cc6f1eb2d1f4caa988b020112e14650d37510dd83",
+ "chksum_sha256": "17ac55399ed69cbac46280c27dde9825e556a3b5214ff7defef12cc1dbbae598",
"format": 1
},
{
"name": "changelogs/.plugin-cache.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "492bf617d0924a14708a862efd096e1a032e1a1243f25e2287e44a6e072e2f1a",
+ "chksum_sha256": "64d8cc09b182d4991facb3abb8835c821b4e0cb72d6dd687ddbe16402b6209cc",
"format": 1
},
{
@@ -347,7 +354,7 @@
"name": "tests/unit/modules/test_fusion_az.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "75be72264bf7d95ddc73d72c4763b6e877a05feaab2f6d9b91a55448bb77af51",
+ "chksum_sha256": "7d6b7a4a5a233ee47c6788d370bf1c6a6da5adf9b758b43c6d5557ba87b4dc58",
"format": 1
},
{
@@ -368,7 +375,7 @@
"name": "tests/unit/mocks/operation_mock.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "aaa5ad3b4a9bcd10a95947af5f06ec4153512927b56d94f4d442da6007d43c7b",
+ "chksum_sha256": "373ea55faf5262f157724aaf6d1ca31b963415ad1b180b2fa7833983acd1d8f2",
"format": 1
},
{
@@ -417,7 +424,7 @@
"name": "tests/unit/module_utils/test_parsing.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "58c8e7b81680984e3e606cc56210aa8afb93c020939f1d3d585b5cf7de61c513",
+ "chksum_sha256": "a7efaf296b085c6ffbf5174218e575491dcd850f74a159068d0004222d6fade6",
"format": 1
},
{
@@ -438,14 +445,14 @@
"name": "tests/functional/test_fusion_region.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8d108a21480c4cb9c9d2810e809ea876173b3d43621f417c0957c77d21f33f76",
+ "chksum_sha256": "5ba0b74a3580885a0e3dc5693adb85c1d863fe664588275c175131d59122b7f9",
"format": 1
},
{
"name": "tests/functional/test_fusion_ss.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "676bba88edd7f73957c605356d31f4bd61cd144d354280c373beb3689196d5cd",
+ "chksum_sha256": "6b44ef093fcfed0897a14395095185ac6bc1dd6a5f9b153c0e31857734a9a10e",
"format": 1
},
{
@@ -459,56 +466,56 @@
"name": "tests/functional/test_fusion_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8b6a8f18e610fcd4f2aea719e5b51ff58ef6f6b06412afd98309255ccab7f8a4",
+ "chksum_sha256": "d307a55f86ea7683542e58d1416fe8d005d3dfcf65b84c8bd6b0f5c914e2aadf",
"format": 1
},
{
"name": "tests/functional/test_fusion_ts.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dc844fc260f337525396d47da3e012fbb2f1f5188a96c3d1071515bdac879583",
+ "chksum_sha256": "5dadaf161e2daf306ef5d0eb72dd8e53db398a630f82b6d511fab4e836991489",
"format": 1
},
{
"name": "tests/functional/test_fusion_pg.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4da9f7a334491933d40fe8d32fbae767393f84d744537a7d03a57d84a1693b38",
+ "chksum_sha256": "8df05411ef3cd13bd9f7436df88b7bb4c0611a3db23988435b5cc39e9546a967",
"format": 1
},
{
"name": "tests/functional/test_fusion_nig.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f20b2ab1eed1bd182d68a00198537f960e9c7e844cfb8df3c99922e98e2365c1",
+ "chksum_sha256": "a06addf4b81f91e57c7b2413da2a39279b3a794fecab23c3bb2a4609c22873f1",
"format": 1
},
{
"name": "tests/functional/test_fusion_se.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "79d30463a37430a6a697778bb58fe2ced187672ec74ddae6b98f191069931b04",
+ "chksum_sha256": "ad77974f2c4e7ea6ee3681c57ccaf235b2401e83d299495c862a717f6f2b7199",
"format": 1
},
{
"name": "tests/functional/test_fusion_az.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d6b7e24d40c1268b1ce0de3557210fbd89e97441dcd384522263f5982a5922b5",
+ "chksum_sha256": "39d40d8fee3e346a8dc7db8becf15c5149fbf13d1af6e29b2325a6dc9f8e9624",
"format": 1
},
{
"name": "tests/functional/test_fusion_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "05d60add74e73360eefd8679e808c2c5c5c774726a15c29d923dd077099e9537",
+ "chksum_sha256": "417845afcefefa6a2e739be86ed8ef7474b43409d44df3079bc4c537ff0821df",
"format": 1
},
{
"name": "tests/functional/test_fusion_hap.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0a8ffe64ef5a561e2eb102f58b20f62c99c8a79022be63976f6e8c19608178ab",
+ "chksum_sha256": "b7f1abe557c37b29f2a9270636f8b81745d72281141caac0fb0931a79dd391b4",
"format": 1
},
{
@@ -529,56 +536,56 @@
"name": "tests/functional/test_fusion_sc.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cf1794f2f91b496adc54131be84e1ea81263ccf603cf648fecd0a52c7a1da467",
+ "chksum_sha256": "b3d781841cc7f3d2956f48dffd0e6ef35a0121841a1a0ac6915068c60ed5bda2",
"format": 1
},
{
"name": "tests/functional/test_fusion_ra.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "718b4be5026d83e2fe3fefe6838637efce66d9880c635a80603844266b3e926c",
+ "chksum_sha256": "36206507be9b65d09a4e01ff75740f6507f5a987180dde6f2d720ea76abc9b9b",
"format": 1
},
{
"name": "tests/functional/test_fusion_tenant.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b5c6413737db89d121c98f7798b36bb736b367fb6c2ee1651645c742822f9b66",
+ "chksum_sha256": "59d4786a7dfb78f72ebca838b7dbfda7444a861d7113b50ddebffd82bea56ade",
"format": 1
},
{
"name": "tests/functional/utils.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d6e339b28c2b3ed78108244bde3950424b3acc81a6a3b9c8cd7b32a53fbd5ba9",
+ "chksum_sha256": "e3d1d3cbd790a64d783f52914e52158e339a7c7eb86c6cb63546d72820e6d6c5",
"format": 1
},
{
"name": "tests/functional/test_fusion_api_client.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "44f1df7dfe3c53b30ae7c2c2fd2873b651f2306bba67a26310a8c2d86c53f04e",
+ "chksum_sha256": "5010777699d6259c58c38e7e51c84baa554b484736516ac180f3b1d3b5f844f8",
"format": 1
},
{
"name": "tests/functional/test_fusion_array.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "910cd4c0859534f5de3b8cb743c9f549b93f25c0f18399158adff63b933a8110",
+ "chksum_sha256": "ca4057d295d239c6c6e6afb7e701f9d337fb44a2dbaf29bb5805cbed80bbe93e",
"format": 1
},
{
"name": "tests/functional/test_fusion_pp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3578756616fff28885b379a93221c3dfe8d083a9d685bd8b16878b7f9bf045c9",
+ "chksum_sha256": "c45337495b95f164eed6e3e6d14c038246d042f16315aea494276380f8afa370",
"format": 1
},
{
"name": "tests/helpers.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "74d50a53434e0ca164aa41ea5f272755e9b6ad0dc105d3eec53f62d3e188034c",
+ "chksum_sha256": "24f55093b6e7486c6c6c6a9cf57875ec69496ac23253f1c5a7f79c870c00d38a",
"format": 1
},
{
@@ -841,6 +848,20 @@
"format": 1
},
{
+ "name": "test",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "test/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a009a349eaaf78c93ff56072d2ef171937bdb884e4976592ab5aaa9c68e1044",
+ "format": 1
+ },
+ {
"name": "plugins",
"ftype": "dir",
"chksum_type": null,
@@ -858,42 +879,42 @@
"name": "plugins/modules/fusion_ts.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "91e740ffbf27ab279cc6fbd07b5a59e92de941a4e88f437040eae89c1b8f1f3b",
+ "chksum_sha256": "6d4c3f141409f2b1a5128912dbfe14341e17f6a9e5c20d14f3a5e82b75da4462",
"format": 1
},
{
"name": "plugins/modules/fusion_nig.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "59cd2a72c5544ebf83ff6fe239f623ec03b4de84efb7cb08fdf4b4159544bc2c",
+ "chksum_sha256": "c220a2793545a832096d78d5d926a7a3037a828a0f2e042601e7e00afa29471b",
"format": 1
},
{
"name": "plugins/modules/fusion_api_client.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5b92104038365e11b958a75a521a843c7b599e950e1d7815ff40a01a519dfff5",
+ "chksum_sha256": "a0e35d215e9cce9b2f5e64e8661a12414ddea48df96a24e0f7b7ae9071aad43e",
"format": 1
},
{
"name": "plugins/modules/fusion_ni.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5edf635cb12288af965649f3941bac2f1eea781c2e23793ac40988faedd34735",
+ "chksum_sha256": "148fdf12357a1b56c30343bb2a46b3d4cc2ff4f179f667c764d64101abc5057b",
"format": 1
},
{
"name": "plugins/modules/fusion_ss.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c278ef52dbd80a2c143b56ace8f31ebcca5ae76426bc7e38bea3e7e66a1a5742",
+ "chksum_sha256": "9ffdfd9f3ae2fa8df1a37b52370cc49e265801d25efcc17eb65649d92f72d4d9",
"format": 1
},
{
"name": "plugins/modules/fusion_pp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "29b9019589464b7650892f84ebe112d03f38e03e8c804d6ce35401f85e31603f",
+ "chksum_sha256": "63543885c2adc09b3f829f0600774cf2ca098010ee2c9b58ce359ed383d7a780",
"format": 1
},
{
@@ -907,42 +928,42 @@
"name": "plugins/modules/fusion_array.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0edaabce3e589722d0dd299f7f81d66811351e678046068fae179ad3f331fa4e",
+ "chksum_sha256": "6baebe9c43933cc7bc0c43455a4019eb88805b0f11671a2331cc86bb8775d438",
"format": 1
},
{
"name": "plugins/modules/fusion_az.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f0e9ea0a969913323e917d5b7d631c38e33b3e55a3b641cf553c8ea01228f0a5",
+ "chksum_sha256": "ecaf8c75e0941895ba3b8f349c25049d7e2d6eb2468a609926d57c2d865ba264",
"format": 1
},
{
"name": "plugins/modules/fusion_pg.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3c03eb5a59d749a954fe09d4c2a57ec36d30f9bdd51565c8e1e3d3e293d2bbc5",
+ "chksum_sha256": "819e0298797184fd0a5d54fdb6dcebc9e8fad118a353c217311eddd7608ac2d4",
"format": 1
},
{
"name": "plugins/modules/fusion_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e2b6a4837e1abc3efc2fa88707cfa80f618b800bccdad6bd5a5ac3f60ba77d14",
+ "chksum_sha256": "ccf9e89f5c10eac9d992ac0740c082ebaaedf7d8d9ded38322223cc57a2f98d5",
"format": 1
},
{
"name": "plugins/modules/fusion_tenant.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "03823b7283e0de940ee3e95bf5645595e4330759ad7dd18f0411c319774ec173",
+ "chksum_sha256": "18887fdb6f7a44b647db0e25f726d605e00ded8396bc9841b29ddec1b56e101e",
"format": 1
},
{
"name": "plugins/modules/fusion_se.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "355892de73b5d265e1e57e8ff31b3dd0775c04a191ded999131ebbfdbbcd7231",
+ "chksum_sha256": "9c77f1b938823461f586c66949527a5377dff942a7ec1024d729f190b2e127fa",
"format": 1
},
{
@@ -956,21 +977,21 @@
"name": "plugins/modules/fusion_ra.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4a1bd14fe1038fbf09d4196143f4ac262ef7627ee550ea2efbaeceaa3e0a6176",
+ "chksum_sha256": "20c1a2bbde557ebc6b03453552c2a2f718f11271ac96d75e7beb43c04095157c",
"format": 1
},
{
"name": "plugins/modules/fusion_sc.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7302c71a969dbbc3fb636455ee16ef807a3e2c212d307c305b86504b2b42603c",
+ "chksum_sha256": "12711136fb3fa243fd65f15df009f9f0b68d5ee9fef9b11f5f7d7de38526afdb",
"format": 1
},
{
"name": "plugins/modules/fusion_region.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d6cb89588cca8681cfc12651ace18375eba88d2aaaacf2ece2d7652d9821fde9",
+ "chksum_sha256": "1bb063fced01a15617c99e4be32a95265e1678c75fc892ea3e1b9e93f3f683fb",
"format": 1
},
{
@@ -984,7 +1005,7 @@
"name": "plugins/modules/fusion_hap.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ffc4d041a552ac1d45ab51868428f9281829d2f345581eef8f379b1692e50a1a",
+ "chksum_sha256": "0b3e80b8cc3d42fc06b657d01258f24a805b6027ccfdc4286ddd66ddce554ea1",
"format": 1
},
{
@@ -1019,7 +1040,7 @@
"name": "plugins/module_utils/prerequisites.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "589f5ad7eed9dfe57263a9d3ec7dd6b179da0406aa2a6706ec056f3ab60af5cd",
+ "chksum_sha256": "d4c21413eceda5c98229edd0999d45c3b87554fae9eeb096322d03dab91ac870",
"format": 1
},
{
@@ -1033,14 +1054,14 @@
"name": "plugins/module_utils/errors.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fa7c577ce38810b137980e87e6e5b87e95fb43e101d02652df7cbb434f630699",
+ "chksum_sha256": "8b39a68c54dd07e3061824d2ad60ba4f9cebbf7b3c020283ceb3859e4e0e28a9",
"format": 1
},
{
"name": "plugins/module_utils/parsing.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "efe7b474e24d7fa53dc134f7fd1e4062542a22d5ea9f8b16715ab8a353d1d953",
+ "chksum_sha256": "61e0fac0fa4ff6bbd1eb7f9dd1ba1b6822965f5f7c7691537222d3af37e725c2",
"format": 1
},
{
@@ -1051,6 +1072,13 @@
"format": 1
},
{
+ "name": "plugins/module_utils/snapshots.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7571aba639702400e659241f9d8f61dd72f6ef3119b0af5296d7522dbd5125f",
+ "format": 1
+ },
+ {
"name": "plugins/module_utils/startup.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -1124,35 +1152,42 @@
"name": ".github/workflows/ansible-lint.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4c85688d98b71e3a6594530a362cd5d2cf83842ceaccd0e0fc76e233777c1cef",
+ "chksum_sha256": "62dbc43cafdab8da066ba0d86a08924e433f8b2919cdef935c116c5962d3a572",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/rh_automation_hub_token_keep_alive.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c7f513c85853a9f152635f5fc9f4f8a1e621cc8b2a40c405d9efc69830800f6",
"format": 1
},
{
"name": ".github/workflows/stale.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0bdef4889afabcd627fc30711a0809c7468b8c9e64cbcebe1334f794a41e7bd9",
+ "chksum_sha256": "544ccc9f17e16d9087802e3dcec69741e6ff79e31cf7302947ce2c08126ce1d4",
"format": 1
},
{
"name": ".github/workflows/black.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c62a1a4fcc1e00f3e8f295863e304db520124bfd3e9b0c2cccd6d78343b679c5",
+ "chksum_sha256": "b82c6a8af5e7c7d2113fecafa178bf6df94de434d4dc6e2ed6c3bc695da74f41",
"format": 1
},
{
"name": ".github/workflows/create-release.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "43ea888cb2b22ddc86ea989f75967accaff0065cc43c39a0043ba6cf2f424378",
+ "chksum_sha256": "12ebf07984e4908dd2a6bed45d8bf38641bf3f264fe30ead9ce2849e6fcc8eb5",
"format": 1
},
{
"name": ".github/workflows/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0c8c2578e81d44e4a9611c57a59c6fbc7dd947ff149a169ea65f497484d6d4a4",
+ "chksum_sha256": "60e50d69898144d914ad2af759c744bd3ec8ccc78141cbeb13b850f283e20653",
"format": 1
},
{
@@ -1166,7 +1201,7 @@
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a753d4c6dc5cdd493fd60f147cf68f644ec6f301b895fc249093914db1cf3ab1",
+ "chksum_sha256": "2958e9b57938d749df6845d5d1a7e65c499990637af1056923ed6efa22c5684e",
"format": 1
},
{
@@ -1180,7 +1215,7 @@
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9f829699b200db8a8282ce6f44d6ae28a2e3377e0e611b0d327db64b0cbba321",
+ "chksum_sha256": "359c08cf506ebfd67477b25cc2f4763a1495f398cfb3cc9dd2a29595dce990db",
"format": 1
},
{
@@ -1201,7 +1236,7 @@
"name": "CHANGELOG.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "28eab01a890a0719cf1908791d9575a4d47014547796bb077f44702dbbc5632a",
+ "chksum_sha256": "9c8780730bed93a88dcaafd561c2a40c06cc7add04279bce943a6e2a7a2f8778",
"format": 1
}
],
diff --git a/ansible_collections/purestorage/fusion/MANIFEST.json b/ansible_collections/purestorage/fusion/MANIFEST.json
index 4fe3bc8b5..877ab80f8 100644
--- a/ansible_collections/purestorage/fusion/MANIFEST.json
+++ b/ansible_collections/purestorage/fusion/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "purestorage",
"name": "fusion",
- "version": "1.5.0",
+ "version": "1.6.1",
"authors": [
"Pure Storage Ansible Team <pure-ansible-team@purestorage.com>"
],
@@ -27,7 +27,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3e406206ea2f67e0a9846219a9d5d2813aef76437e1b05d12d341aded53cfd13",
+ "chksum_sha256": "fcdf40d7eabc65ac35369e36b6ed40579ce799ee0ea94caed4b7ab06c0efd0b5",
"format": 1
},
"format": 1
diff --git a/ansible_collections/purestorage/fusion/README.md b/ansible_collections/purestorage/fusion/README.md
index b2a36de10..0bb22423d 100644
--- a/ansible_collections/purestorage/fusion/README.md
+++ b/ansible_collections/purestorage/fusion/README.md
@@ -4,14 +4,20 @@
<img src="https://github.com/Pure-Storage-Ansible/Fusion-Collection/workflows/Pure%20Storage%20Ansible%20CI/badge.svg">
<a href="https://github.com/psf/black"><img src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
+# DEPRECATION NOTICE
+
+The Pure Storage Fusion Ansible Collection is no longer being developed and is being deprecated.
+
+No further development work will be performed on this repo and the repo will be archived.
+
# Pure Storage Fusion Collection
-The Pure Storage Fusion collection consists of the latest versions of the Fusion modules.
+The Pure Storage Fusion collection consists of the latest versions of the Fusion v1 modules.
## Requirements
-- ansible-core >= 2.11
-- Python >= 3.8
+- ansible-core >= 2.14.0
+- Python >= 3.9
- Authorized API Application ID for Pure Storage Pure1 and associated Private Key
- Refer to Pure Storage documentation on how to create these.
- purefusion >= 1.0.4
diff --git a/ansible_collections/purestorage/fusion/changelogs/.plugin-cache.yaml b/ansible_collections/purestorage/fusion/changelogs/.plugin-cache.yaml
index 23a38bf01..190d97ce5 100644
--- a/ansible_collections/purestorage/fusion/changelogs/.plugin-cache.yaml
+++ b/ansible_collections/purestorage/fusion/changelogs/.plugin-cache.yaml
@@ -111,4 +111,4 @@ plugins:
strategy: {}
test: {}
vars: {}
-version: 1.5.0
+version: 1.6.1
diff --git a/ansible_collections/purestorage/fusion/changelogs/changelog.yaml b/ansible_collections/purestorage/fusion/changelogs/changelog.yaml
index 82ef323c8..c5d3f432d 100644
--- a/ansible_collections/purestorage/fusion/changelogs/changelog.yaml
+++ b/ansible_collections/purestorage/fusion/changelogs/changelog.yaml
@@ -343,3 +343,39 @@ releases:
- 3289_functests_pp_pg_ra.yml
- 99_update_protection_policy_retention_description.yaml
release_date: '2023-05-31'
+ 1.6.0:
+ changes:
+ minor_changes:
+ - all modules - return resource's id parameter on update and create.
+ - fusion_array - added `apartment_id` argument, which can be used when creating
+ an array.
+ - fusion_pg - introduced `destroy_snapshots_on_delete` which, if set to true,
+ ensures that before deleting placement group, snapshots within the placement
+ group will be deleted.
+ - fusion_pp - 'local_rpo' duration parsing documented, 'local_retention' minimum
+ value fixed
+ - fusion_pp - Allow leading zeros in duration strings
+ - fusion_pp - Change the minimum value of the protection policy local retention
+ from 1 to 10
+ - fusion_pp - introduced `destroy_snapshots_on_delete` which, if set to true,
+ ensures that before deleting protection policy, snapshots within the protection
+ policy will be deleted.
+ - fusion_volume - Allow creating a new volume from already existing volume or
+ volume snapshot
+ fragments:
+ - 148_add_apartment_id_to_fusion_array.yml
+ - 151_create_volume_using_existing_volume_or_snapshot.yaml
+ - 152_fix_rpo_local_retention_doc.yaml
+ - 154_add_destroy_snapshots_on_delete_to_pp_and_pg.yml
+ - 156_allow_leading_zeros.yaml
+ - 159_fix_protection_policy_local_retention_validation.yaml
+ - 160_add_id_on_exit.yml
+ release_date: '2023-07-31'
+ 1.6.1:
+ changes:
+ minor_changes:
+ - fusion_volume - Allow creating a new volume from already existing volume or
+ volume snapshot
+ fragments:
+ - 151_create_volume_using_existing_volume_or_snapshot.yaml
+ release_date: '2024-02-08'
diff --git a/ansible_collections/purestorage/fusion/meta/runtime.yml b/ansible_collections/purestorage/fusion/meta/runtime.yml
index 1812440b2..6af15681b 100644
--- a/ansible_collections/purestorage/fusion/meta/runtime.yml
+++ b/ansible_collections/purestorage/fusion/meta/runtime.yml
@@ -1,5 +1,5 @@
---
-requires_ansible: ">=2.11.0"
+requires_ansible: ">=2.14.0"
plugin_routing:
modules:
fusion_tn:
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/errors.py b/ansible_collections/purestorage/fusion/plugins/module_utils/errors.py
index 0edf364cf..f3d574edc 100644
--- a/ansible_collections/purestorage/fusion/plugins/module_utils/errors.py
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/errors.py
@@ -162,7 +162,7 @@ def format_failed_fusion_operation_exception(exception):
if not code:
code = error.http_code
operation_name = op.request_type
- except Exception as e:
+ except Exception:
pass
output = ""
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/parsing.py b/ansible_collections/purestorage/fusion/plugins/module_utils/parsing.py
index a2cd75245..1bcb8b812 100644
--- a/ansible_collections/purestorage/fusion/plugins/module_utils/parsing.py
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/parsing.py
@@ -11,7 +11,7 @@ __metaclass__ = type
METRIC_SUFFIXES = ["K", "M", "G", "T", "P"]
duration_pattern = re.compile(
- r"^((?P<Y>[1-9]\d*)Y)?((?P<W>[1-9]\d*)W)?((?P<D>[1-9]\d*)D)?(((?P<H>[1-9]\d*)H)?((?P<M>[1-9]\d*)M)?)?$"
+ r"^((?P<Y>\d+)Y)?((?P<W>\d+)W)?((?P<D>\d+)D)?(((?P<H>\d+)H)?((?P<M>\d+)M)?)?$"
)
duration_transformation = {
"Y": 365 * 24 * 60,
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/prerequisites.py b/ansible_collections/purestorage/fusion/plugins/module_utils/prerequisites.py
index a4edaf341..db00a9c6f 100644
--- a/ansible_collections/purestorage/fusion/plugins/module_utils/prerequisites.py
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/prerequisites.py
@@ -136,7 +136,7 @@ def _check_import(ansible_module, module, package=None, version_requirements=Non
:param version_requirements: a string, version requirements for 'package'
"""
try:
- mod = importlib.import_module(module)
+ importlib.import_module(module)
except ImportError:
ansible_module.fail_json(
msg="Error: Python package '{0}' required and missing".format(module)
diff --git a/ansible_collections/purestorage/fusion/plugins/module_utils/snapshots.py b/ansible_collections/purestorage/fusion/plugins/module_utils/snapshots.py
new file mode 100644
index 000000000..ed34c1c0e
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/plugins/module_utils/snapshots.py
@@ -0,0 +1,29 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
+
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
+)
+
+
+def delete_snapshot(fusion, snap, snapshots_api):
+ patch = purefusion.SnapshotPatch(destroyed=purefusion.NullableBoolean(True))
+ op = snapshots_api.update_snapshot(
+ body=patch,
+ tenant_name=snap.tenant.name,
+ tenant_space_name=snap.tenant_space.name,
+ snapshot_name=snap.name,
+ )
+ await_operation(fusion, op)
+ op = snapshots_api.delete_snapshot(
+ tenant_name=snap.tenant.name,
+ tenant_space_name=snap.tenant_space.name,
+ snapshot_name=snap.name,
+ )
+ await_operation(fusion, op)
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_api_client.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_api_client.py
index 39860449d..42254338f 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_api_client.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_api_client.py
@@ -99,14 +99,15 @@ def create_client(module, fusion):
id_api_instance = purefusion.IdentityManagerApi(fusion)
changed = True
+ id = None
if not module.check_mode:
client = purefusion.APIClientPost(
public_key=module.params["public_key"],
display_name=module.params["name"],
)
- id_api_instance.create_api_client(client)
-
- module.exit_json(changed=changed)
+ res = id_api_instance.create_api_client(client)
+ id = res.id
+ module.exit_json(changed=changed, id=id)
def main():
@@ -129,8 +130,8 @@ def main():
create_client(module, fusion)
elif client_id is not None and state == "absent":
delete_client(module, fusion, client_id)
- else:
- module.exit_json(changed=False)
+ if client_id is not None:
+ module.exit_json(changed=False, id=client_id)
module.exit_json(changed=False)
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_array.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_array.py
index f7933eabe..ec94d616f 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_array.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_array.py
@@ -60,6 +60,10 @@ options:
description:
- Appliance ID of the array.
type: str
+ apartment_id:
+ description:
+ - The Apartment ID of the Array.
+ type: str
maintenance_mode:
description:
- "Switch the array into maintenance mode or back.
@@ -123,6 +127,7 @@ def create_array(module, fusion):
"""Create Array"""
array_api_instance = purefusion.ArraysApi(fusion)
+ id = None
if not module.check_mode:
if not module.params["display_name"]:
@@ -135,14 +140,17 @@ def create_array(module, fusion):
host_name=module.params["host_name"],
name=module.params["name"],
appliance_id=module.params["appliance_id"],
+ apartment_id=module.params["apartment_id"],
)
res = array_api_instance.create_array(
array,
availability_zone_name=module.params["availability_zone"],
region_name=module.params["region"],
)
- await_operation(fusion, res)
- return True
+ res_op = await_operation(fusion, res)
+ id = res_op.result.resource.id
+
+ return True, id
def update_array(module, fusion):
@@ -222,6 +230,7 @@ def main():
availability_zone=dict(type="str", required=True, aliases=["az"]),
display_name=dict(type="str"),
region=dict(type="str", required=True),
+ apartment_id=dict(type="str"),
appliance_id=dict(type="str"),
host_name=dict(type="str"),
hardware_type=dict(
@@ -246,17 +255,24 @@ def main():
array = get_array(module, fusion)
changed = False
+ id = None
+ if array is not None:
+ id = array.id
+
if not array and state == "present":
module.fail_on_missing_params(["hardware_type", "host_name", "appliance_id"])
- changed = create_array(module, fusion) | update_array(
+ changed, id = create_array(module, fusion)
+ update_array(
module, fusion
) # update is run to set properties which cannot be set on creation and instead use defaults
elif array and state == "present":
- changed = changed | update_array(module, fusion)
+ changed = update_array(module, fusion)
elif array and state == "absent":
changed = changed | delete_array(module, fusion)
- else:
- module.exit_json(changed=False)
+ module.exit_json(changed=changed)
+
+ if id is not None:
+ module.exit_json(changed=changed, id=id)
module.exit_json(changed=changed)
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_az.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_az.py
index 02647d397..b4a493861 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_az.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_az.py
@@ -112,6 +112,7 @@ def create_az(module, fusion):
az_api_instance = purefusion.AvailabilityZonesApi(fusion)
changed = True
+ id = None
if not module.check_mode:
if not module.params["display_name"]:
display_name = module.params["name"]
@@ -125,9 +126,10 @@ def create_az(module, fusion):
op = az_api_instance.create_availability_zone(
azone, region_name=module.params["region"]
)
- await_operation(fusion, op)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=id)
def main():
@@ -152,8 +154,6 @@ def main():
create_az(module, fusion)
elif azone and state == "absent":
delete_az(module, fusion)
- else:
- module.exit_json(changed=False)
module.exit_json(changed=False)
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_hap.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_hap.py
index 3f45ea2dd..c4df0af49 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_hap.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_hap.py
@@ -170,8 +170,10 @@ def create_hap(module, fusion):
display_name=display_name,
)
)
- await_operation(fusion, op)
- module.exit_json(changed=changed)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
+
+ module.exit_json(changed=changed, id=id)
def delete_hap(module, fusion):
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ni.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ni.py
index 6816ed841..82c896fac 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ni.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ni.py
@@ -162,7 +162,7 @@ def update_ni(module, fusion, ni):
),
)
patches.append(patch)
-
+ id = None
if not module.check_mode:
for patch in patches:
op = ni_api_instance.update_network_interface(
@@ -172,11 +172,12 @@ def update_ni(module, fusion, ni):
array_name=module.params["array"],
net_intf_name=module.params["name"],
)
- await_operation(fusion, op)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
changed = len(patches) != 0
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=id)
def main():
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_nig.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_nig.py
index d6056fd5a..d40b813b9 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_nig.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_nig.py
@@ -146,6 +146,7 @@ def create_nig(module, fusion):
):
module.fail_json(msg="`gateway` must be an address in subnet `prefix`")
+ id = None
if not module.check_mode:
display_name = module.params["display_name"] or module.params["name"]
if module.params["group_type"] == "eth":
@@ -171,13 +172,14 @@ def create_nig(module, fusion):
availability_zone_name=module.params["availability_zone"],
region_name=module.params["region"],
)
- await_operation(fusion, op)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
changed = True
else:
# to prevent future unintended error
module.warn(f"group_type={module.params['group_type']} is not implemented")
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=id)
def delete_nig(module, fusion):
@@ -220,7 +222,7 @@ def update_nig(module, fusion, nig):
changed = len(patches) != 0
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=nig.id)
def main():
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_pg.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_pg.py
index 57843d896..6d6f0eb94 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_pg.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_pg.py
@@ -36,6 +36,11 @@ options:
type: str
default: present
choices: [ absent, present ]
+ destroy_snapshots_on_delete:
+ description:
+        - "Before deleting the placement group, any snapshots within the placement group will be deleted."
+ - "If `false` then any snapshots will need to be deleted as a separate step before removing the placement group."
+ type: bool
tenant:
description:
- The name of the tenant.
@@ -116,6 +121,9 @@ from ansible_collections.purestorage.fusion.plugins.module_utils.startup import
from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
await_operation,
)
+from ansible_collections.purestorage.fusion.plugins.module_utils.snapshots import (
+ delete_snapshot,
+)
def get_pg(module, fusion):
@@ -153,9 +161,10 @@ def create_pg(module, fusion):
tenant_name=module.params["tenant"],
tenant_space_name=module.params["tenant_space"],
)
- await_operation(fusion, op)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
- return True
+ return True, id
def update_display_name(module, fusion, patches, pg):
@@ -213,6 +222,16 @@ def delete_pg(module, fusion):
"""Delete Placement Group"""
pg_api_instance = purefusion.PlacementGroupsApi(fusion)
if not module.check_mode:
+ if module.params["destroy_snapshots_on_delete"]:
+ snapshots_api = purefusion.SnapshotsApi(fusion)
+ snapshots = snapshots_api.list_snapshots(
+ placement_group=module.params["name"],
+ tenant_name=module.params["tenant"],
+ tenant_space_name=module.params["tenant_space"],
+ )
+ for snap in snapshots.items:
+ delete_snapshot(fusion, snap, snapshots_api)
+
op = pg_api_instance.delete_placement_group(
placement_group_name=module.params["name"],
tenant_name=module.params["tenant"],
@@ -229,6 +248,7 @@ def main():
argument_spec.update(
dict(
name=dict(type="str", required=True),
+ destroy_snapshots_on_delete=dict(type="bool"),
display_name=dict(type="str"),
tenant=dict(type="str", required=True),
tenant_space=dict(type="str", required=True),
@@ -257,19 +277,28 @@ def main():
state = module.params["state"]
pgroup = get_pg(module, fusion)
+ id = None
+ if pgroup is not None:
+ id = pgroup.id
+
if state == "present" and not pgroup:
module.fail_on_missing_params(
["region", "availability_zone", "storage_service"]
)
- changed = create_pg(module, fusion) or changed
+ changed, id = create_pg(module, fusion) or changed
if module.params["array"]:
# changing placement requires additional update
pgroup = get_pg(module, fusion)
- changed = update_pg(module, fusion, pgroup) or changed
+ changedUpdate = update_pg(module, fusion, pgroup)
+ changed = changed | changedUpdate
elif state == "present" and pgroup:
changed = update_pg(module, fusion, pgroup) or changed
elif state == "absent" and pgroup:
changed = delete_pg(module, fusion) or changed
+ module.exit_json(changed=changed)
+
+ if id is not None:
+ module.exit_json(changed=changed, id=id)
module.exit_json(changed=changed)
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_pp.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_pp.py
index abce9195c..216209d84 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_pp.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_pp.py
@@ -31,6 +31,11 @@ options:
default: present
choices: [ present, absent ]
type: str
+ destroy_snapshots_on_delete:
+ description:
+        - "Before deleting the protection policy, any snapshots associated with the protection policy will be deleted."
+ - "If `false` then any snapshots will need to be deleted as a separate step before removing the protection policy."
+ type: bool
display_name:
description:
- The human name of the protection policy.
@@ -39,8 +44,10 @@ options:
local_rpo:
description:
- Recovery Point Objective for snapshots.
- - Value should be specified in minutes.
- Minimum value is 10 minutes.
+ - Value can be provided as m(inutes), h(ours),
+ d(ays), w(eeks), or y(ears).
+ - If no unit is provided, minutes are assumed.
type: str
local_retention:
description:
@@ -95,6 +102,9 @@ from ansible_collections.purestorage.fusion.plugins.module_utils.startup import
from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
await_operation,
)
+from ansible_collections.purestorage.fusion.plugins.module_utils.snapshots import (
+ delete_snapshot,
+)
def get_pp(module, fusion):
@@ -114,11 +124,13 @@ def create_pp(module, fusion):
pp_api_instance = purefusion.ProtectionPoliciesApi(fusion)
local_rpo = parse_minutes(module, module.params["local_rpo"])
local_retention = parse_minutes(module, module.params["local_retention"])
- if local_retention < 1:
- module.fail_json(msg="Local Retention must be a minimum of 1 minutes")
+ if local_retention < 10:
+ module.fail_json(msg="Local Retention must be a minimum of 10 minutes")
if local_rpo < 10:
module.fail_json(msg="Local RPO must be a minimum of 10 minutes")
+
changed = True
+ id = None
if not module.check_mode:
if not module.params["display_name"]:
display_name = module.params["name"]
@@ -136,9 +148,10 @@ def create_pp(module, fusion):
],
)
)
- await_operation(fusion, op)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=id)
def delete_pp(module, fusion):
@@ -146,6 +159,15 @@ def delete_pp(module, fusion):
pp_api_instance = purefusion.ProtectionPoliciesApi(fusion)
changed = True
if not module.check_mode:
+ if module.params["destroy_snapshots_on_delete"]:
+ protection_policy = get_pp(module, fusion)
+ snapshots_api = purefusion.SnapshotsApi(fusion)
+ snapshots = snapshots_api.query_snapshots(
+ protection_policy_id=protection_policy.id
+ )
+ for snap in snapshots.items:
+ delete_snapshot(fusion, snap, snapshots_api)
+
op = pp_api_instance.delete_protection_policy(
protection_policy_name=module.params["name"],
)
@@ -160,6 +182,7 @@ def main():
argument_spec.update(
dict(
name=dict(type="str", required=True),
+ destroy_snapshots_on_delete=dict(type="bool"),
display_name=dict(type="str"),
local_rpo=dict(type="str"),
local_retention=dict(type="str"),
@@ -177,8 +200,6 @@ def main():
create_pp(module, fusion)
elif policy and state == "absent":
delete_pp(module, fusion)
- else:
- module.exit_json(changed=False)
module.exit_json(changed=False)
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ra.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ra.py
index 7cfc7d866..c2ae2d5cf 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ra.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ra.py
@@ -43,7 +43,7 @@ options:
type: str
api_client_key:
description:
- - The key of API client to assign the role to.
+ - The issuer ID of the API client to assign the role to.
type: str
scope:
description:
@@ -127,7 +127,7 @@ def get_principal(module, fusion):
def user_to_principal(fusion, user_id):
- """Given a human readable Fusion user, such as a Pure 1 App ID
+ """Given a human-readable Fusion user, such as a Pure 1 App ID
return the associated principal
"""
id_api_instance = purefusion.IdentityManagerApi(fusion)
@@ -139,7 +139,7 @@ def user_to_principal(fusion, user_id):
def apiclient_to_principal(fusion, api_client_key):
- """Given an API client key, such as "pure1:apikey:123xXxyYyzYzASDF" (also known as issuer_id),
+ """Given an API client issuer ID, such as "pure1:apikey:123xXxyYyzYzASDF",
return the associated principal
"""
id_api_instance = purefusion.IdentityManagerApi(fusion)
@@ -189,6 +189,7 @@ def create_ra(module, fusion):
ra_api_instance = purefusion.RoleAssignmentsApi(fusion)
changed = True
+ id = None
if not module.check_mode:
principal = get_principal(module, fusion)
scope = get_scope(module.params)
@@ -196,8 +197,10 @@ def create_ra(module, fusion):
op = ra_api_instance.create_role_assignment(
assignment, role_name=module.params["role"]
)
- await_operation(fusion, op)
- module.exit_json(changed=changed)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
+
+ module.exit_json(changed=changed, id=id)
def delete_ra(module, fusion):
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_region.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_region.py
index fbcbff4b0..de40e7dc2 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_region.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_region.py
@@ -96,6 +96,7 @@ def create_region(module, fusion):
reg_api_instance = purefusion.RegionsApi(fusion)
changed = True
+ id = None
if not module.check_mode:
if not module.params["display_name"]:
display_name = module.params["name"]
@@ -106,9 +107,10 @@ def create_region(module, fusion):
display_name=display_name,
)
op = reg_api_instance.create_region(region)
- await_operation(fusion, op)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=id)
def delete_region(module, fusion):
@@ -144,7 +146,7 @@ def update_region(module, fusion, region):
)
await_operation(fusion, op)
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=region.id)
def main():
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_sc.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_sc.py
index 2327b8d48..59fc0025e 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_sc.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_sc.py
@@ -160,6 +160,7 @@ def create_sc(module, fusion):
module.fail_json(msg="Size limit is not within the required range")
changed = True
+ id = None
if not module.check_mode:
if not module.params["display_name"]:
display_name = module.params["name"]
@@ -175,9 +176,10 @@ def create_sc(module, fusion):
op = sc_api_instance.create_storage_class(
s_class, storage_service_name=module.params["storage_service"]
)
- await_operation(fusion, op)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=id)
def update_sc(module, fusion, s_class):
@@ -201,7 +203,7 @@ def update_sc(module, fusion, s_class):
)
await_operation(fusion, op)
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=s_class.id)
def delete_sc(module, fusion):
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_se.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_se.py
index 9eed4bea0..3a191a166 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_se.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_se.py
@@ -269,7 +269,7 @@ def get_se(module, fusion):
def create_se(module, fusion):
"""Create Storage Endpoint"""
se_api_instance = purefusion.StorageEndpointsApi(fusion)
-
+ id = None
if not module.check_mode:
endpoint_type = None
@@ -307,9 +307,10 @@ def create_se(module, fusion):
region_name=module.params["region"],
availability_zone_name=module.params["availability_zone"],
)
- await_operation(fusion, op)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
- module.exit_json(changed=True)
+ module.exit_json(changed=True, id=id)
def delete_se(module, fusion):
@@ -351,7 +352,7 @@ def update_se(module, fusion, se):
changed = len(patches) != 0
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=se.id)
def main():
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ss.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ss.py
index 3fdbb07dd..4e6388249 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ss.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ss.py
@@ -106,6 +106,7 @@ def create_ss(module, fusion):
ss_api_instance = purefusion.StorageServicesApi(fusion)
changed = True
+ id = None
if not module.check_mode:
if not module.params["display_name"]:
display_name = module.params["name"]
@@ -117,9 +118,10 @@ def create_ss(module, fusion):
hardware_types=module.params["hardware_types"],
)
op = ss_api_instance.create_storage_service(s_service)
- await_operation(fusion, op)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=id)
def delete_ss(module, fusion):
@@ -151,6 +153,7 @@ def update_ss(module, fusion, ss):
)
patches.append(patch)
+ id = None
if not module.check_mode:
for patch in patches:
op = ss_api_instance.update_storage_service(
@@ -161,7 +164,7 @@ def update_ss(module, fusion, ss):
changed = len(patches) != 0
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=ss.id)
def main():
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_tenant.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_tenant.py
index 96e890a6b..85224a6c5 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_tenant.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_tenant.py
@@ -87,6 +87,7 @@ def create_tenant(module, fusion):
api_instance = purefusion.TenantsApi(fusion)
changed = True
+ id = None
if not module.check_mode:
if not module.params["display_name"]:
display_name = module.params["name"]
@@ -97,9 +98,10 @@ def create_tenant(module, fusion):
display_name=display_name,
)
op = api_instance.create_tenant(tenant)
- await_operation(fusion, op)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=id)
def update_tenant(module, fusion, tenant):
@@ -122,7 +124,7 @@ def update_tenant(module, fusion, tenant):
)
await_operation(fusion, op)
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=tenant.id)
def delete_tenant(module, fusion):
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ts.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ts.py
index 33fb0187a..ac60476bc 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_ts.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_ts.py
@@ -95,6 +95,7 @@ def create_ts(module, fusion):
ts_api_instance = purefusion.TenantSpacesApi(fusion)
changed = True
+ id = None
if not module.check_mode:
if not module.params["display_name"]:
display_name = module.params["name"]
@@ -108,9 +109,10 @@ def create_ts(module, fusion):
tspace,
tenant_name=module.params["tenant"],
)
- await_operation(fusion, op)
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=id)
def update_ts(module, fusion, ts):
@@ -138,7 +140,7 @@ def update_ts(module, fusion, ts):
changed = len(patches) != 0
- module.exit_json(changed=changed)
+ module.exit_json(changed=changed, id=ts.id)
def delete_ts(module, fusion):
diff --git a/ansible_collections/purestorage/fusion/plugins/modules/fusion_volume.py b/ansible_collections/purestorage/fusion/plugins/modules/fusion_volume.py
index 5b19064f5..38dee8650 100644
--- a/ansible_collections/purestorage/fusion/plugins/modules/fusion_volume.py
+++ b/ansible_collections/purestorage/fusion/plugins/modules/fusion_volume.py
@@ -73,6 +73,21 @@ options:
To clear, assign empty list: host_access_policies: []'
type: list
elements: str
+ source_volume:
+ description:
+ - The source volume name. It must live within the same tenant space.
+ Cannot be used together with `source_snapshot` or `source_volume_snapshot`.
+ type: str
+ source_snapshot:
+ description:
+ - The source snapshot name. It must live within the same tenant space.
+ Cannot be used together with `source_volume`.
+ type: str
+ source_volume_snapshot:
+ description:
+ - The source volume snapshot name. It must live within the same tenant space.
+ Cannot be used together with `source_volume`.
+ type: str
rename:
description:
- New name for volume.
@@ -86,6 +101,7 @@ EXAMPLES = r"""
purestorage.fusion.fusion_volume:
name: foo
storage_class: fred
+ placement_group: pg
size: 1T
tenant: test
tenant_space: space_1
@@ -93,6 +109,31 @@ EXAMPLES = r"""
issuer_id: key_name
private_key_file: "az-admin-private-key.pem"
+- name: Create new volume based on a volume from the same tenant space
+ purestorage.fusion.fusion_volume:
+ name: foo
+ storage_class: fred
+ placement_group: pg
+ tenant: test
+ tenant_space: space_1
+ state: present
+ source_volume: "original_volume_name"
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
+- name: Create new volume based on a volume snapshot from the same tenant space
+ purestorage.fusion.fusion_volume:
+ name: foo
+ storage_class: fred
+ placement_group: pg
+ tenant: test
+ tenant_space: space_1
+ state: present
+ source_snapshot: "snap"
+ source_volume_snapshot: "vol_snap"
+ issuer_id: key_name
+ private_key_file: "az-admin-private-key.pem"
+
- name: Extend the size of an existing volume named foo
purestorage.fusion.fusion_volume:
name: foo
@@ -116,24 +157,24 @@ EXAMPLES = r"""
RETURN = r"""
"""
-try:
- import fusion as purefusion
-except ImportError:
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
- fusion_argument_spec,
-)
-from ansible_collections.purestorage.fusion.plugins.module_utils.parsing import (
- parse_number_with_metric_suffix,
+from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
+ await_operation,
)
from ansible_collections.purestorage.fusion.plugins.module_utils.startup import (
setup_fusion,
)
-from ansible_collections.purestorage.fusion.plugins.module_utils.operations import (
- await_operation,
+from ansible_collections.purestorage.fusion.plugins.module_utils.parsing import (
+ parse_number_with_metric_suffix,
)
+from ansible_collections.purestorage.fusion.plugins.module_utils.fusion import (
+ fusion_argument_spec,
+)
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ import fusion as purefusion
+except ImportError:
+ pass
def get_volume(module, fusion):
@@ -166,28 +207,30 @@ def extract_current_haps(volume):
def create_volume(module, fusion):
"""Create Volume"""
-
- size = parse_number_with_metric_suffix(module, module.params["size"])
-
+ id = None
if not module.check_mode:
display_name = module.params["display_name"] or module.params["name"]
volume_api_instance = purefusion.VolumesApi(fusion)
+ source_link = get_source_link_from_parameters(module.params)
volume = purefusion.VolumePost(
- size=size,
+ size=None # when cloning a volume, size is not required
+ if source_link
+ else parse_number_with_metric_suffix(module, module.params["size"]),
storage_class=module.params["storage_class"],
placement_group=module.params["placement_group"],
name=module.params["name"],
display_name=display_name,
protection_policy=module.params["protection_policy"],
+ source_link=source_link,
)
op = volume_api_instance.create_volume(
volume,
tenant_name=module.params["tenant"],
tenant_space_name=module.params["tenant_space"],
)
- await_operation(fusion, op)
-
- return True
+ res_op = await_operation(fusion, op)
+ id = res_op.result.resource.id
+ return True, id
def update_host_access_policies(module, current, patches):
@@ -273,6 +316,17 @@ def update_protection_policy(module, current, patches):
patches.append(patch)
+def update_source_link(module, fusion, current, patches):
+ source_link = get_source_link_from_parameters(module.params)
+ if source_link is not None and (
+ current.source is None or current.source.self_link != source_link
+ ):
+ patch = purefusion.VolumePatch(
+ source_link=purefusion.NullableString(source_link)
+ )
+ patches.append(patch)
+
+
def apply_patches(module, fusion, patches):
volume_api_instance = purefusion.VolumesApi(fusion)
for patch in patches:
@@ -313,6 +367,7 @@ def update_volume(module, fusion):
update_storage_class(module, current, patches)
update_placement_group(module, current, patches)
update_host_access_policies(module, current, patches)
+ update_source_link(module, fusion, current, patches)
elif module.params["state"] == "absent" and not current.destroyed:
update_size(module, current, patches)
update_protection_policy(module, current, patches)
@@ -320,6 +375,7 @@ def update_volume(module, fusion):
update_storage_class(module, current, patches)
update_placement_group(module, current, patches)
update_host_access_policies(module, current, patches)
+ update_source_link(module, fusion, current, patches)
update_destroyed(module, current, patches)
if not module.check_mode:
@@ -355,16 +411,46 @@ def eradicate_volume(module, fusion):
return True
+def get_source_link_from_parameters(params):
+ tenant = params["tenant"]
+ tenant_space = params["tenant_space"]
+ volume = params["source_volume"]
+ snapshot = params["source_snapshot"]
+ volume_snapshot = params["source_volume_snapshot"]
+ if (
+ tenant is None or tenant_space is None
+ ): # should not happen as those parameters are always required by the ansible module
+ return None
+ if volume is not None:
+ return f"/tenants/{tenant}/tenant-spaces/{tenant_space}/volumes/{volume}"
+ if snapshot is not None and volume_snapshot is not None:
+ return f"/tenants/{tenant}/tenant-spaces/{tenant_space}/snapshots/{snapshot}/volume-snapshots/{volume_snapshot}"
+ return None
+
+
def validate_arguments(module, volume):
"""Validates most argument conditions and possible unacceptable argument combinations"""
state = module.params["state"]
if state == "present" and not volume:
- module.fail_on_missing_params(["placement_group", "storage_class", "size"])
+ module.fail_on_missing_params(["placement_group", "storage_class"])
+
+ if (
+ module.params["size"] is None
+ and module.params["source_volume"] is None
+ and module.params["source_snapshot"] is None
+ ):
+ module.fail_json(
+ msg="Either `size`, `source_volume` or `source_snapshot` parameter is required when creating a volume."
+ )
if module.params["state"] == "absent" and (
module.params["host_access_policies"]
- or (volume and volume.host_access_policies)
+ or (
+ module.params["host_access_policies"] is None
+ and volume
+ and volume.host_access_policies
+ )
):
module.fail_json(
msg=(
@@ -378,7 +464,7 @@ def validate_arguments(module, volume):
msg="'eradicate: true' cannot be used together with 'state: present'"
)
- if module.params["size"]:
+ if module.params["size"] is not None:
size = parse_number_with_metric_suffix(module, module.params["size"])
if size < 1048576 or size > 4503599627370496: # 1MB to 4PB
module.fail_json(
@@ -412,6 +498,9 @@ def main():
eradicate=dict(type="bool", default=False),
state=dict(type="str", default="present", choices=["absent", "present"]),
size=dict(type="str"),
+ source_volume=dict(type="str"),
+ source_snapshot=dict(type="str"),
+ source_volume_snapshot=dict(type="str"),
)
)
@@ -419,9 +508,22 @@ def main():
"placement_group": "storage_class",
}
+ mutually_exclusive = [
+ # a new volume cannot be based on a volume and a snapshot at the same time
+ # also, when cloning a volume, size of original volume is used
+ ("source_volume", "source_snapshot", "size"),
+ ]
+
+ required_together = [
+ # when creating a volume from snapshot, we need to know both snapshot name and snapshot volume name
+ ("source_snapshot", "source_volume_snapshot"),
+ ]
+
module = AnsibleModule(
argument_spec,
required_by=required_by,
+ mutually_exclusive=mutually_exclusive,
+ required_together=required_together,
supports_check_mode=True,
)
fusion = setup_fusion(module)
@@ -436,12 +538,19 @@ def main():
module.exit_json(changed=False)
changed = False
+ id = None
+ if volume is not None:
+ id = volume.id
if state == "present" and not volume:
- changed = changed | create_volume(module, fusion)
+ changed, id = create_volume(module, fusion)
# volume might exist even if soft-deleted, so we still have to update it
changed = changed | update_volume(module, fusion)
if module.params["eradicate"]:
changed = changed | eradicate_volume(module, fusion)
+ module.exit_json(changed=changed)
+
+ if id is not None:
+ module.exit_json(changed=changed, id=id)
module.exit_json(changed=changed)
diff --git a/ansible_collections/purestorage/fusion/test/config.yaml b/ansible_collections/purestorage/fusion/test/config.yaml
new file mode 100644
index 000000000..9e402bda7
--- /dev/null
+++ b/ansible_collections/purestorage/fusion/test/config.yaml
@@ -0,0 +1,2 @@
+modules:
+ python_requires: ">=3.6"
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_api_client.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_api_client.py
index 77f753656..295c62bd6 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_api_client.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_api_client.py
@@ -147,7 +147,19 @@ def test_api_client_create(m_im_api, current_clients):
api_obj = MagicMock()
api_obj.list_api_clients = MagicMock(return_value=current_clients)
api_obj.get_api_client = MagicMock(side_effect=purefusion.rest.ApiException)
- api_obj.create_api_client = MagicMock()
+ api_obj.create_api_client = MagicMock(
+ return_value=FakeApiClient(
+ "321321",
+ "self_link_value",
+ "client_test",
+ "client_test",
+ "apikey:name:test",
+ "321321",
+ 321321,
+ 321321,
+ "321321",
+ )
+ )
api_obj.delete_api_client = MagicMock()
m_im_api.return_value = api_obj
@@ -156,6 +168,7 @@ def test_api_client_create(m_im_api, current_clients):
fusion_api_client.main()
assert exc.value.changed is True
+ assert exc.value.id == "321321"
# check api was called correctly
api_obj.list_api_clients.assert_called_once_with()
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_array.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_array.py
index 0343bb1dc..6af1e1136 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_array.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_array.py
@@ -22,6 +22,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
FailedOperationMock,
OperationMock,
SuccessfulOperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -46,6 +47,7 @@ def module_args():
"region": "region1",
"availability_zone": "az1",
"appliance_id": "23984573498573",
+ "apartment_id": "76586785687",
"host_name": "array_1",
"hardware_type": "flash-array-x",
"maintenance_mode": False,
@@ -65,7 +67,7 @@ def current_array(module_args):
"region": module_args["region"],
"availability_zone": module_args["availability_zone"],
"appliance_id": module_args["appliance_id"],
- "apartment_id": "76586785687",
+ "apartment_id": module_args["apartment_id"],
"host_name": module_args["host_name"],
"hardware_type": module_args["hardware_type"],
"maintenance_mode": module_args["maintenance_mode"],
@@ -332,7 +334,7 @@ def test_array_create(m_array_api, m_op_api, hw_type, main_m, unav_m, module_arg
"region": module_args["region"],
"availability_zone": module_args["availability_zone"],
"appliance_id": module_args["appliance_id"],
- "apartment_id": "76586785687",
+ "apartment_id": module_args["apartment_id"],
"host_name": module_args["host_name"],
"hardware_type": module_args["hardware_type"],
"maintenance_mode": not module_args[
@@ -364,6 +366,7 @@ def test_array_create(m_array_api, m_op_api, hw_type, main_m, unav_m, module_arg
fusion_array.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_array.assert_called_with(
@@ -378,6 +381,7 @@ def test_array_create(m_array_api, m_op_api, hw_type, main_m, unav_m, module_arg
host_name=module_args["host_name"],
name=module_args["name"],
appliance_id=module_args["appliance_id"],
+ apartment_id=module_args["apartment_id"],
),
availability_zone_name=module_args["availability_zone"],
region_name=module_args["region"],
@@ -429,7 +433,7 @@ def test_array_create_without_display_name(m_array_api, m_op_api, module_args):
"region": module_args["region"],
"availability_zone": module_args["availability_zone"],
"appliance_id": module_args["appliance_id"],
- "apartment_id": "76586785687",
+ "apartment_id": module_args["apartment_id"],
"host_name": module_args["host_name"],
"hardware_type": module_args["hardware_type"],
"maintenance_mode": not module_args["maintenance_mode"],
@@ -457,6 +461,7 @@ def test_array_create_without_display_name(m_array_api, m_op_api, module_args):
fusion_array.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_array.assert_called_with(
@@ -471,6 +476,7 @@ def test_array_create_without_display_name(m_array_api, m_op_api, module_args):
host_name=module_args["host_name"],
name=module_args["name"],
appliance_id=module_args["appliance_id"],
+ apartment_id=module_args["apartment_id"],
),
availability_zone_name=module_args["availability_zone"],
region_name=module_args["region"],
@@ -554,6 +560,7 @@ def test_array_create_exception(
host_name=module_args["host_name"],
name=module_args["name"],
appliance_id=module_args["appliance_id"],
+ apartment_id=module_args["apartment_id"],
),
availability_zone_name=module_args["availability_zone"],
region_name=module_args["region"],
@@ -583,7 +590,7 @@ def test_array_create_second_exception(
"region": module_args["region"],
"availability_zone": module_args["availability_zone"],
"appliance_id": module_args["appliance_id"],
- "apartment_id": "76586785687",
+ "apartment_id": module_args["apartment_id"],
"host_name": module_args["host_name"],
"hardware_type": module_args["hardware_type"],
"maintenance_mode": not module_args["maintenance_mode"],
@@ -623,6 +630,7 @@ def test_array_create_second_exception(
host_name=module_args["host_name"],
name=module_args["name"],
appliance_id=module_args["appliance_id"],
+ apartment_id=module_args["apartment_id"],
),
availability_zone_name=module_args["availability_zone"],
region_name=module_args["region"],
@@ -667,6 +675,7 @@ def test_array_create_op_fails(m_array_api, m_op_api, module_args):
host_name=module_args["host_name"],
name=module_args["name"],
appliance_id=module_args["appliance_id"],
+ apartment_id=module_args["apartment_id"],
),
availability_zone_name=module_args["availability_zone"],
region_name=module_args["region"],
@@ -687,7 +696,7 @@ def test_array_create_second_op_fails(m_array_api, m_op_api, module_args):
"region": module_args["region"],
"availability_zone": module_args["availability_zone"],
"appliance_id": module_args["appliance_id"],
- "apartment_id": "76586785687",
+ "apartment_id": module_args["apartment_id"],
"host_name": module_args["host_name"],
"hardware_type": module_args["hardware_type"],
"maintenance_mode": not module_args["maintenance_mode"],
@@ -729,6 +738,7 @@ def test_array_create_second_op_fails(m_array_api, m_op_api, module_args):
host_name=module_args["host_name"],
name=module_args["name"],
appliance_id=module_args["appliance_id"],
+ apartment_id=module_args["apartment_id"],
),
availability_zone_name=module_args["availability_zone"],
region_name=module_args["region"],
@@ -787,6 +797,7 @@ def test_array_create_op_exception(
host_name=module_args["host_name"],
name=module_args["name"],
appliance_id=module_args["appliance_id"],
+ apartment_id=module_args["apartment_id"],
),
availability_zone_name=module_args["availability_zone"],
region_name=module_args["region"],
@@ -816,7 +827,7 @@ def test_array_create_second_op_exception(
"region": module_args["region"],
"availability_zone": module_args["availability_zone"],
"appliance_id": module_args["appliance_id"],
- "apartment_id": "76586785687",
+ "apartment_id": module_args["apartment_id"],
"host_name": module_args["host_name"],
"hardware_type": module_args["hardware_type"],
"maintenance_mode": not module_args["maintenance_mode"],
@@ -858,6 +869,7 @@ def test_array_create_second_op_exception(
host_name=module_args["host_name"],
name=module_args["name"],
appliance_id=module_args["appliance_id"],
+ apartment_id=module_args["apartment_id"],
),
availability_zone_name=module_args["availability_zone"],
region_name=module_args["region"],
@@ -899,6 +911,7 @@ def test_array_update(m_array_api, m_op_api, module_args, current_array):
fusion_array.main()
assert exc.value.changed
+ assert exc.value.id == current_array["id"]
# check api was called correctly
api_obj.get_array.assert_called_with(
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_az.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_az.py
index c49f958a2..d19e41827 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_az.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_az.py
@@ -22,6 +22,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
FailedOperationMock,
OperationMock,
SuccessfulOperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -139,6 +140,7 @@ def test_az_create(m_az_api, m_op_api):
fusion_az.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
api_obj.get_region.get_availability_zone(
availability_zone_name=module_args["name"],
@@ -186,6 +188,7 @@ def test_az_create_without_display_name(m_az_api, m_op_api):
fusion_az.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
api_obj.get_region.get_availability_zone(
availability_zone_name=module_args["name"],
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_hap.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_hap.py
index 6491c71da..258ca2034 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_hap.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_hap.py
@@ -23,6 +23,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
FailedOperationMock,
OperationMock,
SuccessfulOperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -295,6 +296,7 @@ def test_hap_create(m_hap_api, m_op_api, module_args, current_hap_list):
fusion_hap.main()
assert exc.value.changed is True
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.list_host_access_policies.assert_called_once_with()
@@ -341,6 +343,7 @@ def test_hap_create_without_display_name(
fusion_hap.main()
assert exc.value.changed is True
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.list_host_access_policies.assert_called_once_with()
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_info.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_info.py
index 784b550cd..c542cddc0 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_info.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_info.py
@@ -847,6 +847,7 @@ RESP_VS = purefusion.VolumeSnapshotList(
@patch.dict(os.environ, {"TZ": "UTC"})
+@patch.dict(os.environ, {"LC_TIME": "en_US.utf8"})
@patch("fusion.DefaultApi")
@patch("fusion.IdentityManagerApi")
@patch("fusion.ProtectionPoliciesApi")
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_nig.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_nig.py
index 3a7b7ca5c..e8a2eb0ac 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_nig.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_nig.py
@@ -22,6 +22,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
FailedOperationMock,
OperationMock,
SuccessfulOperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -236,6 +237,7 @@ def test_nig_create(m_nig_api, m_op_api):
fusion_nig.main()
assert exc.value.changed is True
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_network_interface_group.assert_called_once_with(
@@ -299,6 +301,7 @@ def test_nig_create_without_display_name(m_nig_api, m_op_api):
fusion_nig.main()
assert exc.value.changed is True
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_network_interface_group.assert_called_once_with(
@@ -362,6 +365,7 @@ def test_nig_create_without_gateway(m_nig_api, m_op_api):
fusion_nig.main()
assert exc.value.changed is True
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_network_interface_group.assert_called_once_with(
@@ -631,6 +635,7 @@ def test_nig_update(m_nig_api, m_op_api):
fusion_nig.main()
assert exc.value.changed is True
+ assert exc.value.id == current_nig.id
# check api was called correctly
api_obj.get_network_interface_group.assert_called_once_with(
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pg.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pg.py
index 2f0601e12..2a9419a8e 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pg.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pg.py
@@ -20,6 +20,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
AnsibleExitJson,
AnsibleFailJson,
OperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -221,6 +222,7 @@ def test_pg_create_ok(pg_api_init, op_api_init, module_args_present):
with pytest.raises(AnsibleExitJson) as excinfo:
fusion_pg.main()
assert excinfo.value.changed
+ assert excinfo.value.id == FAKE_RESOURCE_ID
pg_mock.get_placement_group.assert_called_with(
tenant_name="tenant1",
@@ -265,6 +267,7 @@ def test_pg_create_without_display_name_ok(
with pytest.raises(AnsibleExitJson) as excinfo:
fusion_pg.main()
assert excinfo.value.changed
+ assert excinfo.value.id == FAKE_RESOURCE_ID
pg_mock.get_placement_group.assert_called_with(
tenant_name="tenant1",
@@ -450,6 +453,7 @@ def test_pg_create_triggers_update_ok(pg_api_init, op_api_init):
with pytest.raises(AnsibleExitJson) as excinfo:
fusion_pg.main()
assert excinfo.value.changed
+ assert excinfo.value.id == FAKE_RESOURCE_ID
pg_mock.get_placement_group.assert_has_calls(
[
@@ -946,6 +950,7 @@ def test_pg_update_ok(pg_api_init, op_api_init, test_case):
with pytest.raises(AnsibleExitJson) as excinfo:
fusion_pg.main()
assert excinfo.value.changed
+ assert excinfo.value.id == test_case["current_state"].id
pg_mock.get_placement_group.assert_called_with(
tenant_name="tenant1",
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pp.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pp.py
index 519caea40..359d4ca7e 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pp.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_pp.py
@@ -20,6 +20,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
AnsibleExitJson,
AnsibleFailJson,
OperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -39,7 +40,7 @@ basic.AnsibleModule.fail_json = fail_json
def module_args_present():
return {
"name": "protection_policy1",
- "local_rpo": 43,
+ "local_rpo": "1H43M",
"local_retention": "2H",
"state": "present",
"issuer_id": "ABCD1234",
@@ -181,6 +182,7 @@ def test_pp_create_ok(pp_api_init, op_api_init, module_args_present):
with pytest.raises(AnsibleExitJson) as excinfo:
fusion_pp.main()
assert excinfo.value.changed
+ assert excinfo.value.id == FAKE_RESOURCE_ID
pp_mock.get_protection_policy.assert_called_with(
protection_policy_name="protection_policy1"
@@ -190,7 +192,7 @@ def test_pp_create_ok(pp_api_init, op_api_init, module_args_present):
name="protection_policy1",
display_name="some_display_name",
objectives=[
- purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.RPO(type="RPO", rpo="PT103M"),
purefusion.Retention(type="Retention", after="PT120M"),
],
)
@@ -220,6 +222,7 @@ def test_pp_create_without_display_name_ok(
with pytest.raises(AnsibleExitJson) as excinfo:
fusion_pp.main()
assert excinfo.value.changed
+ assert excinfo.value.id == FAKE_RESOURCE_ID
pp_mock.get_protection_policy.assert_called_with(
protection_policy_name="protection_policy1"
@@ -229,7 +232,7 @@ def test_pp_create_without_display_name_ok(
name="protection_policy1",
display_name="protection_policy1",
objectives=[
- purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.RPO(type="RPO", rpo="PT103M"),
purefusion.Retention(type="Retention", after="PT120M"),
],
)
@@ -274,7 +277,7 @@ def test_pp_create_exception(
name="protection_policy1",
display_name="protection_policy1",
objectives=[
- purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.RPO(type="RPO", rpo="PT103M"),
purefusion.Retention(type="Retention", after="PT120M"),
],
)
@@ -310,7 +313,7 @@ def test_pp_create_op_fails(pp_api_init, op_api_init, module_args_present):
name="protection_policy1",
display_name="protection_policy1",
objectives=[
- purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.RPO(type="RPO", rpo="PT103M"),
purefusion.Retention(type="Retention", after="PT120M"),
],
)
@@ -333,7 +336,7 @@ def test_pp_delete_ok(pp_api_init, op_api_init, module_args_absent):
display_name="protection_policy1_display_name",
self_link="test_self_link",
objectives=[
- purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.RPO(type="RPO", rpo="PT103M"),
purefusion.Retention(type="Retention", after="PT120M"),
],
)
@@ -385,7 +388,7 @@ def test_pp_delete_exception(
display_name="protection_policy1_display_name",
self_link="test_self_link",
objectives=[
- purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.RPO(type="RPO", rpo="PT103M"),
purefusion.Retention(type="Retention", after="PT120M"),
],
)
@@ -425,7 +428,7 @@ def test_pp_delete_op_fails(pp_api_init, op_api_init, module_args_absent):
display_name="protection_policy1_display_name",
self_link="test_self_link",
objectives=[
- purefusion.RPO(type="RPO", rpo="PT43M"),
+ purefusion.RPO(type="RPO", rpo="PT103M"),
purefusion.Retention(type="Retention", after="PT120M"),
],
)
@@ -459,7 +462,7 @@ def test_pp_present_not_changed(pp_api_init, op_api_init):
module_args = {
"name": "protection_policy1",
"display_name": "some_display_name",
- "local_rpo": 43,
+ "local_rpo": "43M",
"local_retention": "2H",
"state": "present",
"issuer_id": "ABCD1234",
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ra.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ra.py
index 6456fa7d7..d8cac74a5 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ra.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ra.py
@@ -20,6 +20,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
AnsibleExitJson,
AnsibleFailJson,
OperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -330,6 +331,7 @@ def test_ra_create_ok(ra_api_init, im_api_init, op_api_init, args_and_scope):
with pytest.raises(AnsibleExitJson) as excinfo:
fusion_ra.main()
assert excinfo.value.changed
+ assert excinfo.value.id == FAKE_RESOURCE_ID
ra_mock.list_role_assignments.assert_called_with(
role_name=module_args["role"], principal="principal1"
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_region.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_region.py
index 6b13adecf..42d14d56e 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_region.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_region.py
@@ -22,6 +22,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
FailedOperationMock,
OperationMock,
SuccessfulOperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -126,6 +127,7 @@ def test_region_create(m_region_api, m_op_api):
fusion_region.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
@@ -168,6 +170,7 @@ def test_region_create_without_display_name(m_region_api, m_op_api):
fusion_region.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
@@ -354,6 +357,7 @@ def test_region_update(m_region_api, m_op_api):
fusion_region.main()
assert exc.value.changed
+ assert exc.value.id == current_region["id"]
# check api was called correctly
api_obj.get_region.assert_called_once_with(region_name=module_args["name"])
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_sc.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_sc.py
index 1a2db191c..4d44e7fcb 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_sc.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_sc.py
@@ -22,6 +22,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
FailedOperationMock,
OperationMock,
SuccessfulOperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -167,6 +168,7 @@ def test_sc_create(
fusion_sc.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_storage_class.assert_called_once_with(
@@ -224,6 +226,7 @@ def test_sc_create_without_display_name(m_sc_api, m_op_api):
fusion_sc.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_storage_class.assert_called_once_with(
@@ -608,6 +611,7 @@ def test_sc_update(m_sc_api, m_op_api):
fusion_sc.main()
assert exc.value.changed
+ assert exc.value.id == current_sc["id"]
# check api was called correctly
api_obj.get_storage_class.assert_called_once_with(
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_se.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_se.py
index a071190db..9d9559c12 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_se.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_se.py
@@ -22,6 +22,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
FailedOperationMock,
OperationMock,
SuccessfulOperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -285,6 +286,7 @@ def test_se_create_iscsi(m_se_api, m_op_api, module_args):
fusion_se.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_storage_endpoint.assert_called_once_with(
@@ -341,6 +343,7 @@ def test_se_create_cbs_azure_iscsi(m_se_api, m_op_api, module_args):
fusion_se.main()
assert exc.value.changed is True
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_storage_endpoint.assert_called_once_with(
@@ -395,6 +398,7 @@ def test_se_create_without_display_name(m_se_api, m_op_api, module_args):
fusion_se.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_storage_endpoint.assert_called_once_with(
@@ -610,6 +614,7 @@ def test_se_update(m_se_api, m_op_api, module_args, current_se):
fusion_se.main()
assert exc.value.changed
+ assert exc.value.id == current_se["id"]
# check api was called correctly
api_obj.get_storage_endpoint.assert_called_once_with(
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ss.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ss.py
index d784b1a52..f1514b8e6 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ss.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ss.py
@@ -22,6 +22,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
FailedOperationMock,
OperationMock,
SuccessfulOperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -139,6 +140,7 @@ def test_ss_create(m_ss_api, m_op_api):
fusion_ss.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_storage_service.assert_called_once_with(
@@ -186,6 +188,7 @@ def test_ss_create_without_display_name(m_ss_api, m_op_api):
fusion_ss.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_storage_service.assert_called_once_with(
@@ -434,6 +437,7 @@ def test_ss_update(m_ss_api, m_op_api):
fusion_ss.main()
assert exc.value.changed
+ assert exc.value.id == current_ss["id"]
# check api was called correctly
api_obj.get_storage_service.assert_called_once_with(
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_tenant.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_tenant.py
index bb0521b01..11cd71171 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_tenant.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_tenant.py
@@ -22,6 +22,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
FailedOperationMock,
OperationMock,
SuccessfulOperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -126,6 +127,7 @@ def test_tenant_create(m_tenant_api, m_op_api):
fusion_tenant.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
@@ -169,6 +171,7 @@ def test_tenant_create_without_display_name(m_tenant_api, m_op_api):
fusion_tenant.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
@@ -359,6 +362,7 @@ def test_tenant_update(m_tenant_api, m_op_api):
fusion_tenant.main()
assert exc.value.changed
+ assert exc.value.id == current_tenant["id"]
# check api was called correctly
api_obj.get_tenant.assert_called_once_with(tenant_name=module_args["name"])
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ts.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ts.py
index 0d9cbb25a..0e1260858 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ts.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_ts.py
@@ -22,6 +22,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
FailedOperationMock,
OperationMock,
SuccessfulOperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -138,6 +139,7 @@ def test_ts_create(m_ts_api, m_op_api):
fusion_ts.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_tenant_space.assert_called_once_with(
@@ -186,6 +188,7 @@ def test_ts_create_without_display_name(m_ts_api, m_op_api):
fusion_ts.main()
assert exc.value.changed
+ assert exc.value.id == FAKE_RESOURCE_ID
# check api was called correctly
api_obj.get_tenant_space.assert_called_once_with(
@@ -399,6 +402,7 @@ def test_ts_update(m_ts_api, m_op_api):
fusion_ts.main()
assert exc.value.changed
+ assert exc.value.id == current_ts["id"]
# check api was called correctly
api_obj.get_tenant_space.assert_called_once_with(
diff --git a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_volume.py b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_volume.py
index 592bda32e..43f69666e 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/test_fusion_volume.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/test_fusion_volume.py
@@ -7,7 +7,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-from unittest.mock import MagicMock, patch
+from unittest.mock import MagicMock, call, patch
import fusion as purefusion
import pytest
@@ -23,6 +23,7 @@ from ansible_collections.purestorage.fusion.tests.functional.utils import (
AnsibleFailJson,
OperationMock,
SuccessfulOperationMock,
+ FAKE_RESOURCE_ID,
exit_json,
fail_json,
set_module_args,
@@ -126,7 +127,7 @@ def destroyed_volume(volume):
),
(
"size",
- "missing required arguments: size",
+ "Either `size`, `source_volume` or `source_snapshot` parameter is required when creating a volume.",
),
],
)
@@ -164,6 +165,18 @@ def test_module_fails_on_missing_parameters(
{"size": "1K"},
"Size is not within the required range",
),
+ (
+ {"source_volume": "vol_name"},
+ "parameters are mutually exclusive: source_volume|source_snapshot|size",
+ ),
+ (
+ {"source_snapshot": "snap_name"},
+ "parameters are mutually exclusive: source_volume|source_snapshot|size",
+ ),
+ (
+ {"source_volume_snapshot": "vol_snap_name"},
+ "parameters are required together: source_snapshot, source_volume_snapshot",
+ ),
],
)
def test_module_fails_on_incorrect_parameters(
@@ -216,6 +229,8 @@ def test_volume_create_successfully(mock_volumes_api, mock_operations_api, modul
with pytest.raises(AnsibleExitJson) as exception:
fusion_volume.main()
assert exception.value.changed is True
+ assert exception.value.id == FAKE_RESOURCE_ID
+
volumes_api.get_volume.assert_called_with(
volume_name=module_args["name"],
tenant_name=module_args["tenant"],
@@ -238,6 +253,90 @@ def test_volume_create_successfully(mock_volumes_api, mock_operations_api, modul
@patch("fusion.OperationsApi")
@patch("fusion.VolumesApi")
+def test_volume_create_from_volume_successfully(
+ mock_volumes_api, mock_operations_api, module_args
+):
+ del module_args["size"]
+ module_args["source_volume"] = "source_volume_name"
+
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(side_effect=purefusion.rest.ApiException)
+ volumes_api.create_volume = MagicMock(return_value=OperationMock(1))
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(AnsibleExitJson) as exception:
+ fusion_volume.main()
+ assert exception.value.changed is True
+ assert exception.value.id == FAKE_RESOURCE_ID
+ volumes_api.get_volume.assert_called_with(
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ volumes_api.create_volume.assert_called_once_with(
+ purefusion.VolumePost(
+ source_link=f"/tenants/{module_args['tenant']}/tenant-spaces/{module_args['tenant_space']}/volumes/{module_args['source_volume']}",
+ storage_class=module_args["storage_class"],
+ placement_group=module_args["placement_group"],
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ protection_policy=module_args["protection_policy"],
+ ),
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ operations_api.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+def test_volume_create_from_volume_snapshot_successfully(
+ mock_volumes_api, mock_operations_api, module_args
+):
+ del module_args["size"]
+ module_args["source_snapshot"] = "source_snapshot_name"
+ module_args["source_volume_snapshot"] = "source_volume_snapshot_name"
+
+ operations_api = purefusion.OperationsApi()
+ volumes_api = purefusion.VolumesApi()
+ volumes_api.get_volume = MagicMock(side_effect=purefusion.rest.ApiException)
+ volumes_api.create_volume = MagicMock(return_value=OperationMock(1))
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_volumes_api.return_value = volumes_api
+ mock_operations_api.return_value = operations_api
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(AnsibleExitJson) as exception:
+ fusion_volume.main()
+ assert exception.value.changed is True
+ assert exception.value.id == FAKE_RESOURCE_ID
+ volumes_api.get_volume.assert_called_with(
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ volumes_api.create_volume.assert_called_once_with(
+ purefusion.VolumePost(
+ source_link=f"/tenants/{module_args['tenant']}/tenant-spaces/{module_args['tenant_space']}/snapshots/"
+ f"{module_args['source_snapshot']}/volume-snapshots/{module_args['source_volume_snapshot']}",
+ storage_class=module_args["storage_class"],
+ placement_group=module_args["placement_group"],
+ name=module_args["name"],
+ display_name=module_args["display_name"],
+ protection_policy=module_args["protection_policy"],
+ ),
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ operations_api.get_operation.assert_called_once_with(1)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
def test_volume_create_without_display_name_successfully(
mock_volumes_api, mock_operations_api, module_args
):
@@ -254,6 +353,7 @@ def test_volume_create_without_display_name_successfully(
with pytest.raises(AnsibleExitJson) as exception:
fusion_volume.main()
assert exception.value.changed is True
+ assert exception.value.id == FAKE_RESOURCE_ID
volumes_api.get_volume.assert_called_with(
volume_name=module_args["name"],
tenant_name=module_args["tenant"],
@@ -399,6 +499,7 @@ def test_volume_update_with_state_present_executed_correctly(
with pytest.raises(AnsibleExitJson) as exception:
fusion_volume.main()
assert exception.value.changed is True
+ assert exception.value.id == volume["id"]
volumes_api.get_volume.assert_called_with(
volume_name=module_args["name"],
tenant_name=module_args["tenant"],
@@ -447,6 +548,7 @@ def test_volume_update_with_state_absent_executed_correctly(
with pytest.raises(AnsibleExitJson) as exception:
fusion_volume.main()
assert exception.value.changed is True
+ assert exception.value.id == volume["id"]
volumes_api.get_volume.assert_called_with(
volume_name=module_args["name"],
tenant_name=module_args["tenant"],
@@ -713,3 +815,47 @@ def test_volume_delete_operation_throws_exception(
tenant_space_name=absent_module_args["tenant_space"],
)
operations_api.get_operation.assert_called_once_with(2)
+
+
+@patch("fusion.OperationsApi")
+@patch("fusion.VolumesApi")
+def test_module_updates_on_empty_array_of_haps(
+ mock_volumes_api, mock_operations_api, module_args, volume
+):
+ volumes_api = purefusion.VolumesApi()
+ operations_api = purefusion.OperationsApi()
+ volumes_api.get_volume = MagicMock(return_value=purefusion.Volume(**volume))
+ volumes_api.update_volume = MagicMock(return_value=OperationMock(1))
+ operations_api.get_operation = MagicMock(return_value=SuccessfulOperationMock)
+ mock_operations_api.return_value = operations_api
+ mock_volumes_api.return_value = volumes_api
+ module_args.update({"state": "absent", "host_access_policies": []})
+ set_module_args(module_args)
+ # run module
+ with pytest.raises(AnsibleExitJson) as exception:
+ fusion_volume.main()
+ assert exception.value.changed is True
+ assert exception.value.id == volume["id"]
+ volumes_api.get_volume.assert_called_with(
+ volume_name=module_args["name"],
+ tenant_name=module_args["tenant"],
+ tenant_space_name=module_args["tenant_space"],
+ )
+ volumes_api.update_volume.assert_has_calls(
+ [
+ call(
+ purefusion.VolumePatch(
+ host_access_policies=purefusion.NullableString(",".join([]))
+ ),
+ volume_name=volume["name"],
+ tenant_name=volume["tenant"],
+ tenant_space_name=volume["tenant_space"],
+ ),
+ call(
+ purefusion.VolumePatch(destroyed=purefusion.NullableBoolean(True)),
+ volume_name=volume["name"],
+ tenant_name=volume["tenant"],
+ tenant_space_name=volume["tenant_space"],
+ ),
+ ]
+ )
diff --git a/ansible_collections/purestorage/fusion/tests/functional/utils.py b/ansible_collections/purestorage/fusion/tests/functional/utils.py
index 24d6f0328..53e501bc0 100644
--- a/ansible_collections/purestorage/fusion/tests/functional/utils.py
+++ b/ansible_collections/purestorage/fusion/tests/functional/utils.py
@@ -7,6 +7,11 @@ from dataclasses import dataclass
from ansible.module_utils import basic
from ansible.module_utils.common.text.converters import to_bytes
+from ansible_collections.purestorage.fusion.tests.helpers import (
+ OperationResultsDict,
+)
+
+FAKE_RESOURCE_ID = "fake-id-12345"
@dataclass
@@ -20,6 +25,9 @@ class OperationMock:
self.status = "Pending"
elif success:
self.status = "Succeeded"
+ self.result = OperationResultsDict(
+ {"resource": OperationResultsDict({"id": FAKE_RESOURCE_ID})}
+ )
else:
self.status = "Failed"
self.id = id
@@ -30,6 +38,9 @@ class SuccessfulOperationMock:
Mock object for successful operation. This object is returned by mocked Operation API if the operation was successful.
"""
+ result = OperationResultsDict(
+ {"resource": OperationResultsDict({"id": FAKE_RESOURCE_ID})}
+ )
status = "Succeeded"
@@ -65,6 +76,10 @@ class AnsibleExitJson(Exception):
return self.kwargs["changed"]
@property
+ def id(self):
+ return self.kwargs["id"]
+
+ @property
def fusion_info(self):
return self.kwargs["fusion_info"] if "fusion_info" in self.kwargs else None
diff --git a/ansible_collections/purestorage/fusion/tests/helpers.py b/ansible_collections/purestorage/fusion/tests/helpers.py
index 40d98cf0e..76d51b6f7 100644
--- a/ansible_collections/purestorage/fusion/tests/helpers.py
+++ b/ansible_collections/purestorage/fusion/tests/helpers.py
@@ -27,3 +27,11 @@ class ApiExceptionsMockGenerator:
def create_not_found():
status = HTTPStatus.NOT_FOUND
return purefusion.rest.ApiException(status=status, reason=status.phrase)
+
+
+class OperationResultsDict(dict):
+ """dot.notation access to dictionary attributes"""
+
+ __getattr__ = dict.get
+ __setattr__ = dict.__setitem__
+ __delattr__ = dict.__delitem__
diff --git a/ansible_collections/purestorage/fusion/tests/unit/mocks/operation_mock.py b/ansible_collections/purestorage/fusion/tests/unit/mocks/operation_mock.py
index 99487ddfa..a3a70c67d 100644
--- a/ansible_collections/purestorage/fusion/tests/unit/mocks/operation_mock.py
+++ b/ansible_collections/purestorage/fusion/tests/unit/mocks/operation_mock.py
@@ -8,6 +8,9 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
from enum import Enum
+from ansible_collections.purestorage.fusion.tests.helpers import (
+ OperationResultsDict,
+)
class OperationStatus(str, Enum):
@@ -18,7 +21,16 @@ class OperationStatus(str, Enum):
class OperationMock:
- def __init__(self, id, status, retry_in=1):
+ def __init__(
+ self,
+ id,
+ status,
+ result=OperationResultsDict(
+ {"resource": OperationResultsDict({"id": "fake-id"})}
+ ),
+ retry_in=1,
+ ):
self.id = id
self.status = status
self.retry_in = retry_in
+ self.result = result
diff --git a/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_parsing.py b/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_parsing.py
index 7e2a1cc78..230d0ff01 100644
--- a/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_parsing.py
+++ b/ansible_collections/purestorage/fusion/tests/unit/module_utils/test_parsing.py
@@ -7,13 +7,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
+import pytest
from ansible_collections.purestorage.fusion.plugins.module_utils.parsing import (
- parse_number_with_metric_suffix,
parse_minutes,
+ parse_number_with_metric_suffix,
)
-import pytest
-
class MockException(Exception):
pass
@@ -83,17 +82,37 @@ def test_parsing_invalid_number():
def test_parsing_valid_time_period():
module = MockModule()
+ assert parse_minutes(module, "0") == 0
+ assert parse_minutes(module, "00") == 0
+ assert parse_minutes(module, "00M") == 0
assert parse_minutes(module, "10") == 10
- assert parse_minutes(module, "2h") == 120
- assert parse_minutes(module, "2H") == 120
+ assert parse_minutes(module, "015") == 15
+ assert parse_minutes(module, "0023") == 23
+ assert parse_minutes(module, "0H10M") == 10
+ assert parse_minutes(module, "2h") == 2 * 60
+ assert parse_minutes(module, "2H") == 2 * 60
+ assert parse_minutes(module, "02h") == 2 * 60
+ assert parse_minutes(module, "02H") == 2 * 60
+ assert parse_minutes(module, "002h") == 2 * 60
+ assert parse_minutes(module, "002H") == 2 * 60
+ assert parse_minutes(module, "0D10H10M") == 10 * 60 + 10
assert parse_minutes(module, "14D") == 14 * 24 * 60
+ assert parse_minutes(module, "014D") == 14 * 24 * 60
+ assert parse_minutes(module, "0000014D") == 14 * 24 * 60
assert parse_minutes(module, "1W") == 7 * 24 * 60
+ assert parse_minutes(module, "01W") == 7 * 24 * 60
+ assert parse_minutes(module, "01Y0H10M") == 365 * 24 * 60 + 10
assert parse_minutes(module, "12Y") == 12 * 365 * 24 * 60
+ assert parse_minutes(module, "012Y") == 12 * 365 * 24 * 60
assert (
parse_minutes(module, "10Y20W30D40H50M")
== 10 * 365 * 24 * 60 + 20 * 7 * 24 * 60 + 30 * 24 * 60 + 40 * 60 + 50
)
assert (
+ parse_minutes(module, "010Y20W30D40H50M")
+ == 10 * 365 * 24 * 60 + 20 * 7 * 24 * 60 + 30 * 24 * 60 + 40 * 60 + 50
+ )
+ assert (
parse_minutes(module, "10Y20W30D40H")
== 10 * 365 * 24 * 60 + 20 * 7 * 24 * 60 + 30 * 24 * 60 + 40 * 60
)
@@ -110,6 +129,10 @@ def test_parsing_valid_time_period():
assert parse_minutes(module, "40H50M") == 40 * 60 + 50
assert parse_minutes(module, "30D50M") == 30 * 24 * 60 + 50
assert parse_minutes(module, "20W40H") == 20 * 7 * 24 * 60 + 40 * 60
+ assert (
+ parse_minutes(module, "01W000010D10H10M")
+ == 7 * 24 * 60 + 10 * 24 * 60 + 10 * 60 + 10
+ )
def test_parsing_invalid_time_period():
@@ -123,16 +146,8 @@ def test_parsing_invalid_time_period():
with pytest.raises(MockException):
assert parse_minutes(module, "1V")
with pytest.raises(MockException):
- assert parse_minutes(module, "0M")
- with pytest.raises(MockException):
- assert parse_minutes(module, "0H10M")
- with pytest.raises(MockException):
- assert parse_minutes(module, "0H10M")
+ assert parse_minutes(module, "1v")
with pytest.raises(MockException):
- assert parse_minutes(module, "0D10H10M")
+ assert parse_minutes(module, "10M2H")
with pytest.raises(MockException):
- assert parse_minutes(module, "01W10D10H10M")
- with pytest.raises(MockException):
- assert parse_minutes(module, "01Y0H10M")
- with pytest.raises(MockException):
- assert parse_minutes(module, "1V")
+ assert parse_minutes(module, "0H10M01Y")
diff --git a/ansible_collections/purestorage/fusion/tests/unit/modules/test_fusion_az.py b/ansible_collections/purestorage/fusion/tests/unit/modules/test_fusion_az.py
index a384506d8..ee300638e 100644
--- a/ansible_collections/purestorage/fusion/tests/unit/modules/test_fusion_az.py
+++ b/ansible_collections/purestorage/fusion/tests/unit/modules/test_fusion_az.py
@@ -81,7 +81,9 @@ class TestCreateAZ:
azone, region_name=module_params["region"]
)
await_operation_mock.assert_called_once_with(fusion_mock, op)
- moduleMock.exit_json.assert_called_once_with(changed=True)
+ moduleMock.exit_json.assert_called_once_with(
+ changed=True, id=op.result.resource.id
+ )
@patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
@patch(f"{current_module}.fusion_az.await_operation")
@@ -113,7 +115,7 @@ class TestCreateAZ:
# Assertions
mock_az_api_obj.create_availability_zone.assert_not_called()
await_operation_mock.assert_not_called()
- moduleMock.exit_json.assert_called_once_with(changed=True)
+ moduleMock.exit_json.assert_called_once_with(changed=True, id=None)
@patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
@patch(f"{current_module}.fusion_az.await_operation")
@@ -151,7 +153,9 @@ class TestCreateAZ:
azone, region_name=module_params["region"]
)
await_operation_mock.assert_called_once_with(fusion_mock, op)
- moduleMock.exit_json.assert_called_once_with(changed=True)
+ moduleMock.exit_json.assert_called_once_with(
+ changed=True, id=op.result.resource.id
+ )
@patch(f"{current_module}.fusion_az.purefusion.AvailabilityZonesApi.__new__")
@patch(f"{current_module}.fusion_az.await_operation")