summaryrefslogtreecommitdiffstats
path: root/ansible_collections/netapp/ontap/plugins
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 12:04:41 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 12:04:41 +0000
commit975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
tree89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/netapp/ontap/plugins
parentInitial commit. (diff)
downloadansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.tar.xz
ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.zip
Adding upstream version 7.7.0+dfsg.upstream/7.7.0+dfsg
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/netapp/ontap/plugins')
-rw-r--r--ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py268
-rw-r--r--ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_from_seconds.yml33
-rw-r--r--ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_to_seconds.yml30
-rw-r--r--ansible_collections/netapp/ontap/plugins/filter/na_filter_iso8601.py53
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/netapp.py1134
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py41
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/netapp_ipaddress.py134
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py619
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py180
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/rest_generic.py101
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/rest_owning_resource.py26
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py137
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/rest_user.py49
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/rest_volume.py61
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/rest_vserver.py61
-rw-r--r--ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py133
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py328
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory_domain_controllers.py221
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py1121
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py449
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py188
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_bgp_peer_group.py356
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py690
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py224
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py229
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py563
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py351
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group.py235
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group_member.py292
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user.py244
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_modify.py235
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_set_password.py162
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py619
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py776
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py151
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py427
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py290
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_debug.py258
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_disk_options.py175
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py386
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py368
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_domain_tunnel.py168
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py414
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_destination.py199
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_filter.py250
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py274
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py745
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py275
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsd.py173
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsp.py171
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdspt.py257
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdss.py123
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py363
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions.py760
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions_acl.py495
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py325
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py873
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py672
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_event.py444
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_ext_engine.py520
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_policy.py378
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_scope.py516
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_status.py283
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py697
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py221
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py1825
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py1457
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py286
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py329
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py350
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py477
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_interface.py225
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py438
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py221
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py550
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py708
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_local_hosts.py197
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_log_forward.py312
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py307
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py1270
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py221
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py356
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map_reporting_nodes.py274
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py185
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py171
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py223
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py210
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_mappings.py286
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py250
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py392
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py546
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py309
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py354
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py426
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py367
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py700
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py265
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py355
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py288
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py271
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp_key.py159
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py250
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py256
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py463
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py360
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_partitions.py415
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py583
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py423
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_publickey.py302
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py323
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py579
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py462
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py257
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py890
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py156
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py1138
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py393
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_buckets.py586
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_groups.py234
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_policies.py246
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_services.py219
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_users.py193
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py468
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_config.py285
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_ca_certificate.py184
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_config.py127
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_policy.py458
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py640
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ssh.py197
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_policy.py339
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py391
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_snaplock_clock.py177
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py1749
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py1038
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py437
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py742
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py235
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py126
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py722
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py254
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_auto_giveback.py248
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_failover.py208
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py939
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py163
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py303
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py459
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py330
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py854
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py522
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py2902
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py353
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py355
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_efficiency.py715
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py227
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py168
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py524
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py407
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py297
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_audit.py373
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py310
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py446
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer_permissions.py201
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py402
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py194
-rw-r--r--ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py315
165 files changed, 67066 insertions, 0 deletions
diff --git a/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py b/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py
new file mode 100644
index 000000000..6acfea61d
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py
@@ -0,0 +1,268 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018-2022, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
+'''
+ # Documentation fragment for ONTAP (na_ontap) that contains REST
+ NA_ONTAP = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the ONTAP instance.
+ type: str
+ required: true
+ username:
+ description:
+ - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
+ - For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
+ - Two authentication methods are supported
+ - 1. basic authentication, using username and password,
+ - 2. SSL certificate authentication, using a ssl client cert file, and optionally a private key file.
+ - To use a certificate, the certificate must have been installed in the ONTAP cluster, and cert authentication must have been enabled.
+ type: str
+ aliases: [ user ]
+ password:
+ description:
+ - Password for the specified user.
+ type: str
+ aliases: [ pass ]
+ cert_filepath:
+ description:
+ - path to SSL client cert file (.pem).
+ - not supported with python 2.6.
+ type: str
+ version_added: 20.6.0
+ key_filepath:
+ description:
+ - path to SSL client key file.
+ type: str
+ version_added: 20.6.0
+ https:
+ description:
+ - Enable and disable https.
+ - Ignored when using REST as only https is supported.
+ - Ignored when using SSL certificate authentication as it requires SSL.
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+      - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ http_port:
+ description:
+ - Override the default port (80 or 443) with this port
+ type: int
+ ontapi:
+ description:
+ - The ontap api version to use
+ type: int
+ use_rest:
+ description:
+ - Whether to use REST or ZAPI.
+ - always -- will always use the REST API if the module supports REST.
+ A warning is issued if the module does not support REST.
+ An error is issued if a module option is not supported in REST.
+ - never -- will always use ZAPI if the module supports ZAPI. An error may be issued if a REST option is not supported in ZAPI.
+ - auto -- will try to use the REST API if the module supports REST and modules options are supported. Reverts to ZAPI otherwise.
+ default: auto
+ type: str
+ feature_flags:
+ description:
+ - Enable or disable a new feature.
+ - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility.
+ - Supported keys and values are subject to change without notice. Unknown keys are ignored.
+ type: dict
+ version_added: "20.5.0"
+ force_ontap_version:
+ description:
+ - Override the cluster ONTAP version when using REST.
+ - The behavior is undefined if the version does not match the target cluster.
+ - This is provided as a work-around when the cluster version cannot be read because of permission issues.
+ See https://github.com/ansible-collections/netapp.ontap/wiki/Known-issues.
+ - This should be in the form 9.10 or 9.10.1 with each element being an integer number.
+ - When C(use_rest) is set to auto, this may force a switch to ZAPI based on the version and platform capabilities.
+ - Ignored with ZAPI.
+ type: str
+ version_added: "21.23.0"
+requirements:
+ - Ansible 2.9 or later - 2.12 or later is recommended.
+ - Python3 - 3.9 or later is recommended.
+ - When using ZAPI, netapp-lib 2018.11.13 or later (install using 'pip install netapp-lib'),
+ netapp-lib 2020.3.12 is strongly recommended as it provides better error reporting for connection issues
+ - a physical or virtual clustered Data ONTAP system, the modules support Data ONTAP 9.1 and onward,
+ REST support requires ONTAP 9.6 or later
+
+notes:
+ - The modules prefixed with na_ontap are built to support the ONTAP storage platform.
+ - https is enabled by default and recommended.
+ To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'
+ '''
+
+ # Documentation fragment for ONTAP (na_ontap) that are ZAPI ONLY
+ NA_ONTAP_ZAPI = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the ONTAP instance.
+ type: str
+ required: true
+ username:
+ description:
+ - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
+ - For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
+ - Two authentication methods are supported
+ - 1. basic authentication, using username and password,
+ - 2. SSL certificate authentication, using a ssl client cert file, and optionally a private key file.
+ - To use a certificate, the certificate must have been installed in the ONTAP cluster, and cert authentication must have been enabled.
+ type: str
+ aliases: [ user ]
+ password:
+ description:
+ - Password for the specified user.
+ type: str
+ aliases: [ pass ]
+ cert_filepath:
+ description:
+ - path to SSL client cert file (.pem).
+ - not supported with python 2.6.
+ type: str
+ version_added: 20.6.0
+ key_filepath:
+ description:
+ - path to SSL client key file.
+ type: str
+ version_added: 20.6.0
+ https:
+ description:
+ - Enable and disable https.
+ - Ignored when using REST as only https is supported.
+ - Ignored when using SSL certificate authentication as it requires SSL.
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+      - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ http_port:
+ description:
+ - Override the default port (80 or 443) with this port
+ type: int
+ ontapi:
+ description:
+ - The ontap api version to use
+ type: int
+ use_rest:
+ description:
+      - This module only supports ZAPI and cannot be switched to REST
+ - never -- will always use ZAPI if the module supports ZAPI. An error may be issued if a REST option is not supported in ZAPI.
+ - auto -- will always use ZAPI.
+ default: never
+ type: str
+ feature_flags:
+ description:
+ - Enable or disable a new feature.
+ - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility.
+ - Supported keys and values are subject to change without notice. Unknown keys are ignored.
+ type: dict
+ version_added: "20.5.0"
+requirements:
+ - Ansible 2.9 or later - 2.12 or later is recommended.
+ - Python3 - 3.9 or later is recommended.
+ - When using ZAPI, netapp-lib 2018.11.13 or later (install using 'pip install netapp-lib'),
+ netapp-lib 2020.3.12 is strongly recommended as it provides better error reporting for connection issues
+ - a physical or virtual clustered Data ONTAP system, the modules support Data ONTAP 9.1 and onward,
+ REST support requires ONTAP 9.6 or later
+
+notes:
+ - The modules prefixed with na_ontap are built to support the ONTAP storage platform.
+ - https is enabled by default and recommended.
+ To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'
+ '''
+
+ # Documentation fragment for ONTAP (na_ontap) peer options
+ NA_ONTAP_PEER = r'''
+options:
+ peer_options:
+ version_added: 21.8.0
+ description:
+ - IP address and connection options for the peer system.
+      - If any of these options is not specified, the corresponding source option is used.
+ type: dict
+ suboptions:
+ hostname:
+ description:
+ - The hostname or IP address of the ONTAP instance.
+ type: str
+ required: true
+ username:
+ description:
+ - Username when using basic authentication.
+ type: str
+ aliases: [ user ]
+ password:
+ description:
+ - Password for the specified user.
+ type: str
+ aliases: [ pass ]
+ cert_filepath:
+ description:
+ - path to SSL client cert file (.pem).
+ type: str
+ key_filepath:
+ description:
+ - path to SSL client key file.
+ type: str
+ https:
+ description:
+ - Enable and disable https.
+ type: bool
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+          - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ http_port:
+ description:
+ - Override the default port (80 or 443) with this port
+ type: int
+ ontapi:
+ description:
+ - The ontap api version to use
+ type: int
+ use_rest:
+ description:
+ - REST API if supported by the target system for all the resources and attributes the module requires. Otherwise will revert to ZAPI.
+ - always -- will always use the REST API
+ - never -- will always use the ZAPI
+ - auto -- will try to use the REST Api
+ type: str
+ force_ontap_version:
+ description:
+ - Override the cluster ONTAP version when using REST.
+ - The behavior is undefined if the version does not match the target cluster.
+ - This is provided as a work-around when the cluster version cannot be read because of permission issues.
+ See https://github.com/ansible-collections/netapp.ontap/wiki/Known-issues.
+ - This should be in the form 9.10 or 9.10.1 with each element being an integer number.
+ - When C(use_rest) is set to auto, this may force a switch to ZAPI based on the version and platform capabilities.
+ - Ignored with ZAPI.
+ type: str
+ version_added: "21.23.0"
+'''
diff --git a/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_from_seconds.yml b/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_from_seconds.yml
new file mode 100644
index 000000000..e605469ce
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_from_seconds.yml
@@ -0,0 +1,33 @@
+DOCUMENTATION:
+ name: iso8601_duration_from_seconds
+ author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+ version_added: 21.24.0
+  short_description: Encode seconds as an ISO 8601 duration string
+ description:
+    - Encode seconds as an ISO 8601 duration string.
+ positional: _input
+ options:
+ _input:
+ description: A number of seconds to encode.
+ type: float
+ required: true
+ format:
+ description: An optional format string for isodate.duration_isoformat. Defaults to P%P.
+ type: string
+ notes:
+ - requires isodate and datetime python modules.
+ - set filter_plugins path to <installation_path>/ansible_collections/netapp/ontap/plugins/filter in ansible.cfg.
+ - documentation can be generated locally using a version of ansible-doc (2.14) that supports '-t filter'
+    - ansible-doc -t filter netapp.ontap.iso8601_duration_from_seconds
+
+EXAMPLES: |
+ # Encode seconds
+ iso_duration: "{{ 59579864 | netapp.ontap.iso8601_duration_from_seconds }}"
+
+ # Encode 'duration_in_seconds' variable
+ iso_duration: "{{ duration_in_seconds | netapp.ontap.iso8601_duration_from_seconds }}"
+
+RETURN:
+ _value:
+ description: A string representing the duration in ISO 8601 format.
+ type: string
diff --git a/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_to_seconds.yml b/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_to_seconds.yml
new file mode 100644
index 000000000..1fd796938
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_to_seconds.yml
@@ -0,0 +1,30 @@
+DOCUMENTATION:
+ name: iso8601_duration_to_seconds
+ author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+ version_added: 21.24.0
+  short_description: Decode an ISO 8601 duration string as seconds
+ description:
+    - Decode an ISO 8601 duration string as seconds
+ positional: _input
+ options:
+ _input:
+ description: A string to decode
+ type: string
+ required: true
+ notes:
+ - requires isodate and datetime python modules.
+ - set filter_plugins path to <installation_path>/ansible_collections/netapp/ontap/plugins/filter in ansible.cfg.
+ - documentation can be generated locally using a version of ansible-doc (2.14) that supports '-t filter'
+ - ansible-doc -t filter netapp.ontap.iso8601_duration_to_seconds
+
+EXAMPLES: |
+ # Decode a string
+ duration_in_seconds: "{{ 'P689DT13H57M44S' | netapp.ontap.iso8601_duration_to_seconds }}"
+
+ # Decode 'iso_duration' variable
+ duration_in_seconds: "{{ iso_duration | netapp.ontap.iso8601_duration_to_seconds }}"
+
+RETURN:
+ _value:
+ description: A float representing the number of seconds. The fractional part may represent milliseconds.
+ type: float
diff --git a/ansible_collections/netapp/ontap/plugins/filter/na_filter_iso8601.py b/ansible_collections/netapp/ontap/plugins/filter/na_filter_iso8601.py
new file mode 100644
index 000000000..7494e3878
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/filter/na_filter_iso8601.py
@@ -0,0 +1,53 @@
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Filters for ISO 8601 durations
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils._text import to_native
+
+IMPORT_ERROR = None
+try:
+ import isodate
+except ImportError as exc:
+ IMPORT_ERROR = to_native(exc)
+
+
+class FilterModule:
+ ''' Ansible jinja2 filters '''
+
+ def filters(self):
+ return {
+ 'iso8601_duration_to_seconds': iso8601_duration_to_seconds,
+ 'iso8601_duration_from_seconds': iso8601_duration_from_seconds,
+ }
+
+
+def check_for_import():
+ if IMPORT_ERROR:
+ raise AnsibleFilterError("isodate python package is required: %s" % IMPORT_ERROR)
+
+
+def iso8601_duration_to_seconds(duration):
+ check_for_import()
+ try:
+ dt_duration = isodate.parse_duration(duration)
+ except Exception as exc:
+ raise AnsibleFilterError("iso8601_duration_to_seconds - error: %s - expecting PnnYnnMnnDTnnHnnMnnS, received: %s" % (to_native(exc), duration))
+ return dt_duration.total_seconds()
+
+
+def iso8601_duration_from_seconds(seconds, format=None):
+ check_for_import()
+ try:
+ duration = isodate.Duration(seconds=seconds)
+ iso8601_duration = isodate.duration_isoformat(duration, format=isodate.D_DEFAULT if format is None else format)
+ except Exception as exc:
+ raise AnsibleFilterError("iso8601_duration_from_seconds - error: %s - received: %s" % (to_native(exc), seconds))
+ return iso8601_duration
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py b/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py
new file mode 100644
index 000000000..28d9428a2
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py
@@ -0,0 +1,1134 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
+# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
+# Copyright (c) 2017-2023, NetApp, Inc
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'''
+netapp.py
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import logging
+import os
+import ssl
+import time
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+
+try:
+ from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+except ImportError:
+ ANSIBLE_VERSION = 'unknown'
+
COLLECTION_VERSION = "22.7.0"
# the remaining '%s' placeholder is filled in with the module name when building headers
CLIENT_APP_VERSION = "%s/%s" % ("%s", COLLECTION_VERSION)
IMPORT_EXCEPTION = None

# netapp_lib provides the ZAPI transport; keep the import error for later reporting
try:
    from netapp_lib.api.zapi import zapi
    HAS_NETAPP_LIB = True
except ImportError as exc:
    HAS_NETAPP_LIB = False
    IMPORT_EXCEPTION = exc

# requests is required by the REST wrapper (OntapRestAPI)
try:
    import requests
    HAS_REQUESTS = True
except ImportError:
    HAS_REQUESTS = False

HAS_SF_SDK = False
# unit multipliers based on powers of 1000 (SolidFire/ElementSW convention)
SF_BYTE_MAP = dict(
    # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
    bytes=1,
    b=1,
    kb=1000,
    mb=1000 ** 2,
    gb=1000 ** 3,
    tb=1000 ** 4,
    pb=1000 ** 5,
    eb=1000 ** 6,
    zb=1000 ** 7,
    yb=1000 ** 8
)

# unit multipliers based on powers of 1024 (ONTAP convention)
POW2_BYTE_MAP = dict(
    # Here, 1 kb = 1024
    bytes=1,
    b=1,
    k=1024,
    m=1024 ** 2,
    g=1024 ** 3,
    t=1024 ** 4,
    p=1024 ** 5,
    e=1024 ** 6,
    z=1024 ** 7,
    y=1024 ** 8,
    kb=1024,
    mb=1024 ** 2,
    gb=1024 ** 3,
    tb=1024 ** 4,
    pb=1024 ** 5,
    eb=1024 ** 6,
    zb=1024 ** 7,
    yb=1024 ** 8,
)

ERROR_MSG = dict(
    no_cserver='This module is expected to run as cluster admin'
)

LOG = logging.getLogger(__name__)
# the trace_apis feature flag appends ZAPI and REST traffic to this file
LOG_FILE = '/tmp/ontap_apis.log'
ZAPI_DEPRECATION_MESSAGE = "With version 22.0.0 ONTAPI (ZAPI) has been deprecated. The final ONTAP version to support ZAPI is ONTAP 9.13.1. "\
    "ZAPI calls in these modules will continue to work for ONTAP versions that supports ZAPI. "\
    "You can update your playbook to use REST by adding use_rest: always to your playbook. "\
    "More information can be found at: https://github.com/ansible-collections/netapp.ontap"

# the SolidFire SDK is only needed for ElementSW helpers (create_sf_connection)
try:
    from solidfire.factory import ElementFactory
    HAS_SF_SDK = True
except ImportError:
    HAS_SF_SDK = False
+
+
def has_netapp_lib():
    """Return True when the netapp_lib (ZAPI) package imported successfully."""
    return bool(HAS_NETAPP_LIB)
+
+
def netapp_lib_is_required():
    """Return the error message to report when netapp_lib is missing."""
    message = "Error: the python NetApp-Lib module is required. Import error: %s" % str(IMPORT_EXCEPTION)
    return message
+
+
def has_sf_sdk():
    """Return True when the SolidFire SDK imported successfully."""
    return bool(HAS_SF_SDK)
+
+
def na_ontap_zapi_only_spec():
    """Return the connection argument spec for ZAPI-only modules.

    Compared to na_ontap_host_argument_spec, force_ontap_version is not
    offered and use_rest defaults to 'never'.
    """
    return {
        'hostname': {'required': True, 'type': 'str'},
        'username': {'required': False, 'type': 'str', 'aliases': ['user']},
        'password': {'required': False, 'type': 'str', 'aliases': ['pass'], 'no_log': True},
        'https': {'required': False, 'type': 'bool', 'default': False},
        'validate_certs': {'required': False, 'type': 'bool', 'default': True},
        'http_port': {'required': False, 'type': 'int'},
        'ontapi': {'required': False, 'type': 'int'},
        'use_rest': {'required': False, 'type': 'str', 'default': 'never'},
        'feature_flags': {'required': False, 'type': 'dict'},
        'cert_filepath': {'required': False, 'type': 'str'},
        'key_filepath': {'required': False, 'type': 'str', 'no_log': False},
    }
+
+
def na_ontap_host_argument_spec():
    """Return the connection argument spec for ZAPI+REST and REST-only modules."""
    return {
        'hostname': {'required': True, 'type': 'str'},
        'username': {'required': False, 'type': 'str', 'aliases': ['user']},
        'password': {'required': False, 'type': 'str', 'aliases': ['pass'], 'no_log': True},
        'https': {'required': False, 'type': 'bool', 'default': False},
        'validate_certs': {'required': False, 'type': 'bool', 'default': True},
        'http_port': {'required': False, 'type': 'int'},
        'ontapi': {'required': False, 'type': 'int'},
        'use_rest': {'required': False, 'type': 'str', 'default': 'auto'},
        'feature_flags': {'required': False, 'type': 'dict'},
        'cert_filepath': {'required': False, 'type': 'str'},
        'key_filepath': {'required': False, 'type': 'str', 'no_log': False},
        'force_ontap_version': {'required': False, 'type': 'str'},
    }
+
+
def na_ontap_host_argument_spec_peer():
    """Return the argument spec for a peer host.

    Same options as the primary host, except feature_flags is removed and
    defaults are dropped so that unset options inherit the source values.
    """
    spec = na_ontap_host_argument_spec()
    del spec['feature_flags']
    for option in spec.values():
        option.pop('default', None)
    return spec
+
+
def has_feature(module, feature_name):
    """Return the boolean value of a feature flag; fail when it is not a boolean."""
    value = get_feature(module, feature_name)
    if not isinstance(value, bool):
        module.fail_json(msg="Error: expected bool type for feature flag: %s" % feature_name)
    return value
+
+
def get_feature(module, feature_name):
    """Return the value of a feature flag.

    A value set by the user in the feature_flags module option takes
    precedence over the built-in default; an unknown flag name is an
    internal error.
    """
    default_flags = dict(
        strict_json_check=True,                 # when true, fail if response.content in not empty and is not valid json
        trace_apis=False,                       # when true, append ZAPI and REST requests/responses to /tmp/ontap_zapi.txt
        trace_headers=False,                    # when true, headers are not redacted in send requests
        trace_auth_args=False,                  # when true, auth_args are not redacted in send requests
        check_required_params_for_none=True,
        classic_basic_authorization=False,      # use ZAPI wrapper to send Authorization header
        deprecation_warning=True,
        sanitize_xml=True,
        sanitize_code_points=[8],               # unicode values, 8 is backspace
        show_modified=True,
        always_wrap_zapi=True,                  # for better error reporting
        flexcache_delete_return_timeout=5,      # ONTAP bug if too big?
        # for SVM, whch protocols can be allowed
        svm_allowable_protocols_rest=['cifs', 'fcp', 'iscsi', 'nvme', 'nfs', 'ndmp'],
        svm_allowable_protocols_zapi=['cifs', 'fcp', 'iscsi', 'nvme', 'nfs', 'ndmp', 'http'],
        max_files_change_threshold=1,           # percentage of increase/decrease required to trigger a modify action
        warn_or_fail_on_fabricpool_backend_change='fail',
        no_cserver_ems=False                    # when True, don't attempt to find cserver and don't send cserver EMS
    )

    user_flags = module.params['feature_flags'] or {}
    if feature_name in user_flags:
        return user_flags[feature_name]
    if feature_name in default_flags:
        return default_flags[feature_name]
    module.fail_json(msg="Internal error: unexpected feature flag: %s" % feature_name)
+
+
def create_sf_connection(module, port=None, host_options=None):
    """Create an ElementSW (SolidFire) connection from module options.

    Only username/password authentication is supported; certificate options
    cause a failure.  Raises on connection creation errors.
    """
    if not HAS_SF_SDK:
        module.fail_json(msg="the python SolidFire SDK module is required")

    if host_options is None:
        host_options = module.params

    def plural(options):
        return 'are' if len(options) > 1 else 'is'

    errors = []
    missing_options = [option for option in ('hostname', 'username', 'password') if not host_options.get(option)]
    if missing_options:
        errors.append("%s %s required for ElementSW connection." % (', '.join(missing_options), plural(missing_options)))
    extra_options = [option for option in ('cert_filepath', 'key_filepath') if host_options.get(option)]
    if extra_options:
        errors.append("%s %s not supported for ElementSW connection." % (', '.join(extra_options), plural(extra_options)))
    if errors:
        module.fail_json(msg=' '.join(errors))

    try:
        return ElementFactory.create(host_options.get('hostname'), host_options.get('username'), host_options.get('password'), port=port)
    except Exception as exc:
        raise Exception("Unable to create SF connection: %s" % exc)
+
+
def set_auth_method(module, username, password, cert_filepath, key_filepath):
    """Derive the authentication method from the credential options.

    Returns one of 'single_cert', 'cert_key', 'basic_auth',
    'speedy_basic_auth'.  Fails the module when options are inconsistent
    (eg mixing basic and certificate authentication).
    """
    error = None
    auth_method = None
    if username is None and password is None:
        # certificate authentication - or nothing at all
        if cert_filepath is not None:
            auth_method = 'single_cert' if key_filepath is None else 'cert_key'
        elif key_filepath is not None:
            error = 'Error: cannot have a key file without a cert file'
        else:
            error = 'Error: ONTAP module requires username/password or SSL certificate file(s)'
    elif username is not None and password is not None:
        # basic authentication - reject certificate options
        if cert_filepath is None and key_filepath is None:
            auth_method = 'basic_auth' if has_feature(module, 'classic_basic_authorization') else 'speedy_basic_auth'
        else:
            error = 'Error: cannot have both basic authentication (username/password) ' +\
                    'and certificate authentication (cert/key files)'
    else:
        # exactly one of username/password was provided
        error = 'Error: username and password have to be provided together'
        if cert_filepath is not None or key_filepath is not None:
            error += ' and cannot be used with cert or key files'
    if error:
        module.fail_json(msg=error)
    return auth_method
+
+
def setup_host_options_from_module_params(host_options, module, keys):
    """Fill unset host options from the primary module parameters.

    host_options is updated in place.  keys lists the options to
    add/update/leave alone.  Options tied to the authentication method not
    in use are never copied over, so basic and certificate authentication
    are not mixed; using both in host_options is an error.
    """
    password_keys = ['username', 'password']
    certificate_keys = ['cert_filepath', 'key_filepath']
    use_password = any(host_options.get(key) is not None for key in password_keys)
    use_certificate = any(host_options.get(key) is not None for key in certificate_keys)
    if use_password and use_certificate:
        module.fail_json(
            msg='Error: host cannot have both basic authentication (username/password) and certificate authentication (cert/key files).')
    if use_password:
        excluded = set(certificate_keys)
    elif use_certificate:
        excluded = set(password_keys)
    else:
        excluded = set()
    for key in keys:
        if key not in excluded and host_options.get(key) is None:
            # use same value as source if no value is given for dest
            host_options[key] = module.params[key]
+
+
def set_zapi_port_and_transport(server, https, port, validate_certs):
    """Configure transport type (HTTP/HTTPS) and port on a ZAPI server object.

    Defaults to port 443 for HTTPS and 80 for HTTP.  When certificate
    validation is disabled, installs an unverified SSL context.
    """
    if https:
        transport_type = 'HTTPS'
        if port is None:
            port = 443
        # HACK to bypass certificate verification
        if validate_certs is False and not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
            ssl._create_default_https_context = ssl._create_unverified_context
    else:
        # default is HTTP
        transport_type = 'HTTP'
        if port is None:
            port = 80
    server.set_transport_type(transport_type)
    server.set_port(port)
+
+
def setup_na_ontap_zapi(module, vserver=None, wrap_zapi=False, host_options=None):
    """Create and configure a ZAPI server connection object.

    :param module: AnsibleModule instance (options, warnings, failures).
    :param vserver: optional vserver name, enables vserver tunneling.
    :param wrap_zapi: force the OntapZAPICx wrapper (better error reporting).
    :param host_options: connection options; defaults to module.params.
    :return: a zapi.NaServer (or OntapZAPICx subclass) ready to invoke APIs.
    """
    # ZAPI is deprecated: warn on every connection setup
    module.warn(ZAPI_DEPRECATION_MESSAGE)
    if host_options is None:
        host_options = module.params
    hostname = host_options.get('hostname')
    username = host_options.get('username')
    password = host_options.get('password')
    cert_filepath = host_options.get('cert_filepath')
    key_filepath = host_options.get('key_filepath')
    https = host_options.get('https')
    validate_certs = host_options.get('validate_certs')
    port = host_options.get('http_port')
    version = host_options.get('ontapi')
    trace = has_feature(module, 'trace_apis')
    if trace:
        logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')
    wrap_zapi |= has_feature(module, 'always_wrap_zapi')
    auth_method = set_auth_method(module, username, password, cert_filepath, key_filepath)

    if not HAS_NETAPP_LIB:
        module.fail_json(msg=netapp_lib_is_required())

    # set up zapi
    if auth_method in ('single_cert', 'cert_key'):
        # override NaServer in netapp-lib to enable certificate authentication
        server = OntapZAPICx(hostname, module=module, username=username, password=password,
                             validate_certs=validate_certs, cert_filepath=cert_filepath,
                             key_filepath=key_filepath, style=zapi.NaServer.STYLE_CERTIFICATE,
                             auth_method=auth_method, trace=trace)
        # SSL certificate authentication requires SSL
        https = True
    elif auth_method == 'speedy_basic_auth' or wrap_zapi:
        # override NaServer in netapp-lib to add Authorization header preemptively
        # use wrapper to handle parse error (mostly for na_ontap_command)
        server = OntapZAPICx(hostname, module=module, username=username, password=password,
                             validate_certs=validate_certs, auth_method=auth_method, trace=trace)
    else:
        # legacy netapp-lib
        server = zapi.NaServer(hostname, username=username, password=password, trace=trace)
    if vserver:
        server.set_vserver(vserver)
    if host_options.get('use_rest') == 'always':
        # this function is only called when REST is not possible; tell the user
        note = '' if https else ' Note: https is set to false.'
        module.warn("Using ZAPI for %s, ignoring 'use_rest: always'.%s" % (module._name, note))

    set_zapi_port_and_transport(server, https, port, validate_certs)
    # use the user-provided ONTAPI minor version, or 110 by default
    server.set_api_version(major=1, minor=(version or 110))
    server.set_server_type('FILER')
    return server
+
+
def is_zapi_connection_error(message):
    """Return True when a ZAPI error message indicates a connectivity problem."""
    # netapp-lib may report the error as a tuple (exception, ...) or as a str!
    try:
        is_connection_tuple = isinstance(message, tuple) and isinstance(message[0], ConnectionError)
    except NameError:
        # python 2.7 does not know about ConnectionError
        is_connection_tuple = False
    if is_connection_tuple:
        return True
    if not isinstance(message, str):
        return False
    return message.startswith(('URLError', 'Unauthorized'))
+
+
def is_zapi_write_access_error(message):
    """Return True when a ZAPI error message indicates missing write access."""
    # netapp-lib may report the error as a tuple or a str!
    if not isinstance(message, str):
        return False
    if not message.startswith('Insufficient privileges:'):
        return False
    return 'does not have write access' in message
+
+
def is_zapi_missing_vserver_error(message):
    """Return True when a ZAPI error message indicates the vserver is missing."""
    # netapp-lib may report the error as a tuple or a str!
    known_errors = ('Vserver API missing vserver parameter.', 'Specified vserver not found')
    return isinstance(message, str) and message in known_errors
+
+
def get_cserver_zapi(server):
    """Return the name of the admin (cluster) vserver using ZAPI.

    Returns None when not running against the management or cluster IP,
    or when the server cannot be reached.
    """
    # build a vserver-get-iter query restricted to admin vservers
    request = zapi.NaElement('vserver-get-iter')
    query = zapi.NaElement('query')
    query.add_child_elem(zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'}))
    request.add_child_elem(query)
    try:
        result = server.invoke_successfully(request, enable_tunneling=False)
    except zapi.NaApiError as exc:
        # Do not fail if we can't connect to the server.
        # The module will report a better error when trying to get some data from ONTAP.
        if is_zapi_connection_error(exc.message):
            return None
        # raise on other errors, as it may be a bug in calling the ZAPI
        raise exc
    attribute_list = result.get_child_by_name('attributes-list')
    if attribute_list is None:
        return None
    vserver_info = attribute_list.get_child_by_name('vserver-info')
    if vserver_info is None:
        return None
    return vserver_info.get_child_content('vserver-name')
+
+
def classify_zapi_exception(error):
    """Classify a ZAPI exception.

    Returns a (category, message) tuple where category is one of
    'missing_vserver_api_error', 'rpc_error', 'other_error'.
    """
    err_code = 0
    err_msg = ""
    try:
        # very unlikely to fail, but don't take any chance
        err_code = int(error.code)
    except (AttributeError, ValueError):
        pass
    try:
        # very unlikely to fail, but don't take any chance
        err_msg = error.message
    except AttributeError:
        pass
    if err_code == 13005 and err_msg.startswith('Unable to find API:') and 'data vserver' in err_msg:
        return 'missing_vserver_api_error', 'Most likely running a cluster level API as vserver: %s' % to_native(error)
    if err_code == 13001 and err_msg.startswith("RPC: Couldn't make connection"):
        return 'rpc_error', to_native(error)
    return "other_error", to_native(error)
+
+
def get_cserver(connection, is_rest=False):
    """Return the name of the admin (cluster) vserver, or None.

    Dispatches to ZAPI or REST based on is_rest; with REST the private CLI
    vserver endpoint is queried for vserver types.
    """
    if not is_rest:
        return get_cserver_zapi(connection)

    response, error = connection.get("private/cli/vserver", {'fields': 'type'})
    if response is None or error is not None:
        # exit if there is an error or no data
        return None
    records = response.get('records')
    if records:
        for record in records:
            if record['type'] == 'admin':       # cluster admin
                return record['vserver']
        if len(records) == 1:                   # assume vserver admin
            return records[0]['vserver']
    return None
+
+
def generate_result(changed, actions=None, modify=None, response=None, extra_responses=None):
    """Build the result dictionary for exit_json.

    Only non-empty optional fields are included; extra_responses entries
    are merged at the top level of the result.
    """
    result = {'changed': changed}
    if response is not None:
        result['response'] = response
    if modify:
        result['modify'] = modify
    if actions:
        result['actions'] = actions
    if extra_responses:
        result.update(extra_responses)
    return result
+
+
if HAS_NETAPP_LIB:
    class OntapZAPICx(zapi.NaServer):
        ''' override zapi NaServer class to:
            - enable SSL certificate authentication
            - ignore invalid XML characters in ONTAP output (when using CLI module)
            - add Authorization header when using basic authentication
        '''
        def __init__(self, hostname=None, server_type=zapi.NaServer.SERVER_TYPE_FILER,
                     transport_type=zapi.NaServer.TRANSPORT_TYPE_HTTP,
                     style=zapi.NaServer.STYLE_LOGIN_PASSWORD, username=None,
                     password=None, port=None, trace=False, module=None,
                     cert_filepath=None, key_filepath=None, validate_certs=None,
                     auth_method=None):
            # python 2.x syntax, but works for python 3 as well
            super(OntapZAPICx, self).__init__(hostname, server_type=server_type,
                                              transport_type=transport_type,
                                              style=style, username=username,
                                              password=password, port=port, trace=trace)
            self.cert_filepath = cert_filepath
            self.key_filepath = key_filepath
            self.validate_certs = validate_certs
            self.module = module
            # precomputed Authorization header value, only for speedy_basic_auth
            self.base64_creds = None
            if auth_method == 'speedy_basic_auth':
                auth = '%s:%s' % (username, password)
                self.base64_creds = base64.b64encode(auth.encode()).decode()

        def _create_certificate_auth_handler(self):
            """Build an HTTPS handler using the cert/key files for client authentication."""
            try:
                context = ssl.create_default_context()
            except AttributeError as exc:
                self._fail_with_exc_info('SSL certificate authentication requires python 2.7 or later.', exc)

            if not self.validate_certs:
                # disable server certificate verification when requested
                context.check_hostname = False
                context.verify_mode = ssl.CERT_NONE
            try:
                context.load_cert_chain(self.cert_filepath, keyfile=self.key_filepath)
            except IOError as exc:
                self._fail_with_exc_info('Cannot load SSL certificate, check files exist.', exc)

            return zapi.urllib.request.HTTPSHandler(context=context)

        def _fail_with_exc_info(self, arg0, exc):
            # fail the module, appending the exception repr to the message
            msg = arg0
            msg += ' More info: %s' % repr(exc)
            self.module.fail_json(msg=msg)

        def sanitize_xml(self, response):
            """Remove characters that would make the XML response unparsable."""
            # some ONTAP CLI commands return BEL on error
            new_response = response.replace(b'\x07\n', b'')
            # And 9.1 uses \r\n rather than \n !
            new_response = new_response.replace(b'\x07\r\n', b'')
            # And 9.7 may send backspaces
            for code_point in get_feature(self.module, 'sanitize_code_points'):
                if bytes([8]) == b'\x08':   # python 3
                    byte = bytes([code_point])
                elif chr(8) == b'\x08':     # python 2
                    byte = chr(code_point)
                else:                       # very unlikely, noop
                    byte = b'.'
                new_response = new_response.replace(byte, b'.')
            return new_response

        def _parse_response(self, response):
            ''' handling XML parsing exception '''
            try:
                return super(OntapZAPICx, self)._parse_response(response)
            except zapi.etree.XMLSyntaxError as exc:
                if has_feature(self.module, 'sanitize_xml'):
                    # retry after stripping known-bad characters
                    try:
                        return super(OntapZAPICx, self)._parse_response(self.sanitize_xml(response))
                    except Exception:
                        # ignore a second exception, we'll report the first one
                        pass
                try:
                    # report first exception, but include full response
                    exc.msg += ". Received: %s" % response
                except Exception:
                    # in case the response is very badly formatted, ignore it
                    pass
                raise exc

        def _create_request(self, na_element, enable_tunneling=False):
            ''' intercept newly created request to add Authorization header '''
            request, netapp_element = super(OntapZAPICx, self)._create_request(na_element, enable_tunneling=enable_tunneling)
            request.add_header('X-Dot-Client-App', CLIENT_APP_VERSION % self.module._name)
            if self.base64_creds is not None:
                # send basic auth preemptively (no challenge round-trip)
                request.add_header('Authorization', 'Basic %s' % self.base64_creds)
            return request, netapp_element

        # as is from latest version of netapp-lib
        def invoke_elem(self, na_element, enable_tunneling=False):
            """Invoke the API on the server."""
            if not na_element or not isinstance(na_element, zapi.NaElement):
                raise ValueError('NaElement must be supplied to invoke API')

            request, request_element = self._create_request(na_element,
                                                           enable_tunneling)

            if self._trace:
                zapi.LOG.debug("Request: %s", request_element.to_string(pretty=True))

            if not hasattr(self, '_opener') or not self._opener \
                    or self._refresh_conn:
                self._build_opener()
            try:
                if hasattr(self, '_timeout'):
                    response = self._opener.open(request, timeout=self._timeout)
                else:
                    response = self._opener.open(request)
            except zapi.urllib.error.HTTPError as exc:
                raise zapi.NaApiError(exc.code, exc.reason)
            except zapi.urllib.error.URLError as exc:
                msg = 'URL error'
                error = repr(exc)
                try:
                    # ConnectionRefusedError is not defined in python 2.7
                    if isinstance(exc.reason, ConnectionRefusedError):
                        msg = 'Unable to connect'
                        error = exc.args
                except Exception:
                    pass
                raise zapi.NaApiError(msg, error)
            except Exception as exc:
                raise zapi.NaApiError('Unexpected error', repr(exc))

            response_xml = response.read()
            response_element = self._get_result(response_xml)

            if self._trace:
                zapi.LOG.debug("Response: %s", response_element.to_string(pretty=True))

            return response_element
+
+
+class OntapRestAPI(object):
+ ''' wrapper to send requests to ONTAP REST APIs '''
    def __init__(self, module, timeout=60, host_options=None):
        """Record connection options and set up authentication for REST calls.

        :param module: AnsibleModule instance.
        :param timeout: per-request timeout in seconds.
        :param host_options: connection options; defaults to module.params.
        """
        self.host_options = module.params if host_options is None else host_options
        self.module = module
        # either username/password or a certificate with/without a key are used for authentication
        self.username = self.host_options.get('username')
        self.password = self.host_options.get('password')
        self.hostname = self.host_options['hostname']
        self.use_rest = self.host_options['use_rest'].lower()
        self.cert_filepath = self.host_options.get('cert_filepath')
        self.key_filepath = self.host_options.get('key_filepath')
        self.verify = self.host_options['validate_certs']
        self.timeout = timeout
        port = self.host_options['http_port']
        self.force_ontap_version = self.host_options.get('force_ontap_version')
        # REST always uses HTTPS; only append the port when the user set one
        if port is None:
            self.url = 'https://%s/api/' % self.hostname
        else:
            self.url = 'https://%s:%d/api/' % (self.hostname, port)
        self.is_rest_error = None
        self.fallback_to_zapi_reason = None
        # filled in later by a version probe; (-1, -1, -1) means unknown
        self.ontap_version = dict(
            full='unknown',
            generation=-1,
            major=-1,
            minor=-1,
            valid=False
        )
        self.errors = []
        self.debug_logs = []
        # fails the module when the credential options are inconsistent
        self.auth_method = set_auth_method(self.module, self.username, self.password, self.cert_filepath, self.key_filepath)
        self.check_required_library()
        if has_feature(module, 'trace_apis'):
            logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')
        # feature flags controlling whether headers/auth args are redacted in traces
        self.log_headers = has_feature(module, 'trace_headers')
        self.log_auth_args = has_feature(module, 'trace_auth_args')
+
    def requires_ontap_9_6(self, module_name):
        """Return the error message for modules that require REST and ONTAP 9.6."""
        return self.requires_ontap_version(module_name)
+
+ def requires_ontap_version(self, module_name, version='9.6'):
+ suffix = " - %s" % self.is_rest_error if self.is_rest_error is not None else ""
+ return "%s only supports REST, and requires ONTAP %s or later.%s" % (module_name, version, suffix)
+
+ def options_require_ontap_version(self, options, version='9.6', use_rest=None):
+ current_version = self.get_ontap_version()
+ suffix = " - %s" % self.is_rest_error if self.is_rest_error is not None else ""
+ if current_version != (-1, -1, -1):
+ suffix += " - ONTAP version: %s.%s.%s" % current_version
+ if use_rest is not None:
+ suffix += " - using %s" % ('REST' if use_rest else 'ZAPI')
+ if isinstance(options, list) and len(options) > 1:
+ tag = "any of %s" % options
+ elif isinstance(options, list) and len(options) == 1:
+ tag = str(options[0])
+ else:
+ tag = str(options)
+ return 'using %s requires ONTAP %s or later and REST must be enabled%s.' % (tag, version, suffix)
+
+ def meets_rest_minimum_version(self, use_rest, minimum_generation, minimum_major, minimum_minor=0):
+ return use_rest and self.get_ontap_version() >= (minimum_generation, minimum_major, minimum_minor)
+
    def fail_if_not_rest_minimum_version(self, module_name, minimum_generation, minimum_major, minimum_minor=0):
        """Fail the module unless REST is usable and ONTAP meets the minimum version.

        Probes the ONTAP version over REST and collects all applicable error
        messages (use_rest: never, REST errors, bad status) into one failure.
        """
        status_code = self.get_ontap_version_using_rest()
        msgs = []
        if self.use_rest == 'never':
            msgs.append('Error: REST is required for this module, found: "use_rest: %s".' % self.use_rest)
            # The module only supports REST, so make it required
            self.use_rest = 'always'
        if self.is_rest_error:
            msgs.append('Error using REST for version, error: %s.' % self.is_rest_error)
        if status_code != 200:
            msgs.append('Error using REST for version, status_code: %s.' % status_code)
        if msgs:
            self.module.fail_json(msg=' '.join(msgs))
        version = self.get_ontap_version()
        if version < (minimum_generation, minimum_major, minimum_minor):
            msg = 'Error: ' + self.requires_ontap_version(module_name, '%d.%d.%d' % (minimum_generation, minimum_major, minimum_minor))
            msg += ' Found: %s.%s.%s.' % version
            self.module.fail_json(msg=msg)
+
+ def check_required_library(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'))
+
+ def build_headers(self, accept=None, vserver_name=None, vserver_uuid=None):
+ headers = {'X-Dot-Client-App': CLIENT_APP_VERSION % self.module._name}
+ # accept is used to turn on/off HAL linking
+ if accept is not None:
+ headers['accept'] = accept
+ # vserver tunneling using vserver name and/or UUID
+ if vserver_name is not None:
+ headers['X-Dot-SVM-Name'] = vserver_name
+ if vserver_uuid is not None:
+ headers['X-Dot-SVM-UUID'] = vserver_uuid
+ return headers
+
+ def send_request(self, method, api, params, json=None, headers=None, files=None):
+ ''' send http request and process reponse, including error conditions '''
+ url = self.url + api
+
+ def get_auth_args():
+ if self.auth_method == 'single_cert':
+ kwargs = dict(cert=self.cert_filepath)
+ elif self.auth_method == 'cert_key':
+ kwargs = dict(cert=(self.cert_filepath, self.key_filepath))
+ elif self.auth_method in ('basic_auth', 'speedy_basic_auth'):
+ # with requests, there is no challenge, eg no 401.
+ kwargs = dict(auth=(self.username, self.password))
+ else:
+ raise KeyError(self.auth_method)
+ return kwargs
+
+ status_code, json_dict, error_details = self._send_request(method, url, params, json, headers, files, get_auth_args())
+
+ return status_code, json_dict, error_details
+
    def _send_request(self, method, url, params, json, headers, files, auth_args):
        """Perform the HTTP exchange and normalize the outcome.

        Returns (status_code, json_dict, error_details) where error_details
        is either the endpoint-reported json error or a stringified
        transport error; json_dict is {} (possibly with 'Allow'/'text' keys)
        when the call succeeded without a payload.
        """
        status_code = None
        json_dict = None
        json_error = None
        error_details = None
        if headers is None:
            headers = self.build_headers()

        def fail_on_non_empty_value(response):
            '''json() may fail on an empty value, but it's OK if no response is expected.
               To avoid false positives, only report an issue when we expect to read a value.
               The first get will see it.
            '''
            if method == 'GET' and has_feature(self.module, 'strict_json_check'):
                contents = response.content
                if len(contents) > 0:
                    raise ValueError("Expecting json, got: %s" % contents)

        def get_json(response):
            ''' extract json, and error message if present '''
            try:
                json = response.json()
            except ValueError:
                fail_on_non_empty_value(response)
                return None, None
            return json, json.get('error')

        # requests/auth args are redacted unless the matching trace feature flags are on
        self.log_debug('sending', repr(dict(method=method, url=url, verify=self.verify, params=params,
                                            timeout=self.timeout, json=json,
                                            headers=headers if self.log_headers else 'redacted',
                                            auth_args=auth_args if self.log_auth_args else 'redacted')))
        try:
            response = requests.request(method, url, verify=self.verify, params=params,
                                        timeout=self.timeout, json=json, headers=headers, files=files, **auth_args)
            status_code = response.status_code
            self.log_debug(status_code, response.content)
            # If the response was successful, no Exception will be raised
            response.raise_for_status()
            json_dict, json_error = get_json(response)
        except requests.exceptions.HTTPError as err:
            # prefer the endpoint error from the payload over the HTTP error text
            try:
                __, json_error = get_json(response)
            except (AttributeError, ValueError):
                json_error = None
            if json_error is None:
                self.log_error(status_code, 'HTTP error: %s' % err)
                error_details = str(err)

            # If an error was reported in the json payload, it is handled below
        except requests.exceptions.ConnectionError as err:
            self.log_error(status_code, 'Connection error: %s' % err)
            error_details = str(err)
        except Exception as err:
            self.log_error(status_code, 'Other error: %s' % err)
            error_details = str(err)
        if json_error is not None:
            self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
            error_details = json_error
        if not error_details and not json_dict:
            # success with an empty payload: normalize to a dict
            if json_dict is None:
                json_dict = {}
            if method == 'OPTIONS':
                # OPTIONS provides the list of supported verbs
                json_dict['Allow'] = response.headers.get('Allow')
            if response.headers.get('Content-Type', '').startswith("multipart/form-data"):
                json_dict['text'] = response.text
        return status_code, json_dict, error_details
+
+ def _is_job_done(self, job_json, job_state, job_error, timed_out):
+ """ return (done, message, error)
+ done is True to indicate that the job is complete, or failed, or timed out
+ done is False when the job is still running
+ """
+ # a job looks like this
+ # {
+ # "uuid": "cca3d070-58c6-11ea-8c0c-005056826c14",
+ # "description": "POST /api/cluster/metrocluster",
+ # "state": "failure",
+ # "message": "There are not enough disks in Pool1.", **OPTIONAL**
+ # "code": 2432836,
+ # "start_time": "2020-02-26T10:35:44-08:00",
+ # "end_time": "2020-02-26T10:47:38-08:00",
+ # "_links": {
+ # "self": {
+ # "href": "/api/cluster/jobs/cca3d070-58c6-11ea-8c0c-005056826c14"
+ # }
+ # }
+ # }
+ done, error = False, None
+ message = job_json.get('message', '') if job_json else None
+ if job_state == 'failure':
+ # if the job has failed, return message as error
+ error = message
+ message = None
+ done = True
+ elif job_state not in ('queued', 'running', None):
+ error = job_error
+ done = True
+ elif timed_out:
+ # Would like to post a message to user (not sure how)
+ self.log_error(0, 'Timeout error: Process still running')
+ error = 'Timeout error: Process still running'
+ if job_error is not None:
+ error += ' - %s' % job_error
+ done = True
+ return done, message, error
+
    def wait_on_job(self, job, timeout=600, increment=60):
        """Poll an asynchronous REST job until completion, failure, or timeout.

        :param job: job link record as returned by the POST/PATCH response.
        :param timeout: total seconds to wait before giving up.
        :param increment: seconds between polls.
        :return: (message, error) - message from the completed job, error text
                 on failure/timeout/max retries, else None.
        """
        try:
            url = job['_links']['self']['href'].split('api/')[1]
        except Exception as err:
            self.log_error(0, 'URL Incorrect format: %s - Job: %s' % (err, job))
            return None, 'URL Incorrect format: %s - Job: %s' % (err, job)
        # Expecting job to be in the following format
        # {'job':
        #   {'uuid': 'fde79888-692a-11ea-80c2-005056b39fe7',
        #    '_links':
        #       {'self':
        #           {'href': '/api/cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7'}
        #       }
        #   }
        # }
        error = None
        errors = []
        message = None
        runtime = 0
        retries = 0
        max_retries = 3
        done = False
        while not done:
            # Will run every <increment> seconds for <timeout> seconds
            job_json, job_error = self.get(url, None)
            job_state = job_json.get('state', None) if job_json else None
            # ignore error if status is provided in the job
            if job_error and job_state is None:
                # transient GET errors are retried up to max_retries in a row
                errors.append(str(job_error))
                retries += 1
                if retries > max_retries:
                    error = " - ".join(errors)
                    self.log_error(0, 'Job error: Reached max retries.')
                    done = True
            else:
                retries = 0
                done, message, error = self._is_job_done(job_json, job_state, job_error, runtime >= timeout)
            if not done:
                time.sleep(increment)
                runtime += increment
        return message, error
+
+ def get(self, api, params=None, headers=None):
+ method = 'GET'
+ dummy, message, error = self.send_request(method, api, params, json=None, headers=headers)
+ return message, error
+
+ def post(self, api, body, params=None, headers=None, files=None):
+ method = 'POST'
+ retry = 3
+ while retry > 0:
+ dummy, message, error = self.send_request(method, api, params, json=body, headers=headers, files=files)
+ if error and type(error) is dict and 'temporarily locked' in error.get('message', ''):
+ time.sleep(30)
+ retry = retry - 1
+ continue
+ break
+ return message, error
+
+ def patch(self, api, body, params=None, headers=None, files=None):
+ method = 'PATCH'
+ retry = 3
+ while retry > 0:
+ dummy, message, error = self.send_request(method, api, params, json=body, headers=headers, files=files)
+ if error and type(error) is dict and 'temporarily locked' in error.get('message', ''):
+ time.sleep(30)
+ retry = retry - 1
+ continue
+ break
+ return message, error
+
+ def delete(self, api, body=None, params=None, headers=None):
+ method = 'DELETE'
+ dummy, message, error = self.send_request(method, api, params, json=body, headers=headers)
+ return message, error
+
+ def options(self, api, params=None, headers=None):
+ method = 'OPTIONS'
+ dummy, message, error = self.send_request(method, api, params, json=None, headers=headers)
+ return message, error
+
    def set_version(self, message):
        """ Record the ONTAP version fields from a GET cluster/node response body
        into self.ontap_version.
        Any field that cannot be read is set to -1, and self.ontap_version['valid']
        is recomputed as True only when every field (other than 'valid') was read.
        """
        try:
            version = message.get('version', 'not found')
        except AttributeError:
            # message is not a dict (eg None when the request failed)
            self.ontap_version['valid'] = False
            self.ontap_version['full'] = 'unreadable message'
            return
        for key in self.ontap_version:
            try:
                self.ontap_version[key] = version.get(key, -1)
            except AttributeError:
                # version is not a dict (eg the 'not found' fallback string)
                self.ontap_version[key] = -1
        # 'valid' was clobbered by the loop above; recompute it last
        self.ontap_version['valid'] = all(
            self.ontap_version[key] != -1 for key in self.ontap_version if key != 'valid'
        )
+
+ def get_ontap_version(self):
+ if self.ontap_version['valid']:
+ return self.ontap_version['generation'], self.ontap_version['major'], self.ontap_version['minor']
+ return -1, -1, -1
+
    def get_node_version_using_rest(self):
        """ Read the version from the first node record - used when the cluster
        endpoint is not available (precluster mode).
        :return: (status_code, first node record or raw response body, error)
        """
        # using GET rather than HEAD because the error messages are different,
        # and we need the version as some REST options are not available in earlier versions
        method = 'GET'
        api = 'cluster/nodes'
        params = {'fields': ['version']}
        status_code, message, error = self.send_request(method, api, params=params)
        if message and 'records' in message and len(message['records']) > 0:
            # unwrap the collection: return the first node record only
            message = message['records'][0]
        return status_code, message, error
+
    def get_ontap_version_from_params(self):
        """ Provide a way to override the current version
        This is required when running a custom vsadmin role as ONTAP does not currently allow access to /api/cluster.
        This may also be interesting for testing :)
        Report a warning if API call failed to report version.
        Report a warning if current version could be fetched and is different.
        :return: warning message, or '' when there is nothing to report
        """
        try:
            version = [int(x) for x in self.force_ontap_version.split('.')]
            if len(version) == 2:
                # normalize G.M to G.M.0 (eg 9.10 -> 9.10.0)
                version.append(0)
            gen, major, minor = version
        except (TypeError, ValueError) as exc:
            # also covers too many/few dotted components (unpacking ValueError)
            self.module.fail_json(
                msg='Error: unexpected format in force_ontap_version, expecting G.M.m or G.M, as in 9.10.1, got: %s, error: %s'
                % (self.force_ontap_version, exc))

        warning = ''
        read_version = self.get_ontap_version()
        if read_version == (-1, -1, -1):
            warning = ', unable to read current version:'
        elif read_version != (gen, major, minor):
            warning = ' but current version is %s' % self.ontap_version['full']
        if warning:
            warning = 'Forcing ONTAP version to %s%s' % (self.force_ontap_version, warning)
        # overwrite the cached version with the user-provided one
        self.set_version({'version': {
            'generation': gen,
            'major': major,
            'minor': minor,
            'full': 'set by user to %s' % self.force_ontap_version,
        }})
        return warning
+
    def get_ontap_version_using_rest(self):
        """ Fetch the cluster version over REST and cache it via set_version().
        Falls back to the node endpoint in precluster mode, and honors
        self.force_ontap_version when set (errors are then not fatal).
        Sets self.is_rest_error and returns the HTTP status code.
        """
        # using GET rather than HEAD because the error messages are different,
        # and we need the version as some REST options are not available in earlier versions
        method = 'GET'
        api = 'cluster'
        params = {'fields': ['version']}
        status_code, message, error = self.send_request(method, api, params=params)
        try:
            if error and 'are available in precluster.' in error.get('message', ''):
                # in precluster mode, version is not available :(
                status_code, message, error = self.get_node_version_using_rest()
        except AttributeError:
            # error is not a dict - keep the original response
            pass
        self.set_version(message)
        if error:
            self.log_error(status_code, str(error))
        if self.force_ontap_version:
            warning = self.get_ontap_version_from_params()
            if error:
                warning += ' error: %s, status_code: %s' % (error, status_code)
            if warning:
                self.module.warn(warning)
            msg = 'Forcing ONTAP version to %s' % self.force_ontap_version
            if error:
                self.log_error('INFO', msg)
            else:
                self.log_debug('INFO', msg)
            # when the version is forced, failure to read it is not fatal
            error = None
            status_code = 200
        self.is_rest_error = str(error) if error else None
        return status_code
+
+ def convert_parameter_keys_to_dot_notation(self, parameters):
+ """ Get all variable set in a list and add them to a dict so that partially_supported_rest_properties works correctly """
+ if isinstance(parameters, dict):
+ temp = {}
+ for parameter in parameters:
+ if isinstance(parameters[parameter], list):
+ if parameter not in temp:
+ temp[parameter] = {}
+ for adict in parameters[parameter]:
+ if isinstance(adict, dict):
+ for key in adict:
+ temp[parameter + '.' + key] = 0
+ parameters.update(temp)
+ return parameters
+
    def _is_rest(self, used_unsupported_rest_properties=None, partially_supported_rest_properties=None, parameters=None):
        """ Decide whether to use REST or ZAPI, based on self.use_rest
        ('never'/'always'/'auto'), the ONTAP version, and the options in use.
        :param used_unsupported_rest_properties: option names never supported in REST
        :param partially_supported_rest_properties: list of (option_name, min_version) tuples
        :param parameters: the module parameters, used to check which options are in play
        :return: (use_rest, error) - error is None unless the combination is invalid
        """
        if self.use_rest not in ['always', 'auto', 'never']:
            error = "use_rest must be one of: never, always, auto. Got: '%s'" % self.use_rest
            return False, error
        if self.use_rest == "always" and used_unsupported_rest_properties:
            # user insists on REST but uses an option REST cannot honor
            error = "REST API currently does not support '%s'" % ', '.join(used_unsupported_rest_properties)
            return True, error
        if self.use_rest == 'never':
            # force ZAPI if requested
            return False, None
        # don't send a new request if we already know the version
        status_code = self.get_ontap_version_using_rest() if self.get_ontap_version() == (-1, -1, -1) else 200
        if self.use_rest == "always" and partially_supported_rest_properties:
            # If a variable is on a list we need to move it to a dict for this check to work correctly.
            temp_parameters = parameters.copy()
            temp_parameters = self.convert_parameter_keys_to_dot_notation(temp_parameters)
            error = '\n'.join(
                "Minimum version of ONTAP for %s is %s." % (property[0], str(property[1]))
                for property in partially_supported_rest_properties
                if self.get_ontap_version()[:3] < property[1] and property[0] in temp_parameters
            )
            if error != '':
                return True, 'Error: %s Current version: %s.' % (error, self.get_ontap_version())
        if self.use_rest == 'always':
            # ignore error, it will show up later when calling another REST API
            return True, None
        # we're now using 'auto'
        if used_unsupported_rest_properties:
            # force ZAPI if some parameter requires it
            if self.get_ontap_version()[:2] > (9, 5):
                self.fallback_to_zapi_reason =\
                    'because of unsupported option(s) or option value(s) in REST: %s' % used_unsupported_rest_properties
                self.module.warn('Falling back to ZAPI %s' % self.fallback_to_zapi_reason)
            return False, None
        if partially_supported_rest_properties:
            # if ontap version is lower than partially_supported_rest_properties version, force ZAPI, only if the parameter is used
            # If a variable is on a list we need to move it to a dict for this check to work correctly.
            temp_parameters = parameters.copy()
            temp_parameters = self.convert_parameter_keys_to_dot_notation(temp_parameters)
            for property in partially_supported_rest_properties:
                if self.get_ontap_version()[:3] < property[1] and property[0] in temp_parameters:
                    self.fallback_to_zapi_reason =\
                        'because of unsupported option(s) or option value(s) "%s" in REST require %s' % (property[0], str(property[1]))
                    self.module.warn('Falling back to ZAPI %s' % self.fallback_to_zapi_reason)
                    return False, None
        if self.get_ontap_version()[:2] in ((9, 4), (9, 5)):
            # we can't trust REST support on 9.5, and not at all on 9.4
            return False, None
        return (True, None) if status_code == 200 else (False, None)
+
+ def is_rest_supported_properties(self, parameters, unsupported_rest_properties=None, partially_supported_rest_properties=None, report_error=False):
+ used_unsupported_rest_properties = None
+ if unsupported_rest_properties:
+ used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in parameters]
+ use_rest, error = self.is_rest(used_unsupported_rest_properties, partially_supported_rest_properties, parameters)
+ if report_error:
+ return use_rest, error
+ if error:
+ self.module.fail_json(msg=error)
+ return use_rest
+
+ def is_rest(self, used_unsupported_rest_properties=None, partially_supported_rest_properties=None, parameters=None):
+ ''' only return error if there is a reason to '''
+ use_rest, error = self._is_rest(used_unsupported_rest_properties, partially_supported_rest_properties, parameters)
+ if used_unsupported_rest_properties is None and partially_supported_rest_properties is None:
+ return use_rest
+ return use_rest, error
+
    def log_error(self, status_code, message):
        """Log an error and record it in self.errors and self.debug_logs."""
        LOG.error("%s: %s", status_code, message)
        self.errors.append(message)
        self.debug_logs.append((status_code, message))
+
    def log_debug(self, status_code, content):
        """Log a debug entry and record it in self.debug_logs."""
        LOG.debug("%s: %s", status_code, content)
        self.debug_logs.append((status_code, content))
+
+ def write_to_file(self, tag, data=None, filepath=None, append=True):
+ '''
+ This function is only for debug purposes, all calls to write_to_file should be removed
+ before submitting.
+ If data is None, tag is considered as data
+ else tag is a label, and data is data.
+ '''
+ if filepath is None:
+ filepath = '/tmp/ontap_log'
+ mode = 'a' if append else 'w'
+ with open(filepath, mode) as afile:
+ if data is not None:
+ afile.write("%s: %s\n" % (str(tag), str(data)))
+ else:
+ afile.write(str(tag))
+ afile.write('\n')
+
+ def write_errors_to_file(self, tag=None, filepath=None, append=True):
+ if tag is None:
+ tag = 'Error'
+ for error in self.errors:
+ self.write_to_file(tag, error, filepath, append)
+ if not append:
+ append = True
+
+ def write_debug_log_to_file(self, tag=None, filepath=None, append=True):
+ if tag is None:
+ tag = 'Debug'
+ for status_code, message in self.debug_logs:
+ self.write_to_file(tag, status_code, filepath, append)
+ if not append:
+ append = True
+ self.write_to_file(tag, message, filepath, append)
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py b/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py
new file mode 100644
index 000000000..d16c992ec
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py
@@ -0,0 +1,41 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+HAS_SF_SDK = False
+try:
+ # pylint: disable=unused-import
+ import solidfire.common
+
+ HAS_SF_SDK = True
+except Exception:
+ HAS_SF_SDK = False
+
+
def has_sf_sdk():
    """Return True when the SolidFire (ElementSW) SDK could be imported."""
    return HAS_SF_SDK
+
+
class NaElementSWModule(object):
    """Helper wrapping an ElementSW (SolidFire) connection object."""

    def __init__(self, elem):
        # elem: an established connection to the ElementSW cluster
        self.elem_connect = elem
        self.parameters = {}

    def volume_id_exists(self, volume_id):
        """
        Return volume_id if an active (not deleted) volume exists for the given ID

        :param volume_id: volume ID
        :type volume_id: int
        :return: Volume ID if found, None if not found
        :rtype: int
        """
        listed = self.elem_connect.list_volumes(volume_ids=[volume_id])
        # a volume with a non-empty delete_time is pending deletion, ignore it
        return next(
            (vol.volume_id for vol in listed.volumes
             if vol.volume_id == volume_id and str(vol.delete_time) == ""),
            None,
        )
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/netapp_ipaddress.py b/ansible_collections/netapp/ontap/plugins/module_utils/netapp_ipaddress.py
new file mode 100644
index 000000000..a936071ca
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/netapp_ipaddress.py
@@ -0,0 +1,134 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2020-2022, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" Support class for NetApp ansible modules
+
    Provides access to ipaddress - mediating unicode issues with python2.7
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+
+try:
+ import ipaddress
+ HAS_IPADDRESS_LIB = True
+ IMPORT_ERROR = None
+except ImportError as exc:
+ HAS_IPADDRESS_LIB = False
+ IMPORT_ERROR = to_native(exc)
+
+
def _check_ipaddress_is_present(module):
    '''
    fail the module at call time (rather than at import time) when the
    ipaddress package could not be imported
    '''
    if not HAS_IPADDRESS_LIB:
        module.fail_json(msg="Error: the python ipaddress package is required for this module. Import error: %s" % IMPORT_ERROR)
+
+
def _get_ipv4orv6_address(ip_address, module):
    '''
    return an IPv4Address or IPv6Address object, failing the module on invalid input
    '''
    _check_ipaddress_is_present(module)
    try:
        # python 2.7 requires unicode format
        return ipaddress.ip_address(u'%s' % ip_address)
    except ValueError as exc:
        module.fail_json(msg='Error: Invalid IP address value %s - %s' % (ip_address, to_native(exc)))
+
+
def _get_ipv4orv6_network(ip_address, netmask, strict, module):
    '''
    return an IPv4Network or IPv6Network object, failing the module on invalid input
    (with a hint when the host bits are set and strict is True)
    '''
    _check_ipaddress_is_present(module)
    # python 2.7 requires unicode format
    ip_addr = u'%s' % ip_address if netmask is None else u'%s/%s' % (ip_address, netmask)
    try:
        return ipaddress.ip_network(ip_addr, strict)
    except ValueError as exc:
        details = to_native(exc)
        error = 'Error: Invalid IP network value %s' % ip_addr
        if 'has host bits set' in details:
            error += '. Please specify a network address without host bits set'
        elif netmask is not None:
            error += '. Check address and netmask values'
        module.fail_json(msg=error + ': %s.' % details)
+
+
def _check_ipv6_has_prefix_length(ip_address, netmask, module):
    '''fail when an IPv6 netmask is given in expanded (colon) notation instead of a prefix length'''
    addr = _get_ipv4orv6_address(ip_address, module)
    # an int netmask is always a prefix length, which is fine for IPv6
    if isinstance(addr, ipaddress.IPv6Address) and not isinstance(netmask, int) and ':' in netmask:
        module.fail_json(msg='Error: only prefix_len is supported for IPv6 addresses, got %s' % netmask)
+
+
def validate_ip_address_is_network_address(ip_address, module):
    '''
    Validate if the given IP address is a network address (i.e. its host bits are set to 0)
    ONTAP doesn't validate if the host bits are set,
    and hence doesn't add a new address unless the IP is from a different network.
    So this validation allows the module to be idempotent.
    :return: None (fails the module when the address is not a network address)
    '''
    # strict=True makes ip_network reject addresses with host bits set
    _get_ipv4orv6_network(ip_address, None, True, module)
+
+
def validate_and_compress_ip_address(ip_address, module):
    '''
    0's in IPv6 addresses can be compressed to save space
    This will be a noop for IPv4 address
    In addition, it makes sure the address is in a valid format
    (fails the module otherwise)
    '''
    # return compressed value for IPv6 and value in . notation for IPv4
    return str(_get_ipv4orv6_address(ip_address, module))
+
+
def netmask_length_to_netmask(ip_address, length, module):
    '''
    input: ip_address and netmask length
    output: netmask in dot notation
    Fails the module when the address or length is invalid.
    '''
    return str(_get_ipv4orv6_network(ip_address, length, False, module).netmask)
+
+
def netmask_to_netmask_length(ip_address, netmask, module):
    '''
    input: ip_address and netmask in dot notation for IPv4, expanded netmask is not supported for IPv6
           netmask as int or a str representation of int is also accepted
    output: netmask length as int
    Fails the module for an IPv6 address with an expanded (colon) netmask.
    '''
    _check_ipv6_has_prefix_length(ip_address, netmask, module)
    return _get_ipv4orv6_network(ip_address, netmask, False, module).prefixlen
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py b/ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py
new file mode 100644
index 000000000..91acd3933
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py
@@ -0,0 +1,619 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2018, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+''' Support class for NetApp ansible modules '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from copy import deepcopy
+import re
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+ZAPI_ONLY_DEPRECATION_MESSAGE = "This module only supports ZAPI and is deprecated. "\
+ "It will no longer work with newer versions of ONTAP. "\
+ "The final ONTAP version to support ZAPI is ONTAP 9.12.1."
+
+
def cmp(obj1, obj2):
    """
    Three-way comparison (python 3 removed the builtin cmp).
    Strings are compared case-insensitively; lists are compared as sorted lists,
    lowercasing any string elements first.
    :param obj1: first object to check
    :param obj2: second object to check
    :return: -1 when obj1 is None or obj1 < obj2, 0 when equal, 1 when obj1 > obj2
    """
    if obj1 is None:
        # NOTE: None on the left always compares low, even against None
        return -1
    if isinstance(obj1, str) and isinstance(obj2, str):
        obj1, obj2 = obj1.lower(), obj2.lower()
    if isinstance(obj1, list) and isinstance(obj2, list):
        # work on normalized copies so the caller's lists are not reordered
        obj1 = sorted(x.lower() if isinstance(x, str) else x for x in obj1)
        obj2 = sorted(x.lower() if isinstance(x, str) else x for x in obj2)
    return (obj1 > obj2) - (obj1 < obj2)
+
+
+class NetAppModule(object):
+ '''
+ Common class for NetApp modules
+ set of support functions to derive actions based
+ on the current state of the system, and a desired state
+ '''
+
    def __init__(self, module=None):
        """ module can be the AnsibleModule object itself, or a NetApp module
        object that exposes the AnsibleModule as its .module attribute.
        """
        # we can call this with module set to self or self.module
        # self is a NetApp module, while self.module is the AnsibleModule object
        self.netapp_module = None
        self.ansible_module = module
        if module and getattr(module, 'module', None) is not None:
            self.netapp_module = module
            self.ansible_module = module.module
        # When using self or self.module, this gives access to:
        #       self.ansible_module.fail_json
        # When using self, this gives access to:
        #       self.netapp_module.rest_api.log_debug
        self.log = []
        self.changed = False                            # set by helpers (get_cd_action, ...) when a change is needed
        self.parameters = {'name': 'not initialized'}   # replaced by set_parameters()/check_and_set_parameters()
        self.zapi_string_keys = {}
        self.zapi_bool_keys = {}
        self.zapi_list_keys = {}
        self.zapi_int_keys = {}
        self.zapi_required = {}
        self.params_to_rest_api_keys = {}
+
    def module_deprecated(self, module):
        """Warn that this module is ZAPI-only and deprecated."""
        module.warn(ZAPI_ONLY_DEPRECATION_MESSAGE)
+
    def module_replaces(self, new_module, module):
        """Warn that this deprecated ZAPI-only module is replaced by new_module."""
        self.module_deprecated(module)
        module.warn('netapp.ontap.%s should be used instead.' % new_module)
+
+ def set_parameters(self, ansible_params):
+ self.parameters = {}
+ for param in ansible_params:
+ if ansible_params[param] is not None:
+ self.parameters[param] = ansible_params[param]
+ return self.parameters
+
+ def fall_back_to_zapi(self, module, msg, parameters):
+ if parameters['use_rest'].lower() == 'always':
+ module.fail_json(msg='Error: %s' % msg)
+ if parameters['use_rest'].lower() == 'auto':
+ module.warn('Falling back to ZAPI: %s' % msg)
+ return False
+
    def check_and_set_parameters(self, module):
        """ Record every non-None module parameter in self.parameters and return it.
        When the 'check_required_params_for_none' feature flag is enabled, fail
        the module if a required parameter is explicitly set to None.
        """
        self.parameters = {}
        check_for_none = netapp_utils.has_feature(module, 'check_required_params_for_none')
        if check_for_none:
            required_keys = [key for key, value in module.argument_spec.items() if value.get('required')]
        for param in module.params:
            if module.params[param] is not None:
                self.parameters[param] = module.params[param]
            elif check_for_none and param in required_keys:
                module.fail_json(msg="%s requires a value, got: None" % param)
        return self.parameters
+
+ @staticmethod
+ def type_error_message(type_str, key, value):
+ return "expecting '%s' type for %s: %s, got: %s" % (type_str, repr(key), repr(value), type(value))
+
+ def get_value_for_bool(self, from_zapi, value, key=None):
+ """
+ Convert boolean values to string or vice-versa
+ If from_zapi = True, value is converted from string (as it appears in ZAPI) to boolean
+ If from_zapi = False, value is converted from boolean to string
+ For get() method, from_zapi = True
+ For modify(), create(), from_zapi = False
+ :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
+ :param value: value of the boolean attribute
+ :param key: if present, force error checking to validate type, and accepted values
+ :return: string or boolean
+ """
+ if value is None:
+ return None
+ if from_zapi:
+ if key is not None and not isinstance(value, str):
+ raise TypeError(self.type_error_message('str', key, value))
+ if key is not None and value not in ('true', 'false'):
+ raise ValueError('Unexpected value: %s received from ZAPI for boolean attribute: %s' % (repr(value), repr(key)))
+ return value == 'true'
+ if key is not None and not isinstance(value, bool):
+ raise TypeError(self.type_error_message('bool', key, value))
+ return 'true' if value else 'false'
+
+ def get_value_for_int(self, from_zapi, value, key=None):
+ """
+ Convert integer values to string or vice-versa
+ If from_zapi = True, value is converted from string (as it appears in ZAPI) to integer
+ If from_zapi = False, value is converted from integer to string
+ For get() method, from_zapi = True
+ For modify(), create(), from_zapi = False
+ :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
+ :param value: value of the integer attribute
+ :param key: if present, force error checking to validate type
+ :return: string or integer
+ """
+ if value is None:
+ return None
+ if from_zapi:
+ if key is not None and not isinstance(value, str):
+ raise TypeError(self.type_error_message('str', key, value))
+ return int(value)
+ if key is not None and not isinstance(value, int):
+ raise TypeError(self.type_error_message('int', key, value))
+ return str(value)
+
+ def get_value_for_list(self, from_zapi, zapi_parent, zapi_child=None, data=None):
+ """
+ Convert a python list() to NaElement or vice-versa
+ If from_zapi = True, value is converted from NaElement (parent-children structure) to list()
+ If from_zapi = False, value is converted from list() to NaElement
+ :param zapi_parent: ZAPI parent key or the ZAPI parent NaElement
+ :param zapi_child: ZAPI child key
+ :param data: list() to be converted to NaElement parent-children object
+ :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
+ :return: list() or NaElement
+ """
+ if from_zapi:
+ if zapi_parent is None:
+ return []
+ return [zapi_child.get_content() for zapi_child in zapi_parent.get_children()]
+
+ zapi_parent = netapp_utils.zapi.NaElement(zapi_parent)
+ for item in data:
+ zapi_parent.add_new_child(zapi_child, item)
+ return zapi_parent
+
+ def get_cd_action(self, current, desired):
+ ''' takes a desired state and a current state, and return an action:
+ create, delete, None
+ eg:
+ is_present = 'absent'
+ some_object = self.get_object(source)
+ if some_object is not None:
+ is_present = 'present'
+ action = cd_action(current=is_present, desired = self.desired.state())
+ '''
+ desired_state = desired['state'] if 'state' in desired else 'present'
+ if current is None and desired_state == 'absent':
+ return None
+ if current is not None and desired_state == 'present':
+ return None
+ # change in state
+ self.changed = True
+ return 'create' if current is None else 'delete'
+
    @staticmethod
    def check_keys(current, desired):
        ''' TODO: raise an error if keys do not match
            with the exception of:
            new_name, state in desired
            Currently a no-op placeholder invoked by get_modified_attributes().
        '''
+
+ @staticmethod
+ def compare_lists(current, desired, get_list_diff):
+ ''' compares two lists and return a list of elements that are either the desired elements or elements that are
+ modified from the current state depending on the get_list_diff flag
+ :param: current: current item attribute in ONTAP
+ :param: desired: attributes from playbook
+ :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
+ :return: list of attributes to be modified
+ :rtype: list
+ '''
+ current_copy = deepcopy(current)
+ desired_copy = deepcopy(desired)
+
+ # get what in desired and not in current
+ desired_diff_list = []
+ for item in desired:
+ if item in current_copy:
+ current_copy.remove(item)
+ else:
+ desired_diff_list.append(item)
+
+ # get what in current but not in desired
+ current_diff_list = []
+ for item in current:
+ if item in desired_copy:
+ desired_copy.remove(item)
+ else:
+ current_diff_list.append(item)
+
+ if desired_diff_list or current_diff_list:
+ # there are changes
+ return desired_diff_list if get_list_diff else desired
+ else:
+ return None
+
    def get_modified_attributes(self, current, desired, get_list_diff=False):
        ''' takes two dicts of attributes and return a dict of attributes that are
            not in the current state
            It is expected that all attributes of interest are listed in current and
            desired.
            :param: current: current attributes in ONTAP
            :param: desired: attributes from playbook
            :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
            :return: dict of attributes to be modified
            :rtype: dict
            :raises TypeError: when desired is not a dict, or when a value pair cannot be compared

            NOTE: depending on the attribute, the caller may need to do a modify or a
            different operation (eg move volume if the modified attribute is an
            aggregate name)
            Sets self.changed when any modified attribute is found.
        '''
        # if the object does not exist, we can't modify it
        modified = {}
        if current is None:
            return modified

        if not isinstance(desired, dict):
            raise TypeError("Expecting dict, got: %s with current: %s" % (desired, current))
        # error out if keys do not match
        self.check_keys(current, desired)

        # collect changed attributes
        for key, value in current.items():
            # if self.netapp_module:
            #     self.netapp_module.rest_api.log_debug('KDV', "%s:%s:%s" % (key, desired.get(key), value))
            if desired.get(key) is not None:
                modified_value = None
                if isinstance(value, list):
                    modified_value = self.compare_lists(value, desired[key], get_list_diff)  # get modified list from current and desired
                elif isinstance(value, dict):
                    # recurse into nested dicts; an empty diff counts as no change
                    modified_value = self.get_modified_attributes(value, desired[key]) or None
                else:
                    try:
                        result = cmp(value, desired[key])
                    except TypeError as exc:
                        raise TypeError("%s, key: %s, value: %s, desired: %s" % (repr(exc), key, repr(value), repr(desired[key])))
                    # if self.netapp_module:
                    #     self.netapp_module.rest_api.log_debug('RESULT', result)
                    if result != 0:
                        modified_value = desired[key]
                if modified_value is not None:
                    modified[key] = modified_value

        if modified:
            self.changed = True
        return modified
+
def is_rename_action(self, source, target):
    """Decide whether a rename operation is required.

    Typical usage:
        source = self.get_object(source_name)
        target = self.get_object(target_name)
        action = is_rename_action(source, target)

    Returns:
        None  - error: neither resource exists, nothing can be renamed.
        True  - source exists and target does not: rename it (self.changed is set).
        False - target already exists (maybe already renamed, or a new
                resource was created after being renamed): do nothing.
    """
    if source is None:
        # no source: either both are missing (error) or the target is
        # already there - in neither case is a rename possible
        return None if target is None else False
    if target is not None:
        # destination already exists, nothing to do
        return False
    # source exists and target does not: a rename is in order
    self.changed = True
    return True
+
@staticmethod
def sanitize_wwn(initiator):
    """Normalize an igroup initiator.

    Initiators may or may not use the WWN format (eg 20:00:00:25:B5:00:20:01).
    When the value looks like a WWN, lowercase it, as ONTAP stores WWNs in
    lowercase; otherwise return the stripped value unchanged.
    """
    cleaned = initiator.strip()
    # eight colon-separated pairs of hex digits (prefix match, as before)
    return cleaned.lower() if re.match(r'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){7}', cleaned) else cleaned
+
def safe_get(self, an_object, key_list, allow_sparse_dict=True):
    """Walk an_object following key_list, one key or index at a time.

    Works on anything supporting indexing: python dicts, lists, and
    NAElement responses.  Missing keys are expected and are controlled
    with allow_sparse_dict.

    Returns the value found once key_list is exhausted.
    Returns None when a key is missing and allow_sparse_dict is True.
    Raises KeyError when a key is missing and allow_sparse_dict is False
    (looking for an exact match).
    Raises TypeError when an intermediate element cannot be indexed,
    unless that element is None and allow_sparse_dict is True.
    """
    if not key_list:
        # key chain exhausted, good!
        return an_object
    remaining = list(key_list)      # preserve the caller's list
    head = remaining.pop(0)
    try:
        next_object = an_object[head]
    except (KeyError, IndexError):
        # key or index not found
        if allow_sparse_dict:
            return None
        raise
    except TypeError:
        # we were expecting a dict or NAElement
        if allow_sparse_dict and an_object is None:
            return None
        raise
    return self.safe_get(next_object, remaining, allow_sparse_dict=allow_sparse_dict)
+
def convert_value(self, value, convert_to):
    """Convert a ZAPI string value to the requested python type.

    Supported targets: None (no conversion), str, int, bool, and
    'bool_online' (True when the string is 'online').
    Returns a (converted_value, error_message) tuple; error_message is
    None on success.  Calls fail_json for an unknown convert_to target.
    """
    if convert_to is None:
        return value, None
    if not isinstance(value, str):
        # ZAPI only ever hands us strings
        return None, ('Unexpected type: %s for %s' % (type(value), str(value)))
    if convert_to is str:
        return value, None
    if convert_to is int:
        try:
            return int(value), None
        except ValueError as exc:
            return None, ('Unexpected value for int: %s, %s' % (str(value), str(exc)))
    if convert_to is bool:
        if value in ('true', 'false'):
            return value == 'true', None
        return None, 'Unexpected value: %s received from ZAPI for boolean attribute' % value
    if convert_to == 'bool_online':
        return value == 'online', None
    self.ansible_module.fail_json(msg='Error: Unexpected value for convert_to: %s' % convert_to)
+
def zapi_get_value(self, na_element, key_list, required=False, default=None, convert_to=None):
    """Read a single value from na_element, walking key_list.

    required: when True, fail if a key in key_list is not found.
    default: used when the value is absent and required is False.
    convert_to: None, int, str, bool, or special 'bool_online' - the
        ZAPI string value is converted to the desired type.

    Errors: fail_json is called for:
    - a key not found with required=True,
    - a format conversion error
    """
    # keep a copy for error reporting, as the list is mutated
    saved_key_list = list(key_list)
    error = None
    try:
        value = self.safe_get(na_element, key_list, allow_sparse_dict=not required)
    except (KeyError, TypeError) as exc:
        value, error = None, exc
    else:
        if value is None:
            value = default
        else:
            value, error = self.convert_value(value, convert_to)
    if error:
        self.ansible_module.fail_json(msg='Error reading %s from %s: %s' % (saved_key_list, na_element.to_string(), error))
    return value
+
def zapi_get_attrs(self, na_element, attr_dict, result):
    """Copy a set of attributes from a ZAPI element into result.

    See na_ontap_volume for an example.
    na_element: xml element as returned by ZAPI.
    attr_dict: maps result keys to kwargs for zapi_get_value:
        key: dict(key_list, required=False, default=None, convert_to=None, omitnone=False)
    When the retrieved value is None: if omitnone is True the key is
    skipped, otherwise None is recorded (backward compatibility).
    result: an existing dictionary, updated in place.

    Errors: fail_json is called for a key not found with required=True,
    or a format conversion error.
    """
    for result_key, spec in attr_dict.items():
        skip_none = spec.pop('omitnone', False)
        attr_value = self.zapi_get_value(na_element, **spec)
        if attr_value is None and skip_none:
            continue
        result[result_key] = attr_value
+
def _filter_out_none_entries_from_dict(self, adict, allow_empty_list_or_dict):
    """Return a copy of adict without keys whose values are None.

    Nested lists and dicts are filtered recursively; empty containers
    are kept only when allow_empty_list_or_dict is True, otherwise they
    are skipped.
    """
    filtered = {}
    for key, value in adict.items():
        if isinstance(value, (list, dict)):
            cleaned = self.filter_out_none_entries(value, allow_empty_list_or_dict)
            # keep empty dict or list only on request
            if cleaned or allow_empty_list_or_dict:
                filtered[key] = cleaned
        elif value is not None:
            # skip None value
            filtered[key] = value
    return filtered
+
def _filter_out_none_entries_from_list(self, alist, allow_empty_list_or_dict):
    """Return a copy of alist without elements whose values are None.

    Nested lists and dicts are filtered recursively; empty containers
    are kept only when allow_empty_list_or_dict is True, otherwise they
    are skipped.
    """
    filtered = []
    for element in alist:
        if isinstance(element, (list, dict)):
            cleaned = self.filter_out_none_entries(element, allow_empty_list_or_dict)
            # keep empty dict or list only on request
            if cleaned or allow_empty_list_or_dict:
                filtered.append(cleaned)
        elif element is not None:
            # skip None value
            filtered.append(element)
    return filtered
+
def filter_out_none_entries(self, list_or_dict, allow_empty_list_or_dict=False):
    """Recursively drop None entries from a dict or list.

    Nested empty dicts/lists are kept only when allow_empty_list_or_dict
    is True.  Raises TypeError for any other input type.
    """
    if isinstance(list_or_dict, list):
        return self._filter_out_none_entries_from_list(list_or_dict, allow_empty_list_or_dict)
    if isinstance(list_or_dict, dict):
        return self._filter_out_none_entries_from_dict(list_or_dict, allow_empty_list_or_dict)
    raise TypeError('unexpected type %s' % type(list_or_dict))
+
@staticmethod
def get_caller(depth):
    """Return the name of the function 'depth' levels up the call stack.

    depth=1 is our caller, depth=2 the caller of our caller, and so on.
    """
    # one more frame in the stack to account for this function itself
    frames = traceback.extract_stack(limit=depth + 1)
    top_frame = frames[0]
    try:
        return top_frame.name
    except AttributeError:
        # python 2.7 frames are plain tuples: (file, line, name, text)
        try:
            return top_frame[2]
        except Exception as exc:  # pylint: disable=broad-except
            return 'Error retrieving function name: %s - %s' % (str(exc), repr(frames))
+
def fail_on_error(self, error, api=None, stack=False, depth=1, previous_errors=None):
    """Report error through fail_json, naming the failing caller.

    depth identifies how far up the call stack the real caller is.
    No-op when error is None.  Raises AttributeError when
    self.ansible_module is not set (a programming error).
    """
    if error is None:
        return
    prefix = 'calling api: %s: ' % api if api is not None else ''
    # depth + 1: one more caller to account for this function
    results = dict(msg='Error in %s: %s%s' % (self.get_caller(depth + 1), prefix, error))
    if stack:
        results['stack'] = traceback.format_stack()
    if previous_errors:
        results['previous_errors'] = ' - '.join(previous_errors)
    if getattr(self, 'ansible_module', None) is not None:
        self.ansible_module.fail_json(**results)
    raise AttributeError('Expecting self.ansible_module to be set when reporting %s' % repr(results))
+
def compare_chmod_value(self, current_permissions, desired_permissions):
    """Compare current unix_permissions to desired unix_permissions.

    desired_permissions may be numeric ('755') or symbolic
    ('rwxr-xr-x', optionally prefixed with a setuid/setgid/sticky
    triplet, eg 'sst' or 's--', for a total of 12 characters).
    Returns True when equivalent, False when different or when
    desired_permissions is not valid.
    """
    if current_permissions is None:
        return False
    if desired_permissions.isdigit():
        return int(current_permissions) == int(desired_permissions)
    # ONTAP will throw error as invalid field if the length is not 9 or 12
    if len(desired_permissions) not in [12, 9]:
        return False
    octal_digits = ''
    if len(desired_permissions) == 12:
        # leading triplet encodes setuid ('s'), setgid ('s') and sticky ('t')
        flags = desired_permissions[:3]
        if flags[0] not in ['s', '-'] or flags[1] not in ['s', '-'] or flags[2] not in ['t', '-']:
            return False
        octal_digits += str(self.char_to_octal(flags))
    # remaining nine characters: rwx triplets for user, group and other
    for start in range(len(desired_permissions) - 9, len(desired_permissions), 3):
        triplet = desired_permissions[start:start + 3]
        if triplet[0] not in ['r', '-'] or triplet[1] not in ['w', '-'] or triplet[2] not in ['x', '-']:
            return False
        octal_digits += str(self.char_to_octal(triplet))
    return int(current_permissions) == int(octal_digits)
+
def char_to_octal(self, chars):
    """Return the octal digit (0-7) for one permission triplet.

    chars is a 3-character group such as 'rwx' or 'ss-'; 's' counts for
    read/write in the special-bits triplet and 't' for execute.
    """
    weights = ((0, ('r', 's'), 4), (1, ('w', 's'), 2), (2, ('x', 't'), 1))
    return sum(weight for position, accepted, weight in weights if chars[position] in accepted)
+
def ignore_missing_vserver_on_delete(self, error, vserver_name=None):
    """Return True when the error only says the vserver is gone and state is absent.

    When a resource is expected to be absent, it's OK if the containing
    vserver is also absent.  Expects self.parameters['vserver'] to be set
    when vserver_name is not passed.  error is an error returned by
    rest_generic.get_xxxx: a str, or a dict with a 'message' key -
    anything else is reported as an internal error.
    """
    if self.parameters.get('state') != 'absent':
        return False
    if vserver_name is None:
        if self.parameters.get('vserver') is None:
            self.ansible_module.fail_json(
                msg='Internal error, vserver name is required, when processing error: %s' % error, exception=traceback.format_exc())
        vserver_name = self.parameters['vserver']
    if isinstance(error, dict):
        if 'message' not in error:
            self.ansible_module.fail_json(
                msg='Internal error, error should contain "message" key, found: %s' % error, exception=traceback.format_exc())
        error = error['message']
    elif not isinstance(error, str):
        self.ansible_module.fail_json(
            msg='Internal error, error should be str or dict, found: %s, %s' % (type(error), error), exception=traceback.format_exc())
    return 'SVM "%s" does not exist.' % vserver_name in error
+
def remove_hal_links(self, records):
    """Recursively strip HAL '_links' entries from a REST response.

    records may be a dict or a list; the structure is modified in place.
    Any other type is left untouched.
    """
    if isinstance(records, list):
        for entry in records:
            self.remove_hal_links(entry)
    elif isinstance(records, dict):
        records.pop('_links', None)
        for entry in records.values():
            self.remove_hal_links(entry)
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py
new file mode 100644
index 000000000..d96f23031
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py
@@ -0,0 +1,180 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2020-2022, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" Support class for NetApp ansible modules
+
+ Provides access to application resources using REST calls
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class RestApplication():
    """Helper methods to manage application and application components"""

    def __init__(self, rest_api, svm_name, app_name):
        self.rest_api = rest_api
        self.svm_name = svm_name
        self.app_name = app_name
        # cached application uuid, resolved lazily by get_application_uuid()
        self.app_uuid = None

    def _set_application_uuid(self):
        """Look up and cache the application uuid using REST application/applications."""
        query = {'svm.name': self.svm_name, 'name': self.app_name}
        record, error = rest_generic.get_one_record(self.rest_api, 'application/applications', query)
        if record is not None and error is None:
            self.app_uuid = record['uuid']
        return None, error

    def get_application_uuid(self):
        """Return (uuid, error); the uuid is cached after the first lookup."""
        if self.app_uuid is not None:
            return self.app_uuid, None
        dummy, error = self._set_application_uuid()
        return self.app_uuid, error

    def get_application_details(self, template=None):
        """Return (record, error) with application details; (None, None) when not found."""
        uuid, error = self.get_application_uuid()
        if error:
            return uuid, error
        if uuid is None:
            # application does not exist
            return None, None
        query = dict(fields='name,%s,statistics' % template) if template else None
        return rest_generic.get_one_record(self.rest_api, 'application/applications/%s' % uuid, query)

    def create_application(self, body):
        """Create the application (eg one or more LUNs with the san template).  Return (response, error)."""
        dummy, error = self.fail_if_uuid('create_application')
        if error is not None:
            return dummy, error
        response, error = rest_generic.post_async(self.rest_api, 'application/applications', body, {'return_records': 'true'})
        if error and 'Unexpected argument' in error and 'exclude_aggregates' in error:
            # older ONTAP releases do not know this option
            error += ' "exclude_aggregates" requires ONTAP 9.9.1 GA or later.'
        return response, error

    def patch_application(self, body):
        """Modify the application (eg add one or more LUNs with the san template).  Return (response, error)."""
        dummy, error = self.fail_if_no_uuid()
        if error is not None:
            return dummy, error
        return rest_generic.patch_async(self.rest_api, 'application/applications', self.app_uuid, body, {'return_records': 'true'})

    def create_application_body(self, template_name, template_body, smart_container=True):
        """Build the POST body for create_application.  Return (body, error)."""
        if not isinstance(smart_container, bool):
            return None, "expecting bool value for smart_container, got: %s" % smart_container
        return {
            'name': self.app_name,
            'svm': {'name': self.svm_name},
            'smart_container': smart_container,
            template_name: template_body
        }, None

    def delete_application(self):
        """Delete the application and forget its uuid.  Return (response, error)."""
        dummy, error = self.fail_if_no_uuid()
        if error is not None:
            return dummy, error
        response, error = rest_generic.delete_async(self.rest_api, 'application/applications', self.app_uuid)
        self.app_uuid = None
        return response, error

    def get_application_components(self):
        """Return (records, error) for all components of the application."""
        dummy, error = self.fail_if_no_uuid()
        if error is not None:
            return dummy, error
        return rest_generic.get_0_or_more_records(self.rest_api, 'application/applications/%s/components' % self.app_uuid)

    def get_application_component_uuid(self):
        """Return (uuid, error) for the component, assuming a single component per application."""
        dummy, error = self.fail_if_no_uuid()
        if error is not None:
            return dummy, error
        record, error = rest_generic.get_one_record(
            self.rest_api, 'application/applications/%s/components' % self.app_uuid, fields='uuid')
        if record is not None and error is None:
            return record['uuid'], None
        return None, error

    def get_application_component_details(self, comp_uuid=None):
        """Return (record, error) with component details; assumes a single component when comp_uuid is None."""
        dummy, error = self.fail_if_no_uuid()
        if error is not None:
            return dummy, error
        if comp_uuid is None:
            # assume a single component
            comp_uuid, error = self.get_application_component_uuid()
            if error:
                return comp_uuid, error
            if comp_uuid is None:
                return None, 'no component for application %s' % self.app_name
        api = 'application/applications/%s/components/%s' % (self.app_uuid, comp_uuid)
        return rest_generic.get_one_record(self.rest_api, api)

    def get_application_component_backing_storage(self):
        """Return (backing_storage, error) for the single component of the application."""
        dummy, error = self.fail_if_no_uuid()
        if error is not None:
            return dummy, error
        response, error = self.get_application_component_details()
        if error or response is None:
            return response, error
        return response['backing_storage'], None

    def fail_if_no_uuid(self):
        """Prevent a logic error: the application uuid must already be set."""
        if self.app_uuid is None:
            return None, 'function should not be called before application uuid is set.'
        return None, None

    def fail_if_uuid(self, fname):
        """Prevent a logic error: the application uuid must not be set yet."""
        if self.app_uuid is not None:
            return None, 'function %s should not be called when application uuid is set: %s.' % (fname, self.app_uuid)
        return None, None
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_generic.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_generic.py
new file mode 100644
index 000000000..4c570f8d8
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_generic.py
@@ -0,0 +1,101 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2021, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" Support functions for NetApp ansible modules
+
+ Provides common processing for responses and errors from REST calls
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
def build_query_with_fields(query, fields):
    """Merge a 'fields' selector into a GET query dict.

    Returns query unchanged (possibly None) when fields is None,
    otherwise the caller's query dict (created if needed) with 'fields' set.
    """
    if fields is None:
        return query
    merged = {} if query is None else query
    merged['fields'] = fields
    return merged
+
+
def build_query_with_timeout(query, timeout):
    """Build the params dict for POST, PATCH, DELETE requests.

    When timeout > 0, return_timeout is added:
    without return_timeout, REST returns immediately with a 202 and a job link
    but the job status is 'running';
    with return_timeout, REST returns quickly with a 200 and a job link
    and the job status is 'success'.
    Returns None when there is nothing to send.

    Fix: an empty (falsy) query dict combined with timeout <= 0 used to
    raise AttributeError, as params stayed None while query was not None.
    """
    params = dict(return_timeout=timeout) if timeout > 0 else None
    if query:
        if params is None:
            params = {}
        params.update(query)
    return params
+
+
def get_one_record(rest_api, api, query=None, fields=None):
    """GET a single record; return (record, error).

    record is None when nothing matched; error is set for REST errors,
    an empty response, or more than one matching record.
    """
    full_query = build_query_with_fields(query, fields)
    response, error = rest_api.get(api, full_query)
    return rrh.check_for_0_or_1_records(api, response, error, full_query)
+
+
def get_0_or_more_records(rest_api, api, query=None, fields=None):
    """GET any number of records; return (records, error).

    records is None when nothing matched.
    """
    full_query = build_query_with_fields(query, fields)
    response, error = rest_api.get(api, full_query)
    return rrh.check_for_0_or_more_records(api, response, error)
+
+
def post_async(rest_api, api, body, query=None, timeout=30, job_timeout=30, headers=None, raw_error=False, files=None):
    """POST and wait for the resulting job, if any.  Return (response, error).

    See delete_async for notes on async vs sync operations and status codes.
    """
    response, error = rest_api.post(api, body=body, params=build_query_with_timeout(query, timeout), headers=headers, files=files)
    # poll between every 5 and 60 seconds, scaling with the job timeout
    increment = min(max(job_timeout / 6, 5), 60)
    return rrh.check_for_error_and_job_results(api, response, error, rest_api, increment=increment, timeout=job_timeout, raw_error=raw_error)
+
+
def patch_async(rest_api, api, uuid_or_name, body, query=None, timeout=30, job_timeout=30, headers=None, raw_error=False, files=None):
    """PATCH and wait for the resulting job, if any.  Return (response, error).

    uuid_or_name may be None: the cluster endpoint does not use a uuid or
    name, and query based PATCH (restit) does not use a UUID either.
    """
    endpoint = api if uuid_or_name is None else '%s/%s' % (api, uuid_or_name)
    response, error = rest_api.patch(endpoint, body=body, params=build_query_with_timeout(query, timeout), headers=headers, files=files)
    # poll between every 5 and 60 seconds, scaling with the job timeout
    increment = min(max(job_timeout / 6, 5), 60)
    return rrh.check_for_error_and_job_results(endpoint, response, error, rest_api, increment=increment, timeout=job_timeout, raw_error=raw_error)
+
+
def delete_async(rest_api, api, uuid, query=None, body=None, timeout=30, job_timeout=30, headers=None, raw_error=False):
    """DELETE and wait for the resulting job, if any.  Return (response, error).

    uuid may be None: query based DELETE (restit) does not use a UUID.
    """
    endpoint = api if uuid is None else '%s/%s' % (api, uuid)
    response, error = rest_api.delete(endpoint, body=body, params=build_query_with_timeout(query, timeout), headers=headers)
    # poll between every 5 and 60 seconds, scaling with the job timeout
    increment = min(max(job_timeout / 6, 5), 60)
    return rrh.check_for_error_and_job_results(endpoint, response, error, rest_api, increment=increment, timeout=job_timeout, raw_error=raw_error)
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_owning_resource.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_owning_resource.py
new file mode 100644
index 000000000..597c02d50
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_owning_resource.py
@@ -0,0 +1,26 @@
+""" Support functions for NetApp ansible modules
+ Provides common processing for responses and errors from REST calls
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
def get_export_policy_id(rest_api, policy_name, svm_name, module):
    """Return the id of export policy policy_name on SVM svm_name, or None when absent.

    Calls module.fail_json on a REST error.
    Fix: the underlying REST error is now included in the failure message
    instead of being silently dropped.
    """
    api = 'protocols/nfs/export-policies'
    query = {'name': policy_name, 'svm.name': svm_name}
    record, error = rest_generic.get_one_record(rest_api, api, query)
    if error:
        module.fail_json(msg='Could not find export policy %s on SVM %s: %s' % (policy_name, svm_name, error))
    return record['id'] if record else None
+
+
def get_volume_uuid(rest_api, volume_name, svm_name, module):
    """Return the uuid of volume volume_name on SVM svm_name, or None when absent.

    Calls module.fail_json on a REST error.
    Fix: the underlying REST error is now included in the failure message
    instead of being silently dropped.
    """
    api = 'storage/volumes'
    query = {'name': volume_name, 'svm.name': svm_name}
    record, error = rest_generic.get_one_record(rest_api, api, query)
    if error:
        module.fail_json(msg='Could not find volume %s on SVM %s: %s' % (volume_name, svm_name, error))
    return record['uuid'] if record else None
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py
new file mode 100644
index 000000000..59e04b3b3
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py
@@ -0,0 +1,137 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2020, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" Support functions for NetApp ansible modules
+
+ Provides common processing for responses and errors from REST calls
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
def api_error(api, error):
    """Return a formatted message when error is set, None otherwise."""
    if error is None:
        return None
    return "calling: %s: got %s." % (api, error)
+
+
def no_response_error(api, response):
    """Return a formatted message for a missing or empty response."""
    return "calling: %s: no response %s." % (api, repr(response))
+
+
def job_error(response, error):
    """Return a formatted message for a job that reported an error."""
    return "job reported error: %s, received %s." % (error, repr(response))
+
+
def unexpected_response_error(api, response, query=None):
    """Return (response, message) when the response does not match expectations."""
    details = "calling: %s: unexpected response %s." % (api, repr(response))
    suffix = " for query: %s" % repr(query) if query else ""
    return response, details + suffix
+
+
def get_num_records(response):
    """Return the record count for a REST response.

    num_records is not always present: fall back to counting 'records',
    then assume a bare single-object response counts as 1.
    """
    if 'num_records' in response:
        return response['num_records']
    if 'records' in response:
        return len(response['records'])
    return 1
+
+
def check_for_0_or_1_records(api, response, error, query=None):
    """Validate a response expected to hold at most one record.

    Returns (None, None) when nothing matched, (record, None) for exactly
    one record, and (None/response, error) for a REST error, an empty
    response, or more than one record.
    """
    if error:
        return (None, api_error(api, error)) if api else (None, error)
    if not response:
        return None, no_response_error(api, response)
    count = get_num_records(response)
    if count == 0:
        # not found
        return None, None
    if count != 1:
        return unexpected_response_error(api, response, query)
    record = response['records'][0] if 'records' in response else response
    return record, None
+
+
def check_for_0_or_more_records(api, response, error):
    """Validate a response that may hold any number of records.

    Returns (None, None) when nothing matched, (records, None) otherwise,
    and (None, error) for a REST error, an empty response, or a response
    without a 'records' key.
    """
    if error:
        return (None, api_error(api, error)) if api else (None, error)
    if not response:
        return None, no_response_error(api, response)
    if get_num_records(response) == 0:
        # not found
        return None, None
    if 'records' in response:
        return response['records'], None
    missing = 'No "records" key in %s' % response
    return (None, api_error(api, missing)) if api else (None, missing)
+
+
def check_for_error_and_job_results(api, response, error, rest_api, **kwargs):
    """Report the first error if present, otherwise wait on the job and collect its result.

    REST may answer with a plain response (synchronous call), a job
    response (asynchronous call), or both when 'return_timeout' > 0.
    When using a query instead of a UUID (restit only), REST returns
    'jobs' (a list) rather than a single 'job'.
    Pass raw_error=True to skip the error message formatting.
    """
    format_error = not kwargs.pop('raw_error', False)
    if error:
        if format_error:
            error = api_error(api, error)
        return response, error
    if not isinstance(response, dict):
        return response, error
    job = None
    if 'job' in response:
        job = response['job']
    elif 'jobs' in response:
        if response['num_records'] > 1:
            error = "multiple jobs in progress, can't check status"
        else:
            job = response['jobs'][0]
    if job:
        job_response, error = rest_api.wait_on_job(job, **kwargs)
        if error:
            if format_error:
                error = job_error(response, error)
        else:
            response['job_response'] = job_response
    return response, error
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_user.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_user.py
new file mode 100644
index 000000000..b7c2e66a2
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_user.py
@@ -0,0 +1,49 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2021, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" Support functions for NetApp ansible modules
+
+ Provides common processing for responses and errors from REST calls
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
def get_users(rest_api, parameters, fields=None):
    """Return a (records, error) tuple for user accounts matching the query.

    parameters is a dict of REST query filters; fields, when provided,
    selects which attributes are returned for each record.
    """
    api = 'security/accounts'
    query = dict(parameters)
    if fields is not None:
        query['fields'] = fields
    response, error = rest_api.get(api, query)
    return rrh.check_for_0_or_more_records(api, response, error)
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_volume.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_volume.py
new file mode 100644
index 000000000..10af36754
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_volume.py
@@ -0,0 +1,61 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2020, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" Support functions for NetApp ansible modules
+
+ Provides common processing for responses and errors from REST calls
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
def get_volumes(rest_api, vserver=None, name=None):
    """Return a (records, error) tuple for volumes, optionally filtered by vserver and/or name."""
    api = 'storage/volumes'
    filters = (('svm.name', vserver), ('name', name))
    query = {key: value for key, value in filters if value is not None}
    # an empty query is passed as None, matching every volume
    return rest_generic.get_0_or_more_records(rest_api, api, query or None)
+
+
def get_volume(rest_api, vserver, name, fields=None):
    """Return a (record, error) tuple for the volume named name in the given vserver."""
    api = 'storage/volumes'
    query = {'name': name, 'svm.name': vserver}
    return rest_generic.get_one_record(rest_api, api, query, fields=fields)
+
+
def patch_volume(rest_api, uuid, body, query=None, job_timeout=120):
    """Modify the volume identified by uuid, waiting up to job_timeout seconds for the async job."""
    return rest_generic.patch_async(rest_api, 'storage/volumes', uuid, body, query=query, job_timeout=job_timeout)
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_vserver.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_vserver.py
new file mode 100644
index 000000000..cbdfdaef9
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_vserver.py
@@ -0,0 +1,61 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2021, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" Support functions for NetApp ansible modules
+
+ Provides common processing for responses and errors from REST calls
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.netapp.ontap.plugins.module_utils.rest_generic import get_one_record
+
+
def get_vserver(rest_api, name, fields=None):
    """Return a (record, error) tuple for the SVM matching name.

    fields, when provided, selects which attributes are returned.
    """
    query = {'name': name}
    if fields is not None:
        query['fields'] = fields
    return get_one_record(rest_api, 'svm/svms', query)
+
+
def get_vserver_uuid(rest_api, name, module=None, error_on_none=False):
    """Return a (uuid, error) tuple for the named vserver.

    When module is set and an error is found, fail the module and exit.
    When error_on_none is set, force an error if the vserver is not found.
    """
    record, error = get_vserver(rest_api, name, 'uuid')
    if error:
        if module:
            module.fail_json(msg="Error fetching vserver %s: %s" % (name, error))
        return None, error
    if record is None:
        if error_on_none:
            error = "vserver %s does not exist or is not a data vserver." % name
            if module:
                module.fail_json(msg="Error %s" % error)
        return None, error
    return record['uuid'], None
diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py b/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py
new file mode 100644
index 000000000..71a3f2496
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py
@@ -0,0 +1,133 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2020, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+''' Support class for NetApp ansible modules
+
+ Provides access to SVM (vserver) resources using ZAPI calls
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
def get_vserver(svm_cx, vserver_name):
    """
    Return vserver information.

    :param svm_cx: ZAPI connection used to invoke the request.
    :param vserver_name: name of the vserver to look up.
    :return:
        vserver object if vserver found
        None if vserver is not found
    :rtype: object/None
    """
    # build a vserver-get-iter request filtered on the vserver name
    vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
    query_details = netapp_utils.zapi.NaElement.create_node_with_children(
        'vserver-info', **{'vserver-name': vserver_name})

    query = netapp_utils.zapi.NaElement('query')
    query.add_child_elem(query_details)
    vserver_info.add_child_elem(query)

    result = svm_cx.invoke_successfully(vserver_info, enable_tunneling=False)
    vserver_details = None
    if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
        # note: vserver_info is rebound from the request element to the reply element here
        attributes_list = result.get_child_by_name('attributes-list')
        vserver_info = attributes_list.get_child_by_name('vserver-info')
        aggr_list = []
        # vserver aggr-list can be empty by default
        get_list = vserver_info.get_child_by_name('aggr-list')
        if get_list is not None:
            aggregates = get_list.get_children()
            aggr_list.extend(aggr.get_content() for aggr in aggregates)
        protocols = []
        # allowed-protocols is not empty for data SVM, but is for node SVM
        allowed_protocols = vserver_info.get_child_by_name('allowed-protocols')
        if allowed_protocols is not None:
            get_protocols = allowed_protocols.get_children()
            protocols.extend(protocol.get_content() for protocol in get_protocols)
        # flatten the ZAPI reply into a plain dict keyed by module parameter names
        vserver_details = {'name': vserver_info.get_child_content('vserver-name'),
                           'root_volume': vserver_info.get_child_content('root-volume'),
                           'root_volume_aggregate': vserver_info.get_child_content('root-volume-aggregate'),
                           'root_volume_security_style': vserver_info.get_child_content('root-volume-security-style'),
                           'subtype': vserver_info.get_child_content('vserver-subtype'),
                           'aggr_list': aggr_list,
                           'language': vserver_info.get_child_content('language'),
                           'quota_policy': vserver_info.get_child_content('quota-policy'),
                           'snapshot_policy': vserver_info.get_child_content('snapshot-policy'),
                           'allowed_protocols': protocols,
                           'ipspace': vserver_info.get_child_content('ipspace'),
                           'comment': vserver_info.get_child_content('comment'),
                           'max_volumes': vserver_info.get_child_content('max-volumes')}

    return vserver_details
+
+
def modify_vserver(svm_cx, module, name, modify, parameters=None):
    '''
    Modify vserver.
    :param svm_cx: ZAPI connection used to invoke the request.
    :param module: Ansible module, used to report failures.
    :param name: vserver name
    :param modify: list of modify attributes
    :param parameters: customer original inputs
        modify only contains the difference between the customer inputs and current
        for some attributes, it may be safer to apply the original inputs
    '''
    if parameters is None:
        parameters = modify

    # attributes carried as a single child element: module key -> ZAPI key
    scalar_zapi_keys = {
        'comment': 'comment',
        'language': 'language',
        'quota_policy': 'quota-policy',
        'snapshot_policy': 'snapshot-policy',
        'max_volumes': 'max-volumes',
    }
    # attributes carried as a list: module key -> (container element, item element)
    list_zapi_keys = {
        'allowed_protocols': ('allowed-protocols', 'protocol'),
        'aggr_list': ('aggr-list', 'aggr-name'),
    }

    vserver_modify = netapp_utils.zapi.NaElement('vserver-modify')
    vserver_modify.add_new_child('vserver-name', name)
    for attribute in modify:
        if attribute in scalar_zapi_keys:
            vserver_modify.add_new_child(scalar_zapi_keys[attribute], parameters[attribute])
        elif attribute in list_zapi_keys:
            container_name, item_name = list_zapi_keys[attribute]
            container = netapp_utils.zapi.NaElement(container_name)
            for item in parameters[attribute]:
                container.add_new_child(item_name, item)
            vserver_modify.add_child_elem(container)
    try:
        svm_cx.invoke_successfully(vserver_modify, enable_tunneling=False)
    except netapp_utils.zapi.NaApiError as exc:
        module.fail_json(msg='Error modifying SVM %s: %s' % (name, to_native(exc)),
                         exception=traceback.format_exc())
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py
new file mode 100644
index 000000000..d1ca57250
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+
+# (c) 2020-2022, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_active_directory
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP configure active directory
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 20.9.0
+description:
+ - Configure Active Directory.
+ - REST requires ONTAP 9.12.1 or later.
+options:
+ state:
+ description:
+ - Whether the Active Directory should exist or not
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ account_name:
+ description:
+ - Active Directory account NetBIOS name.
+ required: true
+ type: str
+
+ admin_password:
+ description:
+ - Administrator password required for Active Directory account creation.
+ required: true
+ type: str
+
+ admin_username:
+ description:
+ - Administrator username required for Active Directory account creation.
+ required: true
+ type: str
+
+ domain:
+ description:
+ - Fully qualified domain name.
+ type: str
+ aliases: ['fqdn']
+
+ force_account_overwrite:
+ description:
+ - If true and a machine account with the same name as specified in 'account-name' exists in Active Directory, it will be overwritten and reused.
+ type: bool
+
+ organizational_unit:
+ description:
+ - Organizational unit under which the Active Directory account will be created.
+ type: str
+
+notes:
+ - Supports check_mode.
+ - supports ZAPI and REST. REST requires ONTAP 9.12.1 or later.
+'''
+EXAMPLES = """
+-
+ name: Ontap test
+ hosts: localhost
+ collections:
+ - netapp.ontap
+ tasks:
+ - name: Create active directory account.
+ netapp.ontap.na_ontap_active_directory:
+ hostname: 10.193.78.219
+ username: admin
+ password: netapp1!
+ https: True
+ validate_certs: False
+ vserver: laurentncluster-1
+ state: present
+ account_name: carchi
+ admin_password: password
+ admin_username: carchi
+ domain: addomain.com
+
+ - name: Modify domain name.
+ netapp.ontap.na_ontap_active_directory:
+ hostname: 10.193.78.219
+ username: admin
+ password: netapp1!
+ https: True
+ validate_certs: False
+ vserver: laurentncluster-1
+ state: present
+ account_name: carchi
+ admin_password: password
+ admin_username: carchi
+ domain: addomain_new.com
+ force_account_overwrite: True
+
+ - name: Delete active directory account.
+ netapp.ontap.na_ontap_active_directory:
+ hostname: 10.193.78.219
+ username: admin
+ password: netapp1!
+ https: True
+ validate_certs: False
+ vserver: laurentncluster-1
+ state: absent
+ account_name: carchi
+ admin_password: password
+ admin_username: carchi
+ domain: addomain.com
+"""
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapActiveDirectory:
    """Create, delete or modify an Active Directory account for a vserver.

    Uses REST when the cluster supports it (ONTAP 9.12.1 or later for these
    APIs), otherwise falls back to ZAPI.
    """

    def __init__(self):
        """Set up module arguments, select REST vs ZAPI, and open a ZAPI connection when needed."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            vserver=dict(required=True, type='str'),
            state=dict(choices=['present', 'absent'], default='present'),
            account_name=dict(required=True, type='str'),
            admin_password=dict(required=True, type='str', no_log=True),
            admin_username=dict(required=True, type='str'),
            domain=dict(type="str", default=None, aliases=['fqdn']),
            force_account_overwrite=dict(type="bool", default=None),
            organizational_unit=dict(type="str", default=None)
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # set by get_active_directory_rest(); required by the REST modify/delete paths
        self.svm_uuid = None

        # these REST APIs require 9.12.1; fall back to ZAPI on older clusters
        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 12, 1):
            msg = 'REST requires ONTAP 9.12.1 or later for active directory APIs'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_active_directory(self):
        """Return the current Active Directory configuration as a dict, or None if absent.

        ZAPI path; delegates to get_active_directory_rest() when REST is in use.
        """
        if self.use_rest:
            return self.get_active_directory_rest()
        active_directory_iter = netapp_utils.zapi.NaElement('active-directory-account-get-iter')
        active_directory_info = netapp_utils.zapi.NaElement('active-directory-account-config')
        active_directory_info.add_new_child('account-name', self.parameters['account_name'])
        active_directory_info.add_new_child('vserver', self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(active_directory_info)
        active_directory_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(active_directory_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error searching for Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())
        record = {}
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            account_info = result.get_child_by_name('attributes-list').get_child_by_name('active-directory-account-config')
            # only keep attributes that are actually present in the reply
            for zapi_key, key in (('account-name', 'account_name'), ('domain', 'domain'), ('organizational-unit', 'organizational_unit')):
                value = account_info.get_child_content(zapi_key)
                if value is not None:
                    record[key] = value
        # normalize case, using user inputs
        for key, value in record.items():
            if key in self.parameters and self.parameters[key].lower() == value.lower():
                record[key] = self.parameters[key]
        return record or None

    def create_active_directory(self):
        """Create the Active Directory account (ZAPI path, or delegate to REST)."""
        if self.use_rest:
            return self.create_active_directory_rest()
        active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-create')
        active_directory_obj.add_new_child('account-name', self.parameters['account_name'])
        active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
        active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
        if self.parameters.get('domain'):
            active_directory_obj.add_new_child('domain', self.parameters['domain'])
        if self.parameters.get('force_account_overwrite'):
            active_directory_obj.add_new_child('force-account-overwrite', str(self.parameters['force_account_overwrite']))
        if self.parameters.get('organizational_unit'):
            active_directory_obj.add_new_child('organizational-unit', self.parameters['organizational_unit'])
        try:
            self.server.invoke_successfully(active_directory_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating vserver Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)))

    def delete_active_directory(self):
        """Delete the Active Directory account (ZAPI path, or delegate to REST).

        NOTE(review): the ZAPI request only carries the admin credentials, not
        account-name — presumably the vserver-scoped connection identifies the
        account; confirm against the ZAPI documentation.
        """
        if self.use_rest:
            return self.delete_active_directory_rest()
        active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-delete')
        active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
        active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
        try:
            self.server.invoke_successfully(active_directory_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting vserver Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)))

    def modify_active_directory(self):
        """Modify the Active Directory account (ZAPI path, or delegate to REST)."""
        if self.use_rest:
            return self.modify_active_directory_rest()
        active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-modify')
        active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
        active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
        if self.parameters.get('domain'):
            active_directory_obj.add_new_child('domain', self.parameters['domain'])
        if self.parameters.get('force_account_overwrite'):
            active_directory_obj.add_new_child('force-account-overwrite', str(self.parameters['force_account_overwrite']))
        try:
            self.server.invoke_successfully(active_directory_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying vserver Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)))

    def get_active_directory_rest(self):
        """Return the current Active Directory configuration using REST, or None if absent.

        Also caches the owning SVM uuid in self.svm_uuid for later modify/delete calls.
        """
        api = 'protocols/active-directory'
        query = {
            'name': self.parameters['account_name'],
            'svm.name': self.parameters['vserver'],
            'fields': 'fqdn,name,organizational_unit'
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            self.module.fail_json(msg='Error searching for Active Directory %s: %s' % (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())
        if record:
            self.svm_uuid = record['svm']['uuid']
            # map REST field names back to the module's parameter names
            return {
                'account_name': record.get('name'),
                'domain': record.get('fqdn'),
                'organizational_unit': record.get('organizational_unit')
            }
        return None

    def create_active_directory_rest(self):
        """Create the Active Directory account using REST."""
        api = 'protocols/active-directory'
        body = {
            'svm.name': self.parameters['vserver'],
            'name': self.parameters['account_name'],
            'username': self.parameters['admin_username'],
            'password': self.parameters['admin_password']
        }
        if self.parameters.get('domain'):
            body['fqdn'] = self.parameters['domain']
        if self.parameters.get('force_account_overwrite'):
            body['force_account_overwrite'] = self.parameters['force_account_overwrite']
        if self.parameters.get('organizational_unit'):
            body['organizational_unit'] = self.parameters['organizational_unit']
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg='Error creating vserver Active Directory %s: %s' % (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_active_directory_rest(self):
        """Modify the Active Directory account using REST; requires self.svm_uuid to be set."""
        api = 'protocols/active-directory'
        body = {'username': self.parameters['admin_username'], 'password': self.parameters['admin_password']}
        if self.parameters.get('domain'):
            body['fqdn'] = self.parameters['domain']
        if self.parameters.get('force_account_overwrite'):
            body['force_account_overwrite'] = self.parameters['force_account_overwrite']
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body)
        if error:
            self.module.fail_json(msg='Error modifying vserver Active Directory %s: %s' % (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_active_directory_rest(self):
        """Delete the Active Directory account using REST; requires self.svm_uuid to be set."""
        dummy, error = rest_generic.delete_async(self.rest_api, 'protocols/active-directory', self.svm_uuid,
                                                 body={'username': self.parameters['admin_username'], 'password': self.parameters['admin_password']})
        if error:
            self.module.fail_json(msg='Error deleting vserver Active Directory %s: %s' % (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Compare current and desired state, then create/modify/delete as needed (idempotent)."""
        current = self.get_active_directory()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = None
        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            # the module rejects changes to organizational_unit on an existing account
            if modify and 'organizational_unit' in modify:
                self.module.fail_json(msg='Error: organizational_unit cannot be modified; found %s.' % modify)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_active_directory()
            elif cd_action == 'delete':
                self.delete_active_directory()
            elif modify:
                self.modify_active_directory()
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """
    Execute action from playbook
    """
    NetAppOntapActiveDirectory().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory_domain_controllers.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory_domain_controllers.py
new file mode 100644
index 000000000..844118344
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory_domain_controllers.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_active_directory_domain_controllers
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP configure active directory preferred domain controllers
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 22.7.0
+description:
+ - Configure Active Directory preferred Domain Controllers.
+options:
+ state:
+ description:
+ - Whether the Active Directory preferred Domain Controllers configuration should exist or not
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ fqdn:
+ description:
+ - Fully Qualified Domain Name.
+ required: true
+ type: str
+
+ server_ip:
+ description:
+ - IP address of the preferred DC. The address can be either an IPv4 or an IPv6 address.
+ required: true
+ type: str
+
+ skip_config_validation:
+ description:
+ - Skip the validation of the specified preferred DC configuration.
+ type: bool
+
+notes:
+ - This module requires ONTAP 9.12.1 or later for REST API.
+ - CLI support is available for other lower ONTAP versions.
+'''
+EXAMPLES = """
+ - name: Create active directory preferred domain controllers
+ netapp.ontap.na_ontap_active_directory_domain_controllers:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+ vserver: ansible
+ state: present
+ fqdn: test.com
+ server_ip: 10.10.10.10
+
+ - name: Delete active directory preferred domain controllers
+ netapp.ontap.na_ontap_active_directory_domain_controllers:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+ vserver: ansible
+ state: absent
+ fqdn: test.com
+ server_ip: 10.10.10.10
+"""
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver
+
+
+class NetAppOntapActiveDirectoryDC:
+ """
+ Create or delete Active Directory preferred domain controllers
+ """
    def __init__(self):
        """
        Initialize the Ontap ActiveDirectoryDC class
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            fqdn=dict(required=True, type='str'),
            server_ip=dict(required=True, type='str'),
            skip_config_validation=dict(required=False, type='bool'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        # set up variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # this module is REST-only: fail outright below 9.6 rather than fall back to ZAPI
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_active_directory_domain_controllers', 9, 6)
        # SVM uuid used to build the 9.12+ endpoint path; set elsewhere before use
        self.svm_uuid = None
+
    def get_active_directory_preferred_domain_controllers_rest(self):
        """
        Retrieves the Active Directory preferred DC configuration of an SVM.

        Returns the matching record (normalized to server_ip/fqdn keys when the
        CLI passthrough is used), or None when no configuration exists.
        """
        if self.rest_api.meets_rest_minimum_version(True, 9, 12, 0):
            # dedicated REST endpoint, available from ONTAP 9.12
            api = "protocols/active-directory/%s/preferred-domain-controllers" % (self.svm_uuid)
            query = {
                'svm.name': self.parameters['vserver'],
                'fqdn': self.parameters['fqdn'],
                'server_ip': self.parameters['server_ip'],
                'fields': 'server_ip,fqdn'
            }
            record, error = rest_generic.get_one_record(self.rest_api, api, query)
            if error:
                self.module.fail_json(msg="Error on fetching Active Directory preferred DC configuration of an SVM: %s" % error)
            if record:
                return record
        else:
            # older ONTAP: use the private CLI passthrough endpoint instead
            api = 'private/cli/vserver/active-directory/preferred-dc'
            query = {
                'vserver': self.parameters['vserver'],
                'domain': self.parameters['fqdn'],
                'preferred_dc': self.parameters['server_ip'],
                'fields': 'domain,preferred-dc'
            }
            record, error = rest_generic.get_one_record(self.rest_api, api, query)
            if error:
                self.module.fail_json(msg="Error on fetching Active Directory preferred DC configuration of an SVM using cli: %s" % error)
            if record:
                # normalize CLI field names to the REST names used elsewhere in the module
                return {
                    'server_ip': self.na_helper.safe_get(record, ['preferred_dc']),
                    'fqdn': self.na_helper.safe_get(record, ['domain'])
                }
        return None
+
+ def create_active_directory_preferred_domain_controllers_rest(self):
+ """
+ Adds the Active Directory preferred DC configuration to an SVM.
+ """
+ query = {}
+ if self.rest_api.meets_rest_minimum_version(True, 9, 12, 0):
+ api = "protocols/active-directory/%s/preferred-domain-controllers" % (self.svm_uuid)
+ body = {
+ 'fqdn': self.parameters['fqdn'],
+ 'server_ip': self.parameters['server_ip']
+ }
+ if 'skip_config_validation' in self.parameters:
+ query['skip_config_validation'] = self.parameters['skip_config_validation']
+ else:
+ api = "private/cli/vserver/active-directory/preferred-dc/add"
+ body = {
+ "vserver": self.parameters['vserver'],
+ "domain": self.parameters['fqdn'],
+ "preferred_dc": [self.parameters['server_ip']]
+ }
+ if 'skip_config_validation' in self.parameters:
+ query['skip_config_validation'] = self.parameters['skip_config_validation']
+ dummy, error = rest_generic.post_async(self.rest_api, api, body, query)
+ if error:
+ self.module.fail_json(msg="Error on adding Active Directory preferred DC configuration to an SVM: %s" % error)
+
+ def delete_active_directory_preferred_domain_controllers_rest(self):
+ """
+ Removes the Active Directory preferred DC configuration from an SVM.
+ """
+ if self.rest_api.meets_rest_minimum_version(True, 9, 12, 0):
+ api = "protocols/active-directory/%s/preferred-domain-controllers/%s/%s" % (self.svm_uuid, self.parameters['fqdn'], self.parameters['server_ip'])
+ record, error = rest_generic.delete_async(self.rest_api, api, None)
+ else:
+ api = "private/cli/vserver/active-directory/preferred-dc/remove"
+ body = {
+ "vserver": self.parameters['vserver'],
+ "domain": self.parameters['fqdn'],
+ "preferred_dc": [self.parameters['server_ip']]
+ }
+ dummy, error = rest_generic.delete_async(self.rest_api, api, None, body)
+ if error:
+ self.module.fail_json(msg="Error on deleting Active Directory preferred DC configuration of an SVM: %s" % error)
+
+ def apply(self):
+ self.svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True)
+ current = self.get_active_directory_preferred_domain_controllers_rest()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_active_directory_preferred_domain_controllers_rest()
+ elif cd_action == 'delete':
+ self.delete_active_directory_preferred_domain_controllers_rest()
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+ self.module.exit_json(**result)
+
+
+def main():
+    """
+    Creates the NetApp ONTAP Active Directory preferred DC object and runs the correct play task
+    """
+    obj = NetAppOntapActiveDirectoryDC()
+    obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py
new file mode 100644
index 000000000..560e81069
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py
@@ -0,0 +1,1121 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_aggregate
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_aggregate
+short_description: NetApp ONTAP manage aggregates.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create, delete, or manage aggregates on ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified aggregate should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ service_state:
+ description:
+ - Whether the specified aggregate should be enabled or disabled. Creates aggregate if it doesn't exist.
+ - Supported from 9.11.1 or later in REST.
+ choices: ['online', 'offline']
+ type: str
+
+ name:
+ description:
+ - The name of the aggregate to manage.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the aggregate to be renamed.
+ type: str
+ version_added: 2.7.0
+
+ nodes:
+ description:
+ - Node(s) for the aggregate to be created on. If no node specified, mgmt lif home will be used.
+ - ZAPI only - if multiple nodes specified an aggr stripe will be made.
+ - With REST, only one node can be specified. If disk_count is present, node name is required.
+ type: list
+ elements: str
+
+ disk_type:
+ description:
+ - Type of disk to use to build aggregate.
+ - Not supported with REST - see C(disk_class).
+ - SSD-NVM, SSD-CAP were added with ONTAP 9.6.
+ - VMLUN was added with ONTAP 9.9.
+ choices: ['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD-CAP', 'SSD-NVM', 'VMDISK', 'VMLUN', 'VMLUN-SSD']
+ type: str
+ version_added: 2.7.0
+
+ disk_class:
+ description:
+ - Class of disk to use to build aggregate.
+ - C(capacity_flash) is listed in swagger, but rejected as invalid by ONTAP.
+ choices: ['capacity', 'performance', 'archive', 'solid_state', 'array', 'virtual', 'data_center', 'capacity_flash']
+ type: str
+ version_added: 21.16.0
+
+ disk_count:
+ description:
+ - Number of disks to place into the aggregate, including parity disks.
+ - The disks in this newly-created aggregate come from the spare disk pool.
+ - The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided.
+ - Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1].
+ - Required when C(state=present).
+ - Modifiable only if specified disk_count is larger than current disk_count.
+ - Cannot create raidgroup with 1 disk when using raid type raid4.
+ - If the disk_count % raid_size == 1, only disk_count/raid_size * raid_size will be added.
+ - If disk_count is 6, raid_type is raid4, raid_size 4, all 6 disks will be added.
+ - If disk_count is 5, raid_type is raid4, raid_size 4, 5/4 * 4 = 4 will be added. 1 will not be added.
+ - With REST, C(nodes) is required if C(disk_count) is present.
+ type: int
+
+ disk_size:
+ description:
+ - Disk size to use in 4K block size. Disks within 10% of specified size will be used.
+ - With REST, this is converted to bytes using 4096. Use C(disk_size_with_unit) to skip the conversion.
+ type: int
+ version_added: 2.7.0
+
+ disk_size_with_unit:
+ description:
+ - Disk size to use in the specified unit.
+ - It is a positive integer number followed by unit of T/G/M/K. For example, 72G, 1T and 32M.
+ - Or the unit can be omitted for bytes (REST also accepts B).
+ - This option is ignored if a specific list of disks is specified through the "disks" parameter.
+ - You must only use one of either "disk-size" or "disk-size-with-unit" parameters.
+ - With REST, this is converted to bytes, assuming K=1024.
+ type: str
+
+ raid_size:
+ description:
+ - Sets the maximum number of drives per raid group.
+ type: int
+ version_added: 2.7.0
+
+ raid_type:
+ description:
+ - Specifies the type of RAID groups to use in the new aggregate.
+ - raid_0 is only available on ONTAP Select.
+ choices: ['raid4', 'raid_dp', 'raid_tec', 'raid_0']
+ type: str
+ version_added: 2.7.0
+
+ unmount_volumes:
+ description:
+ - If set to "true", this option specifies that all of the volumes hosted by the given aggregate are to be unmounted
+ before the offline operation is executed.
+ - By default, the system will reject any attempt to offline an aggregate that hosts one or more online volumes.
+ - Not supported with REST, by default REST unmount volumes when trying to offline aggregate.
+ type: bool
+
+ disks:
+ description:
+ - Specific list of disks to use for the new aggregate.
+ - To create a "mirrored" aggregate with a specific list of disks, both 'disks' and 'mirror_disks' options must be supplied.
+ Additionally, the same number of disks must be supplied in both lists.
+ - Not supported with REST.
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ is_mirrored:
+ description:
+ - Specifies that the new aggregate be mirrored (have two plexes).
+ - If set to true, then the indicated disks will be split across the two plexes. By default, the new aggregate will not be mirrored.
+ - This option cannot be used when a specific list of disks is supplied with either the 'disks' or 'mirror_disks' options.
+ type: bool
+ version_added: 2.8.0
+
+ mirror_disks:
+ description:
+ - List of mirror disks to use. It must contain the same number of disks specified in 'disks'.
+ - Not supported with REST.
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ spare_pool:
+ description:
+ - Specifies the spare pool from which to select spare disks to use in creation of a new aggregate.
+ - Not supported with REST.
+ choices: ['Pool0', 'Pool1']
+ type: str
+ version_added: 2.8.0
+
+ wait_for_online:
+ description:
+ - Set this parameter to 'true' for synchronous execution during create (wait until aggregate status is online).
+ - Set this parameter to 'false' for asynchronous execution.
+ - For asynchronous, execution exits as soon as the request is sent, without checking aggregate status.
+ - Ignored with REST (always wait).
+ type: bool
+ default: false
+ version_added: 2.8.0
+
+ time_out:
+ description:
+ - time to wait for aggregate creation in seconds.
+ - default is set to 100 seconds.
+ type: int
+ default: 100
+ version_added: 2.8.0
+
+ object_store_name:
+ description:
+ - Name of the object store configuration attached to the aggregate.
+ type: str
+ version_added: 2.9.0
+
+ allow_flexgroups:
+ description:
+ - This optional parameter allows attaching object store to an aggregate containing FlexGroup constituents. The default value is false.
+ - Mixing FabricPools and non-FabricPools within a FlexGroup is not recommended.
+ - All aggregates hosting constituents of a FlexGroup should be attached to the object store.
+ type: bool
+ version_added: 22.3.0
+
+ snaplock_type:
+ description:
+ - Type of snaplock for the aggregate being created.
+ choices: ['compliance', 'enterprise', 'non_snaplock']
+ type: str
+ version_added: 20.1.0
+
+ ignore_pool_checks:
+ description:
+ - only valid when I(disks) option is used.
+ - disks in a plex should belong to the same spare pool, and mirror disks to another spare pool.
+ - when set to true, these checks are ignored.
+ - Ignored with REST as I(disks) is not supported.
+ type: bool
+ version_added: 20.8.0
+
+ encryption:
+ description:
+ - whether to enable software encryption.
+ - this is equivalent to -encrypt-with-aggr-key when using the CLI.
+ - requires a VE license.
+ type: bool
+ version_added: 21.14.0
+
+ tags:
+ description:
+ - Tags are an optional way to track the uses of a resource.
+ - Tag values must be formatted as key:value strings, example ["team:csi", "environment:test"]
+ type: list
+ elements: str
+ version_added: 22.6.0
+
+notes:
+ - supports check_mode.
+ - support ZAPI and REST.
+
+'''
+
+EXAMPLES = """
+- name: Create Aggregates and wait 5 minutes until aggregate is online in ZAPI.
+ netapp.ontap.na_ontap_aggregate:
+ state: present
+ service_state: online
+ name: ansibleAggr
+ disk_count: 10
+ wait_for_online: True
+ time_out: 300
+ snaplock_type: non_snaplock
+ use_rest: never
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Create Aggregates in REST.
+ netapp.ontap.na_ontap_aggregate:
+ state: present
+ service_state: online
+ name: ansibleAggr
+ disk_count: 10
+ nodes: ontap-node
+ snaplock_type: non_snaplock
+ use_rest: always
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Manage Aggregates in ZAPI, modify service state.
+ netapp.ontap.na_ontap_aggregate:
+ state: present
+ service_state: offline
+ unmount_volumes: true
+ name: ansibleAggr
+ disk_count: 10
+ use_rest: never
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Manage Aggregates in REST, increase disk count.
+ netapp.ontap.na_ontap_aggregate:
+ state: present
+ name: ansibleAggr
+ disk_count: 20
+ nodes: ontap-node
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Attach object store
+ netapp.ontap.na_ontap_aggregate:
+ state: present
+ name: aggr4
+ object_store_name: sgws_305
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Rename Aggregates
+ netapp.ontap.na_ontap_aggregate:
+ state: present
+ service_state: online
+ from_name: ansibleAggr
+ name: ansibleAggr2
+ disk_count: 20
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Delete Aggregates
+ netapp.ontap.na_ontap_aggregate:
+ state: absent
+ service_state: offline
+ unmount_volumes: true
+ name: ansibleAggr
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import re
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class NetAppOntapAggregate:
+ ''' object initialize and class methods '''
+
+    def __init__(self):
+        """
+        Build the argument spec, create the AnsibleModule, and decide between
+        REST and ZAPI transport based on parameter support.
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            name=dict(required=True, type='str'),
+            disks=dict(required=False, type='list', elements='str'),
+            disk_count=dict(required=False, type='int', default=None),
+            disk_size=dict(required=False, type='int'),
+            disk_size_with_unit=dict(required=False, type='str'),
+            disk_class=dict(required=False,
+                            choices=['capacity', 'performance', 'archive', 'solid_state', 'array', 'virtual', 'data_center', 'capacity_flash']),
+            disk_type=dict(required=False,
+                           choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD-CAP', 'SSD-NVM', 'VMDISK', 'VMLUN', 'VMLUN-SSD']),
+            from_name=dict(required=False, type='str'),
+            mirror_disks=dict(required=False, type='list', elements='str'),
+            nodes=dict(required=False, type='list', elements='str'),
+            is_mirrored=dict(required=False, type='bool'),
+            raid_size=dict(required=False, type='int'),
+            raid_type=dict(required=False, choices=['raid4', 'raid_dp', 'raid_tec', 'raid_0']),
+            service_state=dict(required=False, choices=['online', 'offline']),
+            spare_pool=dict(required=False, choices=['Pool0', 'Pool1']),
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            unmount_volumes=dict(required=False, type='bool'),
+            wait_for_online=dict(required=False, type='bool', default=False),
+            time_out=dict(required=False, type='int', default=100),
+            object_store_name=dict(required=False, type='str'),
+            allow_flexgroups=dict(required=False, type='bool'),
+            snaplock_type=dict(required=False, type='str', choices=['compliance', 'enterprise', 'non_snaplock']),
+            ignore_pool_checks=dict(required=False, type='bool'),
+            encryption=dict(required=False, type='bool'),
+            tags=dict(required=False, type='list', elements='str')
+        ))
+
+        # mutually_exclusive encodes constraints documented in DOCUMENTATION
+        # (e.g. a mirrored aggregate cannot take an explicit disk list).
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            mutually_exclusive=[
+                ('is_mirrored', 'disks'),
+                ('is_mirrored', 'mirror_disks'),
+                ('is_mirrored', 'spare_pool'),
+                ('spare_pool', 'disks'),
+                ('disk_count', 'disks'),
+                ('disk_size', 'disk_size_with_unit'),
+                ('disk_class', 'disk_type'),
+            ],
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        self.rest_api = OntapRestAPI(self.module)
+        # aggregate UUID, discovered via REST once the aggregate is found.
+        self.uuid = None
+        # some attributes are not supported in earlier REST implementation
+        unsupported_rest_properties = ['disks', 'disk_type', 'mirror_disks', 'spare_pool', 'unmount_volumes']
+        partially_supported_rest_properties = [['service_state', (9, 11, 1)], ['tags', (9, 13, 1)]]
+        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
+        if not self.use_rest:
+            # ZAPI fallback requires the netapp-lib package, and tags are REST-only.
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            if 'tags' in self.parameters:
+                self.module.fail_json(msg="Error: tags only supported with REST.")
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+        if self.parameters['state'] == 'present':
+            self.validate_options()
+
+    def validate_options(self):
+        """
+        Cross-parameter validation that AnsibleModule cannot express:
+        REST accepts at most one node and requires nodes when disk_count is set;
+        ZAPI requires disks whenever mirror_disks is given.
+        Collects all violations and fails the module with a single message.
+        """
+        errors = []
+        if self.use_rest:
+            if len(self.parameters.get('nodes', [])) > 1:
+                errors.append('only one node can be specified when using rest, found %s' % self.parameters['nodes'])
+            if 'disk_count' in self.parameters and 'nodes' not in self.parameters:
+                errors.append('nodes is required when disk_count is present')
+        else:
+            if self.parameters.get('mirror_disks') is not None and self.parameters.get('disks') is None:
+                errors.append('mirror_disks require disks options to be set')
+        if errors:
+            plural = 's' if len(errors) > 1 else ''
+            self.module.fail_json(msg='Error%s when validating options: %s.' % (plural, '; '.join(errors)))
+
+    def aggr_get_iter(self, name):
+        """
+        Return aggr-get-iter query results (ZAPI only).
+        :param name: Name of the aggregate
+        :return: NaElement if aggregate found, None otherwise
+        """
+
+        aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter')
+        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+            'aggr-attributes', **{'aggregate-name': name})
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(query_details)
+        aggr_get_iter.add_child_elem(query)
+        result = None
+        try:
+            result = self.server.invoke_successfully(aggr_get_iter, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            # ZAPI error 13040 is tolerated here (treated as "not found" -
+            # presumably vserver/aggregate lookup failure; TODO confirm);
+            # any other error is fatal.
+            if to_native(error.code) != '13040':
+                self.module.fail_json(msg='Error getting aggregate: %s' % to_native(error), exception=traceback.format_exc())
+        return result
+
+    def get_aggr(self, name=None):
+        """
+        Fetch details if aggregate exists.
+        :param name: Name of the aggregate to be fetched; defaults to the
+            module's 'name' parameter.
+        :return:
+            Dictionary of current details if aggregate found
+            None if aggregate is not found
+        """
+        if name is None:
+            name = self.parameters.get('name')
+        if self.use_rest:
+            return self.get_aggr_rest(name)
+        aggr_get = self.aggr_get_iter(name)
+        if aggr_get and aggr_get.get_child_by_name('num-records') and int(aggr_get.get_child_content('num-records')) >= 1:
+            attr = aggr_get.get_child_by_name('attributes-list').get_child_by_name('aggr-attributes')
+            # ZAPI returns all values as strings: convert disk-count to int
+            # and the encryption flag to bool; only set keys that are present.
+            current_aggr = {'service_state': attr.get_child_by_name('aggr-raid-attributes').get_child_content('state')}
+            if attr.get_child_by_name('aggr-raid-attributes').get_child_content('disk-count'):
+                current_aggr['disk_count'] = int(attr.get_child_by_name('aggr-raid-attributes').get_child_content('disk-count'))
+            if attr.get_child_by_name('aggr-raid-attributes').get_child_content('encrypt-with-aggr-key'):
+                current_aggr['encryption'] = attr.get_child_by_name('aggr-raid-attributes').get_child_content('encrypt-with-aggr-key') == 'true'
+            snaplock_type = self.na_helper.safe_get(attr, ['aggr-snaplock-attributes', 'snaplock-type'])
+            if snaplock_type:
+                current_aggr['snaplock_type'] = snaplock_type
+            return current_aggr
+        return None
+
+    def disk_get_iter(self, name):
+        """
+        Return storage-disk-get-iter query results (ZAPI only).
+        Filter disk list by aggregate name, and only reports disk-name and plex-name.
+        :param name: Name of the aggregate
+        :return: NaElement
+        """
+
+        disk_get_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
+        # Query: only disks that belong to the given aggregate.
+        query_details = {
+            'query': {
+                'storage-disk-info': {
+                    'disk-raid-info': {
+                        'disk-aggregate-info': {
+                            'aggregate-name': name
+                        }
+                    }
+                }
+            }
+        }
+        disk_get_iter.translate_struct(query_details)
+        # Desired attributes: restrict the reply to disk-name and plex-name.
+        # NOTE(review): 'disk_aggregate_info' below uses underscores while the
+        # query above uses 'disk-aggregate-info' - presumably translate_struct
+        # normalizes the key; confirm against netapp-lib behavior.
+        attributes = {
+            'desired-attributes': {
+                'storage-disk-info': {
+                    'disk-name': None,
+                    'disk-raid-info': {
+                        'disk_aggregate_info': {
+                            'plex-name': None
+                        }
+                    }
+                }
+            }
+        }
+        disk_get_iter.translate_struct(attributes)
+
+        result = None
+        try:
+            result = self.server.invoke_successfully(disk_get_iter, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error getting disks: %s' % to_native(error), exception=traceback.format_exc())
+        return result
+
+    def get_aggr_disks(self, name):
+        """
+        Fetch disks that are used for this aggregate (ZAPI only).
+        :param name: Name of the aggregate to be fetched
+        :return:
+            list of tuples (disk-name, plex-name)
+            empty list if aggregate is not found
+        """
+        disks = []
+        aggr_get = self.disk_get_iter(name)
+        if aggr_get and aggr_get.get_child_by_name('num-records') and int(aggr_get.get_child_content('num-records')) >= 1:
+            attr = aggr_get.get_child_by_name('attributes-list')
+            # Each child is a storage-disk-info record; extract name and owning plex.
+            disks = [(disk_info.get_child_content('disk-name'),
+                      disk_info.get_child_by_name('disk-raid-info').get_child_by_name('disk-aggregate-info').get_child_content('plex-name'))
+                     for disk_info in attr.get_children()]
+        return disks
+
+    def object_store_get_iter(self, name):
+        """
+        Return aggr-object-store-get query results (ZAPI only).
+        Filters by both the configured object_store_name and the aggregate name.
+        :param name: Name of the aggregate
+        :return: NaElement if object-store for given aggregate found, None otherwise
+        """
+
+        object_store_get_iter = netapp_utils.zapi.NaElement('aggr-object-store-get-iter')
+        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+            'object-store-information', **{'object-store-name': self.parameters.get('object_store_name'),
+                                           'aggregate': name})
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(query_details)
+        object_store_get_iter.add_child_elem(query)
+        result = None
+        try:
+            result = self.server.invoke_successfully(object_store_get_iter, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error getting object store: %s' % to_native(error), exception=traceback.format_exc())
+        return result
+
+    def get_object_store(self, name):
+        """
+        Fetch details if object store attached to the given aggregate exists.
+        :param name: Name of the aggregate
+        :return:
+            Dictionary of current details if object store attached to the given aggregate is found
+            None if object store is not found
+        """
+        if self.use_rest:
+            return self.get_object_store_rest()
+        object_store_get = self.object_store_get_iter(name)
+        if object_store_get and object_store_get.get_child_by_name('num-records') and int(object_store_get.get_child_content('num-records')) >= 1:
+            attr = object_store_get.get_child_by_name('attributes-list').get_child_by_name('object-store-information')
+            return {'object_store_name': attr.get_child_content('object-store-name')}
+        return None
+
+    def aggregate_online(self):
+        """
+        Set state of an offline aggregate to online.
+        REST: PATCH with state=online; ZAPI: aggr-online with force-online.
+        :return: None
+        """
+        if self.use_rest:
+            return self.patch_aggr_rest('make service state online for', {'state': 'online'})
+        online_aggr = netapp_utils.zapi.NaElement.create_node_with_children(
+            'aggr-online', **{'aggregate': self.parameters['name'],
+                              'force-online': 'true'})
+        try:
+            self.server.invoke_successfully(online_aggr,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error changing the state of aggregate %s to %s: %s' %
+                                  (self.parameters['name'], self.parameters['service_state'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def aggregate_offline(self):
+        """
+        Set state of an online aggregate to offline.
+        REST: PATCH with state=offline; ZAPI: aggr-offline, optionally
+        unmounting hosted volumes first (unmount_volumes parameter).
+        :return: None
+        """
+        if self.use_rest:
+            return self.patch_aggr_rest('make service state offline for', {'state': 'offline'})
+        offline_aggr = netapp_utils.zapi.NaElement.create_node_with_children(
+            'aggr-offline', **{'aggregate': self.parameters['name'],
+                               'force-offline': 'false',
+                               'unmount-volumes': str(self.parameters.get('unmount_volumes', False))})
+
+        # if disk add operation is in progress, cannot offline aggregate, retry few times.
+        # NOTE(review): retries are issued back-to-back with no delay between
+        # attempts - confirm whether a sleep was intended here.
+        retry = 10
+        while retry > 0:
+            try:
+                self.server.invoke_successfully(offline_aggr, enable_tunneling=True)
+                break
+            except netapp_utils.zapi.NaApiError as error:
+                if 'disk add operation is in progress' in to_native(error):
+                    retry -= 1
+                    if retry > 0:
+                        continue
+                # Either a non-retryable error, or retries exhausted.
+                self.module.fail_json(msg='Error changing the state of aggregate %s to %s: %s' %
+                                      (self.parameters['name'], self.parameters['service_state'], to_native(error)),
+                                      exception=traceback.format_exc())
+
+    @staticmethod
+    def get_disks_or_mirror_disks_object(name, disks):
+        '''
+        create ZAPI object for disks or mirror_disks
+        :param name: element name, 'disks' or 'mirror-disks'
+        :param disks: list of disk names; each becomes a disk-info child
+        :return: NaElement ready to be attached to aggr-create / aggr-add
+        '''
+        disks_obj = netapp_utils.zapi.NaElement(name)
+        for disk in disks:
+            disk_info_obj = netapp_utils.zapi.NaElement('disk-info')
+            disk_info_obj.add_new_child('name', disk)
+            disks_obj.add_child_elem(disk_info_obj)
+        return disks_obj
+
+    def create_aggr(self):
+        """
+        Create aggregate (ZAPI path; REST is delegated to create_aggr_rest).
+        When wait_for_online is set, polls every 10 seconds until the aggregate
+        reports 'online' or time_out expires.
+        :return: None
+        """
+        if self.use_rest:
+            return self.create_aggr_rest()
+        # ZAPI takes all values as strings, so int/bool parameters are
+        # converted below; only parameters that were provided are sent.
+        options = {'aggregate': self.parameters['name']}
+        if self.parameters.get('disk_class'):
+            options['disk-class'] = self.parameters['disk_class']
+        if self.parameters.get('disk_type'):
+            options['disk-type'] = self.parameters['disk_type']
+        if self.parameters.get('raid_type'):
+            options['raid-type'] = self.parameters['raid_type']
+        if self.parameters.get('snaplock_type'):
+            options['snaplock-type'] = self.parameters['snaplock_type']
+        if self.parameters.get('spare_pool'):
+            options['spare-pool'] = self.parameters['spare_pool']
+        # int to str
+        if self.parameters.get('disk_count'):
+            options['disk-count'] = str(self.parameters['disk_count'])
+        if self.parameters.get('disk_size'):
+            options['disk-size'] = str(self.parameters['disk_size'])
+        if self.parameters.get('disk_size_with_unit'):
+            options['disk-size-with-unit'] = str(self.parameters['disk_size_with_unit'])
+        if self.parameters.get('raid_size'):
+            options['raid-size'] = str(self.parameters['raid_size'])
+        # boolean to str
+        if self.parameters.get('is_mirrored'):
+            options['is-mirrored'] = str(self.parameters['is_mirrored']).lower()
+        if self.parameters.get('ignore_pool_checks'):
+            options['ignore-pool-checks'] = str(self.parameters['ignore_pool_checks']).lower()
+        if self.parameters.get('encryption'):
+            options['encrypt-with-aggr-key'] = str(self.parameters['encryption']).lower()
+        aggr_create = netapp_utils.zapi.NaElement.create_node_with_children('aggr-create', **options)
+        if self.parameters.get('nodes'):
+            nodes_obj = netapp_utils.zapi.NaElement('nodes')
+            aggr_create.add_child_elem(nodes_obj)
+            for node in self.parameters['nodes']:
+                nodes_obj.add_new_child('node-name', node)
+        if self.parameters.get('disks'):
+            aggr_create.add_child_elem(self.get_disks_or_mirror_disks_object('disks', self.parameters.get('disks')))
+        if self.parameters.get('mirror_disks'):
+            aggr_create.add_child_elem(self.get_disks_or_mirror_disks_object('mirror-disks', self.parameters.get('mirror_disks')))
+
+        try:
+            self.server.invoke_successfully(aggr_create, enable_tunneling=False)
+            if self.parameters.get('wait_for_online'):
+                # round off time_out
+                retries = (self.parameters['time_out'] + 5) / 10
+                current = self.get_aggr()
+                status = None if current is None else current['service_state']
+                # Poll every 10s until online or the retry budget is spent.
+                while status != 'online' and retries > 0:
+                    time.sleep(10)
+                    retries = retries - 1
+                    current = self.get_aggr()
+                    status = None if current is None else current['service_state']
+            else:
+                current = self.get_aggr()
+            # Warn (do not fail) when ONTAP rounded down the disk count,
+            # e.g. disk_count not a multiple of raid_size (see DOCUMENTATION).
+            if current is not None and current.get('disk_count') != self.parameters.get('disk_count'):
+                self.module.warn("Aggregate created with mismatched disk_count: created %s not %s"
+                                 % (current.get('disk_count'), self.parameters.get('disk_count')))
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error provisioning aggregate %s: %s"
+                                  % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_aggr(self):
+        """
+        Delete aggregate.
+        REST is delegated to delete_aggr_rest; ZAPI uses aggr-destroy.
+        :return: None
+        """
+        if self.use_rest:
+            return self.delete_aggr_rest()
+        aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+            'aggr-destroy', **{'aggregate': self.parameters['name']})
+
+        try:
+            self.server.invoke_successfully(aggr_destroy,
+                                            enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def rename_aggregate(self):
+        """
+        Rename aggregate from 'from_name' to 'name'.
+        REST is delegated to rename_aggr_rest; ZAPI uses aggr-rename.
+        """
+        if self.use_rest:
+            return self.rename_aggr_rest()
+        aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+            'aggr-rename', **{'aggregate': self.parameters['from_name'],
+                              'new-aggregate-name': self.parameters['name']})
+
+        try:
+            self.server.invoke_successfully(aggr_rename, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error renaming aggregate %s: %s"
+                                  % (self.parameters['from_name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def modify_aggr(self, modify):
+        """
+        Modify state of the aggregate.
+        Ordering matters: bring the aggregate online first (disks can only be
+        added to an online aggregate), then add disks, and only take it
+        offline last.
+        :param modify: dictionary of parameters to be modified
+        :return: None
+        """
+        # online aggregate first, so disk can be added after online.
+        if modify.get('service_state') == 'online':
+            self.aggregate_online()
+        # modify tags (REST-only attribute; '_tags' is the REST field name).
+        if modify.get('tags') is not None:
+            self.patch_aggr_rest('modify tags for', {'_tags': modify['tags']})
+        # add disk before taking aggregate offline.
+        disk_size = self.parameters.get('disk_size', 0)
+        disk_size_with_unit = self.parameters.get('disk_size_with_unit')
+        if modify.get('disk_count'):
+            self.add_disks(modify['disk_count'], disk_size=disk_size, disk_size_with_unit=disk_size_with_unit)
+        if modify.get('disks_to_add') or modify.get('mirror_disks_to_add'):
+            self.add_disks(0, modify.get('disks_to_add'), modify.get('mirror_disks_to_add'))
+        # offline aggregate after adding additional disks.
+        if modify.get('service_state') == 'offline':
+            self.aggregate_offline()
+
+    def attach_object_store_to_aggr(self):
+        """
+        Attach object store to aggregate (FabricPool).
+        REST is delegated to attach_object_store_to_aggr_rest.
+        :return: None
+        """
+        if self.use_rest:
+            return self.attach_object_store_to_aggr_rest()
+        store_obj = {'aggregate': self.parameters['name'], 'object-store-name': self.parameters['object_store_name']}
+        if 'allow_flexgroups' in self.parameters:
+            # ZAPI booleans are passed as 'true'/'false' strings.
+            store_obj['allow-flexgroup'] = self.na_helper.get_value_for_bool(False, self.parameters['allow_flexgroups'])
+        attach_object_store = netapp_utils.zapi.NaElement.create_node_with_children('aggr-object-store-attach', **store_obj)
+
+        try:
+            self.server.invoke_successfully(attach_object_store,
+                                            enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error attaching object store %s to aggregate %s: %s" %
+                                  (self.parameters['object_store_name'], self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
    def add_disks(self, count=0, disks=None, mirror_disks=None, disk_size=0, disk_size_with_unit=None):
        """
        Add additional disks to aggregate.
        :param count: number of disks to add (a delta, see set_disk_count)
        :param disks: explicit list of disk names to add to the primary plex
        :param mirror_disks: explicit list of disk names to add to the mirror plex
        :param disk_size: disk size, expressed in 4K blocks
        :param disk_size_with_unit: disk size as a number with an optional unit suffix
        :return: None
        """
        if self.use_rest:
            return self.add_disks_rest(count, disks, mirror_disks, disk_size, disk_size_with_unit)
        options = {'aggregate': self.parameters['name']}
        if count:
            options['disk-count'] = str(count)
        # ignore-pool-checks is only meaningful when specific disks are requested
        if disks and self.parameters.get('ignore_pool_checks'):
            options['ignore-pool-checks'] = str(self.parameters['ignore_pool_checks'])
        if disk_size:
            options['disk-size'] = str(disk_size)
        if disk_size_with_unit:
            options['disk-size-with-unit'] = disk_size_with_unit
        if self.parameters.get('disk_class'):
            options['disk-class'] = self.parameters['disk_class']
        if self.parameters.get('disk_type'):
            options['disk-type'] = self.parameters['disk_type']
        aggr_add = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-add', **options)
        # explicit disk lists are passed as child elements, not attributes
        if disks:
            aggr_add.add_child_elem(self.get_disks_or_mirror_disks_object('disks', disks))
        if mirror_disks:
            aggr_add.add_child_elem(self.get_disks_or_mirror_disks_object('mirror-disks', mirror_disks))

        try:
            self.server.invoke_successfully(aggr_add,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error adding additional disks to aggregate %s: %s' %
                                      (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
+
+ def map_plex_to_primary_and_mirror(self, plex_disks, disks, mirror_disks):
+ '''
+ we have N plexes, and disks, and maybe mirror_disks
+ we're trying to find which plex is used for disks, and which one, if applicable, for mirror_disks
+ :return: a tuple with the names of the two plexes (disks_plex, mirror_disks_plex)
+ the second one can be None
+ '''
+ disks_plex = None
+ mirror_disks_plex = None
+ error = ''
+ for plex in plex_disks:
+ common = set(plex_disks[plex]).intersection(set(disks))
+ if common:
+ if disks_plex is None:
+ disks_plex = plex
+ else:
+ error = 'found overlapping plexes: %s and %s' % (disks_plex, plex)
+ if mirror_disks is not None:
+ common = set(plex_disks[plex]).intersection(set(mirror_disks))
+ if common:
+ if mirror_disks_plex is None:
+ mirror_disks_plex = plex
+ else:
+ error = 'found overlapping mirror plexes: %s and %s' % (mirror_disks_plex, plex)
+ if not error:
+ # make sure we found a match
+ if disks_plex is None:
+ error = 'cannot match disks with current aggregate disks'
+ if mirror_disks is not None and mirror_disks_plex is None:
+ if error:
+ error += ', and '
+ error += 'cannot match mirror_disks with current aggregate disks'
+ if error:
+ self.module.fail_json(msg="Error mapping disks for aggregate %s: %s. Found: %s" %
+ (self.parameters['name'], error, str(plex_disks)))
+ return disks_plex, mirror_disks_plex
+
    def get_disks_to_add(self, aggr_name, disks, mirror_disks):
        '''
        Get list of disks used by the aggregate, as primary and mirror.
        Report error if:
            the plexes in use cannot be matched with user inputs (we expect some overlap)
            the user request requires some disks to be removed (not supported)
        :param aggr_name: name of the aggregate to inspect
        :param disks: user-requested primary disk list
        :param mirror_disks: user-requested mirror disk list (may be None)
        : return: a tuple of two lists of disks: disks_to_add, mirror_disks_to_add
        '''
        # let's see if we need to add disks
        disks_in_use = self.get_aggr_disks(aggr_name)
        # we expect a list of tuples (disk_name, plex_name), if there is a mirror, we should have 2 plexes
        # let's get a list of disks for each plex
        plex_disks = {}
        for disk_name, plex_name in disks_in_use:
            plex_disks.setdefault(plex_name, []).append(disk_name)
        # find who is who
        disks_plex, mirror_disks_plex = self.map_plex_to_primary_and_mirror(plex_disks, disks, mirror_disks)
        # Now that we know what is which, find what needs to be removed (error), and what needs to be added
        disks_to_remove = [disk for disk in plex_disks[disks_plex] if disk not in disks]
        if mirror_disks_plex:
            disks_to_remove.extend([disk for disk in plex_disks[mirror_disks_plex] if disk not in mirror_disks])
        if disks_to_remove:
            # shrinking an aggregate is not possible, so any in-use disk missing from the request is fatal
            error = 'these disks cannot be removed: %s' % str(disks_to_remove)
            self.module.fail_json(msg="Error removing disks is not supported.  Aggregate %s: %s.  In use: %s" %
                                      (aggr_name, error, str(plex_disks)))
        # finally, what's to be added
        disks_to_add = [disk for disk in disks if disk not in plex_disks[disks_plex]]
        mirror_disks_to_add = []
        if mirror_disks_plex:
            mirror_disks_to_add = [disk for disk in mirror_disks if disk not in plex_disks[mirror_disks_plex]]
        if mirror_disks_to_add and not disks_to_add:
            # ONTAP requires primary and mirror plexes to grow together
            self.module.fail_json(msg="Error cannot add mirror disks %s without adding disks for aggregate %s.  In use: %s" %
                                      (str(mirror_disks_to_add), aggr_name, str(plex_disks)))
        if disks_to_add or mirror_disks_to_add:
            self.na_helper.changed = True

        return disks_to_add, mirror_disks_to_add
+
+ def set_disk_count(self, current, modify):
+ if modify.get('disk_count'):
+ if int(modify['disk_count']) < int(current['disk_count']):
+ self.module.fail_json(msg="Error: specified disk_count is less than current disk_count. Only adding disks is allowed.")
+ else:
+ modify['disk_count'] = modify['disk_count'] - current['disk_count']
+
    def get_aggr_actions(self):
        """Determine what needs to change for this aggregate.

        Handles the create-by-rename case: when the target name does not exist
        but from_name does, the action becomes a rename plus optional modify.
        :return: tuple (current, cd_action, rename, modify)
        """
        aggr_name = self.parameters.get('name')
        rename, cd_action, modify = None, None, {}
        current = self.get_aggr()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action == 'create' and self.parameters.get('from_name'):
            # create by renaming existing aggregate
            old_aggregate = self.get_aggr(self.parameters['from_name'])
            rename = self.na_helper.is_rename_action(old_aggregate, current)
            if rename is None:
                self.module.fail_json(msg='Error renaming aggregate %s: no aggregate with from_name %s.'
                                          % (self.parameters['name'], self.parameters['from_name']))
            if rename:
                # compare desired state against the aggregate being renamed
                current = old_aggregate
                aggr_name = self.parameters['from_name']
                cd_action = None
        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            if 'encryption' in modify and not self.use_rest:
                self.module.fail_json(msg='Error: modifying encryption is not supported with ZAPI.')
            if 'snaplock_type' in modify:
                self.module.fail_json(msg='Error: snaplock_type is not modifiable.  Cannot change to: %s.' % modify['snaplock_type'])
            if self.parameters.get('disks'):
                modify['disks_to_add'], modify['mirror_disks_to_add'] = \
                    self.get_disks_to_add(aggr_name, self.parameters['disks'], self.parameters.get('mirror_disks'))
        # turn the desired total disk_count into a delta for add_disks
        self.set_disk_count(current, modify)

        return current, cd_action, rename, modify
+
    def get_object_store_action(self, current, rename):
        """Return 'create' when an object store should be attached, else None.

        Fails when a different object store is already attached, as detaching
        or switching stores is not supported by this module.
        """
        object_store_cd_action = None
        if self.parameters.get('object_store_name'):
            # when renaming, the attached store must be looked up under the old aggregate name
            aggr_name = self.parameters['from_name'] if rename else self.parameters['name']
            object_store_current = self.get_object_store(aggr_name) if current else None
            # NOTE(review): the desired value passed here is a plain string, not a dict -
            # presumably get_cd_action only tests it for None/not-None; confirm in NetAppModule.
            object_store_cd_action = self.na_helper.get_cd_action(object_store_current, self.parameters.get('object_store_name'))
            if object_store_cd_action is None and object_store_current is not None\
                    and object_store_current['object_store_name'] != self.parameters.get('object_store_name'):
                self.module.fail_json(msg='Error: object store %s is already associated with aggregate %s.' %
                                          (object_store_current['object_store_name'], aggr_name))
        return object_store_cd_action
+
+ def get_aggr_rest(self, name):
+ if not name:
+ return None
+ api = 'storage/aggregates'
+ query = {'name': name}
+ fields = 'uuid,state,block_storage.primary.disk_count,data_encryption,snaplock_type'
+ if 'tags' in self.parameters:
+ fields += ',_tags'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+ if error:
+ self.module.fail_json(msg='Error: failed to get aggregate %s: %s' % (name, error))
+ if record:
+ return {
+ 'tags': record.get('_tags', []),
+ 'disk_count': self.na_helper.safe_get(record, ['block_storage', 'primary', 'disk_count']),
+ 'encryption': self.na_helper.safe_get(record, ['data_encryption', 'software_encryption_enabled']),
+ 'service_state': record['state'],
+ 'snaplock_type': record['snaplock_type'],
+ 'uuid': record['uuid'],
+ }
+ return None
+
+ def get_multiplier(self, unit):
+ if not unit:
+ return 1
+ try:
+ return netapp_utils.POW2_BYTE_MAP[unit[0].lower()]
+ except KeyError:
+ self.module.fail_json(msg='Error: unexpected unit in disk_size_with_unit: %s' % self.parameters['disk_size_with_unit'])
+
+ def get_disk_size(self):
+ if 'disk_size' in self.parameters:
+ return self.parameters['disk_size'] * 4 * 1024
+ if 'disk_size_with_unit' in self.parameters:
+ match = re.match(r'([\d.]+)(.*)', self.parameters['disk_size_with_unit'])
+ if match:
+ size, unit = match.groups()
+ mul = self.get_multiplier(unit)
+ return int(float(size) * mul)
+ self.module.fail_json(msg='Error: unexpected value in disk_size_with_unit: %s' % self.parameters['disk_size_with_unit'])
+ return None
+
    def create_aggr_rest(self):
        """Create the aggregate over REST and remember its UUID for later calls.

        Builds the POST body from the optional parameters, waits for the async
        job, and stores the new aggregate's UUID in self.uuid.
        :return: None
        """
        api = 'storage/aggregates'

        disk_size = self.get_disk_size()
        # Interestingly, REST expects True/False in body, but 'true'/'false' in query
        # I guess it's because we're using json in the body
        query = {'return_records': 'true'} # in order to capture UUID
        if disk_size:
            query['disk_size'] = disk_size

        body = {'name': self.parameters['name']} if 'name' in self.parameters else {}
        block_storage = {}
        primary = {}
        if self.parameters.get('nodes'):
            # REST create only accepts a single node
            body['node.name'] = self.parameters['nodes'][0]
        if self.parameters.get('disk_class'):
            primary['disk_class'] = self.parameters['disk_class']
        if self.parameters.get('disk_count'):
            primary['disk_count'] = self.parameters['disk_count']
        if self.parameters.get('raid_size'):
            primary['raid_size'] = self.parameters['raid_size']
        if self.parameters.get('raid_type'):
            primary['raid_type'] = self.parameters['raid_type']
        if primary:
            block_storage['primary'] = primary
        mirror = {}
        if self.parameters.get('is_mirrored'):
            mirror['enabled'] = self.parameters['is_mirrored']
        if mirror:
            block_storage['mirror'] = mirror
        if block_storage:
            body['block_storage'] = block_storage
        if self.parameters.get('encryption'):
            body['data_encryption'] = {'software_encryption_enabled': True}
        if self.parameters.get('snaplock_type'):
            body['snaplock_type'] = self.parameters['snaplock_type']
        if self.parameters.get('tags') is not None:
            body['_tags'] = self.parameters['tags']
        response, error = rest_generic.post_async(self.rest_api, api, body or None, query, job_timeout=self.parameters['time_out'])
        if error:
            self.module.fail_json(msg='Error: failed to create aggregate: %s' % error)
        if response:
            # extract the UUID of the new aggregate from the returned record
            record, error = rrh.check_for_0_or_1_records(api, response, error, query)
            if not error and record and 'uuid' not in record:
                error = 'uuid key not present in %s:' % record
            if error:
                self.module.fail_json(msg='Error: failed to parse create aggregate response: %s' % error)
            if record:
                self.uuid = record['uuid']
+
+ def delete_aggr_rest(self):
+ api = 'storage/aggregates'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid)
+ if error:
+ self.module.fail_json(msg='Error: failed to delete aggregate: %s' % error)
+
+ def patch_aggr_rest(self, action, body, query=None):
+ api = 'storage/aggregates'
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body, query)
+ if error:
+ self.module.fail_json(msg='Error: failed to %s aggregate: %s' % (action, error))
+
+ def add_disks_rest(self, count=0, disks=None, mirror_disks=None, disk_size=0, disk_size_with_unit=None):
+ """
+ Add additional disks to aggregate.
+ :return: None
+ """
+ if disks or mirror_disks:
+ self.module.fail_json(msg='Error: disks or mirror disks are mot supported with rest: %s, %s.' % (disks, mirror_disks))
+ if self.parameters.get('disk_class'):
+ self.module.warn('disk_class is ignored when adding disks to an exiting aggregate')
+ primary = {'disk_count': self.parameters['disk_count']} if count else None
+ body = {'block_storage': {'primary': primary}} if primary else None
+ if body:
+ disk_size = self.get_disk_size()
+ query = {'disk_size': disk_size} if disk_size else None
+ self.patch_aggr_rest('increase disk count for', body, query)
+
+ def rename_aggr_rest(self):
+ body = {'name': self.parameters['name']}
+ self.patch_aggr_rest('rename', body)
+
+ def get_object_store_rest(self):
+ '''TODO: support mirror in addition to primary'''
+ api = 'storage/aggregates/%s/cloud-stores' % self.uuid
+ record, error = rest_generic.get_one_record(self.rest_api, api, query={'primary': True})
+ if error:
+ self.module.fail_json(msg='Error: failed to get cloud stores for aggregate: %s' % error)
+ return record
+
+ def get_cloud_target_uuid_rest(self):
+ api = 'cloud/targets'
+ query = {'name': self.parameters['object_store_name']}
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error or not record:
+ self.module.fail_json(msg='Error: failed to find cloud store with name %s: %s' % (self.parameters['object_store_name'], error))
+ return record['uuid']
+
    def attach_object_store_to_aggr_rest(self):
        '''Attach the named cloud store to this aggregate over REST.

        Requires self.uuid to be set (by get_aggr_rest or create_aggr_rest).
        TODO: support mirror in addition to primary.
        :return: the POST response record
        '''

        if self.uuid is None:
            error = 'aggregate UUID is not set.'
            self.module.fail_json(msg='Error: cannot attach cloud store with name %s: %s' % (self.parameters['object_store_name'], error))
        body = {'target': {'uuid': self.get_cloud_target_uuid_rest()}}
        api = 'storage/aggregates/%s/cloud-stores' % self.uuid
        query = None
        if 'allow_flexgroups' in self.parameters:
            # query parameters take string booleans, unlike the json body
            query = {'allow_flexgroups': 'true' if self.parameters['allow_flexgroups'] else 'false'}
        record, error = rest_generic.post_async(self.rest_api, api, body, query)
        if error:
            self.module.fail_json(msg='Error: failed to attach cloud store with name %s: %s' % (self.parameters['object_store_name'], error))
        return record
+
+ def validate_expensive_options(self, cd_action, modify):
+ if cd_action == 'create' or (modify and 'disk_count' in modify):
+ # report an error if disk_size_with_unit is not valid
+ self.get_disk_size()
+
    def apply(self):
        """
        Apply action to the aggregate.

        Computes the required changes, optionally validates them in check_mode,
        then performs create/delete/rename/modify and object store attachment.
        :return: None (exits the module via exit_json)
        """
        current, cd_action, rename, modify = self.get_aggr_actions()
        if current:
            self.uuid = current.get('uuid')
        object_store_cd_action = self.get_object_store_action(current, rename)

        if self.na_helper.changed and self.module.check_mode:
            # additional validations that are done at runtime
            self.validate_expensive_options(cd_action, modify)

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_aggr()
                # offline aggregate after create.
                if self.parameters.get('service_state') == 'offline':
                    self.modify_aggr({'service_state': 'offline'})
            elif cd_action == 'delete':
                self.delete_aggr()
            else:
                if rename:
                    self.rename_aggregate()
                if modify:
                    self.modify_aggr(modify)
            if object_store_cd_action == 'create':
                self.attach_object_store_to_aggr()
        if rename:
            # report the effective name change in the module output
            modify['name'] = self.parameters['name']
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """
    Create Aggregate class instance and invoke apply
    :return: None
    """
    NetAppOntapAggregate().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py
new file mode 100644
index 000000000..00d02e314
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py
@@ -0,0 +1,449 @@
+#!/usr/bin/python
+"""
+create Autosupport module to enable, disable or modify
+"""
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_autosupport
+short_description: NetApp ONTAP autosupport
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+description:
+ - Enable/Disable Autosupport
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+options:
+ state:
+ description:
+ - Specifies whether the AutoSupport daemon is present or absent.
+ - When this setting is absent, delivery of all AutoSupport messages is turned off.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ node_name:
+ description:
+ - The name of the filer that owns the AutoSupport Configuration.
+ required: true
+ type: str
+ transport:
+ description:
+ - The name of the transport protocol used to deliver AutoSupport messages.
+ choices: ['http', 'https', 'smtp']
+ type: str
+ noteto:
+ description:
+ - Specifies up to five recipients of short AutoSupport e-mail messages.
+ type: list
+ elements: str
+ post_url:
+ description:
+ - The URL used to deliver AutoSupport messages via HTTP POST.
+ type: str
+ mail_hosts:
+ description:
+ - List of mail server(s) used to deliver AutoSupport messages via SMTP.
+ - Both host names and IP addresses may be used as valid input.
+ type: list
+ elements: str
+ support:
+ description:
+ - Specifies whether AutoSupport notification to technical support is enabled.
+ type: bool
+ from_address:
+ description:
+ - specify the e-mail address from which the node sends AutoSupport messages.
+ version_added: 2.8.0
+ type: str
+ partner_addresses:
+ description:
+ - Specifies up to five partner vendor recipients of full AutoSupport e-mail messages.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ to_addresses:
+ description:
+ - Specifies up to five recipients of full AutoSupport e-mail messages.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ proxy_url:
+ description:
+ - specify an HTTP or HTTPS proxy if the 'transport' parameter is set to HTTP or HTTPS and your organization uses a proxy.
+ - If authentication is required, use the format "username:password@host:port".
+ version_added: 2.8.0
+ type: str
+ hostname_in_subject:
+ description:
+ - Specify whether the hostname of the node is included in the subject line of the AutoSupport message.
+ type: bool
+ version_added: 2.8.0
+ nht_data_enabled:
+ description:
+ - Specify whether the disk health data is collected as part of the AutoSupport data.
+ type: bool
+ version_added: '21.5.0'
+ perf_data_enabled:
+ description:
+ - Specify whether the performance data is collected as part of the AutoSupport data.
+ type: bool
+ version_added: '21.5.0'
+ retry_count:
+ description:
+ - Specify the maximum number of delivery attempts for an AutoSupport message.
+ type: int
+ version_added: '21.5.0'
+ reminder_enabled:
+ description:
+ - Specify whether AutoSupport reminders are enabled or disabled.
+ type: bool
+ version_added: '21.5.0'
+ max_http_size:
+ description:
+ - Specify delivery size limit for the HTTP transport protocol (in bytes).
+ type: int
+ version_added: '21.5.0'
+ max_smtp_size:
+ description:
+ - Specify delivery size limit for the SMTP transport protocol (in bytes).
+ type: int
+ version_added: '21.5.0'
+ private_data_removed:
+ description:
+ - Specify the removal of customer-supplied data.
+ type: bool
+ version_added: '21.5.0'
+ local_collection_enabled:
+ description:
+        - Specify whether collection of AutoSupport data is enabled when the AutoSupport daemon is disabled.
+ type: bool
+ version_added: '21.5.0'
+ ondemand_enabled:
+ description:
+ - Specify whether the AutoSupport OnDemand Download feature is enabled.
+ type: bool
+ version_added: '21.5.0'
+ validate_digital_certificate:
+ description:
+ - When set to true each node will validate the digital certificates that it receives.
+ type: bool
+ version_added: '21.5.0'
+ """
+
+EXAMPLES = """
+ - name: Enable autosupport
+ netapp.ontap.na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ node_name: test
+ transport: https
+ noteto: abc@def.com,def@ghi.com
+ mail_hosts: 1.2.3.4,5.6.7.8
+ support: False
+ post_url: url/1.0/post
+ - name: Modify autosupport proxy_url with password
+ netapp.ontap.na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ node_name: test
+ transport: https
+ proxy_url: username:password@host.com:8000
+ - name: Modify autosupport proxy_url without password
+ netapp.ontap.na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ node_name: test
+ transport: https
+ proxy_url: username@host.com:8000
+ - name: Disable autosupport
+ netapp.ontap.na_ontap_autosupport:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: absent
+ node_name: test
+"""
+
+RETURN = """
+"""
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppONTAPasup:
    """Manage ONTAP AutoSupport configuration on a node: enable, disable or modify settings."""

    def __init__(self):

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            node_name=dict(required=True, type='str'),
            transport=dict(required=False, type='str', choices=['smtp', 'http', 'https']),
            noteto=dict(required=False, type='list', elements='str'),
            post_url=dict(required=False, type='str'),
            support=dict(required=False, type='bool'),
            mail_hosts=dict(required=False, type='list', elements='str'),
            from_address=dict(required=False, type='str'),
            partner_addresses=dict(required=False, type='list', elements='str'),
            to_addresses=dict(required=False, type='list', elements='str'),
            # proxy_url may contain a password: user:password@url
            proxy_url=dict(required=False, type='str', no_log=True),
            hostname_in_subject=dict(required=False, type='bool'),
            nht_data_enabled=dict(required=False, type='bool'),
            perf_data_enabled=dict(required=False, type='bool'),
            retry_count=dict(required=False, type='int'),
            reminder_enabled=dict(required=False, type='bool'),
            max_http_size=dict(required=False, type='int'),
            max_smtp_size=dict(required=False, type='int'),
            private_data_removed=dict(required=False, type='bool'),
            local_collection_enabled=dict(required=False, type='bool'),
            ondemand_enabled=dict(required=False, type='bool'),
            validate_digital_certificate=dict(required=False, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # present or absent requires modifying state to enabled or disabled
        self.parameters['service_state'] = 'started' if self.parameters['state'] == 'present' else 'stopped'
        self.set_playbook_zapi_key_map()

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())

            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def set_playbook_zapi_key_map(self):
        """Build the option-name to ZAPI-element-name maps, grouped by value type."""
        self.na_helper.zapi_string_keys = {
            'node_name': 'node-name',
            'transport': 'transport',
            'post_url': 'post-url',
            'from_address': 'from',
            'proxy_url': 'proxy-url'
        }
        self.na_helper.zapi_int_keys = {
            'retry_count': 'retry-count',
            'max_http_size': 'max-http-size',
            'max_smtp_size': 'max-smtp-size'
        }
        # values are (parent element, child element) name pairs
        self.na_helper.zapi_list_keys = {
            'noteto': ('noteto', 'mail-address'),
            'mail_hosts': ('mail-hosts', 'string'),
            'partner_addresses': ('partner-address', 'mail-address'),
            'to_addresses': ('to', 'mail-address')
        }
        self.na_helper.zapi_bool_keys = {
            'support': 'is-support-enabled',
            'hostname_in_subject': 'is-node-in-subject',
            'nht_data_enabled': 'is-nht-data-enabled',
            'perf_data_enabled': 'is-perf-data-enabled',
            'reminder_enabled': 'is-reminder-enabled',
            'private_data_removed': 'is-private-data-removed',
            'local_collection_enabled': 'is-local-collection-enabled',
            'ondemand_enabled': 'is-ondemand-enabled',
            'validate_digital_certificate': 'validate-digital-certificate'
        }

    def get_autosupport_config(self):
        """
        get current autosupport details
        :return: dict() of current settings, keyed by module option names
        """
        asup_info = {}
        if self.use_rest:
            # REST goes through the private CLI passthrough endpoint
            api = "private/cli/system/node/autosupport"
            query = {
                'node': self.parameters['node_name'],
                'fields': 'state,node,transport,noteto,url,support,mail-hosts,from,partner-address,to,proxy-url,hostname-subj,nht,perf,retry-count,\
reminder,max-http-size,max-smtp-size,remove-private-data,ondemand-server-url,support,reminder,ondemand-state,local-collection,validate-digital-certificate'
            }
            record, error = rest_generic.get_one_record(self.rest_api, api, query)

            if error:
                self.module.fail_json(msg='Error fetching info: %s' % error)

            # these fields map 1:1 between the CLI output and the module options
            for param in ('transport', 'mail_hosts', 'proxy_url', 'retry_count',
                          'max_http_size', 'max_smtp_size', 'noteto', 'validate_digital_certificate'):
                if param in record:
                    asup_info[param] = record[param]

            # NOTE(review): the CLI may report 'enable'/'disable' strings or booleans
            # depending on ONTAP version - both are accepted here; confirm against target versions.
            asup_info['support'] = record['support'] in ['enable', True]
            asup_info['node_name'] = record['node'] if 'node' in record else ""
            asup_info['post_url'] = record['url'] if 'url' in record else ""
            asup_info['from_address'] = record['from'] if 'from' in record else ""
            asup_info['to_addresses'] = record['to'] if 'to' in record else list()
            asup_info['hostname_in_subject'] = record['hostname_subj'] if 'hostname_subj' in record else False
            asup_info['nht_data_enabled'] = record['nht'] if 'nht' in record else False
            asup_info['perf_data_enabled'] = record['perf'] if 'perf' in record else False
            asup_info['reminder_enabled'] = record['reminder'] if 'reminder' in record else False
            asup_info['private_data_removed'] = record['remove_private_data'] if 'remove_private_data' in record else False
            asup_info['local_collection_enabled'] = record['local_collection'] if 'local_collection' in record else False
            asup_info['ondemand_enabled'] = record['ondemand_state'] in ['enable', True] if 'ondemand_state' in record else False
            asup_info['service_state'] = 'started' if record['state'] in ['enable', True] else 'stopped'
            asup_info['partner_addresses'] = record['partner_address'] if 'partner_address' in record else list()
        else:
            asup_details = netapp_utils.zapi.NaElement('autosupport-config-get')
            asup_details.add_new_child('node-name', self.parameters['node_name'])
            try:
                result = self.server.invoke_successfully(asup_details, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error fetching info: %s' % to_native(error), exception=traceback.format_exc())
            # zapi invoke successful
            asup_attr_info = result.get_child_by_name('attributes').get_child_by_name('autosupport-config-info')
            asup_info['service_state'] = 'started' if asup_attr_info['is-enabled'] == 'true' else 'stopped'
            # convert each ZAPI child element back to the module option's native type
            for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
                value = asup_attr_info.get_child_content(zapi_key)
                asup_info[item_key] = value if value is not None else ""
            for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
                value = asup_attr_info.get_child_content(zapi_key)
                if value is not None:
                    asup_info[item_key] = self.na_helper.get_value_for_int(from_zapi=True, value=value)
            for item_key, zapi_key in self.na_helper.zapi_bool_keys.items():
                value = asup_attr_info.get_child_content(zapi_key)
                if value is not None:
                    asup_info[item_key] = self.na_helper.get_value_for_bool(from_zapi=True, value=value)
            for item_key, zapi_key in self.na_helper.zapi_list_keys.items():
                parent, dummy = zapi_key
                asup_info[item_key] = self.na_helper.get_value_for_list(from_zapi=True, zapi_parent=asup_attr_info.get_child_by_name(parent))

        return asup_info

    def modify_autosupport_config(self, modify):
        """
        modify autosupport config
        @return: modified attributes / FAILURE with an error_message
        """

        if self.use_rest:
            api = "private/cli/system/node/autosupport"
            query = {
                'node': self.parameters['node_name']
            }
            if 'service_state' in modify:
                modify['state'] = modify['service_state'] == 'started'
                del modify['service_state']

            # rename module option keys to the CLI passthrough field names
            if 'post_url' in modify:
                modify['url'] = modify.pop('post_url')
            if 'from_address' in modify:
                modify['from'] = modify.pop('from_address')
            if 'to_addresses' in modify:
                modify['to'] = modify.pop('to_addresses')
            if 'hostname_in_subject' in modify:
                modify['hostname_subj'] = modify.pop('hostname_in_subject')
            if 'nht_data_enabled' in modify:
                modify['nht'] = modify.pop('nht_data_enabled')
            if 'perf_data_enabled' in modify:
                modify['perf'] = modify.pop('perf_data_enabled')
            if 'reminder_enabled' in modify:
                modify['reminder'] = modify.pop('reminder_enabled')
            if 'private_data_removed' in modify:
                modify['remove_private_data'] = modify.pop('private_data_removed')
            if 'local_collection_enabled' in modify:
                modify['local_collection'] = modify.pop('local_collection_enabled')
            if 'ondemand_enabled' in modify:
                modify['ondemand_state'] = modify.pop('ondemand_enabled')
            if 'partner_addresses' in modify:
                modify['partner_address'] = modify.pop('partner_addresses')

            dummy, error = rest_generic.patch_async(self.rest_api, api, None, modify, query)

            if error:
                self.module.fail_json(msg='Error modifying asup: %s' % error)
        else:
            asup_details = {'node-name': self.parameters['node_name']}
            if modify.get('service_state'):
                asup_details['is-enabled'] = 'true' if modify.get('service_state') == 'started' else 'false'
            asup_config = netapp_utils.zapi.NaElement('autosupport-config-modify')
            # translate each modified option into its ZAPI element, converting the value as needed
            for item_key in modify:
                if item_key in self.na_helper.zapi_string_keys:
                    zapi_key = self.na_helper.zapi_string_keys.get(item_key)
                    asup_details[zapi_key] = modify[item_key]
                elif item_key in self.na_helper.zapi_int_keys:
                    zapi_key = self.na_helper.zapi_int_keys.get(item_key)
                    asup_details[zapi_key] = modify[item_key]
                elif item_key in self.na_helper.zapi_bool_keys:
                    zapi_key = self.na_helper.zapi_bool_keys.get(item_key)
                    asup_details[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False, value=modify[item_key])
                elif item_key in self.na_helper.zapi_list_keys:
                    parent_key, child_key = self.na_helper.zapi_list_keys.get(item_key)
                    asup_config.add_child_elem(self.na_helper.get_value_for_list(
                        from_zapi=False, zapi_parent=parent_key, zapi_child=child_key, data=modify.get(item_key)))

            asup_config.translate_struct(asup_details)
            try:
                return self.server.invoke_successfully(asup_config, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying asup: %s' % to_native(error), exception=traceback.format_exc())

    @staticmethod
    def strip_password(url):
        ''' if url matches user:password@address return the (user, address) tuple
            otherwise return the (None, None) tuple
        '''
        if url:
            needle = r'(.*):(.*)@(.*)'
            matched = re.match(needle, url)
            if matched:
                return matched.group(1, 3)
        return None, None

    def idempotency_check(self, current, modify):
        """Return a copy of modify with any password inside proxy_url masked.

        Warns that idempotency cannot be guaranteed when proxy_url contains a
        password, as ONTAP replaces stored passwords with asterisks.
        """
        sanitized_modify = dict(modify)
        if 'proxy_url' in modify:
            user_url_m = self.strip_password(modify['proxy_url'])
            user_url_c = self.strip_password(current.get('proxy_url'))
            if user_url_m == user_url_c and user_url_m != (None, None):
                # change in password, it can be a false positive as password is replaced with ********* by ONTAP
                self.module.warn('na_ontap_autosupport is not idempotent because the password value in proxy_url cannot be compared.')
            if user_url_m != (None, None):
                # password was found in proxy_url, sanitize it, use something different than ZAPI *********
                sanitized_modify['proxy_url'] = "%s:XXXXXXXX@%s" % user_url_m
        return sanitized_modify

    def apply(self):
        """
        Apply action to autosupport: fetch current config, compute and apply changes.
        """
        current = self.get_autosupport_config()
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        # sanitized_modify is only for reporting; the real modify dict is applied
        sanitized_modify = self.idempotency_check(current, modify)
        if self.na_helper.changed and not self.module.check_mode:
            self.modify_autosupport_config(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, modify=sanitized_modify)
        self.module.exit_json(**result)
+
+
def main():
    """Execute action"""
    NetAppONTAPasup().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py
new file mode 100644
index 000000000..1f1f109d3
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+
+# (c) 2020-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_autosupport_invoke
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_autosupport_invoke
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP send AutoSupport message
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+ - Send an AutoSupport message from a node
+
+options:
+
+ name:
+ description:
+ - The name of the node to send the message to.
+ - Not specifying this option invokes AutoSupport on all nodes in the cluster.
+ type: str
+
+ autosupport_message:
+ description:
+ - Text sent in the subject line of the AutoSupport message.
+ - message is deprecated and will be removed to avoid a conflict with an Ansible internal variable.
+ type: str
+ aliases:
+ - message
+ version_added: 20.8.0
+
+ type:
+ description:
+ - Type of AutoSupport Collection to Issue.
+ choices: ['test', 'performance', 'all']
+ default: 'all'
+ type: str
+
+ uri:
+ description:
+ - send the AutoSupport message to the destination you specify instead of the configured destination.
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Send message
+ na_ontap_autosupport_invoke:
+ name: node1
+ autosupport_message: invoked test autosupport rest
+ uri: http://1.2.3.4/delivery_uri
+ type: test
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPasupInvoke(object):
+ ''' send ASUP message '''
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=False, type='str'),
+ autosupport_message=dict(required=False, type='str', aliases=["message"]),
+ type=dict(required=False, choices=[
+ 'test', 'performance', 'all'], default='all'),
+ uri=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # 'message' is a deprecated alias of autosupport_message; only warn (do not fail)
+ # so that existing playbooks keep working.
+ if 'message' in self.parameters:
+ self.module.warn('Error: "message" option conflicts with Ansible internal variable - please use "autosupport_message".')
+
+ # REST API should be used for ONTAP 9.6 or higher.
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ # fall back to ZAPI, which requires the netapp-lib package
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_nodes(self):
+ """Return the names of all nodes in the cluster (ZAPI system-node-get-iter)."""
+ nodes = []
+ node_obj = netapp_utils.zapi.NaElement('system-node-get-iter')
+ desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
+ node_details_info = netapp_utils.zapi.NaElement('node-details-info')
+ # request only the 'node' attribute to keep the reply small
+ node_details_info.add_new_child('node', '')
+ desired_attributes.add_child_elem(node_details_info)
+ node_obj.add_child_elem(desired_attributes)
+ try:
+ result = self.server.invoke_successfully(node_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ node_info = result.get_child_by_name('attributes-list')
+ if node_info is not None:
+ nodes = [node_details.get_child_content('node') for node_details in node_info.get_children()]
+ return nodes
+
+ def send_zapi_message(self, params, node_name):
+ """Invoke autosupport-invoke on one node through ZAPI, failing the module on error."""
+ params['node-name'] = node_name
+ send_message = netapp_utils.zapi.NaElement.create_node_with_children('autosupport-invoke', **params)
+ try:
+ # tunneling is disabled: the call targets the connected admin server directly
+ self.server.invoke_successfully(send_message, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
+ % (node_name, to_native(error)),
+ exception=traceback.format_exc())
+
+ def send_message(self):
+ """Send the AutoSupport message, using REST when available and ZAPI otherwise."""
+ params = {}
+ if self.parameters.get('autosupport_message'):
+ params['message'] = self.parameters['autosupport_message']
+ if self.parameters.get('type'):
+ params['type'] = self.parameters['type']
+ if self.parameters.get('uri'):
+ params['uri'] = self.parameters['uri']
+
+ if self.use_rest:
+ if self.parameters.get('name'):
+ params['node.name'] = self.parameters['name']
+ node_name = params['node.name']
+ else:
+ # without a node name, REST invokes AutoSupport on every node
+ node_name = '*'
+ api = 'support/autosupport/messages'
+ dummy, error = self.rest_api.post(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
+ % (node_name, error))
+ else:
+ if self.parameters.get('name'):
+ node_names = [self.parameters['name']]
+ else:
+ # simulate REST behavior by sending to all nodes in the cluster
+ node_names = self.get_nodes()
+ for name in node_names:
+ self.send_zapi_message(params, name)
+
+ def apply(self):
+ """Send the message (skipped in check mode) and always report changed=True -
+ invoking AutoSupport is an action, not a state, so the module is not idempotent."""
+ if not self.module.check_mode:
+ self.send_message()
+ self.module.exit_json(changed=True)
+
+
+def main():
+ """Instantiate the module object and send the AutoSupport message."""
+ message = NetAppONTAPasupInvoke()
+ message.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_bgp_peer_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_bgp_peer_group.py
new file mode 100644
index 000000000..6c0fa63a0
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_bgp_peer_group.py
@@ -0,0 +1,356 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_bgp_peer_group
+short_description: NetApp ONTAP module to create, modify or delete bgp peer group.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '22.0.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create, modify or delete bgp peer group.
+options:
+ state:
+ description:
+ - Create or delete BGP peer group.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - Name of the BGP peer group.
+ type: str
+ required: true
+ from_name:
+ description:
+ - Name of the existing BGP peer group to be renamed to C(name).
+ type: str
+ ipspace:
+ description:
+ - IPSpace name, cannot be modified after creation.
+ type: str
+ local:
+ description:
+ - Information describing the local interface that is being used to peer with a router using BGP.
+ - When creating BGP peer group, an existing BGP interface is used by specifying the interface, or create a new one by specifying the port and IP address.
+ - Cannot be modified after creation.
+ type: dict
+ suboptions:
+ interface:
+ description:
+ - An existing BGP interface.
+ - If interface not found, module will try to create BGP interface using C(local.ip) and C(local.port).
+ type: dict
+ suboptions:
+ name:
+ description:
+ - BGP interface name.
+ type: str
+ ip:
+ description:
+ - IP information, required to create a new interface.
+ type: dict
+ suboptions:
+ address:
+ description:
+ - IPv4 or IPv6 address, example 10.10.10.7.
+ type: str
+ netmask:
+ description:
+ - Input as netmask length (16) or IPv4 mask (255.255.0.0).
+ - For IPv6, the default value is 64 with a valid range of 1 to 127.
+ type: str
+ port:
+ description:
+ - Port and node information, required to create a new interface.
+ type: dict
+ suboptions:
+ name:
+ description:
+ - Port name.
+ type: str
+ node:
+ description:
+ - Name of node on which the port is located.
+ type: dict
+ suboptions:
+ name:
+ description:
+ - Node name
+ type: str
+ peer:
+ description:
+ - Information describing the router to peer with.
+ type: dict
+ suboptions:
+ address:
+ description:
+ - Peer router address.
+ type: str
+ asn:
+ description:
+ - Autonomous system number of peer.
+ - Cannot be modified after creation.
+ type: int
+"""
+
+EXAMPLES = """
+ - name: Create BGP peer group with existing bgp interface bgp_lif.
+ netapp.ontap.na_ontap_bgp_peer_group:
+ name: peer_group
+ ipspace: Default
+ local:
+ interface:
+ name: bgp_lif
+ peer:
+ address: 10.10.10.19
+ asn: 65501
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Create new BGP interface new_bgp_lif and BGP peer group peer_group_1.
+ netapp.ontap.na_ontap_bgp_peer_group:
+ name: peer_group_1
+ ipspace: Default
+ local:
+ interface:
+ name: new_bgp_lif
+ ip:
+ address: 10.10.10.20
+ netmask: 24
+ port:
+ name: e0a
+ node:
+ name: ontap98-01
+ peer:
+ address: 10.10.10.20
+ asn: 65500
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ # this will create bgp interface with random name.
+ - name: Create BGP interface without interface name and BGP peer group peer_group_2.
+ netapp.ontap.na_ontap_bgp_peer_group:
+ name: peer_group_2
+ ipspace: Default
+ local:
+ ip:
+ address: 10.10.10.22
+ netmask: 24
+ port:
+ name: e0a
+ node:
+ name: ontap98-01
+ peer:
+ address: 10.10.10.22
+ asn: 65512
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Modify peer address.
+ netapp.ontap.na_ontap_bgp_peer_group:
+ name: peer_group_2
+ ipspace: Default
+ peer:
+ address: 10.10.55.22
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Rename BGP peer group name and modify peer address.
+ netapp.ontap.na_ontap_bgp_peer_group:
+ from_name: peer_group_2
+ name: new_peer_group
+ ipspace: Default
+ peer:
+ address: 10.10.55.40
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Delete BGP peer group.
+ netapp.ontap.na_ontap_bgp_peer_group:
+ name: new_peer_group
+ ipspace: Default
+ state: absent
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, netapp_ipaddress
+
+
+class NetAppOntapBgpPeerGroup:
+ """Create, modify, rename or delete an ONTAP BGP peer group (REST only, ONTAP 9.7+)."""
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ ipspace=dict(required=False, type='str'),
+ local=dict(required=False, type='dict', options=dict(
+ interface=dict(required=False, type='dict', options=dict(
+ name=dict(required=False, type='str'),
+ )),
+ ip=dict(required=False, type='dict', options=dict(
+ address=dict(required=False, type='str'),
+ netmask=dict(required=False, type='str')
+ )),
+ port=dict(required=False, type='dict', options=dict(
+ name=dict(required=False, type='str'),
+ node=dict(required=False, type='dict', options=dict(
+ name=dict(required=False, type='str')
+ ))
+ ))
+ )),
+ peer=dict(required=False, type='dict', options=dict(
+ address=dict(required=False, type='str'),
+ asn=dict(required=False, type='int')
+ ))
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.uuid = None
+ self.na_helper = NetAppModule(self.module)
+ self.parameters = self.na_helper.check_and_set_parameters(self.module)
+ # normalize the peer address (e.g. compress IPv6) so idempotency checks compare like with like
+ if self.na_helper.safe_get(self.parameters, ['peer', 'address']):
+ self.parameters['peer']['address'] = netapp_ipaddress.validate_and_compress_ip_address(self.parameters['peer']['address'], self.module)
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_bgp_peer_group', 9, 7)
+ self.parameters = self.na_helper.filter_out_none_entries(self.parameters)
+
+ def get_bgp_peer_group(self, name=None):
+ """
+ Get BGP peer group.
+ Returns a dict with name and peer (and caches the uuid on self), or None if not found.
+ """
+ if name is None:
+ name = self.parameters['name']
+ api = 'network/ip/bgp/peer-groups'
+ query = {
+ 'name': name,
+ 'fields': 'name,uuid,peer'
+ }
+ if 'ipspace' in self.parameters:
+ query['ipspace.name'] = self.parameters['ipspace']
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg='Error fetching BGP peer group %s: %s' % (name, to_native(error)),
+ exception=traceback.format_exc())
+ if record:
+ # remember the uuid; modify/delete address the group by uuid
+ self.uuid = record['uuid']
+ return {
+ 'name': self.na_helper.safe_get(record, ['name']),
+ 'peer': self.na_helper.safe_get(record, ['peer'])
+ }
+ return None
+
+ def create_bgp_peer_group(self):
+ """
+ Create BGP peer group.
+ """
+ api = 'network/ip/bgp/peer-groups'
+ body = {
+ 'name': self.parameters['name'],
+ 'local': self.parameters['local'],
+ 'peer': self.parameters['peer']
+ }
+ if 'ipspace' in self.parameters:
+ body['ipspace.name'] = self.parameters['ipspace']
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg='Error creating BGP peer group %s: %s.' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_bgp_peer_group(self, modify):
+ """
+ Modify BGP peer group (rename and/or change peer) identified by the cached uuid.
+ """
+ api = 'network/ip/bgp/peer-groups'
+ body = {}
+ if 'name' in modify:
+ body['name'] = modify['name']
+ if 'peer' in modify:
+ body['peer'] = modify['peer']
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
+ if error:
+ # when renaming, report the original (from_name) group in the error message
+ name = self.parameters['from_name'] if 'name' in modify else self.parameters['name']
+ self.module.fail_json(msg='Error modifying BGP peer group %s: %s.' % (name, to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_bgp_peer_group(self):
+ """
+ Delete BGP peer group.
+ """
+ api = 'network/ip/bgp/peer-groups'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid)
+ if error:
+ self.module.fail_json(msg='Error deleting BGP peer group %s: %s.' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """Idempotent entry point: decide between create, rename+modify, modify and delete."""
+ current = self.get_bgp_peer_group()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = None
+ if cd_action == 'create':
+ if self.parameters.get('from_name'):
+ # rename flow: target name does not exist yet, the source group must
+ current = self.get_bgp_peer_group(self.parameters['from_name'])
+ if not current:
+ self.module.fail_json(msg="Error renaming BGP peer group, %s does not exist." % self.parameters['from_name'])
+ cd_action = None
+ elif not self.parameters.get('local') or not self.parameters.get('peer'):
+ self.module.fail_json(msg="Error creating BGP peer group %s, local and peer are required in create." % self.parameters['name'])
+ if cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ # asn is immutable after creation (see DOCUMENTATION); reject instead of sending a doomed PATCH
+ if self.na_helper.safe_get(modify, ['peer', 'asn']):
+ self.module.fail_json(msg="Error: cannot modify peer asn.")
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_bgp_peer_group()
+ elif cd_action == 'delete':
+ self.delete_bgp_peer_group()
+ else:
+ self.modify_bgp_peer_group(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ """Instantiate the module object and apply the requested BGP peer group state."""
+ bgp_obj = NetAppOntapBgpPeerGroup()
+ bgp_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py
new file mode 100644
index 000000000..ef74d1705
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py
@@ -0,0 +1,690 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_broadcast_domain
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_broadcast_domain
+short_description: NetApp ONTAP manage broadcast domains.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Modify an ONTAP broadcast domain.
+options:
+ state:
+ description:
+ - Whether the specified broadcast domain should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - Specify the broadcast domain name.
+ required: true
+ aliases:
+ - broadcast_domain
+ type: str
+ from_name:
+ description:
+ - Specify the broadcast domain name to be split into new broadcast domain.
+ version_added: 2.8.0
+ type: str
+ mtu:
+ description:
+ - Specify the required mtu for the broadcast domain.
+ type: int
+ ipspace:
+ description:
+ - Specify the required ipspace for the broadcast domain.
+ - With ZAPI, a domain ipspace cannot be modified after the domain has been created.
+ - With REST, a domain ipspace can be modified.
+ type: str
+ from_ipspace:
+ description:
+ - If used with C(from_name), the module looks for broadcast domain C(from_name) in C(from_ipspace); the split action either renames the
+ broadcast domain and ipspace or creates a new broadcast domain.
+ - If C(from_name) is not present, the module looks for broadcast domain C(name) in C(from_ipspace) and modifies its ipspace using C(ipspace).
+ - Only supported with REST.
+ version_added: 2.15.0
+ type: str
+ ports:
+ description:
+ - Specify the ports associated with this broadcast domain. Should be comma separated.
+ - It represents the expected state of a list of ports at any time.
+ - Add a port if it is specified in expected state but not in current state.
+ - Delete a port if it is specified in current state but not in expected state.
+ - For split action, it represents the ports to be split from current broadcast domain and added to the new broadcast domain.
+ - If all ports are removed or split from a broadcast domain, the broadcast domain will be deleted automatically.
+ - With REST, if exact match of ports found with C(from_name), split action will rename the broadcast domain using C(name).
+ - With REST, if partial match of ports with C(from_name), split action will create a new broadcast domain using C(name) and
+ move partial matched ports from C(from_name) to C(name).
+ - With REST, if C(ports) not in C(from_name), split action will create a new broadcast domain using C(name) with C(ports).
+ type: list
+ elements: str
+'''
+
+EXAMPLES = """
+ - name: create broadcast domain
+ netapp.ontap.na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: ansible_domain
+ mtu: 1000
+ ipspace: Default
+ ports: ["khutton-vsim1:e0d-12", "khutton-vsim1:e0d-13"]
+ - name: modify broadcast domain
+ netapp.ontap.na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: ansible_domain
+ mtu: 1100
+ ipspace: Default
+ ports: ["khutton-vsim1:e0d-12", "khutton-vsim1:e0d-13"]
+ - name: split broadcast domain
+ netapp.ontap.na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ from_name: ansible_domain
+ name: new_ansible_domain
+ mtu: 1200
+ ipspace: Default
+ ports: khutton-vsim1:e0d-12
+ - name: delete broadcast domain
+ netapp.ontap.na_ontap_broadcast_domain:
+ state: absent
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: ansible_domain
+ ipspace: Default
+ - name: create broadcast domain REST
+ netapp.ontap.na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: ansible_domain
+ mtu: 1200
+ ipspace: Default
+ ports: ["khutton-vsim1:e0d-12","khutton-vsim1:e0d-13","khutton-vsim1:e0d-14"]
+ - name: rename broadcast domain if exact match of ports REST
+ netapp.ontap.na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ from_name: ansible_domain
+ name: new_ansible_domain
+ mtu: 1200
+ ipspace: Default
+ ports: ["khutton-vsim1:e0d-12","khutton-vsim1:e0d-13","khutton-vsim1:e0d-14"]
+ - name: if partial match, remove e0d-12 from new_ansible_domain & create new domain ansible_domain with port e0d-12 REST
+ netapp.ontap.na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ from_name: new_ansible_domain
+ name: ansible_domain
+ mtu: 1200
+ ipspace: Default
+ ports: ["khutton-vsim1:e0d-12"]
+ - name: Modify both broadcast domain and ipspace REST.
+ netapp.ontap.na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ from_name: ansible_domain
+ from_ipspace: Default
+ name: ansible_domain_ip1
+ ipspace: ipspace_1
+ mtu: 1200
+ ports: ["khutton-vsim1:e0d-12"]
+ - name: Modify ipspace only REST.
+ netapp.ontap.na_ontap_broadcast_domain:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ from_ipspace: ipspace_1
+ name: ansible_domain_ip1
+ ipspace: Default
+ mtu: 1200
+ ports: ["khutton-vsim1:e0d-12"]
+ - name: delete broadcast domain new_ansible_domain.
+ netapp.ontap.na_ontap_broadcast_domain:
+ state: absent
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: new_ansible_domain
+ mtu: 1200
+ ipspace: Default
+ ports: ["khutton-vsim1:e0d-13","khutton-vsim1:e0d-14"]
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapBroadcastDomain(object):
+ """
+ Create, Modifies and Destroys a Broadcast domain
+ """
+ def __init__(self):
+ """
+ Initialize the ONTAP Broadcast Domain class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str', aliases=["broadcast_domain"]),
+ ipspace=dict(required=False, type='str'),
+ mtu=dict(required=False, type='int'),
+ ports=dict(required=False, type='list', elements='str'),
+ from_name=dict(required=False, type='str'),
+ from_ipspace=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # Set up Rest API
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+ self.desired_ports = None
+
+ # the REST lookup identifies a domain by (name, ipspace), so ipspace is mandatory there
+ if self.use_rest and 'ipspace' not in self.parameters:
+ # NOTE(review): 'ipspace space' reads like a typo for 'ipspace' - left unchanged
+ # as existing callers/tests may match on the exact message text.
+ error_msg = "Error: ipspace space is a required option with REST"
+ self.module.fail_json(msg=error_msg)
+
+ if 'ports' in self.parameters:
+ # strip whitespace and drop duplicate port names
+ self.parameters['ports'] = list(set([port.strip() for port in self.parameters['ports']]))
+ if self.use_rest:
+ # resolve the 'node:port' names into REST port records up front
+ self.desired_ports = self.get_ports_rest(self.parameters['ports'])
+
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ # from_ipspace is a REST-only option (see DOCUMENTATION)
+ if 'from_ipspace' in self.parameters:
+ self.parameters.pop('from_ipspace')
+ self.module.warn("from_ipspace is ignored when ZAPI is used.")
+
+ def get_broadcast_domain(self, broadcast_domain=None, ipspace=None):
+ """
+ Return details about the broadcast domain
+ :param broadcast_domain: specific broadcast domain to get.
+ :param ipspace: ipspace to search in; resolved from module parameters when None.
+ :return: Details about the broadcast domain. None if not found.
+ :rtype: dict
+ """
+ if broadcast_domain is None:
+ broadcast_domain = self.parameters['name']
+ if ipspace is None:
+ # unlike rest, ipspace is not mandatory field for zapi.
+ ipspace = self.parameters.get('ipspace')
+ if self.use_rest:
+ return self.get_broadcast_domain_rest(broadcast_domain, ipspace)
+ # ZAPI path
+ domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
+ broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
+ broadcast_domain_info.add_new_child('broadcast-domain', broadcast_domain)
+ if ipspace:
+ broadcast_domain_info.add_new_child('ipspace', ipspace)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(broadcast_domain_info)
+ domain_get_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(domain_get_iter, True)
+ domain_exists = None
+ # check if broadcast_domain exists
+ # NOTE(review): only an exact single match (num-records == 1) counts as found;
+ # multiple matches fall through and return None - confirm this is intended.
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ domain_info = result.get_child_by_name('attributes-list').\
+ get_child_by_name('net-port-broadcast-domain-info')
+ domain_name = domain_info.get_child_content('broadcast-domain')
+ domain_mtu = domain_info.get_child_content('mtu')
+ domain_ipspace = domain_info.get_child_content('ipspace')
+ domain_ports = domain_info.get_child_by_name('ports')
+ if domain_ports is not None:
+ ports = [port.get_child_content('port') for port in domain_ports.get_children()]
+ else:
+ ports = []
+ domain_exists = {
+ 'domain-name': domain_name,
+ 'mtu': int(domain_mtu),
+ 'ipspace': domain_ipspace,
+ 'ports': ports
+ }
+ return domain_exists
+
+ def get_broadcast_domain_rest(self, broadcast_domain, ipspace):
+ """Return the current broadcast domain (including uuid and a 'node:port' list) via REST, or None."""
+ api = 'network/ethernet/broadcast-domains'
+ query = {'name': broadcast_domain, 'ipspace.name': ipspace}
+ fields = 'uuid,name,ipspace,ports,mtu'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+ if error:
+ self.module.fail_json(msg=error)
+ if record:
+ current = {
+ 'name': record['name'],
+ 'mtu': record['mtu'],
+ 'ipspace': record['ipspace']['name'],
+ 'uuid': record['uuid'],
+ 'ports': []
+ }
+ if 'ports' in record:
+ # normalize to the 'node_name:port_name' format used throughout the module
+ current['ports'] = ['%s:%s' % (port['node']['name'], port['name']) for port in record['ports']]
+ return current
+ return None
+
+ def create_broadcast_domain(self, ports=None):
+ """
+ Creates a new broadcast domain
+ :param ports: REST port records to attach after creation (REST path only);
+ the ZAPI path reads the port names directly from self.parameters.
+ """
+ if self.use_rest:
+ return self.create_broadcast_domain_rest(ports)
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-create')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ if self.parameters.get('mtu'):
+ domain_obj.add_new_child("mtu", str(self.parameters['mtu']))
+ if self.parameters.get('ports'):
+ # ZAPI creates the domain with its ports in a single call
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in self.parameters['ports']:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating broadcast domain %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_broadcast_domain_rest(self, ports=None):
+ """Create the broadcast domain via REST, then attach the given ports in a second call."""
+ api = 'network/ethernet/broadcast-domains'
+ # NOTE(review): 'mtu' is read unconditionally - creating without mtu would raise
+ # KeyError since set_parameters drops unset options; confirm mtu is effectively required here.
+ body = {
+ 'name': self.parameters['name'],
+ 'mtu': self.parameters['mtu'],
+ 'ipspace': self.parameters['ipspace']
+ }
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg=error)
+ if ports:
+ self.add_or_move_broadcast_domain_ports_rest(ports)
+
+ def delete_broadcast_domain(self, broadcast_domain=None, current=None):
+ """
+ Deletes a broadcast domain
+ :param broadcast_domain: domain name (ZAPI path); defaults to self.parameters['name'].
+ :param current: current domain record (REST path) - supplies uuid, ports and ipspace.
+ """
+ if self.use_rest:
+ # all ports should be removed to delete broadcast domain in rest.
+ if 'ports' in current:
+ self.remove_broadcast_domain_ports_rest(current['ports'], current['ipspace'])
+ api = 'network/ethernet/broadcast-domains'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, current['uuid'])
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ if broadcast_domain is None:
+ broadcast_domain = self.parameters['name']
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-destroy')
+ domain_obj.add_new_child("broadcast-domain", broadcast_domain)
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting broadcast domain %s: %s' %
+ (broadcast_domain, to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_broadcast_domain(self):
+ """
+ Modifies the mtu option of a broadcast domain (ZAPI path).
+ The ipspace child only identifies the domain to modify: per the module
+ DOCUMENTATION, ZAPI cannot change a domain's ipspace after creation.
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-modify')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+ if self.parameters.get('mtu'):
+ domain_obj.add_new_child("mtu", str(self.parameters['mtu']))
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying broadcast domain %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def split_broadcast_domain(self):
+ """
+ split broadcast domain
+ ZAPI only: move self.parameters['ports'] out of C(from_name) into the new domain C(name);
+ the source domain is deleted when the split leaves it without any ports.
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-split')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['from_name'])
+ domain_obj.add_new_child("new-broadcast-domain", self.parameters['name'])
+ if self.parameters.get('ports'):
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in self.parameters['ports']:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error splitting broadcast domain %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ # clean up the source domain if the split emptied it
+ if len(self.get_broadcast_domain_ports(self.parameters['from_name'])) == 0:
+ self.delete_broadcast_domain(self.parameters['from_name'])
+
+    def modify_broadcast_domain_or_ports(self, modify, current=None):
+        """
+        Apply domain-level changes (mtu, name, ipspace) and/or port membership
+        changes.
+        :param modify: dict of attributes to change, as produced by
+            get_modified_attributes.
+        :param current: current state dict; required in REST mode (supplies
+            'uuid' for the PATCH and 'ipspace' for later port moves).
+        """
+        modify_keys = list(modify.keys())
+        domain_modify_options = ['mtu', 'name', 'ipspace']
+        if any(x in modify_keys for x in domain_modify_options):
+            if self.use_rest:
+                # Port changes are handled separately below; do not send them
+                # in the domain PATCH body.
+                if modify.get('ports'):
+                    del modify['ports']
+                self.modify_broadcast_domain_rest(current['uuid'], modify)
+                # update current ipspace as it required in modifying ports later.
+                if modify.get('ipspace'):
+                    current['ipspace'] = modify['ipspace']
+            else:
+                self.modify_broadcast_domain()
+        if 'ports' in modify_keys:
+            self.modify_broadcast_domain_ports(current)
+
+    def get_modify_attributes(self, current, split):
+        """
+        Compute the set of attributes that differ between current and desired
+        state.
+        :param current: current state dict.
+        :param split: True when a ZAPI split action will be performed.
+        :return: dict of modified attributes, or None when state is 'absent'
+            or when 'from_name' is set without a split (rename already handled
+            ipspace and ports, so there is nothing left to modify).
+        """
+        modify = None
+        if self.parameters['state'] == 'present':
+            # split already handled ipspace and ports.
+            if self.parameters.get('from_name'):
+                if split:
+                    modify = self.na_helper.get_modified_attributes(current, self.parameters)
+                    # Port moves were performed by the split itself.
+                    if modify.get('ports'):
+                        del modify['ports']
+            else:
+                modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        return modify
+
+    def modify_broadcast_domain_ports(self, current=None):
+        """
+        compare current and desired ports. Call add or remove ports methods if needed.
+        :param current: current state dict; in REST mode it supplies the
+            current 'ports' list and the 'ipspace' used when removing ports.
+        :return: None.
+        """
+        if self.use_rest:
+            current_ports = current['ports']
+        else:
+            current_ports = self.get_broadcast_domain_ports()
+        expect_ports = self.parameters['ports']
+        # if want to remove all ports, simply delete the broadcast domain.
+        if len(expect_ports) == 0:
+            self.delete_broadcast_domain(current=current)
+            return
+        # Set difference in both directions; port order is not significant.
+        ports_to_remove = list(set(current_ports) - set(expect_ports))
+        ports_to_add = list(set(expect_ports) - set(current_ports))
+
+        if len(ports_to_add) > 0:
+            if self.use_rest:
+                # Resolve node:port names to {'uuid': ...} records first;
+                # REST moves ports by PATCHing each port resource.
+                ports = self.get_ports_rest(ports_to_add)
+                if ports:
+                    self.add_or_move_broadcast_domain_ports_rest(ports)
+            else:
+                self.add_broadcast_domain_ports(ports_to_add)
+
+        if len(ports_to_remove) > 0:
+            if self.use_rest:
+                self.remove_broadcast_domain_ports_rest(ports_to_remove, current['ipspace'])
+            else:
+                self.delete_broadcast_domain_ports(ports_to_remove)
+
+    def add_broadcast_domain_ports(self, ports):
+        """
+        Add ports to the broadcast domain via ZAPI
+        (net-port-broadcast-domain-add-ports).
+        :param ports: list of qualified port names (node:port) to add.
+        :return: True on success; fails the module on NaApiError.
+        """
+        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports')
+        domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+        if self.parameters.get('ipspace'):
+            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+        if ports:
+            ports_obj = netapp_utils.zapi.NaElement('ports')
+            domain_obj.add_child_elem(ports_obj)
+            for port in ports:
+                ports_obj.add_new_child('net-qualified-port-name', port)
+        try:
+            self.server.invoke_successfully(domain_obj, True)
+            return True
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating port for broadcast domain %s: %s' %
+                                      (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_broadcast_domain_ports(self, ports):
+        """
+        Remove ports from the broadcast domain via ZAPI
+        (net-port-broadcast-domain-remove-ports).
+        :param ports: list of qualified port names (node:port) to remove.
+        :return: True on success; fails the module on NaApiError.
+        """
+        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports')
+        domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
+        if self.parameters.get('ipspace'):
+            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+        if ports:
+            ports_obj = netapp_utils.zapi.NaElement('ports')
+            domain_obj.add_child_elem(ports_obj)
+            for port in ports:
+                ports_obj.add_new_child('net-qualified-port-name', port)
+        try:
+            self.server.invoke_successfully(domain_obj, True)
+            return True
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting port for broadcast domain %s: %s' %
+                                      (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def get_broadcast_domain_ports(self, broadcast_domain=None):
+        """
+        Return details about the broadcast domain ports (ZAPI get-iter).
+        :param broadcast_domain: domain to query; defaults to
+            self.parameters['name'].
+        :return: list of port names; empty list when the domain is not found
+            (or has no ports).
+        :rtype: list
+        """
+        if broadcast_domain is None:
+            broadcast_domain = self.parameters['name']
+        domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
+        broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
+        broadcast_domain_info.add_new_child('broadcast-domain', broadcast_domain)
+        if self.parameters.get('ipspace'):
+            broadcast_domain_info.add_new_child('ipspace', self.parameters['ipspace'])
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(broadcast_domain_info)
+        domain_get_iter.add_child_elem(query)
+        result = self.server.invoke_successfully(domain_get_iter, True)
+        ports = []
+        # Expect exactly one matching domain record.
+        if result.get_child_by_name('num-records') and \
+                int(result.get_child_content('num-records')) == 1:
+            domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info')
+            domain_ports = domain_info.get_child_by_name('ports')
+            if domain_ports is not None:
+                ports = [port.get_child_content('port') for port in domain_ports.get_children()]
+        return ports
+
+    def modify_broadcast_domain_rest(self, uuid, modify):
+        """
+        PATCH the broadcast domain resource in REST.
+        :param uuid: UUID of the broadcast domain to modify.
+        :param modify: dict that may contain 'name' (rename), 'ipspace'
+            and/or 'mtu'; only present keys are sent in the body.
+        """
+        api = 'network/ethernet/broadcast-domains'
+        body = {}
+        # rename broadcast domain.
+        if 'name' in modify:
+            body['name'] = modify['name']
+        if 'ipspace' in modify:
+            body['ipspace.name'] = modify['ipspace']
+        if 'mtu' in modify:
+            body['mtu'] = modify['mtu']
+        dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
+        if error:
+            self.module.fail_json(msg=error)
+
+    def add_or_move_broadcast_domain_ports_rest(self, ports):
+        """
+        Assign ports to this broadcast domain in REST by PATCHing each port
+        resource with the target domain; REST moves the port from its current
+        domain if necessary.
+        :param ports: list of dicts, each with a 'uuid' key for the port.
+        """
+        api = 'network/ethernet/ports'
+        body = {
+            'broadcast_domain': {
+                'name': self.parameters['name'],
+                'ipspace': {'name': self.parameters['ipspace']}
+            }
+        }
+        # One PATCH per port; fail fast on the first error.
+        for port in ports:
+            dummy, error = rest_generic.patch_async(self.rest_api, api, port['uuid'], body)
+            if error:
+                self.module.fail_json(msg=error)
+
+    def remove_broadcast_domain_ports_rest(self, ports, ipspace):
+        """
+        Remove ports from the broadcast domain using the private CLI
+        passthrough endpoint (there is no public REST API for a bare remove).
+        :param ports: list of qualified port names (node:port) to remove.
+        :param ipspace: ipspace of the broadcast domain.
+        """
+        body = {'ports': ports}
+        api = "private/cli/network/port/broadcast-domain/remove-ports"
+        query = {'broadcast-domain': self.parameters['name'], 'ipspace': ipspace}
+        response, error = rest_generic.patch_async(self.rest_api, api, None, body, query)
+        if error:
+            self.module.fail_json(msg='Error removing ports: %s' % error)
+
+    def get_ports_rest(self, ports):
+        """
+        Resolve qualified port names (node:port) to REST port records.
+        :param ports: list of node:port names.
+        :return: list of {'uuid': ..., 'name': ...} dicts for the ports found.
+            Fails the module when state is 'present' and any port is missing.
+        """
+        # if desired ports with uuid present then return only the ports to add or move.
+        if self.desired_ports:
+            return self.ports_to_add_move_from_desired(ports)
+        # list of desired ports not present in the node.
+        missing_ports = []
+        # list of uuid information of each desired port should present in broadcast domain.
+        desired_ports = []
+        for port in ports:
+            current = self.get_net_port_rest(port)
+            if current is None:
+                missing_ports.append(port)
+            else:
+                desired_ports.append(current)
+        # Error if any of provided ports are not found.
+        if missing_ports and self.parameters['state'] == 'present':
+            self.module.fail_json(msg='Error: ports: %s not found' % ', '.join(missing_ports))
+        return desired_ports
+
+    def get_net_port_rest(self, port):
+        """
+        Look up a single network port in REST.
+        :param port: qualified port name in 'node_name:port_name' form;
+            fails the module if the ':' separator is missing.
+        :return: {'uuid': ..., 'name': ...} dict, or None when not found.
+        """
+        if ':' not in port:
+            error_msg = "Error: Invalid value specified for port: %s, provide port name as node_name:port_name" % port
+            self.module.fail_json(msg=error_msg)
+        port_name = port.split(':')[1]
+        node = port.split(':')[0]
+        api = 'network/ethernet/ports'
+        query = {
+            'name': port_name,
+            'node.name': node,
+        }
+        fields = 'name,uuid'
+        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+        if error:
+            self.module.fail_json(msg=error)
+        if record:
+            current = {'uuid': record['uuid'], 'name': record['name']}
+            return current
+        return None
+
+    def ports_to_add_move_from_desired(self, ports):
+        """
+        Select, from the already-resolved self.desired_ports records, the
+        UUIDs of the ports named in 'ports'.
+        :param ports: list of qualified port names (node:port).
+        :return: list of {'uuid': ...} dicts for matching ports.
+        """
+        ports_to_add_move = []
+        for port in ports:
+            # Match on the port name component only (after the ':').
+            # NOTE(review): this ignores the node component, so identically
+            # named ports on different nodes could both match — confirm
+            # desired_ports is already scoped appropriately by the caller.
+            port_name = port.split(':')[1]
+            for port_to_add_or_move in self.desired_ports:
+                if port_name == port_to_add_or_move['name']:
+                    ports_to_add_move.append({'uuid': port_to_add_or_move['uuid']})
+        return ports_to_add_move
+
+    def apply(self):
+        """
+        Run Module based on play book
+
+        Determines the create/delete/modify/split action by comparing current
+        and desired state, then executes it unless running in check mode.
+        """
+        current = self.get_broadcast_domain()
+        cd_action, split = None, None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action == 'create' and any(self.parameters.get(attr) is not None for attr in ('from_name', 'from_ipspace')):
+            # either create new domain or split domain, also ipspace can be modified.
+            from_name = self.parameters.get('from_name', self.parameters['name'])
+            from_ipspace = self.parameters.get('from_ipspace', self.parameters.get('ipspace'))
+            from_current = self.get_broadcast_domain(from_name, from_ipspace)
+            # is_rename_action returns True for a rename/split, None when the
+            # source does not exist.
+            split = self.na_helper.is_rename_action(from_current, current)
+            if split is None:
+                self.module.fail_json(msg='A domain cannot be split if it does not exist.',
+                                      exception=traceback.format_exc())
+            if split:
+                cd_action = None
+                current = from_current
+                if self.use_rest:
+                    # REST has no split ZAPI; it is emulated with rename or
+                    # create + port moves below.
+                    split = False
+                    # check for exact match of ports only if from_name present.
+                    if self.parameters.get('from_name'):
+                        # rename with no change in ports.
+                        if 'ports' not in self.parameters:
+                            self.parameters['ports'] = from_current['ports']
+                        partial_match = set(from_current['ports']) - set(self.parameters['ports'])
+                        # create new broadcast domain with desired ports (REST will move them over from the other domain if necessary)
+                        if partial_match:
+                            cd_action = 'create'
+                            current = None
+                        # rename with no change in ports.
+                        else:
+                            self.parameters.pop('from_name')
+        modify = self.get_modify_attributes(current, split) if cd_action is None else {}
+        if self.na_helper.changed and not self.module.check_mode:
+            if split:
+                self.split_broadcast_domain()
+            if cd_action == 'create':
+                self.create_broadcast_domain(self.desired_ports)
+            elif cd_action == 'delete':
+                self.delete_broadcast_domain(current=current)
+            elif modify:
+                self.modify_broadcast_domain_or_ports(modify, current)
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Creates the NetApp ONTAP Broadcast Domain Object that can be created, deleted and modified.
+    """
+    obj = NetAppOntapBroadcastDomain()
+    obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py
new file mode 100644
index 000000000..baa949bd3
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_broadcast_domain_ports
+short_description: NetApp ONTAP manage broadcast domain ports
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Add or remove ONTAP broadcast domain ports. Existing ports that are not listed are kept.
+options:
+ state:
+ description:
+ - Whether the specified broadcast domain should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ broadcast_domain:
+ description:
+ - Specify the broadcast_domain name
+ required: true
+ type: str
+ ipspace:
+ description:
+ - Specify the ipspace for the broadcast domain
+ type: str
+ ports:
+ description:
+ - Specify the list of ports to add to or remove from this broadcast domain.
+ required: true
+ type: list
+ elements: str
+
+'''
+
+EXAMPLES = """
+ - name: create broadcast domain ports
+ na_ontap_broadcast_domain_ports:
+ state=present
+ username={{ netapp_username }}
+ password={{ netapp_password }}
+ hostname={{ netapp_hostname }}
+ broadcast_domain=123kevin
+ ports=khutton-vsim1:e0d-13
+ - name: delete broadcast domain ports
+ na_ontap_broadcast_domain_ports:
+ state=absent
+ username={{ netapp_username }}
+ password={{ netapp_password }}
+ hostname={{ netapp_hostname }}
+ broadcast_domain=123kevin
+ ports=khutton-vsim1:e0d-13
+"""
+
+RETURN = """
+
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapBroadcastDomainPorts(object):
+    """
+    Add ports to or remove ports from an existing ONTAP broadcast domain.
+
+    ZAPI-only module: existing ports that are not listed are left in place.
+    Deprecated in favour of netapp.ontap.na_ontap_ports.
+    """
+    def __init__(self):
+        """
+        Initialize the Ontap broadcast domain ports class
+        """
+        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            broadcast_domain=dict(required=True, type='str'),
+            ipspace=dict(required=False, type='str', default=None),
+            ports=dict(required=True, type='list', elements='str'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        parameters = self.module.params
+        self.na_helper = NetAppModule(self.module)
+        # This module is deprecated: warn and fall back to ZAPI only.
+        self.na_helper.module_replaces('na_ontap_ports', self.module)
+        msg = 'The module only supports ZAPI and is deprecated; netapp.ontap.na_ontap_ports should be used instead.'
+        self.na_helper.fall_back_to_zapi(self.module, msg, parameters)
+
+        # set up state variables
+        self.state = parameters['state']
+        self.broadcast_domain = parameters['broadcast_domain']
+        self.ipspace = parameters['ipspace']
+        self.ports = parameters['ports']
+
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+        return
+
+    def get_broadcast_domain_ports(self):
+        """
+        Return details about the broadcast domain ports
+        (ZAPI net-port-broadcast-domain-get-iter, queried by
+        self.broadcast_domain).
+        :return: dict with 'domain-name' and 'ports' (list, possibly empty)
+            when exactly one domain matches; None if not found.
+        :rtype: dict
+        """
+        domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
+        broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
+        broadcast_domain_info.add_new_child('broadcast-domain', self.broadcast_domain)
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(broadcast_domain_info)
+        domain_get_iter.add_child_elem(query)
+        result = self.server.invoke_successfully(domain_get_iter, True)
+        domain_exists = None
+        # check if broadcast domain exists
+        if result.get_child_by_name('num-records') and \
+                int(result.get_child_content('num-records')) == 1:
+            domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info')
+            domain_name = domain_info.get_child_content('broadcast-domain')
+            domain_ports = domain_info.get_child_by_name('ports')
+            if domain_ports is not None:
+                ports = [port.get_child_content('port') for port in domain_ports.get_children()]
+            else:
+                ports = []
+            domain_exists = {
+                'domain-name': domain_name,
+                'ports': ports
+            }
+        return domain_exists
+
+    def create_broadcast_domain_ports(self, ports):
+        """
+        Add ports to the broadcast domain
+        (ZAPI net-port-broadcast-domain-add-ports).
+        :param ports: list of qualified port names (node:port) to add.
+        """
+        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports')
+        domain_obj.add_new_child("broadcast-domain", self.broadcast_domain)
+        if self.ipspace:
+            domain_obj.add_new_child("ipspace", self.ipspace)
+        if ports:
+            ports_obj = netapp_utils.zapi.NaElement('ports')
+            domain_obj.add_child_elem(ports_obj)
+            for port in ports:
+                ports_obj.add_new_child('net-qualified-port-name', port)
+        try:
+            self.server.invoke_successfully(domain_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating port for broadcast domain %s: %s' %
+                                      (self.broadcast_domain, to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_broadcast_domain_ports(self, ports):
+        """
+        Remove ports from the broadcast domain
+        (ZAPI net-port-broadcast-domain-remove-ports).
+        :param ports: list of qualified port names (node:port) to remove.
+        """
+        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports')
+        domain_obj.add_new_child("broadcast-domain", self.broadcast_domain)
+        if self.ipspace:
+            domain_obj.add_new_child("ipspace", self.ipspace)
+        if ports:
+            ports_obj = netapp_utils.zapi.NaElement('ports')
+            domain_obj.add_child_elem(ports_obj)
+            for port in ports:
+                ports_obj.add_new_child('net-qualified-port-name', port)
+        try:
+            self.server.invoke_successfully(domain_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting port for broadcast domain %s: %s' %
+                                      (self.broadcast_domain, to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def apply(self):
+        """
+        Run Module based on play book
+
+        Adds the listed ports when state is 'present', removes them when
+        state is 'absent'; fails if the broadcast domain does not exist.
+        Honors check mode.
+        """
+        changed = False
+        broadcast_domain_details = self.get_broadcast_domain_ports()
+        if broadcast_domain_details is None:
+            self.module.fail_json(msg='Error broadcast domain not found: %s' % self.broadcast_domain)
+        if self.state == 'present':  # execute create
+            # Only add ports that are not already members.
+            ports_to_add = [port for port in self.ports if port not in broadcast_domain_details['ports']]
+            if len(ports_to_add) > 0:
+                if not self.module.check_mode:
+                    self.create_broadcast_domain_ports(ports_to_add)
+                changed = True
+        elif self.state == 'absent':  # execute delete
+            # Only remove ports that are current members.
+            ports_to_delete = [port for port in self.ports if port in broadcast_domain_details['ports']]
+            if len(ports_to_delete) > 0:
+                if not self.module.check_mode:
+                    self.delete_broadcast_domain_ports(ports_to_delete)
+                changed = True
+
+        self.module.exit_json(changed=changed)
+
+
+def main():
+    """
+    Creates the NetApp ONTAP broadcast domain ports object and runs the correct play task
+    """
+    obj = NetAppOntapBroadcastDomainPorts()
+    obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py
new file mode 100644
index 000000000..313bf223e
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP manage consistency group snapshot
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create consistency group snapshot for ONTAP volumes.
+ - This module only supports ZAPI and is deprecated.
+ - The final version of ONTAP to support ZAPI is 9.12.1.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+module: na_ontap_cg_snapshot
+options:
+ state:
+ description:
+ - If you want to create a snapshot.
+ default: present
+ type: str
+ vserver:
+ required: true
+ type: str
+ description:
+ - Name of the vserver.
+ volumes:
+ required: true
+ type: list
+ elements: str
+ description:
+ - A list of volumes in this filer that is part of this CG operation.
+ snapshot:
+ required: true
+ type: str
+ description:
+ - The provided name of the snapshot that is created in each volume.
+ timeout:
+ description:
+ - Timeout selector.
+ choices: ['urgent', 'medium', 'relaxed']
+ type: str
+ default: medium
+ snapmirror_label:
+ description:
+ - A human readable SnapMirror label to be attached with the consistency group snapshot copies.
+ type: str
+version_added: 2.7.0
+
+'''
+
+EXAMPLES = """
+ - name:
+ na_ontap_cg_snapshot:
+ state: present
+ vserver: vserver_name
+ snapshot: snapshot name
+ volumes: vol_name
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPCGSnapshot(object):
+ """
+ Methods to create CG snapshots
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', default='present'),
+ vserver=dict(required=True, type='str'),
+ volumes=dict(required=True, type='list', elements='str'),
+ snapshot=dict(required=True, type='str'),
+ timeout=dict(required=False, type='str', choices=[
+ 'urgent', 'medium', 'relaxed'], default='medium'),
+ snapmirror_label=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+
+ parameters = self.module.params
+
+ # set up variables
+ self.state = parameters['state']
+ self.vserver = parameters['vserver']
+ self.volumes = parameters['volumes']
+ self.snapshot = parameters['snapshot']
+ self.timeout = parameters['timeout']
+ self.snapmirror_label = parameters['snapmirror_label']
+ self.cgid = None
+ NetAppModule().module_deprecated(self.module)
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(
+ msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(
+ module=self.module, vserver=self.vserver)
+
+ def does_snapshot_exist(self, volume):
+ """
+ This is duplicated from na_ontap_snapshot
+ Checks to see if a snapshot exists or not
+ :return: Return True if a snapshot exists, false if it dosn't
+ """
+ # TODO: Remove this method and import snapshot module and
+ # call get after re-factoring __init__ across all the modules
+ # we aren't importing now, since __init__ does a lot of Ansible setup
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-get-iter")
+ desired_attr = netapp_utils.zapi.NaElement("desired-attributes")
+ snapshot_info = netapp_utils.zapi.NaElement('snapshot-info')
+ comment = netapp_utils.zapi.NaElement('comment')
+ # add more desired attributes that are allowed to be modified
+ snapshot_info.add_child_elem(comment)
+ desired_attr.add_child_elem(snapshot_info)
+ snapshot_obj.add_child_elem(desired_attr)
+ # compose query
+ query = netapp_utils.zapi.NaElement("query")
+ snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
+ snapshot_info_obj.add_new_child("name", self.snapshot)
+ snapshot_info_obj.add_new_child("volume", volume)
+ snapshot_info_obj.add_new_child("vserver", self.vserver)
+ query.add_child_elem(snapshot_info_obj)
+ snapshot_obj.add_child_elem(query)
+ result = self.server.invoke_successfully(snapshot_obj, True)
+ return_value = None
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ snap_info = attributes_list.get_child_by_name('snapshot-info')
+ return_value = {'comment': snap_info.get_child_content('comment')}
+ return return_value
+
+ def cgcreate(self):
+ """
+ Calls cg-start and cg-commit (when cg-start succeeds)
+ """
+ started = self.cg_start()
+ if started:
+ if self.cgid is not None:
+ self.cg_commit()
+ else:
+ self.module.fail_json(msg="Error fetching CG ID for CG commit %s" % self.snapshot,
+ exception=traceback.format_exc())
+ return started
+
+ def cg_start(self):
+ """
+ For the given list of volumes, creates cg-snapshot
+ """
+ snapshot_started = False
+ cgstart = netapp_utils.zapi.NaElement("cg-start")
+ cgstart.add_new_child("snapshot", self.snapshot)
+ cgstart.add_new_child("timeout", self.timeout)
+ volume_list = netapp_utils.zapi.NaElement("volumes")
+ cgstart.add_child_elem(volume_list)
+ for vol in self.volumes:
+ snapshot_exists = self.does_snapshot_exist(vol)
+ if snapshot_exists is None:
+ snapshot_started = True
+ volume_list.add_new_child("volume-name", vol)
+ if snapshot_started:
+ if self.snapmirror_label:
+ cgstart.add_new_child("snapmirror-label",
+ self.snapmirror_label)
+ try:
+ cgresult = self.server.invoke_successfully(
+ cgstart, enable_tunneling=True)
+ if cgresult.get_child_by_name('cg-id'):
+ self.cgid = cgresult['cg-id']
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error creating CG snapshot %s: %s" %
+ (self.snapshot, to_native(error)),
+ exception=traceback.format_exc())
+ return snapshot_started
+
+ def cg_commit(self):
+ """
+ When cg-start is successful, performs a cg-commit with the cg-id
+ """
+ cgcommit = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cg-commit', **{'cg-id': self.cgid})
+ try:
+ self.server.invoke_successfully(cgcommit,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error committing CG snapshot %s: %s" %
+ (self.snapshot, to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ '''Applies action from playbook'''
+ if not self.module.check_mode:
+ changed = self.cgcreate()
+ self.module.exit_json(changed=changed)
+
+
+def main():
+    '''Execute action from playbook'''
+    cg_obj = NetAppONTAPCGSnapshot()
+    cg_obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py
new file mode 100644
index 000000000..b04a37110
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py
@@ -0,0 +1,563 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# import untangle
+
+'''
+na_ontap_cifs
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create or destroy or modify(path) cifs-share on ONTAP.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cifs
+
+options:
+
+ comment:
+ description:
+ - The CIFS share description.
+ type: str
+ version_added: 21.7.0
+
+ path:
+ description:
+ - The file system path that is shared through this CIFS share. The path is the full, user visible path relative
+ to the vserver root, and it might be crossing junction mount points. The path is in UTF8 and uses forward
+ slash as directory separator.
+ type: str
+
+ vserver:
+ description:
+ - Vserver containing the CIFS share.
+ required: true
+ type: str
+
+ name:
+ description:
+ - The name of the CIFS share. The CIFS share name is a UTF-8 string with the following characters being
+ illegal; control characters from 0x00 to 0x1F, both inclusive, 0x22 (double quotes)
+ required: true
+ aliases: ['share_name']
+ type: str
+
+ share_properties:
+ description:
+ - The list of properties for the CIFS share.
+ - Not supported with REST.
+ - share-properties are separate fields in the REST API.
+ - You can achieve this functionality by setting C(access_based_enumeration), C(change_notify), C(encryption),
+ C(home_directory), C(oplocks), C(show_snapshot), C(continuously_available) and C(namespace_caching).
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ symlink_properties:
+ description:
+ - The list of symlink properties for this CIFS share.
+ - Not supported with REST, this option is replaced with C(unix_symlink) in REST.
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified CIFS share should exist or not.
+ type: str
+ default: present
+
+ vscan_fileop_profile:
+ choices: ['no_scan', 'standard', 'strict', 'writes_only']
+ description:
+ - Profile_set of file_ops to which vscan on access scanning is applicable.
+ - Not supported with REST.
+ type: str
+ version_added: 2.9.0
+
+ unix_symlink:
+ choices: ['local', 'widelink', 'disable']
+ description:
+ - The list of unix_symlink properties for this CIFS share
+ - This option only supported with REST.
+ type: str
+ version_added: 21.19.0
+
+ access_based_enumeration:
+ description:
+ - If enabled, all folders inside this share are visible to a user based on that individual user access right;
+ prevents the display of folders or other shared resources that the user does not have access to.
+ - This option only supported with REST.
+ type: bool
+ version_added: 22.3.0
+
+ allow_unencrypted_access:
+ description:
+ - Specifies whether or not the SMB2 clients are allowed to access the encrypted share.
+ - This option requires REST and ONTAP 9.11.0 or later.
+ type: bool
+ version_added: 22.3.0
+
+ change_notify:
+ description:
+ - Specifies whether CIFS clients can request for change notifications for directories on this share.
+ - This option only supported with REST.
+ type: bool
+ version_added: 22.3.0
+
+ encryption:
+ description:
+ - Specifies that SMB encryption must be used when accessing this share. Clients that do not support encryption are not
+ able to access this share.
+ - This option only supported with REST.
+ type: bool
+ version_added: 22.3.0
+
+ home_directory:
+ description:
+ - Specifies whether or not the share is a home directory share, where the share and path names are dynamic.
+ - ONTAP home directory functionality automatically offer each user a dynamic share to their home directory without creating an
+ individual SMB share for each user.
+ - This feature enables us to configure a share that maps to different directories based on the user that connects to it
+ - Instead of creating a separate shares for each user, a single share with a home directory parameters can be created.
+ - In a home directory share, ONTAP dynamically generates the share-name and share-path by substituting
+ %w, %u, and %d variables with the corresponding Windows user name, UNIX user name, and domain name, respectively.
+ - This option only supported with REST and cannot modify.
+ type: bool
+ version_added: 22.3.0
+
+ namespace_caching:
+ description:
+ - Specifies whether or not the SMB clients connecting to this share can cache the directory enumeration
+ results returned by the CIFS servers.
+ - This option requires REST and ONTAP 9.10.1 or later.
+ type: bool
+ version_added: 22.3.0
+
+ oplocks:
+ description:
+ - Specify whether opportunistic locks are enabled on this share. "Oplocks" allow clients to lock files and cache content locally,
+ which can increase performance for file operations.
+ - Only supported with REST.
+ type: bool
+ version_added: 22.3.0
+
+ show_snapshot:
+ description:
+ - Specifies whether or not the Snapshot copies can be viewed and traversed by clients.
+ - This option requires REST and ONTAP 9.10.1 or later.
+ type: bool
+ version_added: 22.3.0
+
+ continuously_available:
+ description:
+ - Specifies whether or not the clients connecting to this share can open files in a persistent manner.
+ - Files opened in this way are protected from disruptive events, such as, failover and giveback.
+ - This option requires REST and ONTAP 9.10.1 or later.
+ type: bool
+ version_added: 22.3.0
+
+ browsable:
+ description:
+ - Specifies whether or not the Windows clients can browse the share.
+ - This option requires REST and ONTAP 9.13.1 or later.
+ type: bool
+ version_added: 22.5.0
+
+ show_previous_versions:
+ description:
+ - Specifies that the previous version can be viewed and restored from the client.
+ - This option requires REST and ONTAP 9.13.1 or later.
+ type: bool
+ version_added: 22.5.0
+
+short_description: NetApp ONTAP Manage cifs-share
+version_added: 2.6.0
+
+'''
+
+EXAMPLES = """
+ - name: Create CIFS share - ZAPI
+ netapp.ontap.na_ontap_cifs:
+ state: present
+ name: cifsShareName
+ path: /
+ vserver: vserverName
+ share_properties: browsable,oplocks
+ symlink_properties: read_only,enable
+ comment: CIFS share description
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete CIFS share - ZAPI
+ netapp.ontap.na_ontap_cifs:
+ state: absent
+ name: cifsShareName
+ vserver: vserverName
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify path CIFS share - ZAPI
+ netapp.ontap.na_ontap_cifs:
+ state: present
+ name: pb_test
+ vserver: vserverName
+ path: /
+ share_properties: show_previous_versions
+ symlink_properties: disable
+ vscan_fileop_profile: no_scan
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create CIFS share - REST
+ netapp.ontap.na_ontap_cifs:
+ state: present
+ name: cifsShareName
+ path: /
+ vserver: vserverName
+ oplocks: true
+ change_notify: true
+ unix_symlink: disable
+ comment: CIFS share description
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify CIFS share - REST
+ netapp.ontap.na_ontap_cifs:
+ state: present
+ name: cifsShareName
+ path: /
+ vserver: vserverName
+ oplocks: true
+ change_notify: true
+ unix_symlink: local
+ comment: CIFS share description
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppONTAPCifsShare:
    """
    Methods to create/delete/modify(path) CIFS share.

    Supports both the REST API (preferred) and ZAPI; the interface chosen
    at __init__ time (self.use_rest) routes every operation.
    """

    def __init__(self):
        """Build the argument spec, validate REST/ZAPI option support and set up the connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str', aliases=['share_name']),
            path=dict(required=False, type='str'),
            comment=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
            unix_symlink=dict(required=False, type='str', choices=['local', 'widelink', 'disable']),
            share_properties=dict(required=False, type='list', elements='str'),
            symlink_properties=dict(required=False, type='list', elements='str'),
            vscan_fileop_profile=dict(required=False, type='str', choices=['no_scan', 'standard', 'strict', 'writes_only']),
            access_based_enumeration=dict(required=False, type='bool'),
            change_notify=dict(required=False, type='bool'),
            encryption=dict(required=False, type='bool'),
            home_directory=dict(required=False, type='bool'),
            oplocks=dict(required=False, type='bool'),
            show_snapshot=dict(required=False, type='bool'),
            allow_unencrypted_access=dict(required=False, type='bool'),
            namespace_caching=dict(required=False, type='bool'),
            continuously_available=dict(required=False, type='bool'),
            browsable=dict(required=False, type='bool'),
            show_previous_versions=dict(required=False, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # Set up Rest API
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        # Options below require a minimum ONTAP version when using REST.
        partially_supported_rest_properties = [['continuously_available', (9, 10, 1)], ['namespace_caching', (9, 10, 1)],
                                               ['show_snapshot', (9, 10, 1)], ['allow_unencrypted_access', (9, 11)],
                                               ['browsable', (9, 13, 1)], ['show_previous_versions', (9, 13, 1)]]
        # Options that only exist in ZAPI; if present they force ZAPI (or an error).
        unsupported_rest_properties = ['share_properties', 'symlink_properties', 'vscan_fileop_profile']
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
        # Options that only exist in REST; they are rejected below when falling back to ZAPI.
        self.unsupported_zapi_properties = ['unix_symlink', 'access_based_enumeration', 'change_notify', 'encryption', 'home_directory',
                                            'oplocks', 'continuously_available', 'show_snapshot', 'namespace_caching', 'allow_unencrypted_access',
                                            'browsable', 'show_previous_versions']
        # Cached by get_cifs_share_rest for later REST delete/modify calls.
        self.svm_uuid = None
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            for unsupported_zapi_property in self.unsupported_zapi_properties:
                if self.parameters.get(unsupported_zapi_property) is not None:
                    msg = "Error: %s option is not supported with ZAPI. It can only be used with REST." % unsupported_zapi_property
                    self.module.fail_json(msg=msg)
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_cifs_share(self):
        """
        Return details about the cifs-share
        :param:
            name : Name of the cifs-share
        :return: Details about the cifs-share. None if not found.
        :rtype: dict
        """
        if self.use_rest:
            return self.get_cifs_share_rest()
        cifs_iter = netapp_utils.zapi.NaElement('cifs-share-get-iter')
        cifs_info = netapp_utils.zapi.NaElement('cifs-share')
        cifs_info.add_new_child('share-name', self.parameters.get('name'))
        cifs_info.add_new_child('vserver', self.parameters.get('vserver'))

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(cifs_info)

        cifs_iter.add_child_elem(query)

        result = self.server.invoke_successfully(cifs_iter, True)

        return_value = None
        # check if query returns the expected cifs-share
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            properties_list = []
            symlink_list = []
            cifs_attrs = result.get_child_by_name('attributes-list').\
                get_child_by_name('cifs-share')
            # share-properties and symlink-properties come back as child lists;
            # flatten them into plain string lists for easy comparison.
            if cifs_attrs.get_child_by_name('share-properties'):
                properties_attrs = cifs_attrs['share-properties']
                if properties_attrs is not None:
                    properties_list = [property.get_content() for property in properties_attrs.get_children()]
            if cifs_attrs.get_child_by_name('symlink-properties'):
                symlink_attrs = cifs_attrs['symlink-properties']
                if symlink_attrs is not None:
                    symlink_list = [symlink.get_content() for symlink in symlink_attrs.get_children()]
            return_value = {
                'share': cifs_attrs.get_child_content('share-name'),
                'path': cifs_attrs.get_child_content('path'),
                'share_properties': properties_list,
                'symlink_properties': symlink_list
            }
            # Normalize a missing comment to '' so get_modified_attributes compares cleanly.
            value = cifs_attrs.get_child_content('comment')
            return_value['comment'] = value if value is not None else ''
            if cifs_attrs.get_child_by_name('vscan-fileop-profile'):
                return_value['vscan_fileop_profile'] = cifs_attrs['vscan-fileop-profile']

        return return_value

    def create_cifs_share(self):
        """
        Create CIFS share (ZAPI).
        """
        options = {'share-name': self.parameters.get('name'),
                   'path': self.parameters.get('path')}
        cifs_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-create', **options)
        self.create_modify_cifs_share(cifs_create, 'creating')

    def create_modify_cifs_share(self, zapi_request, action):
        """Attach the optional share attributes to a ZAPI create/modify request and invoke it.

        :param zapi_request: pre-built cifs-share-create or cifs-share-modify NaElement
        :param action: 'creating' or 'modifying' — used only in the error message
        """
        if self.parameters.get('share_properties'):
            property_attrs = netapp_utils.zapi.NaElement('share-properties')
            zapi_request.add_child_elem(property_attrs)
            for aproperty in self.parameters.get('share_properties'):
                property_attrs.add_new_child('cifs-share-properties', aproperty)
        if self.parameters.get('symlink_properties'):
            symlink_attrs = netapp_utils.zapi.NaElement('symlink-properties')
            zapi_request.add_child_elem(symlink_attrs)
            for symlink in self.parameters.get('symlink_properties'):
                symlink_attrs.add_new_child('cifs-share-symlink-properties', symlink)
        if self.parameters.get('vscan_fileop_profile'):
            fileop_attrs = netapp_utils.zapi.NaElement('vscan-fileop-profile')
            fileop_attrs.set_content(self.parameters['vscan_fileop_profile'])
            zapi_request.add_child_elem(fileop_attrs)
        if self.parameters.get('comment'):
            zapi_request.add_new_child('comment', self.parameters['comment'])

        try:
            self.server.invoke_successfully(zapi_request,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:

            self.module.fail_json(msg='Error %s cifs-share %s: %s'
                                  % (action, self.parameters.get('name'), to_native(error)),
                                  exception=traceback.format_exc())

    def delete_cifs_share(self):
        """
        Delete CIFS share (ZAPI).
        """
        cifs_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-delete', **{'share-name': self.parameters.get('name')})

        try:
            self.server.invoke_successfully(cifs_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting cifs-share %s: %s'
                                  % (self.parameters.get('name'), to_native(error)),
                                  exception=traceback.format_exc())

    def modify_cifs_share(self):
        """
        Modify path (and other attributes) for the given CIFS share (ZAPI).
        """
        options = {'share-name': self.parameters.get('name')}
        cifs_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-modify', **options)
        if self.parameters.get('path'):
            cifs_modify.add_new_child('path', self.parameters.get('path'))
        self.create_modify_cifs_share(cifs_modify, 'modifying')

    def get_cifs_share_rest(self):
        """
        Get details of the CIFS share with REST API.

        Also caches self.svm_uuid for subsequent delete/modify calls.
        :return: dict of current values, or None when the share does not exist.
        """
        options = {'svm.name': self.parameters.get('vserver'),
                   'name': self.parameters.get('name'),
                   'fields': 'svm.uuid,'
                             'name,'
                             'path,'
                             'comment,'
                             'unix_symlink,'
                             'access_based_enumeration,'
                             'change_notify,'
                             'encryption,'
                             'oplocks,'}
        # Only request version-gated fields when the cluster supports them.
        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
            options['fields'] += 'show_snapshot,namespace_caching,continuously_available,'
        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 0):
            options['fields'] += 'allow_unencrypted_access,'
        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 13, 1):
            options['fields'] += 'browsable,show_previous_versions,'
        api = 'protocols/cifs/shares'
        record, error = rest_generic.get_one_record(self.rest_api, api, options)
        if error:
            self.module.fail_json(msg="Error on fetching cifs shares: %s" % error)
        if record:
            self.svm_uuid = record['svm']['uuid']
            return {
                'path': record['path'],
                'comment': record.get('comment', ''),
                'unix_symlink': record.get('unix_symlink', ''),
                'access_based_enumeration': record.get('access_based_enumeration'),
                'change_notify': record.get('change_notify'),
                'encryption': record.get('encryption'),
                'oplocks': record.get('oplocks'),
                'continuously_available': record.get('continuously_available'),
                'show_snapshot': record.get('show_snapshot'),
                'namespace_caching': record.get('namespace_caching'),
                'allow_unencrypted_access': record.get('allow_unencrypted_access'),
                'browsable': record.get('browsable'),
                'show_previous_versions': record.get('show_previous_versions')
            }
        return None

    def create_modify_body_rest(self, params=None):
        """Build the REST request body from the option values in *params*.

        :param params: modify dict from get_modified_attributes; None means
                       use self.parameters (create case).
        :return: dict suitable as a REST POST/PATCH body (may be empty).
        """
        body = {}
        # modify is set in params, if not assign self.parameters for create.
        if params is None:
            params = self.parameters
        options = ['path', 'comment', 'unix_symlink', 'access_based_enumeration', 'change_notify', 'encryption',
                   'home_directory', 'oplocks', 'continuously_available', 'show_snapshot', 'namespace_caching',
                   'allow_unencrypted_access', 'browsable', 'show_previous_versions']
        for key in options:
            if key in params:
                body[key] = params[key]
        return body

    def create_cifs_share_rest(self):
        """
        Create CIFS share with REST API; falls back to ZAPI when REST is not in use.
        """
        if not self.use_rest:
            return self.create_cifs_share()
        body = self.create_modify_body_rest()
        if 'vserver' in self.parameters:
            body['svm.name'] = self.parameters['vserver']
        if 'name' in self.parameters:
            body['name'] = self.parameters['name']
        if 'path' in self.parameters:
            body['path'] = self.parameters['path']
        api = 'protocols/cifs/shares'
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error is not None:
            self.module.fail_json(msg="Error on creating cifs shares: %s" % error)

    def delete_cifs_share_rest(self):
        """
        Delete CIFS share with REST API; falls back to ZAPI when REST is not in use.
        """
        if not self.use_rest:
            return self.delete_cifs_share()
        body = {'name': self.parameters.get('name')}
        api = 'protocols/cifs/shares'
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.svm_uuid, body)
        if error is not None:
            self.module.fail_json(msg=" Error on deleting cifs shares: %s" % error)

    def modify_cifs_share_rest(self, modify):
        """
        Modify the given CIFS share with REST API; falls back to ZAPI when REST is not in use.

        :param modify: dict of attributes to change (from get_modified_attributes)
        """
        if not self.use_rest:
            return self.modify_cifs_share()
        api = 'protocols/cifs/shares/%s' % self.svm_uuid
        body = self.create_modify_body_rest(modify)
        if body:
            dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body)
            if error is not None:
                self.module.fail_json(msg="Error on modifying cifs shares: %s" % error)

    def apply(self):
        '''Apply action to cifs share: determine create/delete/modify and execute it.'''
        current = self.get_cifs_share()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # ZAPI accepts both 'show-previous-versions' and 'show_previous_versions', but only returns the latter
        if not self.use_rest and cd_action is None and 'show-previous-versions' in self.parameters.get('share_properties', [])\
           and current and 'show_previous_versions' in current.get('share_properties', []):
            self.parameters['share_properties'].remove('show-previous-versions')
            self.parameters['share_properties'].append('show_previous_versions')
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_cifs_share_rest()
            elif cd_action == 'delete':
                self.delete_cifs_share_rest()
            elif modify:
                self.modify_cifs_share_rest(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    '''Execute action from playbook.'''
    NetAppONTAPCifsShare().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py
new file mode 100644
index 000000000..110d56001
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Create or destroy or modify cifs-share-access-controls on ONTAP"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cifs_acl
+options:
+ permission:
+ choices: ['no_access', 'read', 'change', 'full_control']
+ type: str
+ description:
+ - The access rights that the user or group has on the defined CIFS share.
+ share_name:
+ description:
+ - The name of the cifs-share-access-control to manage.
+ required: true
+ type: str
+ aliases: ['share']
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified CIFS share acl should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ user_or_group:
+ description:
+ - The user or group name for which the permissions are listed.
+ required: true
+ type: str
+ type:
+ description:
+ - The type (also known as user-group-type) of the user or group to add to the ACL.
+ - Type is required for create, delete and modify unix-user or unix-group to/from the ACL in ZAPI.
+ type: str
+ choices: [windows, unix_user, unix_group]
+ version_added: 21.17.0
+short_description: NetApp ONTAP manage cifs-share-access-control
+
+'''
+
+EXAMPLES = """
+ - name: Create CIFS share acl
+ netapp.ontap.na_ontap_cifs_acl:
+ state: present
+ share_name: cifsShareName
+ user_or_group: Everyone
+ permission: read
+ vserver: "{{ netapp_vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Modify CIFS share acl permission
+ netapp.ontap.na_ontap_cifs_acl:
+ state: present
+ share_name: cifsShareName
+ user_or_group: Everyone
+ permission: change
+ vserver: "{{ netapp_vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppONTAPCifsAcl:
    """
    Methods to create/delete/modify CIFS share/user access-control.

    Supports both REST (preferred) and ZAPI; self.use_rest selects the path.
    """

    def __init__(self):
        """Build the argument spec and set up the REST or ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            share_name=dict(required=True, type='str', aliases=['share']),
            user_or_group=dict(required=True, type='str'),
            permission=dict(required=False, type='str', choices=['no_access', 'read', 'change', 'full_control']),
            type=dict(required=False, type='str', choices=['windows', 'unix_user', 'unix_group']),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['permission'])
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Set up Rest API
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            if netapp_utils.has_netapp_lib() is False:
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_cifs_acl(self):
        """
        Return details about the cifs-share-access-control
        :param:
            name : Name of the cifs-share-access-control
        :return: Details about the cifs-share-access-control. None if not found.
        :rtype: dict
        """
        cifs_acl_iter = netapp_utils.zapi.NaElement('cifs-share-access-control-get-iter')
        cifs_acl_info = netapp_utils.zapi.NaElement('cifs-share-access-control')
        cifs_acl_info.add_new_child('share', self.parameters['share_name'])
        cifs_acl_info.add_new_child('user-or-group', self.parameters['user_or_group'])
        cifs_acl_info.add_new_child('vserver', self.parameters['vserver'])
        # type is optional; only filter on it when the user supplied it.
        if self.parameters.get('type') is not None:
            cifs_acl_info.add_new_child('user-group-type', self.parameters['type'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(cifs_acl_info)
        cifs_acl_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(cifs_acl_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting cifs-share-access-control %s: %s'
                                  % (self.parameters['share_name'], to_native(error)))
        return_value = None
        # check if query returns the expected cifs-share-access-control
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:

            cifs_acl = result.get_child_by_name('attributes-list').get_child_by_name('cifs-share-access-control')
            return_value = {
                'share': cifs_acl.get_child_content('share'),
                'user-or-group': cifs_acl.get_child_content('user-or-group'),
                'permission': cifs_acl.get_child_content('permission'),
                'type': cifs_acl.get_child_content('user-group-type'),
            }
        return return_value

    def create_cifs_acl(self):
        """
        Create access control for the given CIFS share/user-group (ZAPI).
        """
        options = {
            'share': self.parameters['share_name'],
            'user-or-group': self.parameters['user_or_group'],
            'permission': self.parameters['permission']
        }
        # type is required for unix-user and unix-group
        if self.parameters.get('type') is not None:
            options['user-group-type'] = self.parameters['type']

        cifs_acl_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-access-control-create', **options)
        try:
            self.server.invoke_successfully(cifs_acl_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating cifs-share-access-control %s: %s'
                                  % (self.parameters['share_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_cifs_acl(self):
        """
        Delete access control for the given CIFS share/user-group (ZAPI).
        """
        options = {
            'share': self.parameters['share_name'],
            'user-or-group': self.parameters['user_or_group']
        }
        # type is required for unix-user and unix-group
        if self.parameters.get('type') is not None:
            options['user-group-type'] = self.parameters['type']
        cifs_acl_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-access-control-delete', **options)
        try:
            self.server.invoke_successfully(cifs_acl_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting cifs-share-access-control %s: %s'
                                  % (self.parameters['share_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_cifs_acl_permission(self):
        """
        Change permission or type for the given CIFS share/user-group (ZAPI).
        """
        options = {
            'share': self.parameters['share_name'],
            'user-or-group': self.parameters['user_or_group'],
            'permission': self.parameters['permission']
        }
        # type is required for unix-user and unix-group
        if self.parameters.get('type') is not None:
            options['user-group-type'] = self.parameters['type']

        cifs_acl_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-access-control-modify', **options)
        try:
            self.server.invoke_successfully(cifs_acl_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying cifs-share-access-control permission %s: %s'
                                  % (self.parameters['share_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_modify(self, current):
        """Return the modify dict, or fail if anything other than permission changed.

        ONTAP only supports changing the permission; a 'type' change (or any
        other difference) is rejected with fail_json.
        """
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if not modify or ('permission' in modify and len(modify) == 1):
            return modify
        if 'type' in modify:
            self.module.fail_json(msg='Error: changing the type is not supported by ONTAP - current: %s, desired: %s'
                                  % (current['type'], self.parameters['type']))
        self.module.fail_json(msg='Error: only permission can be changed - modify: %s' % modify)

    def get_cifs_share_rest(self):
        """
        Get uuid of the svm which has CIFS share with REST API.

        Fails the module when the share does not exist (an ACL requires
        an existing share).
        :return: dict with the svm 'uuid'.
        """
        options = {'svm.name': self.parameters.get('vserver'),
                   'name': self.parameters.get('share_name')}
        api = 'protocols/cifs/shares'
        fields = 'svm.uuid,name'
        record, error = rest_generic.get_one_record(self.rest_api, api, options, fields)
        if error:
            self.module.fail_json(msg="Error on fetching cifs shares: %s" % error)
        if record:
            return {'uuid': record['svm']['uuid']}
        self.module.fail_json(msg="Error: the cifs share does not exist: %s" % self.parameters['share_name'])

    def get_cifs_acl_rest(self, svm_uuid):
        """
        Get details of the CIFS share acl with REST API; falls back to ZAPI.

        :param svm_uuid: dict with the svm 'uuid' (from get_cifs_share_rest); ignored with ZAPI.
        :return: dict of current ACL values, or None when not found.
        """
        if not self.use_rest:
            return self.get_cifs_acl()
        query = {'user_or_group': self.parameters.get('user_or_group')}
        ug_type = self.parameters.get('type')
        if ug_type:
            query['type'] = ug_type
        api = 'protocols/cifs/shares/%s/%s/acls' % (svm_uuid['uuid'], self.parameters.get('share_name'))
        fields = 'svm.uuid,user_or_group,type,permission'
        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
        if error:
            self.module.fail_json(msg="Error on fetching cifs shares acl: %s" % error)
        if record:
            # NOTE(review): 'share' is read from the record but is not in the
            # requested fields list above — confirm the API returns it anyway.
            return {
                'uuid': record['svm']['uuid'],
                'share': record['share'],
                'user_or_group': record['user_or_group'],
                'type': record['type'],
                'permission': record['permission']
            }
        return None

    def create_cifs_acl_rest(self, svm_uuid):
        """
        Create CIFS share acl with REST API; falls back to ZAPI.

        :param svm_uuid: dict with the svm 'uuid'; ignored with ZAPI.
        """
        if not self.use_rest:
            return self.create_cifs_acl()
        body = {
            'user_or_group': self.parameters.get('user_or_group'),
            'permission': self.parameters.get('permission')
        }
        ug_type = self.parameters.get('type')
        if ug_type:
            body['type'] = ug_type
        api = 'protocols/cifs/shares/%s/%s/acls' % (svm_uuid['uuid'], self.parameters.get('share_name'))
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error is not None:
            self.module.fail_json(msg="Error on creating cifs share acl: %s" % error)

    def delete_cifs_acl_rest(self, current):
        """
        Delete access control for the given CIFS share/user-group with REST API; falls back to ZAPI.

        :param current: dict from get_cifs_acl_rest (provides 'uuid' and 'type').
        """
        if not self.use_rest:
            return self.delete_cifs_acl()
        body = {'svm.name': self.parameters.get('vserver')}
        api = 'protocols/cifs/shares/%s/%s/acls/%s/%s' % (
            current['uuid'], self.parameters.get('share_name'), self.parameters.get('user_or_group'), current.get('type'))
        dummy, error = rest_generic.delete_async(self.rest_api, api, None, body)
        if error is not None:
            self.module.fail_json(msg="Error on deleting cifs share acl: %s" % error)

    def modify_cifs_acl_permission_rest(self, current):
        """
        Change permission for the given CIFS share/user-group with REST API; falls back to ZAPI.

        :param current: dict from get_cifs_acl_rest (provides 'uuid' and 'type').
        """
        if not self.use_rest:
            return self.modify_cifs_acl_permission()
        body = {'permission': self.parameters.get('permission')}
        api = 'protocols/cifs/shares/%s/%s/acls/%s/%s' % (
            current['uuid'], self.parameters.get('share_name'), self.parameters.get('user_or_group'), current.get('type'))
        dummy, error = rest_generic.patch_async(self.rest_api, api, None, body)
        if error is not None:
            self.module.fail_json(msg="Error modifying cifs share ACL permission: %s" % error)

    def apply(self):
        """
        Apply action to cifs-share-access-control: create/delete/modify as needed.
        """
        svm_uuid = self.get_cifs_share_rest() if self.use_rest else None
        current = self.get_cifs_acl_rest(svm_uuid)
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.get_modify(current) if cd_action is None and self.parameters['state'] == 'present' else None
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_cifs_acl_rest(svm_uuid)
            if cd_action == 'delete':
                self.delete_cifs_acl_rest(current)
            if modify:
                self.modify_cifs_acl_permission_rest(current)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """
    Execute action from playbook
    """
    NetAppONTAPCifsAcl().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group.py
new file mode 100644
index 000000000..42a552a41
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_cifs_local_group
+short_description: NetApp Ontap - create, delete or modify CIFS local group.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '22.1.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete or modify CIFS local group.
+options:
+ state:
+ description:
+    - Whether the specified CIFS local group should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver that owns the CIFS local group
+ required: true
+ type: str
+
+ name:
+ description:
+ - Specifies name of the CIFS local group
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Specifies the existing cifs local group name.
+ - This option is used to rename cifs local group.
+ type: str
+
+ description:
+ description:
+ - Description for the local group.
+ type: str
+"""
+
+EXAMPLES = """
+ - name: create CIFS local group
+ netapp.ontap.na_ontap_cifs_local_group:
+ state: present
+ vserver: svm1
+ name: BUILTIN\\administrators
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Delete CIFS local group
+ netapp.ontap.na_ontap_cifs_local_group:
+ state: absent
+ vserver: svm1
+ name: BUILTIN\\administrators
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify CIFS local group description
+ netapp.ontap.na_ontap_cifs_local_group:
+ state: present
+ vserver: svm1
+ name: BUILTIN\\administrators
+      description: 'CIFS local group'
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Rename CIFS local group description
+ netapp.ontap.na_ontap_cifs_local_group:
+ state: present
+ vserver: svm1
+ name: ANSIBLE_CIFS\\test_users
+      description: 'CIFS local group'
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapCifsLocalGroup:
+    """
+    Create, delete or modify CIFS local group
+
+    REST-only module: __init__ fails unless ONTAP 9.10.1 or later is available.
+    The owning SVM uuid and the group SID are cached on the instance by the GET
+    and reused to build PATCH/DELETE URLs.
+    """
+    def __init__(self):
+        """
+        Initialize the Ontap CifsLocalGroup class
+        """
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            name=dict(required=True, type='str'),
+            description=dict(required=False, type='str'),
+            from_name=dict(required=False, type='str'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = netapp_utils.OntapRestAPI(self.module)
+        # REST only: no ZAPI fallback for this module
+        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_cifs_local_group', 9, 10, 1)
+        # populated by get_cifs_local_group_rest() when the group exists
+        self.svm_uuid = None
+        self.sid = None
+
+    def get_cifs_local_group_rest(self, from_name=None):
+        """
+        Retrieves the local group of an SVM.
+
+        :param from_name: when set, look up this (old) group name instead of self.parameters['name'].
+        :return: dict with 'name' and 'description' when the group exists, None otherwise.
+        Side effect: caches the SVM uuid and group SID on the instance.
+        """
+        api = "protocols/cifs/local-groups"
+        query = {
+            'name': from_name or self.parameters['name'],
+            'svm.name': self.parameters['vserver'],
+            'fields': 'svm.uuid,sid,description'
+        }
+        record, error = rest_generic.get_one_record(self.rest_api, api, query)
+        if error:
+            self.module.fail_json(msg="Error on fetching cifs local-group: %s" % error)
+        if record:
+            self.svm_uuid = self.na_helper.safe_get(record, ['svm', 'uuid'])
+            self.sid = self.na_helper.safe_get(record, ['sid'])
+            return {
+                'name': self.na_helper.safe_get(record, ['name']),
+                # default to '' so an absent description compares cleanly against the desired state
+                'description': record.get('description', ''),
+            }
+        return None
+
+    def create_cifs_local_group_rest(self):
+        """
+        Creates the local group of an SVM.
+        """
+        api = "protocols/cifs/local-groups"
+        body = {
+            'name': self.parameters['name'],
+            'svm.name': self.parameters['vserver']
+        }
+        if 'description' in self.parameters:
+            body['description'] = self.parameters['description']
+        # the POST response record is not used
+        record, error = rest_generic.post_async(self.rest_api, api, body)
+        if error:
+            self.module.fail_json(msg="Error on creating cifs local-group: %s" % error)
+
+    def delete_cifs_local_group_rest(self):
+        """
+        Destroy the local group of an SVM.
+        """
+        # svm_uuid and sid were cached by the earlier GET of the existing group
+        api = "protocols/cifs/local-groups/%s/%s" % (self.svm_uuid, self.sid)
+        record, error = rest_generic.delete_async(self.rest_api, api, None)
+        if error:
+            self.module.fail_json(msg="Error on deleting cifs local-group: %s" % error)
+
+    def modify_cifs_local_group_rest(self, modify):
+        """
+        Modify the description of CIFS local group.
+        Rename cifs local group.
+
+        :param modify: dict of changed attributes; may contain 'description' and/or 'name'.
+        """
+        body = {}
+        if 'description' in modify:
+            body['description'] = self.parameters['description']
+        if 'name' in modify:
+            body['name'] = self.parameters['name']
+        api = "protocols/cifs/local-groups/%s/%s" % (self.svm_uuid, self.sid)
+        dummy, error = rest_generic.patch_async(self.rest_api, api, None, body)
+        if error is not None:
+            self.module.fail_json(msg="Error on modifying cifs local-group: %s" % error)
+
+    def apply(self):
+        # Idempotency driver: read current state, derive create/delete/modify/rename.
+        current = self.get_cifs_local_group_rest()
+        rename = None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action == 'create' and 'from_name' in self.parameters:
+            # 'name' was not found: check whether this is a rename from 'from_name'
+            group_info = self.get_cifs_local_group_rest(self.parameters['from_name'])
+            rename = self.na_helper.is_rename_action(group_info, current)
+            if rename:
+                current = group_info
+                cd_action = None
+            else:
+                self.module.fail_json(msg='Error renaming cifs local group: %s - no cifs local group with from_name: %s.'
+                                      % (self.parameters['name'], self.parameters['from_name']))
+        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.create_cifs_local_group_rest()
+            elif cd_action == 'delete':
+                self.delete_cifs_local_group_rest()
+            if modify or rename:
+                # a rename is applied through the same PATCH call
+                self.modify_cifs_local_group_rest(modify)
+        # NOTE(review): modify/rename details are not passed to generate_result, so the
+        # module result only reports 'changed' without modified attributes -- verify intended.
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Creates the NetApp Ontap Cifs Local Group object and runs the correct play task
+    """
+    obj = NetAppOntapCifsLocalGroup()
+    obj.apply()
+
+
+# module entry point when invoked by Ansible
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group_member.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group_member.py
new file mode 100644
index 000000000..3003bd3bf
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group_member.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+
+# (c) 2021-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: na_ontap_cifs_local_group_member
+short_description: NetApp Ontap - Add or remove CIFS local group member
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.2.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Add or remove CIFS local group member
+options:
+ state:
+ description:
+ - Whether the specified member should be part of the CIFS local group
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver that owns the CIFS local group
+ required: true
+ type: str
+
+ group:
+ description:
+ - Specifies name of the CIFS local group
+ required: true
+ type: str
+
+ member:
+ description:
+ - Specifies the name of the member
+ required: true
+ type: str
+
+notes:
+ - Supports check_mode.
+ - Supported with ZAPI.
+ - Supported with REST starting with ONTAP 9.10.1.
+"""
+
+EXAMPLES = """
+ - name: Add member to CIFS local group
+ netapp.ontap.na_ontap_cifs_local_group_member:
+ state: present
+ vserver: svm1
+ group: BUILTIN\\administrators
+ member: DOMAIN\\Domain Admins
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ontapi: "{{ ontap_facts.ontap_version }}"
+ https: true
+ validate_certs: false
+
+ - name: Remove member from CIFS local group
+ netapp.ontap.na_ontap_cifs_local_group_member:
+ state: absent
+ vserver: svm1
+ group: BUILTIN\\administrators
+ member: DOMAIN\\Domain Admins
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ontapi: "{{ ontap_facts.ontap_version }}"
+ https: true
+ validate_certs: false
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapCifsLocalGroupMember:
+    """
+    Add or remove CIFS local group members
+
+    Uses REST when the cluster supports ONTAP 9.10.1 or later, otherwise falls
+    back to ZAPI (requires the netapp-lib python package).
+    """
+    def __init__(self):
+        """
+        Initialize the Ontap CifsLocalGroupMember class
+        """
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            group=dict(required=True, type='str'),
+            member=dict(required=True, type='str')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = netapp_utils.OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+
+        # REST support for these APIs starts at 9.10.1; otherwise try ZAPI
+        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
+            msg = 'REST requires ONTAP 9.10.1 or later for cifs_local_group_member APIs.'
+            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
+        # populated by get_cifs_local_group_rest() (REST path only)
+        self.svm_uuid = None
+        self.sid = None
+
+        if not self.use_rest:
+            if netapp_utils.has_netapp_lib() is False:
+                self.module.fail_json(msg="the python NetApp-Lib module is required")
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_cifs_local_group_rest(self):
+        """
+        Retrieves the local group of an SVM.
+
+        Side effect: caches the SVM uuid and group SID on the instance.
+        Fails the module when the group does not exist.
+        """
+        api = "protocols/cifs/local-groups"
+        query = {
+            'name': self.parameters['group'],
+            'svm.name': self.parameters['vserver'],
+            'fields': 'svm.uuid,sid'
+        }
+        record, error = rest_generic.get_one_record(self.rest_api, api, query)
+        if error:
+            self.module.fail_json(msg="Error on fetching cifs local-group: %s" % error)
+        if record:
+            self.svm_uuid = self.na_helper.safe_get(record, ['svm', 'uuid'])
+            self.sid = self.na_helper.safe_get(record, ['sid'])
+        if record is None:
+            self.module.fail_json(
+                msg='CIFS local group %s does not exist on vserver %s' %
+                (self.parameters['group'], self.parameters['vserver'])
+            )
+
+    def get_cifs_local_group_member(self):
+        """
+        Retrieves local users, Active Directory users and
+        Active Directory groups which are members of the specified local group and SVM.
+
+        :return: dict describing the member when found, None otherwise.
+        """
+        return_value = None
+
+        if self.use_rest:
+            # resolves svm_uuid/sid (or fails when the group is missing)
+            self.get_cifs_local_group_rest()
+            api = 'protocols/cifs/local-groups/%s/%s/members' % (self.svm_uuid, self.sid)
+            query = {
+                'name': self.parameters['member'],
+                'svm.name': self.parameters['vserver'],
+                'fields': 'name',
+            }
+            record, error = rest_generic.get_one_record(self.rest_api, api, query)
+            if error:
+                self.module.fail_json(
+                    msg='Error getting CIFS local group members for group %s on vserver %s: %s' %
+                    (self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+            if record:
+                return {
+                    'member': self.na_helper.safe_get(record, ['name'])
+                }
+            return record
+
+        else:
+            # ZAPI path: query members by group, vserver and member name
+            group_members_get_iter = netapp_utils.zapi.NaElement('cifs-local-group-members-get-iter')
+            group_members_info = netapp_utils.zapi.NaElement('cifs-local-group-members')
+            group_members_info.add_new_child('group-name', self.parameters['group'])
+            group_members_info.add_new_child('vserver', self.parameters['vserver'])
+            group_members_info.add_new_child('member', self.parameters['member'])
+            query = netapp_utils.zapi.NaElement('query')
+            query.add_child_elem(group_members_info)
+            group_members_get_iter.add_child_elem(query)
+
+            try:
+                result = self.server.invoke_successfully(group_members_get_iter, True)
+                if result.get_child_by_name('attributes-list'):
+                    group_member_policy_attributes = result['attributes-list']['cifs-local-group-members']
+
+                    return_value = {
+                        'group': group_member_policy_attributes['group-name'],
+                        'member': group_member_policy_attributes['member'],
+                        'vserver': group_member_policy_attributes['vserver']
+                    }
+
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg='Error getting CIFS local group members for group %s on vserver %s: %s' %
+                    (self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+            return return_value
+
+    def add_cifs_local_group_member(self):
+        """
+        Adds a member to a CIFS local group
+        """
+        if self.use_rest:
+            api = 'protocols/cifs/local-groups/%s/%s/members' % (self.svm_uuid, self.sid)
+            body = {'name': self.parameters['member']}
+            dummy, error = rest_generic.post_async(self.rest_api, api, body)
+            if error:
+                self.module.fail_json(
+                    msg='Error adding member %s to cifs local group %s on vserver %s: %s' %
+                    (self.parameters['member'], self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+        else:
+            group_members_obj = netapp_utils.zapi.NaElement("cifs-local-group-members-add-members")
+            group_members_obj.add_new_child("group-name", self.parameters['group'])
+            member_names = netapp_utils.zapi.NaElement("member-names")
+            member_names.add_new_child('cifs-name', self.parameters['member'])
+            group_members_obj.add_child_elem(member_names)
+
+            try:
+                self.server.invoke_successfully(group_members_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg='Error adding member %s to cifs local group %s on vserver %s: %s' %
+                    (self.parameters['member'], self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+    def remove_cifs_local_group_member(self):
+        """
+        Removes a member from a CIFS local group
+        """
+        if self.use_rest:
+            api = 'protocols/cifs/local-groups/%s/%s/members' % (self.svm_uuid, self.sid)
+            # DELETE carries a body selecting the member by name
+            body = {'name': self.parameters['member']}
+            dummy, error = rest_generic.delete_async(self.rest_api, api, None, body)
+            if error:
+                self.module.fail_json(
+                    msg='Error removing member %s from cifs local group %s on vserver %s: %s' %
+                    (self.parameters['member'], self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+        else:
+            group_members_obj = netapp_utils.zapi.NaElement("cifs-local-group-members-remove-members")
+            group_members_obj.add_new_child("group-name", self.parameters['group'])
+            member_names = netapp_utils.zapi.NaElement("member-names")
+            member_names.add_new_child('cifs-name', self.parameters['member'])
+            group_members_obj.add_child_elem(member_names)
+
+            try:
+                self.server.invoke_successfully(group_members_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg='Error removing member %s from cifs local group %s on vserver %s: %s' %
+                    (self.parameters['member'], self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+    def apply(self):
+        # Idempotency driver: add the member when absent, remove it when present.
+        current = self.get_cifs_local_group_member()
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+        if self.na_helper.changed:
+            if not self.module.check_mode:
+                if cd_action == 'create':
+                    self.add_cifs_local_group_member()
+                elif cd_action == 'delete':
+                    self.remove_cifs_local_group_member()
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Creates the NetApp Ontap Cifs Local Group Member object and runs the correct play task
+    """
+    obj = NetAppOntapCifsLocalGroupMember()
+    obj.apply()
+
+
+# module entry point when invoked by Ansible
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user.py
new file mode 100644
index 000000000..c594e76bb
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_cifs_local_user
+short_description: NetApp ONTAP local CIFS user.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '22.2.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create/Modify/Delete a local CIFS user
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified CIFS share should exist or not.
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the local cifs user
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+
+ account_disabled:
+ description:
+ - Whether the local cifs user is disabled or not
+ type: bool
+
+ description:
+ description:
+ - the description for the local cifs user
+ type: str
+
+ full_name:
+ description:
+ - the full name for the local cifs user
+ type: str
+
+ user_password:
+ description:
+ - Password for new user
+ type: str
+
+ set_password:
+ description:
+ - Modify the existing user password
+ - Module is not idempotent when set to True
+ type: bool
+ default: False
+ '''
+
+EXAMPLES = """
+ - name: create local cifs user
+ netapp.ontap.na_ontap_cifs_local_user:
+ state: present
+ vserver: ansibleSVM_cifs
+ name: carchi-cifs2
+ user_password: mypassword
+ account_disabled: False
+ full_name: Chris Archibald
+ description: A user account for Chris
+
+ - name: modify local cifs user
+ netapp.ontap.na_ontap_cifs_local_user:
+ state: present
+ vserver: ansibleSVM_cifs
+ name: carchi-cifs2
+ account_disabled: False
+ full_name: Christopher Archibald
+ description: A user account for Chris Archibald
+
+ - name: Change local cifs user password
+ netapp.ontap.na_ontap_cifs_local_user:
+ state: present
+ vserver: ansibleSVM_cifs
+ name: carchi-cifs2
+ user_password: mypassword2
+ set_password: True
+ account_disabled: False
+ full_name: Christopher Archibald
+ description: A user account for Chris Archibald
+
+ - name: delete local cifs user
+ netapp.ontap.na_ontap_cifs_local_user:
+ state: absent
+ vserver: ansibleSVM_cifs
+ name: carchi-cifs2
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver
+
+
+class NetAppOntapCifsLocalUser:
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ vserver=dict(required=True, type='str'),
+ account_disabled=dict(required=False, type='bool'),
+ full_name=dict(required=False, type='str'),
+ description=dict(required=False, type='str'),
+ user_password=dict(required=False, type='str', no_log=True),
+ set_password=dict(required=False, type='bool', default=False)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.svm_uuid = None
+ self.sid = None
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_cifs_local_user', 9, 10, 1)
+
+ def get_cifs_local_user(self):
+ self.get_svm_uuid()
+ api = 'protocols/cifs/local-users'
+ fields = 'account_disabled,description,full_name,name,sid'
+ params = {'svm.uuid': self.svm_uuid, 'name': self.parameters['name'], 'fields': fields}
+ record, error = rest_generic.get_one_record(self.rest_api, api, params)
+ if error:
+ self.module.fail_json(msg='Error fetching cifs/local-user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if record:
+ return self.format_record(record)
+ return None
+
+ def get_svm_uuid(self):
+ self.svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True)
+
+ def format_record(self, record):
+ self.sid = record['sid']
+ try:
+ record['name'] = record['name'].split('\\')[1]
+ except SyntaxError:
+ self.module.fail_json(msg='Error fetching cifs/local-user')
+ return record
+
+ def create_cifs_local_user(self):
+ api = 'protocols/cifs/local-users'
+ body = {
+ 'svm.uuid': self.svm_uuid,
+ 'name': self.parameters['name'],
+ }
+ if self.parameters.get('user_password') is not None:
+ body['password'] = self.parameters['user_password']
+ if self.parameters.get('full_name') is not None:
+ body['full_name'] = self.parameters['full_name']
+ if self.parameters.get('description') is not None:
+ body['description'] = self.parameters['description']
+ if self.parameters.get('account_disabled') is not None:
+ body['account_disabled'] = self.parameters['account_disabled']
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg="Error creating CIFS local users with name %s: %s" % (self.parameters['name'], error))
+
+ def delete_cifs_local_user(self):
+ api = 'protocols/cifs/local-users'
+ uuids = '%s/%s' % (self.svm_uuid, self.sid)
+ dummy, error = rest_generic.delete_async(self.rest_api, api, uuids)
+ if error:
+ self.module.fail_json(msg='Error while deleting CIFS local user: %s' % error)
+
+ def modify_cifs_local_user(self, modify):
+ api = 'protocols/cifs/local-users'
+ uuids = '%s/%s' % (self.svm_uuid, self.sid)
+ body = {}
+ if modify.get('full_name') is not None:
+ body['full_name'] = self.parameters['full_name']
+ if modify.get('description') is not None:
+ body['description'] = self.parameters['description']
+ if modify.get('account_disabled') is not None:
+ body['account_disabled'] = self.parameters['account_disabled']
+ if self.parameters['set_password'] and modify.get('user_password') is not None:
+ body['password'] = self.parameters['user_password']
+ dummy, error = rest_generic.patch_async(self.rest_api, api, uuids, body)
+ if error:
+ self.module.fail_json(msg='Error while modifying CIFS local user: %s' % error)
+
+ def apply(self):
+ current = self.get_cifs_local_user()
+ modify = None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.parameters['set_password'] and self.parameters.get('user_password') is not None:
+ if not modify:
+ modify = {}
+ self.na_helper.changed = True
+ modify.update({'user_password': self.parameters['user_password']})
+ self.module.warn("forcing a password change as set_password is true")
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_cifs_local_user()
+ elif cd_action == 'delete':
+ self.delete_cifs_local_user()
+ elif modify:
+ self.modify_cifs_local_user(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+    """
+    Execute action from playbook
+    """
+    command = NetAppOntapCifsLocalUser()
+    command.apply()
+
+
+# module entry point when invoked by Ansible
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_modify.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_modify.py
new file mode 100644
index 000000000..0e43bd078
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_modify.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_cifs_local_user_modify
+short_description: NetApp ONTAP modify local CIFS user.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Modify a local CIFS user
+options:
+ name:
+ description:
+ - The name of the local cifs user
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+
+ is_account_disabled:
+ description:
+ - Whether the local cifs user is disabled or not
+ type: bool
+
+ description:
+ description:
+ - the description for the local cifs user
+ type: str
+
+ full_name:
+ description:
+ - the full name for the local cifs user
+ type: str
+ '''
+
+EXAMPLES = """
+ - name: Enable local CIFS Administrator account
+ na_ontap_cifs_local_user_modify:
+ name: BUILTIN\\administrators
+ vserver: ansible
+ is_account_disabled: false
+ username: '{{ username }}'
+ password: '{{ password }}'
+ hostname: '{{ hostname }}'
+
+ - name: Disable local CIFS Administrator account
+ na_ontap_cifs_local_user_modify:
+ name: BUILTIN\\administrators
+ vserver: ansible
+ is_account_disabled: true
+ username: '{{ username }}'
+ password: '{{ password }}'
+ hostname: '{{ hostname }}'
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapCifsLocalUserModify():
+    """
+    Modify an existing local CIFS user.
+
+    Deprecated: a warning is issued pointing users at na_ontap_cifs_local_user.
+    REST uses the private CLI passthrough; otherwise ZAPI is used.
+    """
+    def __init__(self):
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            name=dict(required=True, type='str'),
+            vserver=dict(required=True, type='str'),
+            is_account_disabled=dict(required=False, type='bool'),
+            full_name=dict(required=False, type='str'),
+            description=dict(required=False, type='str')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+
+        self.module.warn('This module is deprecated and na_ontap_cifs_local_user should be used instead')
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_cifs_local_user(self):
+        """
+        Return a CIFS local user
+        :return: None if there is no CIFS local user matching
+        """
+        return_value = None
+        if self.use_rest:
+            # REST path goes through the private CLI passthrough endpoint
+            api = "private/cli/vserver/cifs/users-and-groups/local-user"
+            query = {
+                'fields': 'user-name,full-name,is-account-disabled,description',
+                'user-name': self.parameters['name'],
+                'vserver': self.parameters['vserver']
+            }
+            record, error = rest_generic.get_one_record(self.rest_api, api, query=query)
+            if error:
+                self.module.fail_json(msg=error)
+            if record:
+                return_value = {
+                    'name': record['user_name'],
+                    'is_account_disabled': record['is_account_disabled'],
+                    'vserver': record['vserver'],
+                    # default to '' so absent values compare cleanly with desired state
+                    'description': record.get('description', ''),
+                    'full_name': record.get('full_name', '')
+                }
+        else:
+            # ZAPI path
+            cifs_local_user_obj = netapp_utils.zapi.NaElement('cifs-local-user-get-iter')
+            cifs_local_user_info = netapp_utils.zapi.NaElement('cifs-local-user')
+            cifs_local_user_info.add_new_child('user-name', self.parameters['name'])
+            query = netapp_utils.zapi.NaElement('query')
+            query.add_child_elem(cifs_local_user_info)
+            cifs_local_user_obj.add_child_elem(query)
+            try:
+                result = self.server.invoke_successfully(cifs_local_user_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error getting user %s on vserver %s: %s' %
+                                      (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc())
+
+            if result.get_child_by_name('attributes-list'):
+                local_cifs_user_attributes = result['attributes-list']['cifs-local-user']
+
+                return_value = {
+                    'name': local_cifs_user_attributes['user-name'],
+                    'is_account_disabled': self.na_helper.get_value_for_bool(from_zapi=True, value=local_cifs_user_attributes['is-account-disabled']),
+                    'vserver': local_cifs_user_attributes['vserver'],
+                    'full_name': '',
+                    'description': '',
+                }
+
+                if local_cifs_user_attributes['full-name']:
+                    return_value['full_name'] = local_cifs_user_attributes['full-name']
+
+                if local_cifs_user_attributes['description']:
+                    return_value['description'] = local_cifs_user_attributes['description']
+
+        return return_value
+
+    def modify_cifs_local_user(self, modify):
+        """
+        Modifies a local cifs user
+        :param modify: dict of changed attributes (REST: sent as the PATCH body).
+        :return: None
+        """
+        if self.use_rest:
+            api = "private/cli/vserver/cifs/users-and-groups/local-user"
+            query = {
+                "user-name": self.parameters['name'],
+                'vserver': self.parameters['vserver']
+            }
+
+            dummy, error = self.rest_api.patch(api, modify, query)
+            if error:
+                self.module.fail_json(msg=error, modify=modify)
+        else:
+            cifs_local_user_obj = netapp_utils.zapi.NaElement("cifs-local-user-modify")
+            cifs_local_user_obj.add_new_child('user-name', self.parameters['name'])
+            # NOTE(review): 'is_account_disabled' is an optional parameter but is read
+            # unconditionally here; the ZAPI path raises KeyError when it is omitted -- verify.
+            cifs_local_user_obj.add_new_child('is-account-disabled',
+                                              self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_account_disabled']))
+
+            if 'full_name' in self.parameters:
+                cifs_local_user_obj.add_new_child('full-name', self.parameters['full_name'])
+
+            if 'description' in self.parameters:
+                cifs_local_user_obj.add_new_child('description', self.parameters['description'])
+
+            try:
+                self.server.invoke_successfully(cifs_local_user_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg="Error modifying local CIFS user %s on vserver %s: %s" %
+                                      (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc())
+
+    def apply(self):
+        # modify-only module: the user must already exist
+        current = self.get_cifs_local_user()
+        if not current:
+            error = "User %s does not exist on vserver %s" % (self.parameters['name'], self.parameters['vserver'])
+            self.module.fail_json(msg=error)
+
+        if self.use_rest:
+            # name is a key, and REST does not allow to change it
+            # it should match anyway, but REST may prepend the domain name
+            self.parameters['name'] = current['name']
+        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+        if self.na_helper.changed and not self.module.check_mode:
+            self.modify_cifs_local_user(modify)
+
+        result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Execute action from playbook
+    """
+    command = NetAppOntapCifsLocalUserModify()
+    command.apply()
+
+
+# module entry point when invoked by Ansible
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_set_password.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_set_password.py
new file mode 100644
index 000000000..00f6d0804
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_set_password.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+
+# (c) 2021-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_cifs_local_user_set_password
+short_description: NetApp ONTAP set local CIFS user password
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.8.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Sets the password for the specified local user.
+ - NOTE - This module is not idempotent.
+ - Password must meet the following criteria
+ - The password must be at least six characters in length.
+ - The password must not contain user account name.
+ - The password must contain characters from three of the following four
+ - English uppercase characters (A through Z)
+ - English lowercase characters (a through z)
+ - Base 10 digits (0 through 9)
+ - Special characters
+
+options:
+ vserver:
+ description:
+ - name of the vserver.
+ required: true
+ type: str
+
+ user_name:
+ description:
+ - The name of the local CIFS user to set the password for.
+ required: true
+ type: str
+
+ user_password:
+ description:
+ - The password to set for the local CIFS user.
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+  - name: Set local CIFS password for BUILTIN Administrator account
+ netapp.ontap.na_ontap_cifs_local_user_set_password:
+ user_name: Administrator
+ user_password: Test123!
+ vserver: ansible
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver
+
+
+class NetAppONTAPCifsSetPassword:
+ '''
+ Set CIFS local user password.
+ '''
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ vserver=dict(required=True, type='str'),
+ user_name=dict(required=True, type='str'),
+ user_password=dict(required=True, type='str', no_log=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+ self.svm_uuid = None
+
+ if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
+ msg = 'REST requires ONTAP 9.10.1 or later for protocols/cifs/local-users APIs.'
+ self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
+
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def cifs_local_set_passwd(self):
+ """
+ :return: None
+ """
+ if self.use_rest:
+ return self.cifs_local_set_passwd_rest()
+ cifs_local_set_passwd = netapp_utils.zapi.NaElement('cifs-local-user-set-password')
+ cifs_local_set_passwd.add_new_child('user-name', self.parameters['user_name'])
+ cifs_local_set_passwd.add_new_child('user-password', self.parameters['user_password'])
+
+ try:
+ self.server.invoke_successfully(cifs_local_set_passwd, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error setting password for local CIFS user %s on vserver %s: %s'
+ % (self.parameters['user_name'], self.parameters['vserver'], to_native(e)),
+ exception=traceback.format_exc())
+
+ def cifs_local_set_passwd_rest(self):
+ self.get_svm_uuid()
+ sid = self.get_user_sid()
+ api = 'protocols/cifs/local-users'
+ uuids = '%s/%s' % (self.svm_uuid, sid)
+ body = {'password': self.parameters['user_password']}
+ dummy, error = rest_generic.patch_async(self.rest_api, api, uuids, body, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error change password for user %s: %s' % (self.parameters['user_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_svm_uuid(self):
+ self.svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True)
+
+ def get_user_sid(self):
+ api = 'protocols/cifs/local-users'
+ fields = 'sid'
+ params = {'svm.uuid': self.svm_uuid, 'name': self.parameters['user_name'], 'fields': fields}
+ record, error = rest_generic.get_one_record(self.rest_api, api, params)
+ if error:
+ self.module.fail_json(msg='Error fetching cifs/local-user %s: %s' % (self.parameters['user_name'], to_native(error)),
+ exception=traceback.format_exc())
+ if record:
+ return record['sid']
+ self.module.fail_json(msg='Error no cifs/local-user with name %s' % (self.parameters['user_name']))
+
+ def apply(self):
+ changed = True
+ if not self.module.check_mode:
+ self.cifs_local_set_passwd()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ obj = NetAppONTAPCifsSetPassword()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py
new file mode 100644
index 000000000..8a65dd6c5
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py
@@ -0,0 +1,619 @@
+#!/usr/bin/python
+""" this is cifs_server module
+
+ (c) 2018-2022, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: na_ontap_cifs_server
+short_description: NetApp ONTAP CIFS server configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+  - Creating / deleting and modifying the CIFS server.
+
+options:
+
+ state:
+ description:
+ - Whether the specified cifs_server should exist or not.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ service_state:
+ description:
+ - CIFS Server Administrative Status.
+ choices: ['stopped', 'started']
+ type: str
+
+ name:
+ description:
+ - Specifies the cifs_server name.
+ required: true
+ aliases: ['cifs_server_name']
+ type: str
+
+ admin_user_name:
+ description:
+ - Specifies the cifs server admin username.
+ - When used with absent, the account will be deleted if admin_password is also provided.
+ type: str
+
+ admin_password:
+ description:
+ - Specifies the cifs server admin password.
+ - When used with absent, the account will be deleted if admin_user_name is also provided.
+ type: str
+
+ domain:
+ description:
+ - The Fully Qualified Domain Name of the Windows Active Directory this CIFS server belongs to.
+ type: str
+
+ workgroup:
+ description:
+ - The NetBIOS name of the domain or workgroup this CIFS server belongs to.
+ type: str
+
+ ou:
+ description:
+ - The Organizational Unit (OU) within the Windows Active Directory this CIFS server belongs to.
+ version_added: 2.7.0
+ type: str
+
+ force:
+ type: bool
+ description:
+ - When state is present, if this is set and a machine account with the same name as specified in 'name' exists in the Active Directory,
+ it will be overwritten and reused.
+ - When state is absent, if this is set, the local CIFS configuration is deleted regardless of communication errors.
+ - For REST, it requires ontap version 9.11.
+ version_added: 2.7.0
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Specifies the existing cifs_server name.
+ - This option is used to rename cifs_server.
+ - Supported only in REST and requires force to be set to True.
+ - Requires ontap version 9.11.0.
+ - if the service is running, it will be stopped to perform the rename action, and automatically restarts.
+ - if the service is stopped, it will be briefly restarted after the rename action, and stopped again.
+ type: str
+ version_added: 21.19.0
+
+ encrypt_dc_connection:
+ description:
+ - Specifies whether encryption is required for domain controller connections.
+ - Only supported with REST and requires ontap version 9.8 or later. Use na_ontap_vserver_cifs_security with ZAPI.
+ type: bool
+ version_added: 21.20.0
+
+ kdc_encryption:
+ description:
+ - Specifies whether AES-128 and AES-256 encryption is enabled for all Kerberos-based communication with the Active Directory KDC.
+ - Only supported with REST. Use na_ontap_vserver_cifs_security with ZAPI.
+ type: bool
+ version_added: 21.20.0
+
+ smb_encryption:
+ description:
+ - Determine whether SMB encryption is required for incoming CIFS traffic.
+ - Only supported with REST. Use na_ontap_vserver_cifs_security with ZAPI.
+ type: bool
+ version_added: 21.20.0
+
+ smb_signing:
+ description:
+ - Specifies whether signing is required for incoming CIFS traffic.
+ - Only supported with REST. Use na_ontap_vserver_cifs_security with ZAPI.
+ type: bool
+ version_added: 21.20.0
+
+ restrict_anonymous:
+ description:
+ - Specifies what level of access an anonymous user is granted.
+ - Only supported with REST.
+ choices: ['no_enumeration', 'no_restriction', 'no_access']
+ type: str
+ version_added: 21.20.0
+
+ aes_netlogon_enabled:
+ description:
+ - Specifies whether or not an AES session key is enabled for the Netlogon channel.
+ - Only supported with REST and requires ontap version 9.10.1 or later.
+ type: bool
+ version_added: 21.20.0
+
+ ldap_referral_enabled:
+ description:
+ - Specifies whether or not LDAP referral chasing is enabled for AD LDAP connections.
+ - Only supported with REST and requires ontap version 9.10.1 or later. Use na_ontap_vserver_cifs_security with ZAPI.
+ type: bool
+ version_added: 21.20.0
+
+ use_ldaps:
+ description:
+      - Specifies whether or not to use LDAPS for secure Active Directory LDAP connections.
+ - Only supported with REST and requires ontap version 9.10.1 or later. Use na_ontap_vserver_cifs_security with ZAPI.
+ type: bool
+ version_added: 21.20.0
+
+ use_start_tls:
+ description:
+ - Specifies whether or not to use SSL/TLS for allowing secure LDAP communication with Active Directory LDAP servers.
+ - Only supported with REST and requires ontap version 9.10.1 or later. Use na_ontap_vserver_cifs_security with ZAPI.
+ type: bool
+ version_added: 21.20.0
+
+ try_ldap_channel_binding:
+ description:
+ - Specifies whether or not channel binding is attempted in the case of TLS/LDAPS.
+ - Only supported with REST and requires ontap version 9.10.1 or later. Use na_ontap_vserver_cifs_security with ZAPI.
+ type: bool
+ version_added: 21.20.0
+
+ session_security:
+ description:
+ - Specifies client session security for AD LDAP connections.
+ - Only supported with REST and requires ontap version 9.10.1 or later. Use na_ontap_vserver_cifs_security with ZAPI.
+ choices: ['none', 'sign', 'seal']
+ type: str
+ version_added: 21.20.0
+
+'''
+
+EXAMPLES = '''
+ - name: Create cifs_server
+ netapp.ontap.na_ontap_cifs_server:
+ state: present
+ name: data2
+ vserver: svm1
+ service_state: stopped
+ domain: "{{ id_domain }}"
+ admin_user_name: "{{ domain_login }}"
+ admin_password: "{{ domain_pwd }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete cifs_server
+ netapp.ontap.na_ontap_cifs_server:
+ state: absent
+ name: data2
+ vserver: svm1
+ admin_user_name: "{{ domain_login }}"
+ admin_password: "{{ domain_pwd }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Start cifs_server
+ netapp.ontap.na_ontap_cifs_server:
+ state: present
+ name: data2
+ vserver: svm1
+ service_state: started
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Stop cifs_server
+ netapp.ontap.na_ontap_cifs_server:
+ state: present
+ name: data2
+ vserver: svm1
+ service_state: stopped
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Rename cifs_server - REST
+ netapp.ontap.na_ontap_cifs_server:
+ state: present
+ from_name: data2
+ name: cifs
+ vserver: svm1
+ force: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify cifs_server security - REST
+ netapp.ontap.na_ontap_cifs_server:
+ state: present
+ name: data2
+ vserver: svm1
+ service_state: stopped
+        encrypt_dc_connection: True
+        smb_encryption: True
+        kdc_encryption: True
+        smb_signing: True
+        aes_netlogon_enabled: True
+        ldap_referral_enabled: True
+        session_security: seal
+        try_ldap_channel_binding: False
+        # use_ldaps and use_start_tls are mutually exclusive; set only one of them
+        use_ldaps: True
+        restrict_anonymous: no_access
+ domain: "{{ id_domain }}"
+ admin_user_name: "{{ domain_login }}"
+ admin_password: "{{ domain_pwd }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapcifsServer:
+ """
+ object to describe cifs_server info
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ service_state=dict(required=False, choices=['stopped', 'started']),
+ name=dict(required=True, type='str', aliases=['cifs_server_name']),
+ workgroup=dict(required=False, type='str', default=None),
+ domain=dict(required=False, type='str'),
+ admin_user_name=dict(required=False, type='str'),
+ admin_password=dict(required=False, type='str', no_log=True),
+ ou=dict(required=False, type='str'),
+ force=dict(required=False, type='bool'),
+ vserver=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ smb_signing=dict(required=False, type='bool'),
+ encrypt_dc_connection=dict(required=False, type='bool'),
+ kdc_encryption=dict(required=False, type='bool'),
+ smb_encryption=dict(required=False, type='bool'),
+ restrict_anonymous=dict(required=False, type='str', choices=['no_enumeration', 'no_restriction', 'no_access']),
+ aes_netlogon_enabled=dict(required=False, type='bool'),
+ ldap_referral_enabled=dict(required=False, type='bool'),
+ session_security=dict(required=False, type='str', choices=['none', 'sign', 'seal']),
+ try_ldap_channel_binding=dict(required=False, type='bool'),
+ use_ldaps=dict(required=False, type='bool'),
+ use_start_tls=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[('use_ldaps', 'use_start_tls')]
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.parameters['cifs_server_name'] = self.parameters['name']
+ # Set up Rest API
+ self.rest_api = OntapRestAPI(self.module)
+ unsupported_rest_properties = ['workgroup']
+ partially_supported_rest_properties = [['encrypt_dc_connection', (9, 8)], ['aes_netlogon_enabled', (9, 10, 1)], ['ldap_referral_enabled', (9, 10, 1)],
+ ['session_security', (9, 10, 1)], ['try_ldap_channel_binding', (9, 10, 1)], ['use_ldaps', (9, 10, 1)],
+ ['use_start_tls', (9, 10, 1)], ['force', (9, 11)]]
+ self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
+
+ if not self.use_rest:
+ unsupported_zapi_properties = ['smb_signing', 'encrypt_dc_connection', 'kdc_encryption', 'smb_encryption', 'restrict_anonymous',
+ 'aes_netlogon_enabled', 'ldap_referral_enabled', 'try_ldap_channel_binding', 'session_security',
+ 'use_ldaps', 'use_start_tls', 'from_name']
+ used_unsupported_zapi_properties = [option for option in unsupported_zapi_properties if option in self.parameters]
+ if used_unsupported_zapi_properties:
+ self.module.fail_json(msg="Error: %s options supported only with REST." % " ,".join(used_unsupported_zapi_properties))
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_cifs_server(self):
+ """
+ Return details about the CIFS-server
+ :param:
+ name : Name of the name of the cifs_server
+
+ :return: Details about the cifs_server. None if not found.
+ :rtype: dict
+ """
+ cifs_server_info = netapp_utils.zapi.NaElement('cifs-server-get-iter')
+ cifs_server_attributes = netapp_utils.zapi.NaElement('cifs-server-config')
+ cifs_server_attributes.add_new_child('cifs-server', self.parameters['cifs_server_name'])
+ cifs_server_attributes.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(cifs_server_attributes)
+ cifs_server_info.add_child_elem(query)
+ result = self.server.invoke_successfully(cifs_server_info, True)
+ return_value = None
+
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+
+ cifs_server_attributes = result.get_child_by_name('attributes-list').\
+ get_child_by_name('cifs-server-config')
+ service_state = cifs_server_attributes.get_child_content('administrative-status')
+ return_value = {
+ 'cifs_server_name': self.parameters['cifs_server_name'],
+ 'service_state': 'started' if service_state == 'up' else 'stopped'
+ }
+ return return_value
+
+ def create_cifs_server(self):
+ """
+ calling zapi to create cifs_server
+ """
+ options = {'cifs-server': self.parameters['cifs_server_name']}
+ if 'service_state' in self.parameters:
+ options['administrative-status'] = 'up' if self.parameters['service_state'] == 'started' else 'down'
+ if 'workgroup' in self.parameters:
+ options['workgroup'] = self.parameters['workgroup']
+ if 'domain' in self.parameters:
+ options['domain'] = self.parameters['domain']
+ if 'admin_user_name' in self.parameters:
+ options['admin-username'] = self.parameters['admin_user_name']
+ if 'admin_password' in self.parameters:
+ options['admin-password'] = self.parameters['admin_password']
+ if 'ou' in self.parameters:
+ options['organizational-unit'] = self.parameters['ou']
+ if 'force' in self.parameters:
+ options['force-account-overwrite'] = str(self.parameters['force']).lower()
+
+ cifs_server_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'cifs-server-create', **options)
+
+ try:
+ self.server.invoke_successfully(cifs_server_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error Creating cifs_server %s: %s' %
+ (self.parameters['cifs_server_name'], to_native(exc)), exception=traceback.format_exc())
+
+    def delete_cifs_server(self):
+        """
+        calling zapi to delete cifs_server
+        """
+        options = {}
+        if 'admin_user_name' in self.parameters:
+            options['admin-username'] = self.parameters['admin_user_name']
+        if 'admin_password' in self.parameters:
+            options['admin-password'] = self.parameters['admin_password']
+        if 'force' in self.parameters:
+            options['force-account-delete'] = str(self.parameters['force']).lower()
+
+        if options:
+            cifs_server_delete = netapp_utils.zapi.NaElement.create_node_with_children('cifs-server-delete', **options)
+        else:
+            cifs_server_delete = netapp_utils.zapi.NaElement.create_node_with_children('cifs-server-delete')
+
+        try:
+            self.server.invoke_successfully(cifs_server_delete,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as exc:
+            self.module.fail_json(msg='Error deleting cifs_server %s: %s' % (self.parameters['cifs_server_name'], to_native(exc)),
+                                  exception=traceback.format_exc())
+
+    def start_cifs_server(self):
+        """
+        Start the cifs_server.
+        """
+        cifs_server_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+            'cifs-server-start')
+        try:
+            self.server.invoke_successfully(cifs_server_modify,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as e:
+            self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.parameters['cifs_server_name'], to_native(e)),
+                                  exception=traceback.format_exc())
+
+    def stop_cifs_server(self):
+        """
+        Stop the cifs_server.
+        """
+        cifs_server_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+            'cifs-server-stop')
+        try:
+            self.server.invoke_successfully(cifs_server_modify, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as e:
+            self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.parameters['cifs_server_name'], to_native(e)),
+                                  exception=traceback.format_exc())
+
+ def get_cifs_server_rest(self, from_name=None):
+ """
+ get details of the cifs_server.
+ """
+ if not self.use_rest:
+ return self.get_cifs_server()
+ query = {'svm.name': self.parameters['vserver'],
+ 'fields': 'svm.uuid,'
+ 'enabled,'
+ 'security.smb_encryption,'
+ 'security.kdc_encryption,'
+ 'security.smb_signing,'
+ 'security.restrict_anonymous,'}
+ query['name'] = from_name or self.parameters['cifs_server_name']
+ api = 'protocols/cifs/services'
+ if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8):
+ query['fields'] += 'security.encrypt_dc_connection,'
+
+ if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
+ security_option_9_10 = ('security.use_ldaps,'
+ 'security.use_start_tls,'
+ 'security.try_ldap_channel_binding,'
+ 'security.session_security,'
+ 'security.ldap_referral_enabled,'
+ 'security.aes_netlogon_enabled,')
+ query['fields'] += security_option_9_10
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg="Error on fetching cifs: %s" % error)
+ if record:
+ record['service_state'] = 'started' if record.pop('enabled') else 'stopped'
+ return {
+ 'svm': {'uuid': self.na_helper.safe_get(record, ['svm', 'uuid'])},
+ 'cifs_server_name': self.na_helper.safe_get(record, ['name']),
+ 'service_state': self.na_helper.safe_get(record, ['service_state']),
+ 'smb_signing': self.na_helper.safe_get(record, ['security', 'smb_signing']),
+ 'encrypt_dc_connection': self.na_helper.safe_get(record, ['security', 'encrypt_dc_connection']),
+ 'kdc_encryption': self.na_helper.safe_get(record, ['security', 'kdc_encryption']),
+ 'smb_encryption': self.na_helper.safe_get(record, ['security', 'smb_encryption']),
+ 'aes_netlogon_enabled': self.na_helper.safe_get(record, ['security', 'aes_netlogon_enabled']),
+ 'ldap_referral_enabled': self.na_helper.safe_get(record, ['security', 'ldap_referral_enabled']),
+ 'session_security': self.na_helper.safe_get(record, ['security', 'session_security']),
+ 'try_ldap_channel_binding': self.na_helper.safe_get(record, ['security', 'try_ldap_channel_binding']),
+ 'use_ldaps': self.na_helper.safe_get(record, ['security', 'use_ldaps']),
+ 'use_start_tls': self.na_helper.safe_get(record, ['security', 'use_start_tls']),
+ 'restrict_anonymous': self.na_helper.safe_get(record, ['security', 'restrict_anonymous'])
+ }
+ return record
+
+ def build_ad_domain(self):
+ ad_domain = {}
+ if 'admin_user_name' in self.parameters:
+ ad_domain['user'] = self.parameters['admin_user_name']
+ if 'admin_password' in self.parameters:
+ ad_domain['password'] = self.parameters['admin_password']
+ if 'ou' in self.parameters:
+ ad_domain['organizational_unit'] = self.parameters['ou']
+ if 'domain' in self.parameters:
+ ad_domain['fqdn'] = self.parameters['domain']
+ return ad_domain
+
+ def create_modify_body_rest(self, params=None):
+ """
+ Function to define body for create and modify cifs server
+ """
+ body, query, security = {}, {}, {}
+ if params is None:
+ params = self.parameters
+ security_options = ['smb_signing', 'encrypt_dc_connection', 'kdc_encryption', 'smb_encryption', 'restrict_anonymous',
+ 'aes_netlogon_enabled', 'ldap_referral_enabled', 'try_ldap_channel_binding', 'session_security', 'use_ldaps', 'use_start_tls']
+ ad_domain = self.build_ad_domain()
+ if ad_domain:
+ body['ad_domain'] = ad_domain
+ if 'force' in self.parameters:
+ query['force'] = self.parameters['force']
+ for key in security_options:
+ if key in params:
+ security[key] = params[key]
+ if security:
+ body['security'] = security
+ if 'vserver' in params:
+ body['svm.name'] = params['vserver']
+ if 'cifs_server_name' in params:
+ body['name'] = self.parameters['cifs_server_name']
+ if 'service_state' in params:
+ body['enabled'] = params['service_state'] == 'started'
+ return body, query
+
+ def create_cifs_server_rest(self):
+ """
+ create the cifs_server.
+ """
+ if not self.use_rest:
+ return self.create_cifs_server()
+ body, query = self.create_modify_body_rest()
+ api = 'protocols/cifs/services'
+ dummy, error = rest_generic.post_async(self.rest_api, api, body, query)
+ if error is not None:
+ self.module.fail_json(msg="Error on creating cifs: %s" % error)
+
+ def delete_cifs_server_rest(self, current):
+ """
+ delete the cifs_server.
+ """
+ if not self.use_rest:
+ return self.delete_cifs_server()
+ ad_domain = self.build_ad_domain()
+ body = {'ad_domain': ad_domain} if ad_domain else None
+ query = {}
+ if 'force' in self.parameters:
+ query['force'] = self.parameters['force']
+ api = 'protocols/cifs/services'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, current['svm']['uuid'], query, body=body)
+ if error is not None:
+ self.module.fail_json(msg="Error on deleting cifs server: %s" % error)
+
+ def modify_cifs_server_rest(self, current, modify):
+ """
+ Modify the state of CIFS server.
+ rename: cifs server should be in stopped state
+ """
+ if not self.use_rest:
+ return self.modify_cifs_server()
+ body, query = self.create_modify_body_rest(modify)
+ api = 'protocols/cifs/services'
+ dummy, error = rest_generic.patch_async(self.rest_api, api, current['svm']['uuid'], body, query)
+ if error is not None:
+ self.module.fail_json(msg="Error on modifying cifs server: %s" % error)
+
+ def modify_cifs_server(self):
+ """
+ Start or stop cifs server in ZAPI.
+ """
+ if self.parameters.get('service_state') == 'stopped':
+ self.stop_cifs_server()
+ else:
+ self.start_cifs_server()
+
+ def apply(self):
+ current = self.get_cifs_server_rest()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action == 'create' and 'from_name' in self.parameters:
+ current = self.get_cifs_server_rest(self.parameters['from_name'])
+ if current is None:
+ self.module.fail_json(msg='Error renaming cifs server: %s - no cifs server with from_name: %s.'
+ % (self.parameters['name'], self.parameters['from_name']))
+ if not self.parameters.get('force'):
+ self.module.fail_json(msg='Error: cannot rename cifs server from %s to %s without force.'
+ % (self.parameters['from_name'], self.parameters['name']))
+ # rename is handled in modify in REST.
+ cd_action = None
+ modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_cifs_server_rest()
+ elif cd_action == 'delete':
+ self.delete_cifs_server_rest(current)
+ else:
+ self.modify_cifs_server_rest(current, modify)
+ # rename will enable the cifs server also, so disable it if service_state is stopped.
+ if 'cifs_server_name' in modify and self.parameters.get('service_state') == 'stopped':
+ self.modify_cifs_server_rest(current, {'service_state': 'stopped'})
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ cifs_server = NetAppOntapcifsServer()
+ cifs_server.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py
new file mode 100644
index 000000000..fb0f507fc
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py
@@ -0,0 +1,776 @@
+#!/usr/bin/python
+
+# (c) 2017-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_cluster
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_cluster
+short_description: NetApp ONTAP cluster - create a cluster and add/remove nodes.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create ONTAP cluster.
+ - Add or remove cluster nodes using cluster_ip_address.
+ - Adding a node requires ONTAP 9.3 or better.
+ - Removing a node requires ONTAP 9.4 or better.
+options:
+ state:
+ description:
+ - Whether the specified cluster should exist (deleting a cluster is not supported).
+ - Whether the node identified by its cluster_ip_address should be in the cluster or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ cluster_name:
+ description:
+ - The name of the cluster to manage.
+ type: str
+ cluster_ip_address:
+ description:
+ - intra cluster IP address of the node to be added or removed.
+ type: str
+ single_node_cluster:
+ description:
+ - Whether the cluster is a single node cluster. Ignored for 9.3 or older versions.
+ - If present, it was observed that 'Cluster' interfaces were deleted, whatever the value with ZAPI.
+ version_added: 19.11.0
+ type: bool
+ cluster_location:
+ description:
+ - Cluster location, only relevant if performing a modify action.
+ version_added: 19.11.0
+ type: str
+ cluster_contact:
+ description:
+ - Cluster contact, only relevant if performing a modify action.
+ version_added: 19.11.0
+ type: str
+ node_name:
+ description:
+ - Name of the node to be added or removed from the cluster.
+ - Be aware that when adding a node, '-' are converted to '_' by the ONTAP backend.
+ - When creating a cluster, C(node_name) is ignored.
+ - When adding a node using C(cluster_ip_address), C(node_name) is optional.
+ - When used to remove a node, C(cluster_ip_address) and C(node_name) are mutually exclusive.
+ version_added: 20.9.0
+ type: str
+ time_out:
+ description:
+ - time to wait for cluster creation in seconds.
+ - Error out if task is not completed in defined time.
+ - if 0, the request is asynchronous.
+ - default is set to 3 minutes.
+ default: 180
+ type: int
+ version_added: 21.1.0
+ force:
+ description:
+ - forcibly remove a node that is down and cannot be brought online to remove its shared resources.
+ default: false
+ type: bool
+ version_added: 21.13.0
+ timezone:
+ description: timezone for the cluster. Only supported by REST.
+ type: dict
+ version_added: 21.24.0
+ suboptions:
+ name:
+ type: str
+ description:
+ - The timezone name must be
+ - A geographic region, usually expressed as area/location
+ - Greenwich Mean Time (GMT) or the difference in hours from GMT
+ - A valid alias; that is, a term defined by the standard to refer to a geographic region or GMT
+ - A system-specific or other term not associated with a geographic region or GMT
+ - "full list of supported alias can be found here: https://library.netapp.com/ecmdocs/ECMP1155590/html/GUID-D3B8A525-67A2-4BEE-99DB-EF52D6744B5F.html"
+ - Only supported by REST
+
+notes:
+ - supports REST and ZAPI
+'''
+
+EXAMPLES = """
+ - name: Create cluster
+ netapp.ontap.na_ontap_cluster:
+ state: present
+ cluster_name: new_cluster
+ time_out: 0
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Add node to cluster (Join cluster)
+ netapp.ontap.na_ontap_cluster:
+ state: present
+ cluster_ip_address: 10.10.10.10
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Add node to cluster (Join cluster)
+ netapp.ontap.na_ontap_cluster:
+ state: present
+ cluster_ip_address: 10.10.10.10
+ node_name: my_preferred_node_name
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Create a 2 node cluster in one call
+ netapp.ontap.na_ontap_cluster:
+ state: present
+ cluster_name: new_cluster
+ cluster_ip_address: 10.10.10.10
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Remove node from cluster
+ netapp.ontap.na_ontap_cluster:
+ state: absent
+ cluster_ip_address: 10.10.10.10
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Remove node from cluster
+ netapp.ontap.na_ontap_cluster:
+ state: absent
+ node_name: node002
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: modify cluster
+ netapp.ontap.na_ontap_cluster:
+ state: present
+ cluster_contact: testing
+ cluster_location: testing
+ cluster_name: "{{ netapp_cluster}}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppONTAPCluster:
    """
    object initialize and class methods
    """
    def __init__(self):
        # build the argument spec on top of the shared ONTAP connection options
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            cluster_name=dict(required=False, type='str'),
            cluster_ip_address=dict(required=False, type='str'),
            cluster_location=dict(required=False, type='str'),
            cluster_contact=dict(required=False, type='str'),
            force=dict(required=False, type='bool', default=False),
            single_node_cluster=dict(required=False, type='bool'),
            node_name=dict(required=False, type='str'),
            time_out=dict(required=False, type='int', default=180),
            timezone=dict(required=False, type='dict', options=dict(
                name=dict(type='str')
            ))
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.warnings = []
        # cached, so that we don't call the REST API more than once
        self.node_records = None

        # for removal, the node must be identified by IP or by name, but not both
        if self.parameters['state'] == 'absent' and self.parameters.get('node_name') is not None and self.parameters.get('cluster_ip_address') is not None:
            msg = 'when state is "absent", parameters are mutually exclusive: cluster_ip_address|node_name'
            self.module.fail_json(msg=msg)

        if self.parameters.get('node_name') is not None and '-' in self.parameters.get('node_name'):
            self.warnings.append('ONTAP ZAPI converts "-" to "_", node_name: %s may be changed or not matched' % self.parameters.get('node_name'))

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # node removal over REST requires 9.7; fall back to ZAPI on 9.6
        if self.use_rest and self.parameters['state'] == 'absent' and not self.rest_api.meets_rest_minimum_version(True, 9, 7, 0):
            self.module.warn('switching back to ZAPI as DELETE is not supported on 9.6')
            self.use_rest = False
        if not self.use_rest:
            if self.na_helper.safe_get(self.parameters, ['timezone', 'name']):
                self.module.fail_json(msg='Timezone is only supported with REST')
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg="the python NetApp-Lib module is required")
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_cluster_identity_rest(self):
+ ''' get cluster information, but the cluster may not exist yet
+ return:
+ None if the cluster cannot be reached
+ a dictionary of attributes
+ '''
+ record, error = rest_generic.get_one_record(self.rest_api, 'cluster', fields='contact,location,name,timezone')
+ if error:
+ if 'are available in precluster.' in error:
+ # assuming precluster state
+ return None
+ self.module.fail_json(msg='Error fetching cluster identity info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if record:
+ return {
+ 'cluster_contact': record.get('contact'),
+ 'cluster_location': record.get('location'),
+ 'cluster_name': record.get('name'),
+ 'timezone': self.na_helper.safe_get(record, ['timezone'])
+ }
+ return None
+
    def get_cluster_identity(self, ignore_error=True):
        ''' get cluster information, but the cluster may not exist yet
        return:
            None if the cluster cannot be reached
            a dictionary of attributes
        '''
        if self.use_rest:
            return self.get_cluster_identity_rest()

        zapi = netapp_utils.zapi.NaElement('cluster-identity-get')
        try:
            result = self.server.invoke_successfully(zapi, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # with ignore_error, an unreachable / not-yet-created cluster is reported as None
            if ignore_error:
                return None
            self.module.fail_json(msg='Error fetching cluster identity info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        cluster_identity = {}
        if result.get_child_by_name('attributes'):
            identity_info = result.get_child_by_name('attributes').get_child_by_name('cluster-identity-info')
            if identity_info:
                # map ZAPI cluster-identity-info fields to module parameter names
                cluster_identity['cluster_contact'] = identity_info.get_child_content('cluster-contact')
                cluster_identity['cluster_location'] = identity_info.get_child_content('cluster-location')
                cluster_identity['cluster_name'] = identity_info.get_child_content('cluster-name')
            return cluster_identity
        return None
+
+ def get_cluster_nodes_rest(self):
+ ''' get cluster node names, but the cluster may not exist yet
+ return:
+ None if the cluster cannot be reached
+ a list of nodes
+ '''
+ if self.node_records is None:
+ records, error = rest_generic.get_0_or_more_records(self.rest_api, 'cluster/nodes', fields='name,uuid,cluster_interfaces')
+ if error:
+ self.module.fail_json(msg='Error fetching cluster node info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ self.node_records = records or []
+ return self.node_records
+
+ def get_cluster_node_names_rest(self):
+ ''' get cluster node names, but the cluster may not exist yet
+ return:
+ None if the cluster cannot be reached
+ a list of nodes
+ '''
+ records = self.get_cluster_nodes_rest()
+ return [record['name'] for record in records]
+
    def get_cluster_nodes(self, ignore_error=True):
        ''' get cluster node names, but the cluster may not exist yet
        return:
            None if the cluster cannot be reached
            a list of nodes
        '''
        if self.use_rest:
            return self.get_cluster_node_names_rest()

        zapi = netapp_utils.zapi.NaElement('cluster-node-get-iter')
        try:
            result = self.server.invoke_successfully(zapi, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # with ignore_error, an unreachable cluster is reported as None rather than a failure
            if ignore_error:
                return None
            self.module.fail_json(msg='Error fetching cluster node info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('attributes-list'):
            cluster_nodes = []
            for node_info in result.get_child_by_name('attributes-list').get_children():
                node_name = node_info.get_child_content('node-name')
                if node_name is not None:
                    cluster_nodes.append(node_name)
            return cluster_nodes
        return None
+
+ def get_cluster_ip_addresses_rest(self, cluster_ip_address):
+ ''' get list of IP addresses for this cluster
+ return:
+ a list of dictionaries
+ '''
+ if_infos = []
+ records = self.get_cluster_nodes_rest()
+ for record in records:
+ for interface in record.get('cluster_interfaces', []):
+ ip_address = self.na_helper.safe_get(interface, ['ip', 'address'])
+ if cluster_ip_address is None or ip_address == cluster_ip_address:
+ if_info = {
+ 'address': ip_address,
+ 'home_node': record['name'],
+ }
+ if_infos.append(if_info)
+ return if_infos
+
    def get_cluster_ip_addresses(self, cluster_ip_address, ignore_error=True):
        ''' get list of IP addresses for this cluster (ZAPI)
        return:
            a list of dictionaries with 'address' and 'home_node' keys
        '''
        if_infos = []
        zapi = netapp_utils.zapi.NaElement('net-interface-get-iter')
        if cluster_ip_address is not None:
            # restrict the iterator to the requested address
            query = netapp_utils.zapi.NaElement('query')
            net_info = netapp_utils.zapi.NaElement('net-interface-info')
            net_info.add_new_child('address', cluster_ip_address)
            query.add_child_elem(net_info)
            zapi.add_child_elem(query)

        try:
            result = self.server.invoke_successfully(zapi, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # with ignore_error, lookup failures are treated as 'no match' (empty list)
            if ignore_error:
                return if_infos
            self.module.fail_json(msg='Error getting IP addresses: %s' % to_native(error),
                                  exception=traceback.format_exc())

        if result.get_child_by_name('attributes-list'):
            for net_info in result.get_child_by_name('attributes-list').get_children():
                if net_info:
                    if_info = {'address': net_info.get_child_content('address')}
                    if_info['home_node'] = net_info.get_child_content('home-node')
                    if_infos.append(if_info)
        return if_infos
+
+ def get_cluster_ip_address(self, cluster_ip_address, ignore_error=True):
+ ''' get node information if it is discoverable
+ return:
+ None if the cluster cannot be reached
+ a dictionary of attributes
+ '''
+ if cluster_ip_address is None:
+ return None
+ if self.use_rest:
+ nodes = self.get_cluster_ip_addresses_rest(cluster_ip_address)
+ else:
+ nodes = self.get_cluster_ip_addresses(cluster_ip_address, ignore_error=ignore_error)
+ return nodes if len(nodes) > 0 else None
+
+ def create_cluster_body(self, modify=None, nodes=None):
+ body = {}
+ params = modify if modify is not None else self.parameters
+ for (param_key, rest_key) in {
+ 'cluster_contact': 'contact',
+ 'cluster_location': 'location',
+ 'cluster_name': 'name',
+ 'single_node_cluster': 'single_node_cluster',
+ 'timezone': 'timezone'
+ }.items():
+ if param_key in params:
+ body[rest_key] = params[param_key]
+ if nodes:
+ body['nodes'] = nodes
+ return body
+
+ def create_node_body(self):
+ node = {}
+ for (param_key, rest_key) in {
+ 'cluster_ip_address': 'cluster_interface.ip.address',
+ 'cluster_location': 'location',
+ 'node_name': 'name'
+ }.items():
+ if param_key in self.parameters:
+ node[rest_key] = self.parameters[param_key]
+ return node
+
+ def create_nodes(self):
+ node = self.create_node_body()
+ return [node] if node else None
+
+ def create_cluster_rest(self, older_api=False):
+ """
+ Create a cluster
+ """
+ query = None
+ body = self.create_cluster_body(nodes=self.create_nodes())
+ if 'single_node_cluster' in body:
+ query = {'single_node_cluster': body.pop('single_node_cluster')}
+ dummy, error = rest_generic.post_async(self.rest_api, 'cluster', body, query, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error creating cluster %s: %s'
+ % (self.parameters['cluster_name'], to_native(error)),
+ exception=traceback.format_exc())
+
    def create_cluster(self, older_api=False):
        """
        Create a cluster.

        Returns True on success, False when the node is already in use (error 36503).
        older_api is set on retry when the backend rejects single-node-cluster.
        """
        if self.use_rest:
            return self.create_cluster_rest()

        # Note: cannot use node_name here:
        # 13001:The "-node-names" parameter must be used with either the "-node-uuids" or the "-cluster-ips" parameters.
        options = {'cluster-name': self.parameters['cluster_name']}
        if not older_api and self.parameters.get('single_node_cluster') is not None:
            options['single-node-cluster'] = str(self.parameters['single_node_cluster']).lower()
        cluster_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'cluster-create', **options)
        try:
            self.server.invoke_successfully(cluster_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # older releases do not know single-node-cluster: retry once without it
            if error.message == "Extra input: single-node-cluster" and not older_api:
                return self.create_cluster(older_api=True)
            # Error 36503 denotes node already being used.
            if to_native(error.code) == "36503":
                return False
            self.module.fail_json(msg='Error creating cluster %s: %s'
                                  % (self.parameters['cluster_name'], to_native(error)),
                                  exception=traceback.format_exc())
        return True
+
+ def add_node_rest(self):
+ """
+ Add a node to an existing cluster
+ """
+ body = self.create_node_body()
+ dummy, error = rest_generic.post_async(self.rest_api, 'cluster/nodes', body, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error adding node with ip %s: %s'
+ % (self.parameters.get('cluster_ip_address'), to_native(error)),
+ exception=traceback.format_exc())
+
    def add_node(self, older_api=False):
        """
        Add a node to an existing cluster
        9.2 and 9.3 do not support cluster-ips so fallback to node-ip

        Returns True when a join was submitted, False when there was nothing to do.
        """
        if self.use_rest:
            return self.add_node_rest()

        if self.parameters.get('cluster_ip_address') is None:
            # nothing to join without an address
            return False
        cluster_add_node = netapp_utils.zapi.NaElement('cluster-add-node')
        if older_api:
            cluster_add_node.add_new_child('node-ip', self.parameters.get('cluster_ip_address'))
        else:
            cluster_ips = netapp_utils.zapi.NaElement.create_node_with_children('cluster-ips', **{'ip-address': self.parameters.get('cluster_ip_address')})
            cluster_add_node.add_child_elem(cluster_ips)
        if self.parameters.get('node_name') is not None:
            node_names = netapp_utils.zapi.NaElement.create_node_with_children('node-names', **{'string': self.parameters.get('node_name')})
            cluster_add_node.add_child_elem(node_names)

        try:
            self.server.invoke_successfully(cluster_add_node, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # pre-9.4 ZAPI does not know cluster-ips: retry once with node-ip
            if error.message == "Extra input: cluster-ips" and not older_api:
                return self.add_node(older_api=True)
            # skip if error says no failed operations to retry.
            if to_native(error) == "NetApp API failed. Reason - 13001:There are no failed \"cluster create\" or \"cluster add-node\" operations to retry.":
                return False
            self.module.fail_json(msg='Error adding node with ip %s: %s'
                                  % (self.parameters.get('cluster_ip_address'), to_native(error)),
                                  exception=traceback.format_exc())
        return True
+
+ def get_uuid_from_ip(self, ip_address):
+ for node in self.get_cluster_nodes_rest():
+ if ip_address in (interface['ip']['address'] for interface in node['cluster_interfaces']):
+ return node['uuid']
+ return None
+
+ def get_uuid_from_name(self, node_name):
+ for node in self.get_cluster_nodes_rest():
+ if node_name == node['name']:
+ return node['uuid']
+ return None
+
+ def get_uuid(self):
+ if self.parameters.get('cluster_ip_address') is not None:
+ from_node = self.parameters['cluster_ip_address']
+ uuid = self.get_uuid_from_ip(from_node)
+ elif self.parameters.get('node_name') is not None:
+ from_node = self.parameters['node_name']
+ uuid = self.get_uuid_from_name(from_node)
+ else:
+ # Unexpected, for delete one of cluster_ip_address, node_name is required.
+ uuid = None
+ if uuid is None:
+ self.module.fail_json(msg='Internal error, cannot find UUID in %s: for %s or %s'
+ % (self.get_cluster_nodes_rest(), self.parameters['cluster_ip_address'], self.parameters.get('node_name') is not None),
+ exception=traceback.format_exc())
+ return uuid, from_node
+
+ def remove_node_rest(self):
+ """
+ Remove a node from an existing cluster
+ """
+ uuid, from_node = self.get_uuid()
+ query = {'force': True} if self.parameters.get('force') else None
+ dummy, error = rest_generic.delete_async(self.rest_api, 'cluster/nodes', uuid, query, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error removing node with %s: %s'
+ % (from_node, to_native(error)), exception=traceback.format_exc())
+
    def remove_node(self):
        """
        Remove a node from an existing cluster
        """
        if self.use_rest:
            return self.remove_node_rest()

        cluster_remove_node = netapp_utils.zapi.NaElement('cluster-remove-node')
        from_node = ''
        # cluster-ip and node-name are mutually exclusive:
        # 13115:Element "cluster-ip" within "cluster-remove-node" has been excluded by another element.
        if self.parameters.get('cluster_ip_address') is not None:
            cluster_remove_node.add_new_child('cluster-ip', self.parameters.get('cluster_ip_address'))
            from_node = 'IP: %s' % self.parameters.get('cluster_ip_address')
        elif self.parameters.get('node_name') is not None:
            cluster_remove_node.add_new_child('node', self.parameters.get('node_name'))
            from_node = 'name: %s' % self.parameters.get('node_name')
        if self.parameters.get('force'):
            cluster_remove_node.add_new_child('force', 'true')

        try:
            self.server.invoke_successfully(cluster_remove_node, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # this ZAPI only exists on ONTAP 9.4 and later
            if error.message == "Unable to find API: cluster-remove-node":
                msg = 'Error: ZAPI is not available. Removing a node requires ONTAP 9.4 or newer.'
                self.module.fail_json(msg=msg)
            self.module.fail_json(msg='Error removing node with %s: %s'
                                  % (from_node, to_native(error)), exception=traceback.format_exc())
+
+ def modify_cluster_identity_rest(self, modify):
+ """
+ Modifies the cluster identity
+ """
+ body = self.create_cluster_body(modify)
+ dummy, error = rest_generic.patch_async(self.rest_api, 'cluster', None, body)
+ if error:
+ self.module.fail_json(msg='Error modifying cluster idetity details %s: %s'
+ % (self.parameters['cluster_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_cluster_identity(self, modify):
+ """
+ Modifies the cluster identity
+ """
+ if self.use_rest:
+ return self.modify_cluster_identity_rest(modify)
+
+ cluster_modify = netapp_utils.zapi.NaElement('cluster-identity-modify')
+ if modify.get('cluster_name') is not None:
+ cluster_modify.add_new_child("cluster-name", modify.get('cluster_name'))
+ if modify.get('cluster_location') is not None:
+ cluster_modify.add_new_child("cluster-location", modify.get('cluster_location'))
+ if modify.get('cluster_contact') is not None:
+ cluster_modify.add_new_child("cluster-contact", modify.get('cluster_contact'))
+
+ try:
+ self.server.invoke_successfully(cluster_modify,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying cluster idetity details %s: %s'
+ % (self.parameters['cluster_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def cluster_create_wait(self):
+ """
+ Wait whilst cluster creation completes
+ """
+ if self.use_rest:
+ # wait is part of post_async for REST
+ return
+
+ cluster_wait = netapp_utils.zapi.NaElement('cluster-create-join-progress-get')
+ is_complete = False
+ status = ''
+ retries = self.parameters['time_out']
+ errors = []
+ while not is_complete and status not in ('failed', 'success') and retries > 0:
+ retries = retries - 10
+ time.sleep(10)
+ try:
+ result = self.server.invoke_successfully(cluster_wait, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ # collecting errors, and retrying
+ errors.append(repr(error))
+ continue
+
+ clus_progress = result.get_child_by_name('attributes')
+ result = clus_progress.get_child_by_name('cluster-create-join-progress-info')
+ is_complete = self.na_helper.get_value_for_bool(from_zapi=True,
+ value=result.get_child_content('is-complete'))
+ status = result.get_child_content('status')
+
+ if self.parameters['time_out'] == 0:
+ is_complete = True
+ if not is_complete and status != 'success':
+ current_status_message = result.get_child_content('current-status-message')
+ errors.append('Failed to confirm cluster creation %s: %s' % (self.parameters.get('cluster_name'), current_status_message))
+ if retries <= 0:
+ errors.append("Timeout after %s seconds" % self.parameters['time_out'])
+ self.module.fail_json(msg='Error creating cluster %s: %s'
+ % (self.parameters['cluster_name'], str(errors)))
+
+ return is_complete
+
+ def node_add_wait(self):
+ """
+ Wait whilst node is being added to the existing cluster
+ """
+ if self.use_rest:
+ # wait is part of post_async for REST
+ return
+
+ cluster_node_status = netapp_utils.zapi.NaElement('cluster-add-node-status-get-iter')
+ node_status_info = netapp_utils.zapi.NaElement('cluster-create-add-node-status-info')
+ node_status_info.add_new_child('cluster-ip', self.parameters.get('cluster_ip_address'))
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(node_status_info)
+ cluster_node_status.add_child_elem(query)
+
+ is_complete = None
+ failure_msg = None
+ retries = self.parameters['time_out']
+ errors = []
+ while is_complete != 'success' and is_complete != 'failure' and retries > 0:
+ retries = retries - 10
+ time.sleep(10)
+ try:
+ result = self.server.invoke_successfully(cluster_node_status, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if error.message == "Unable to find API: cluster-add-node-status-get-iter":
+ # This API is not supported for 9.3 or earlier releases, just wait a bit
+ time.sleep(60)
+ return
+ # collecting errors, and retrying
+ errors.append(repr(error))
+ continue
+
+ attributes_list = result.get_child_by_name('attributes-list')
+ join_progress = attributes_list.get_child_by_name('cluster-create-add-node-status-info')
+ is_complete = join_progress.get_child_content('status')
+ failure_msg = join_progress.get_child_content('failure-msg')
+
+ if self.parameters['time_out'] == 0:
+ is_complete = 'success'
+ if is_complete != 'success':
+ if 'Node is already in a cluster' in failure_msg:
+ return
+ elif retries <= 0:
+ errors.append("Timeout after %s seconds" % self.parameters['time_out'])
+ if failure_msg:
+ errors.append(failure_msg)
+ self.module.fail_json(msg='Error adding node with ip address %s: %s'
+ % (self.parameters['cluster_ip_address'], str(errors)))
+
+ def node_remove_wait(self):
+ ''' wait for node name or clister IP address to disappear '''
+ if self.use_rest:
+ # wait is part of delete_async for REST
+ return
+
+ node_name = self.parameters.get('node_name')
+ node_ip = self.parameters.get('cluster_ip_address')
+ retries = self.parameters['time_out']
+ while retries > 0:
+ retries = retries - 10
+ if node_name is not None and node_name not in self.get_cluster_nodes():
+ return
+ if node_ip is not None and self.get_cluster_ip_address(node_ip) is None:
+ return
+ time.sleep(10)
+ self.module.fail_json(msg='Timeout waiting for node to be removed from cluster.')
+
+ def get_cluster_action(self, cluster_identity):
+ cluster_action = None
+ if self.parameters.get('cluster_name') is not None:
+ cluster_action = self.na_helper.get_cd_action(cluster_identity, self.parameters)
+ if cluster_action == 'delete':
+ # delete only applies to node
+ cluster_action = None
+ self.na_helper.changed = False
+ return cluster_action
+
+ def get_node_action(self):
+ node_action = None
+ if self.parameters.get('cluster_ip_address') is not None:
+ existing_interfaces = self.get_cluster_ip_address(self.parameters.get('cluster_ip_address'))
+ if self.parameters.get('state') == 'present':
+ node_action = 'add_node' if existing_interfaces is None else None
+ else:
+ node_action = 'remove_node' if existing_interfaces is not None else None
+ if self.parameters.get('node_name') is not None and self.parameters['state'] == 'absent':
+ nodes = self.get_cluster_nodes()
+ if self.parameters.get('node_name') in nodes:
+ node_action = 'remove_node'
+ if node_action is not None:
+ self.na_helper.changed = True
+ return node_action
+
    def apply(self):
        """
        Apply action to cluster: create the cluster, add or remove a node,
        and/or modify the cluster identity, honoring check_mode.
        """
        cluster_identity = self.get_cluster_identity(ignore_error=True)
        cluster_action = self.get_cluster_action(cluster_identity)
        node_action = self.get_node_action()
        modify = self.na_helper.get_modified_attributes(cluster_identity, self.parameters)

        if not self.module.check_mode:
            # create_cluster returns False when the node is already in use: skip the wait then
            if cluster_action == 'create' and self.create_cluster():
                self.cluster_create_wait()
            if node_action == 'add_node':
                if self.add_node():
                    self.node_add_wait()
            elif node_action == 'remove_node':
                self.remove_node()
                self.node_remove_wait()
            if modify:
                self.modify_cluster_identity(modify)

        results = {'changed': self.na_helper.changed}
        if self.warnings:
            results['warnings'] = self.warnings
        # optional feature flag: expose the computed modifications in the module output
        if netapp_utils.has_feature(self.module, 'show_modified'):
            results['modify'] = modify
        self.module.exit_json(**results)
+
+
def main():
    """
    Entry point: build the cluster module object and run its apply loop.
    """
    NetAppONTAPCluster().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py
new file mode 100644
index 000000000..822a0778c
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Enable or disable HA on a cluster"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_cluster_ha
+options:
+ state:
+ choices: ['present', 'absent']
+ type: str
+ description:
+ - "Whether HA on cluster should be enabled or disabled."
+ default: present
+short_description: NetApp ONTAP Manage HA status for cluster
+version_added: 2.6.0
+'''
+
+EXAMPLES = """
+ - name: "Enable HA status for cluster"
+ netapp.ontap.na_ontap_cluster_ha:
+ state: present
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapClusterHA:
    """
    Enable or disable HA on a cluster, via REST (private CLI passthrough) or ZAPI.
    """
    def __init__(self):

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Set up Rest API
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def modify_cluster_ha(self, configure):
        """
        Enable or disable HA on cluster.

        :param configure: 'true' or 'false' as a string, as expected by ZAPI
        :return: None
        """
        if self.use_rest:
            return self.modify_cluster_ha_rest(configure)

        cluster_ha_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'cluster-ha-modify', **{'ha-configured': configure})
        try:
            self.server.invoke_successfully(cluster_ha_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying cluster HA to %s: %s'
                                  % (configure, to_native(error)),
                                  exception=traceback.format_exc())

    def get_cluster_ha_enabled(self):
        """
        Get current cluster HA status.

        :return: {'ha-configured': True} when enabled, None when disabled
        """
        if self.use_rest:
            return self.get_cluster_ha_enabled_rest()
        cluster_ha_get = netapp_utils.zapi.NaElement('cluster-ha-get')
        try:
            result = self.server.invoke_successfully(cluster_ha_get,
                                                     enable_tunneling=True)
        except netapp_utils.zapi.NaApiError:
            self.module.fail_json(msg='Error fetching cluster HA details',
                                  exception=traceback.format_exc())
        cluster_ha_info = result.get_child_by_name('attributes').get_child_by_name('cluster-ha-info')
        if cluster_ha_info.get_child_content('ha-configured') == 'true':
            return {'ha-configured': True}
        return None

    def get_cluster_ha_enabled_rest(self):
        """Get the HA status through the private CLI REST passthrough."""
        api = 'private/cli/cluster/ha'
        params = {'fields': 'configured'}
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg='Error fetching cluster HA details: %s' % to_native(error))
        # guard against a missing record or field: treat it as 'HA not configured'
        # instead of raising TypeError/KeyError on record['configured'].
        return {'ha-configured': True} if record and record.get('configured') else None

    def modify_cluster_ha_rest(self, configure):
        """Set the HA status through the private CLI REST passthrough."""
        api = 'private/cli/cluster/ha'
        # configure is the ZAPI-style string 'true'/'false'; REST wants a boolean
        body = {'configured': configure == 'true'}
        dummy, error = rest_generic.patch_async(self.rest_api, api, None, body)
        if error:
            self.module.fail_json(msg='Error modifying cluster HA to %s: %s' % (configure, to_native(error)))

    def apply(self):
        """
        Apply the desired HA state: state=present enables HA, state=absent disables it.
        """
        current = self.get_cluster_ha_enabled()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if not self.module.check_mode:
            if cd_action == 'create':
                self.modify_cluster_ha("true")
            elif cd_action == 'delete':
                self.modify_cluster_ha("false")

        result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
        self.module.exit_json(**result)
+
+
def main():
    """
    Entry point: build the cluster HA module object and run its apply loop.
    """
    NetAppOntapClusterHA().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py
new file mode 100644
index 000000000..820001cc4
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete cluster peer relations on ONTAP
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+ - netapp.ontap.netapp.na_ontap_peer
+module: na_ontap_cluster_peer
+options:
+ state:
+ choices: ['present', 'absent']
+ type: str
+ description:
+ - Whether the specified cluster peer should exist or not.
+ default: present
+ source_intercluster_lifs:
+ description:
+ - List of intercluster addresses of the source cluster.
+ - Used as peer-addresses in destination cluster.
+ - All these intercluster lifs should belong to the source cluster.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ aliases:
+ - source_intercluster_lif
+ dest_intercluster_lifs:
+ description:
+ - List of intercluster addresses of the destination cluster.
+ - Used as peer-addresses in source cluster.
+ - All these intercluster lifs should belong to the destination cluster.
+ version_added: 2.8.0
+ type: list
+ elements: str
+ aliases:
+ - dest_intercluster_lif
+ passphrase:
+ description:
+ - The arbitrary passphrase that matches the one given to the peer cluster.
+ type: str
+ source_cluster_name:
+ description:
+ - The name of the source cluster name in the peer relation to be deleted.
+ type: str
+ dest_cluster_name:
+ description:
+ - The name of the destination cluster name in the peer relation to be deleted.
+ - Required for delete
+ type: str
+ dest_hostname:
+ description:
+ - DEPRECATED - please use C(peer_options).
+ - Destination cluster IP or hostname which needs to be peered.
+ - Required to complete the peering process at destination cluster.
+ type: str
+ dest_username:
+ description:
+ - DEPRECATED - please use C(peer_options).
+ - Destination username.
+ - Optional if this is same as source username or if a certificate is used.
+ type: str
+ dest_password:
+ description:
+ - DEPRECATED - please use C(peer_options).
+ - Destination password.
+      - Optional if this is the same as the source password or if a certificate is used.
+ type: str
+ ipspace:
+ description:
+ - IPspace of the local intercluster LIFs.
+ - Assumes Default IPspace if not provided.
+ type: str
+ version_added: '20.11.0'
+ encryption_protocol_proposed:
+ description:
+ - Encryption protocol to be used for inter-cluster communication.
+ - Only available on ONTAP 9.5 or later.
+ choices: ['tls_psk', 'none']
+ type: str
+ version_added: '20.5.0'
+short_description: NetApp ONTAP Manage Cluster peering
+version_added: 2.7.0
+'''
+
+EXAMPLES = """
+
+ - name: Create cluster peer
+ netapp.ontap.na_ontap_cluster_peer:
+ state: present
+ source_intercluster_lifs: 1.2.3.4,1.2.3.5
+ dest_intercluster_lifs: 1.2.3.6,1.2.3.7
+ passphrase: XXXX
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ peer_options:
+ hostname: "{{ dest_netapp_hostname }}"
+ encryption_protocol_proposed: tls_psk
+
+ - name: Delete cluster peer
+ netapp.ontap.na_ontap_cluster_peer:
+ state: absent
+ source_cluster_name: test-source-cluster
+ dest_cluster_name: test-dest-cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ peer_options:
+ hostname: "{{ dest_netapp_hostname }}"
+
+ - name: Create cluster peer - different credentials
+ netapp.ontap.na_ontap_cluster_peer:
+ state: present
+ source_intercluster_lifs: 1.2.3.4,1.2.3.5
+ dest_intercluster_lifs: 1.2.3.6,1.2.3.7
+ passphrase: XXXX
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ peer_options:
+ hostname: "{{ dest_netapp_hostname }}"
+ cert_filepath: "{{ cert_filepath }}"
+ key_filepath: "{{ key_filepath }}"
+ encryption_protocol_proposed: tls_psk
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppONTAPClusterPeer:
+    """
+    Class with cluster peer methods
+    """
+
+    def __init__(self):
+        """Define the argument spec, validate mutually exclusive / required options, and
+        set up REST (preferred) or ZAPI connections for both source and destination clusters."""
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            source_intercluster_lifs=dict(required=False, type='list', elements='str', aliases=['source_intercluster_lif']),
+            dest_intercluster_lifs=dict(required=False, type='list', elements='str', aliases=['dest_intercluster_lif']),
+            passphrase=dict(required=False, type='str', no_log=True),
+            peer_options=dict(type='dict', options=netapp_utils.na_ontap_host_argument_spec_peer()),
+            dest_hostname=dict(required=False, type='str'),
+            dest_username=dict(required=False, type='str'),
+            dest_password=dict(required=False, type='str', no_log=True),
+            source_cluster_name=dict(required=False, type='str'),
+            dest_cluster_name=dict(required=False, type='str'),
+            ipspace=dict(required=False, type='str'),
+            encryption_protocol_proposed=dict(required=False, type='str', choices=['tls_psk', 'none'])
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            mutually_exclusive=[
+                ['peer_options', 'dest_hostname'],
+                ['peer_options', 'dest_username'],
+                ['peer_options', 'dest_password']
+            ],
+            required_one_of=[['peer_options', 'dest_hostname']],
+            required_if=[
+                ('state', 'absent', ['source_cluster_name', 'dest_cluster_name']),
+                ('state', 'present', ['source_intercluster_lifs', 'dest_intercluster_lifs'])
+            ],
+            supports_check_mode=True
+        )
+        # passphrase returned by ONTAP on the source create, reused for the destination create (REST only)
+        self.generated_passphrase = None
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # set peer server connection
+        if self.parameters.get('dest_hostname') is not None:
+            # if dest_hostname is present, peer_options is absent
+            # (deprecated dest_* options are folded into peer_options)
+            self.parameters['peer_options'] = dict(
+                hostname=self.parameters.get('dest_hostname'),
+                username=self.parameters.get('dest_username'),
+                password=self.parameters.get('dest_password'),
+            )
+        # fill unset peer connection options from the source connection options
+        netapp_utils.setup_host_options_from_module_params(
+            self.parameters['peer_options'], self.module,
+            netapp_utils.na_ontap_host_argument_spec_peer().keys())
+        # REST is used only when BOTH clusters support it; otherwise fall back to ZAPI
+        self.use_rest = False
+        self.rest_api = OntapRestAPI(self.module)
+        self.src_use_rest = self.rest_api.is_rest()
+        self.dst_rest_api = OntapRestAPI(self.module, host_options=self.parameters['peer_options'])
+        self.dst_use_rest = self.dst_rest_api.is_rest()
+        self.use_rest = bool(self.src_use_rest and self.dst_use_rest)
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg="the python NetApp-Lib module is required")
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+            self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module, host_options=self.parameters['peer_options'])
+
+    def cluster_peer_get_iter(self, cluster):
+        """
+        Compose NaElement object to query current source cluster using peer-cluster-name and peer-addresses parameters
+        :param cluster: type of cluster (source or destination)
+        :return: NaElement object for cluster-get-iter with query
+        """
+        cluster_peer_get = netapp_utils.zapi.NaElement('cluster-peer-get-iter')
+        query = netapp_utils.zapi.NaElement('query')
+        cluster_peer_info = netapp_utils.zapi.NaElement('cluster-peer-info')
+        # the query describes the REMOTE side as seen from 'cluster'
+        peer_lifs, peer_cluster = self.get_peer_lifs_cluster_keys(cluster)
+        if self.parameters.get(peer_lifs):
+            peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
+            for peer in self.parameters.get(peer_lifs):
+                peer_addresses.add_new_child('remote-inet-address', peer)
+            cluster_peer_info.add_child_elem(peer_addresses)
+        if self.parameters.get(peer_cluster):
+            cluster_peer_info.add_new_child('cluster-name', self.parameters[peer_cluster])
+        query.add_child_elem(cluster_peer_info)
+        cluster_peer_get.add_child_elem(query)
+        return cluster_peer_get
+
+    def cluster_peer_get(self, cluster):
+        """
+        Get current cluster peer info
+        :param cluster: type of cluster (source or destination)
+        :return: Dictionary of current cluster peer details if query successful, else return None
+        """
+        if self.use_rest:
+            return self.cluster_peer_get_rest(cluster)
+        cluster_peer_get_iter = self.cluster_peer_get_iter(cluster)
+        result, cluster_info = None, dict()
+        if cluster == 'source':
+            server = self.server
+        else:
+            server = self.dest_server
+        try:
+            result = server.invoke_successfully(cluster_peer_get_iter, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching cluster peer %s: %s'
+                                      % (cluster, to_native(error)),
+                                  exception=traceback.format_exc())
+        # return cluster peer details
+        if result.get_child_by_name('num-records') and \
+                int(result.get_child_content('num-records')) >= 1:
+            # NOTE(review): only the first cluster-peer-info record is examined
+            cluster_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('cluster-peer-info')
+            cluster_info['cluster_name'] = cluster_peer_info.get_child_content('cluster-name')
+            peers = cluster_peer_info.get_child_by_name('peer-addresses')
+            cluster_info['peer-addresses'] = [peer.get_content() for peer in peers.get_children()]
+            return cluster_info
+        return None
+
+    def get_peer_lifs_cluster_keys(self, cluster):
+        """Return the parameter keys naming the REMOTE side of 'cluster'
+        ('source' peers with the dest_* values and vice versa)."""
+        if cluster == 'source':
+            return 'dest_intercluster_lifs', 'dest_cluster_name'
+        return 'source_intercluster_lifs', 'source_cluster_name'
+
+    def cluster_peer_get_rest(self, cluster):
+        """REST implementation of cluster_peer_get: return a dict with cluster_name,
+        peer-addresses and uuid of the matching peer relationship, or None."""
+        api = 'cluster/peers'
+        fields = 'remote'
+        restapi = self.rest_api if cluster == 'source' else self.dst_rest_api
+        records, error = rest_generic.get_0_or_more_records(restapi, api, None, fields)
+        if error:
+            self.module.fail_json(msg=error)
+        cluster_info = {}
+        if records is not None:
+            peer_lifs, peer_cluster = self.get_peer_lifs_cluster_keys(cluster)
+            for record in records:
+                if 'remote' in record:
+                    peer_cluster_exist, peer_addresses_exist = False, False
+                    # check peer lif or peer cluster present in each peer cluster data in current.
+                    # if peer-lifs not present in parameters, use peer_cluster to filter desired cluster peer in current.
+                    if self.parameters.get(peer_lifs) is not None:
+                        peer_addresses_exist = set(self.parameters[peer_lifs]) == set(record['remote']['ip_addresses'])
+                    else:
+                        peer_cluster_exist = self.parameters[peer_cluster] == record['remote']['name']
+                    if peer_addresses_exist or peer_cluster_exist:
+                        cluster_info['cluster_name'] = record['remote']['name']
+                        cluster_info['peer-addresses'] = record['remote']['ip_addresses']
+                        # uuid is needed for the REST delete
+                        cluster_info['uuid'] = record['uuid']
+                        return cluster_info
+        return None
+
+    def cluster_peer_delete(self, cluster, uuid=None):
+        """
+        Delete a cluster peer on source or destination
+        For source cluster, peer cluster-name = destination cluster name and vice-versa
+        :param cluster: type of cluster (source or destination)
+        :param uuid: REST only - uuid of the peer relationship to delete
+        :return:
+        """
+        if self.use_rest:
+            return self.cluster_peer_delete_rest(cluster, uuid)
+        if cluster == 'source':
+            server, peer_cluster_name = self.server, self.parameters['dest_cluster_name']
+        else:
+            server, peer_cluster_name = self.dest_server, self.parameters['source_cluster_name']
+        cluster_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+            'cluster-peer-delete', **{'cluster-name': peer_cluster_name})
+        try:
+            server.invoke_successfully(cluster_peer_delete, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting cluster peer %s: %s'
+                                      % (peer_cluster_name, to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def cluster_peer_delete_rest(self, cluster, uuid):
+        """Delete the peer relationship identified by uuid on the given cluster (REST)."""
+        server = self.rest_api if cluster == 'source' else self.dst_rest_api
+        dummy, error = rest_generic.delete_async(server, 'cluster/peers', uuid)
+        if error:
+            self.module.fail_json(msg=error)
+
+    def cluster_peer_create(self, cluster):
+        """
+        Create a cluster peer on source or destination
+        For source cluster, peer addresses = destination inter-cluster LIFs and vice-versa
+        :param cluster: type of cluster (source or destination)
+        :return: None
+        """
+        if self.use_rest:
+            return self.cluster_peer_create_rest(cluster)
+        cluster_peer_create = netapp_utils.zapi.NaElement.create_node_with_children('cluster-peer-create')
+        if self.parameters.get('passphrase') is not None:
+            cluster_peer_create.add_new_child('passphrase', self.parameters['passphrase'])
+        peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
+        server, peer_address = self.get_server_and_peer_address(cluster)
+        for each in peer_address:
+            peer_addresses.add_new_child('remote-inet-address', each)
+        cluster_peer_create.add_child_elem(peer_addresses)
+        if self.parameters.get('encryption_protocol_proposed') is not None:
+            cluster_peer_create.add_new_child('encryption-protocol-proposed', self.parameters['encryption_protocol_proposed'])
+        if self.parameters.get('ipspace') is not None:
+            cluster_peer_create.add_new_child('ipspace-name', self.parameters['ipspace'])
+
+        try:
+            server.invoke_successfully(cluster_peer_create, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating cluster peer %s: %s'
+                                      % (peer_address, to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def get_server_and_peer_address(self, cluster):
+        """Return (connection, remote intercluster LIFs) for the given cluster,
+        picking the REST or ZAPI connection depending on self.use_rest."""
+        if cluster == 'source':
+            server = self.rest_api if self.use_rest else self.server
+            return server, self.parameters['dest_intercluster_lifs']
+        server = self.dst_rest_api if self.use_rest else self.dest_server
+        return server, self.parameters['source_intercluster_lifs']
+
+    def cluster_peer_create_rest(self, cluster):
+        """REST implementation of cluster_peer_create.
+
+        When no passphrase is supplied, ONTAP generates one on the source create;
+        it is captured in self.generated_passphrase for the destination create."""
+        api = 'cluster/peers'
+        body = {}
+        if self.parameters.get('passphrase') is not None:
+            body['authentication.passphrase'] = self.parameters['passphrase']
+        # generate passphrase in source if passphrase not provided.
+        elif cluster == 'source':
+            body['authentication.generate_passphrase'] = True
+        elif cluster == 'destination':
+            body['authentication.passphrase'] = self.generated_passphrase
+        server, peer_address = self.get_server_and_peer_address(cluster)
+        body['remote.ip_addresses'] = peer_address
+        if self.parameters.get('encryption_protocol_proposed') is not None:
+            body['encryption.proposed'] = self.parameters['encryption_protocol_proposed']
+        else:
+            # Default value for encryption.proposed is tls_psk.
+            # explicitly set to none if encryption_protocol_proposed options not present in parameters.
+            body['encryption.proposed'] = 'none'
+        if self.parameters.get('ipspace') is not None:
+            body['ipspace.name'] = self.parameters['ipspace']
+        response, error = rest_generic.post_async(server, api, body)
+        if error:
+            self.module.fail_json(msg=error)
+        # capture the ONTAP-generated passphrase from the source create reply
+        if response and cluster == 'source' and 'passphrase' not in self.parameters:
+            for record in response['records']:
+                self.generated_passphrase = record['authentication']['passphrase']
+
+    def apply(self):
+        """
+        Apply action to cluster peer
+        :return: None
+        """
+        source = self.cluster_peer_get('source')
+        destination = self.cluster_peer_get('destination')
+        source_action = self.na_helper.get_cd_action(source, self.parameters)
+        destination_action = self.na_helper.get_cd_action(destination, self.parameters)
+        self.na_helper.changed = False
+        # create only if expected cluster peer relation is not present on both source and destination clusters
+        # will error out with appropriate message if peer relationship already exists on either cluster
+        if source_action == 'create' or destination_action == 'create':
+            if not self.module.check_mode:
+                self.cluster_peer_create('source')
+                self.cluster_peer_create('destination')
+            self.na_helper.changed = True
+        # delete peer relation in cluster where relation is present
+        else:
+            if source_action == 'delete':
+                if not self.module.check_mode:
+                    uuid = source['uuid'] if source and self.use_rest else None
+                    self.cluster_peer_delete('source', uuid)
+                self.na_helper.changed = True
+            if destination_action == 'delete':
+                if not self.module.check_mode:
+                    uuid = destination['uuid'] if destination and self.use_rest else None
+                    self.cluster_peer_delete('destination', uuid)
+                self.na_helper.changed = True
+
+        result = netapp_utils.generate_result(self.na_helper.changed, extra_responses={'source_action': source_action,
+                                                                                       'destination_action': destination_action})
+        self.module.exit_json(**result)
+
+
+def main():
+ """
+ Execute action
+ :return: None
+ """
+ community_obj = NetAppONTAPClusterPeer()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py
new file mode 100644
index 000000000..377e1947c
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py
@@ -0,0 +1,290 @@
+#!/usr/bin/python
+'''
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Run system-cli commands on ONTAP.
+ - Can't be used with cert authentication and domain authentication accounts.
+ - Requires ontapi and console permissions. Console is not supported for data vservers.
+ - Requires write permissions, even for show commands! ONTAP reports
+ "Insufficient privileges" and "user 'xxxxx' does not have write access to this resource"
+ for a readonly user.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+module: na_ontap_command
+short_description: NetApp ONTAP Run any cli command, the username provided needs to have console login permission.
+version_added: 2.7.0
+options:
+ command:
+ description:
+ - a comma separated list containing the command and arguments.
+ required: true
+ type: list
+ elements: str
+ privilege:
+ description:
+ - privilege level at which to run the command.
+ choices: ['admin', 'advanced']
+ default: admin
+ type: str
+ version_added: 2.8.0
+ return_dict:
+ description:
+      - Returns a parseable dictionary instead of raw XML output
+ - C(result_value)
+ - C(status) > passed, failed.
+ - C(stdout) > command output in plaintext.
+ - C(stdout_lines) > list of command output lines.
+ - C(stdout_lines_filter) > empty list or list of command output lines matching I(include_lines) or I(exclude_lines) parameters.
+ - C(xml_dict) > JSON representation of what the CLI returned.
+ type: bool
+ default: false
+ version_added: 2.9.0
+ vserver:
+ description:
+ - If running as vserver admin, you must give a I(vserver) or module will fail
+ version_added: "19.10.0"
+ type: str
+ include_lines:
+ description:
+ - applied only when I(return_dict) is true
+ - return only lines containing string pattern in C(stdout_lines_filter)
+ default: ''
+ type: str
+ version_added: "19.10.0"
+ exclude_lines:
+ description:
+ - applied only when I(return_dict) is true
+      - return only lines not containing string pattern in C(stdout_lines_filter)
+ default: ''
+ type: str
+ version_added: "19.10.0"
+'''
+
+EXAMPLES = """
+ - name: run ontap cli command
+ netapp.ontap.na_ontap_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: ['version']
+
+  # Same as above, but returns parseable dictionary
+ - name: run ontap cli command
+ netapp.ontap.na_ontap_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: ['node', 'show', '-fields', 'node,health,uptime,model']
+ privilege: 'admin'
+ return_dict: true
+
+ # Same as above, but with lines filtering
+ - name: run ontap cli command
+ netapp.ontap.na_ontap_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: ['node', 'show', '-fields', 'node,health,uptime,model']
+ exclude_lines: 'ode ' # Exclude lines with 'Node ' or 'node ', or anything else containing 'ode '.
+ privilege: 'admin'
+ return_dict: true
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+class NetAppONTAPCommand():
+    ''' calls a CLI command '''
+
+    def __init__(self):
+        ''' gather arguments, warn about ZAPI deprecation, and set up the ZAPI connection '''
+        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
+        self.argument_spec.update(dict(
+            command=dict(required=True, type='list', elements='str'),
+            privilege=dict(required=False, type='str', choices=['admin', 'advanced'], default='admin'),
+            return_dict=dict(required=False, type='bool', default=False),
+            vserver=dict(required=False, type='str'),
+            include_lines=dict(required=False, type='str', default=''),
+            exclude_lines=dict(required=False, type='str', default=''),
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        parameters = self.module.params
+        # set up state variables
+        self.command = parameters['command']
+        self.privilege = parameters['privilege']
+        self.vserver = parameters['vserver']
+        self.return_dict = parameters['return_dict']
+        self.include_lines = parameters['include_lines']
+        self.exclude_lines = parameters['exclude_lines']
+
+        # accumulator filled in by parse_xml_to_dict when return_dict is true
+        self.result_dict = {
+            'status': "",
+            'result_value': 0,
+            'invoked_command': " ".join(self.command),
+            'stdout': "",
+            'stdout_lines': [],
+            'stdout_lines_filter': [],
+            'xml_dict': {},
+        }
+        self.module.warn('The module only supports ZAPI and is deprecated, and will no longer work with newer versions '
+                         'of ONTAP when ONTAPI is deprecated in CY22-Q4')
+        self.module.warn('netapp.ontap.na_ontap_rest_cli should be used instead.')
+
+        if not netapp_utils.has_netapp_lib():
+            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True)
+
+    def run_command(self):
+        ''' calls the ZAPI '''
+        command_obj = netapp_utils.zapi.NaElement("system-cli")
+
+        args_obj = netapp_utils.zapi.NaElement("args")
+        if self.return_dict:
+            # prepend 'set -showseparator "###" ;' so columns can be split reliably when parsing
+            args_obj.add_new_child('arg', 'set')
+            args_obj.add_new_child('arg', '-showseparator')
+            args_obj.add_new_child('arg', '"###"')
+            args_obj.add_new_child('arg', ';')
+        for arg in self.command:
+            args_obj.add_new_child('arg', arg)
+        command_obj.add_child_elem(args_obj)
+        command_obj.add_new_child('priv', self.privilege)
+
+        try:
+            output = self.server.invoke_successfully(command_obj, True)
+            if self.return_dict:
+                # Parseable dict output
+                return self.parse_xml_to_dict(output.to_string())
+            else:
+                # Raw XML output
+                return output.to_string()
+
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error running command %s: %s' %
+                                      (self.command, to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def apply(self):
+        ''' calls the command and returns raw output '''
+        changed = True
+        if self.module.check_mode:
+            output = "Would run command: '%s'" % str(self.command)
+        else:
+            output = self.run_command()
+        self.module.exit_json(changed=changed, msg=output)
+
+    def parse_xml_to_dict(self, xmldata):
+        '''Parse raw XML from system-cli and create an Ansible parseable dictionary'''
+        xml_import_ok = True
+        xml_parse_ok = True
+        importing = 'ast'
+
+        try:
+            import ast
+            importing = 'xml.parsers.expat'
+            import xml.parsers.expat
+        except ImportError:
+            self.result_dict['status'] = "XML parsing failed. Cannot import %s!" % importing
+            self.result_dict['stdout'] = str(xmldata)
+            self.result_dict['result_value'] = -1
+            xml_import_ok = False
+
+        if xml_import_ok:
+            # newlines are encoded as '---' so expat sees a single line; undone in _format_escaped_data
+            xml_str = xmldata.decode('utf-8').replace('\n', '---')
+            xml_parser = xml.parsers.expat.ParserCreate()
+            xml_parser.StartElementHandler = self._start_element
+            xml_parser.CharacterDataHandler = self._char_data
+            xml_parser.EndElementHandler = self._end_element
+
+            try:
+                xml_parser.Parse(xml_str)
+            except xml.parsers.expat.ExpatError as errcode:
+                self.result_dict['status'] = "XML parsing failed: " + str(errcode)
+                self.result_dict['stdout'] = str(xmldata)
+                self.result_dict['result_value'] = -1
+                xml_parse_ok = False
+
+        if xml_parse_ok:
+            self.result_dict['status'] = self.result_dict['xml_dict']['results']['attrs']['status']
+            stdout_string = self._format_escaped_data(self.result_dict['xml_dict']['cli-output']['data'])
+            self.result_dict['stdout'] = stdout_string
+            # Generate stdout_lines list
+            for line in stdout_string.split('\n'):
+                stripped_line = line.strip()
+                if len(stripped_line) > 1:
+                    self.result_dict['stdout_lines'].append(stripped_line)
+
+                # Generate stdout_lines_filter_list
+                if self.exclude_lines:
+                    if self.include_lines in stripped_line and self.exclude_lines not in stripped_line:
+                        self.result_dict['stdout_lines_filter'].append(stripped_line)
+                elif self.include_lines and self.include_lines in stripped_line:
+                    self.result_dict['stdout_lines_filter'].append(stripped_line)
+
+            self.result_dict['xml_dict']['cli-output']['data'] = stdout_string
+            cli_result_value = self.result_dict['xml_dict']['cli-result-value']['data']
+            try:
+                # get rid of extra quotes "'1'", but maybe "u'1'" or "b'1'"
+                cli_result_value = ast.literal_eval(cli_result_value)
+            except (SyntaxError, ValueError):
+                pass
+            try:
+                self.result_dict['result_value'] = int(cli_result_value)
+            except ValueError:
+                self.result_dict['result_value'] = cli_result_value
+
+        return self.result_dict
+
+    def _start_element(self, name, attrs):
+        ''' Start XML element '''
+        # 'active_element' / 'last_element' track parser position for _char_data / _end_element
+        self.result_dict['xml_dict'][name] = {}
+        self.result_dict['xml_dict'][name]['attrs'] = attrs
+        self.result_dict['xml_dict'][name]['data'] = ""
+        self.result_dict['xml_dict']['active_element'] = name
+        self.result_dict['xml_dict']['last_element'] = ""
+
+    def _char_data(self, data):
+        ''' Dump XML element data '''
+        # NOTE(review): assigns rather than appends, so repeated character-data events for
+        # one element keep only the last chunk - confirm this is intended
+        self.result_dict['xml_dict'][str(self.result_dict['xml_dict']['active_element'])]['data'] = repr(data)
+
+    def _end_element(self, name):
+        ''' End XML element: record it and clear the active element '''
+        self.result_dict['xml_dict']['last_element'] = name
+        self.result_dict['xml_dict']['active_element'] = ""
+
+    @classmethod
+    def _format_escaped_data(cls, datastring):
+        ''' replace helper escape sequences '''
+        # undo the '---' newline encoding and the '###' column separator added in run_command
+        formatted_string = datastring.replace('------', '---').replace('---', '\n').replace("###", " ").strip()
+        retval_string = ""
+        for line in formatted_string.split('\n'):
+            stripped_line = line.strip()
+            if len(stripped_line) > 1:
+                retval_string += stripped_line + "\n"
+        return retval_string
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppONTAPCommand()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_debug.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_debug.py
new file mode 100644
index 000000000..396240806
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_debug.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+"""
+create Debug module to diagnose netapp-lib import and connection
+"""
+
+# (c) 2020-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+module: na_ontap_debug
+short_description: NetApp ONTAP Debug netapp-lib import and connection.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.1.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Display issues related to importing netapp-lib and connection with diagnose
+options:
+ vserver:
+ description:
+ - The vserver name to test for ZAPI tunneling.
+ required: false
+ type: str
+'''
+EXAMPLES = """
+ - name: Check import netapp-lib
+ na_ontap_debug:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+import sys
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.rest_user import get_users
+from ansible_collections.netapp.ontap.plugins.module_utils.rest_vserver import get_vserver
+
+
+class NetAppONTAPDebug(object):
+ """Class with Debug methods"""
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ vserver=dict(required=False, type="str"),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.log_list = []
+ self.error_list = []
+ self.note_list = []
+ self.server = None
+
+ def list_versions(self):
+ self.log_list.append('Ansible version: %s' % netapp_utils.ANSIBLE_VERSION)
+ self.log_list.append('ONTAP collection version: %s' % netapp_utils.COLLECTION_VERSION)
+ self.log_list.append('Python version: %s' % sys.version[:3])
+ self.log_list.append('Python executable path: %s' % sys.executable)
+
+ def import_lib(self):
+ if not netapp_utils.has_netapp_lib():
+ msgs = [
+ 'Error importing netapp-lib or a dependency: %s.' % str(netapp_utils.IMPORT_EXCEPTION),
+ 'Install the python netapp-lib module or a missing dependency.',
+ 'Additional diagnostic information:',
+ 'Python Executable Path: %s.' % sys.executable,
+ 'Python Version: %s.' % sys.version,
+ 'System Path: %s.' % ','.join(sys.path),
+ ]
+ self.error_list.append(' '.join(msgs))
+ return
+ self.log_list.append('netapp-lib imported successfully.')
+
+ def check_connection(self, connection_type):
+ """
+ check connection errors and diagnose
+ """
+ error_string = None
+ if connection_type == "REST":
+ api = 'cluster'
+ message, error_string = self.rest_api.get(api)
+ elif connection_type == "ZAPI":
+ if 'vserver' not in self.parameters:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ version_obj = netapp_utils.zapi.NaElement("system-get-version")
+ try:
+ result = self.server.invoke_successfully(version_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ error_string = to_native(error)
+ else:
+ self.module.fail_json(msg='Internal error, unexpected connection type: %s' % connection_type)
+
+ if error_string is not None:
+ summary_msg = None
+ error_patterns = ['Connection timed out',
+ 'Resource temporarily unavailable',
+ 'ConnectTimeoutError',
+ 'Network is unreachable']
+ if any(x in error_string for x in error_patterns):
+ summary_msg = 'Error: invalid or unreachable hostname: %s' % self.parameters['hostname']
+ if 'vserver' in self.parameters:
+ summary_msg += ' for SVM: %s ' % self.parameters['vserver']
+ self.error_list.append('Error in hostname - Address does not exist or is not reachable: ' + error_string)
+ self.error_list.append(summary_msg + ' using %s.' % connection_type)
+ return
+ error_patterns = ['Name or service not known', 'Name does not resolve']
+ if any(x in error_string for x in error_patterns):
+ summary_msg = 'Error: unknown or not resolvable hostname: %s' % self.parameters['hostname']
+ if 'vserver' in self.parameters:
+ summary_msg += ' for SVM: %s ' % self.parameters['vserver']
+ self.error_list.append('Error in hostname - DNS name cannot be resolved: ' + error_string)
+ self.error_list.append('%s cannot be resolved using %s.' % (summary_msg, connection_type))
+ else:
+ self.error_list.append('Other error for hostname: %s using %s: %s.' % (self.parameters['hostname'], connection_type, error_string))
+ self.error_list.append('Unclassified, see msg')
+ return False
+
+ ontap_version = message['version']['full'] if connection_type == 'REST' else result['version']
+ self.log_list.append('%s connected successfully.' % connection_type)
+ self.log_list.append('ONTAP version: %s' % ontap_version)
+ return True
+
+ def list_interfaces(self, vserver_name):
+ vserver, error = get_vserver(self.rest_api, vserver_name, fields='ip_interfaces')
+ if not error and not vserver:
+ error = 'not found'
+ if error:
+ self.error_list.append('Error getting vserver in list_interfaces: %s: %s' % (vserver_name, error))
+ else:
+ interfaces = vserver.get('ip_interfaces')
+ if not interfaces:
+ self.error_list.append('Error vserver is not associated with a network interface: %s' % vserver_name)
+ return
+ for interface in interfaces:
+ data = [vserver_name]
+ for field in (['name'], ['ip', 'address'], ['services']):
+ value = self.na_helper.safe_get(interface, field)
+ if isinstance(value, list):
+ value = ','.join(value)
+ if field == ['services'] and value and 'management' not in value:
+ self.note_list.append('NOTE: no management policy in services for %s: %s' % (data, value))
+ data.append(value)
+ self.log_list.append('vserver: %s, interface: %s, IP: %s, service policies: %s' % tuple(data))
+
+ def validate_user(self, user):
+ locked = user.get('locked')
+ if locked:
+ self.note_list.append('NOTE: user: %s is locked on vserver: %s' % (user['name'], self.na_helper.safe_get(user, ['owner', 'name'])))
+ applications = user.get('applications', [])
+ apps = [app['application'] for app in applications]
+ role = self.na_helper.safe_get(user, ['role', 'name'])
+ for application in ('http', 'ontapi', 'console'):
+ if application not in apps and (application != 'console' or role == 'admin'):
+ self.note_list.append('NOTE: application %s not found for user: %s: %s' % (application, user['name'], apps))
+ if application == 'console':
+ self.note_list.append("NOTE: console access is only needed for na_ontap_command.")
+ has_http = locked is False and 'http' in apps
+ has_ontapi = locked is False and 'ontapi' in apps
+ return has_http, has_ontapi
+
+ def list_users(self, vserver_name=None, user_name=None):
+ query = {'owner.name': vserver_name} if vserver_name else {'name': user_name}
+ users, error = get_users(self.rest_api, query, 'applications,locked,owner,role')
+ if not error and not users:
+ error = 'none found'
+ name = vserver_name or user_name
+ if error:
+ if 'not authorized for that command' in error:
+ self.log_list.append('Not autorized to get accounts for: %s: %s' % (name, error))
+ else:
+ self.error_list.append('Error getting accounts for: %s: %s' % (name, error))
+ else:
+ one_http, one_ontapi = False, False
+ for user in users:
+ data = {}
+ for field in ('owner', 'name', 'role', 'locked', 'applications'):
+ if field in ('owner', 'role'):
+ value = str(self.na_helper.safe_get(user, [field, 'name']))
+ else:
+ value = str(user.get(field))
+ data[field] = value
+ self.log_list.append(', '. join('%s: %s' % x for x in data.items()))
+ has_http, has_ontapi = self.validate_user(user)
+ one_http |= has_http
+ one_ontapi |= has_ontapi
+ msg = 'Error: no unlocked user for %s on vserver: %s'if vserver_name else\
+ 'Error: %s is not enabled for user %s'
+ if not one_http:
+ self.error_list.append(msg % ('http', name))
+ if not one_ontapi:
+ self.error_list.append(msg % ('ontapi', name))
+
+ def check_vserver(self, name):
+
+ self.list_interfaces(name)
+ self.list_users(vserver_name=name)
+
+ def apply(self):
+ """
+ Apply debug
+ """
+ # report Ansible and our collection versions
+ self.list_versions()
+
+ # check import netapp-lib
+ self.import_lib()
+
+ # check zapi connection errors only if import successful
+ if netapp_utils.has_netapp_lib():
+ self.check_connection("ZAPI")
+
+ # check rest connection errors
+ has_rest = self.check_connection("REST")
+
+ if has_rest:
+ self.list_users(user_name=self.parameters.get('username'))
+ if 'vserver' in self.parameters:
+ self.check_vserver(self.parameters['vserver'])
+
+ msgs = {}
+ if self.note_list:
+ msgs['notes'] = self.note_list
+ if self.error_list:
+ msgs['msg'] = self.error_list
+ if self.log_list:
+ msgs['msg_passed'] = self.log_list
+ self.module.fail_json(**msgs)
+ msgs['msg'] = self.log_list
+ self.module.exit_json(**msgs)
+
+
+def main():
+    """Module entry point: instantiate NetAppONTAPDebug and run the diagnostics."""
+    debug_obj = NetAppONTAPDebug()
+    debug_obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disk_options.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disk_options.py
new file mode 100644
index 000000000..3c05a6f56
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disk_options.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+
+# (c) 2021-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_disk_options
+short_description: NetApp ONTAP modify storage disk options
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Modify a nodes storage disk options
+- Requires ONTAP 9.6 or greater
+options:
+ node:
+ description:
+ - The node to modify a disk option for
+ required: true
+ type: str
+
+ bkg_firmware_update:
+ description:
+ - Whether or not background disk firmware updates should be enabled
+ type: bool
+
+ autocopy:
+ description:
+ - Whether or not disk auto copies should be enabled
+ type: bool
+
+ autoassign:
+ description:
+ - Whether or not disks should be automatically assigned to a node
+ type: bool
+
+ autoassign_policy:
+ description:
+ - the auto assign policy to use
+ type: str
+ choices: ['default', 'bay', 'shelf', 'stack']
+ '''
+
+EXAMPLES = """
+ - name: Enable Disk Auto Assign
+ na_ontap_disk_options:
+ node: node1
+ autoassign: true
+ username: '{{ username }}'
+ password: '{{ password }}'
+ hostname: '{{ hostname }}'
+
+ - name: Disable Disk Auto Assign
+ na_ontap_disk_options:
+ node: node1
+ autoassign: false
+ username: '{{ username }}'
+ password: '{{ password }}'
+ hostname: '{{ hostname }}'
+
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapDiskOptions:
+    """Modify per-node storage disk options (autoassign, autocopy, background
+    firmware update) through the REST private CLI passthrough. REST-only;
+    requires ONTAP 9.6 or later."""
+
+    def __init__(self):
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            node=dict(required=True, type='str'),
+            bkg_firmware_update=dict(required=False, type='bool'),
+            autocopy=dict(required=False, type='bool'),
+            autoassign=dict(required=False, type='bool'),
+            autoassign_policy=dict(required=False, type='str', choices=['default', 'bay', 'shelf', 'stack'])
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+
+        # this module has no ZAPI implementation: REST is mandatory
+        if not self.use_rest:
+            self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_disk_options', '9.6'))
+
+    def convert_to_bool(self, adict, key):
+        """burt1468160 - 9.8 returns True/False, but 9.10.1 returns 'on'/'off'
+
+        Normalize either representation to a bool; fail on anything else.
+        """
+        value = adict[key]
+        if isinstance(value, bool):
+            return value
+        if value in ('on', 'off'):
+            return value == 'on'
+        self.module.fail_json(msg='Unexpected value for field %s: %s' % (key, value))
+
+    def get_disk_options(self):
+        """
+        Return the current storage disk options for the node
+        :return: a dict of storage disk options
+        """
+        api = "private/cli/storage/disk/option"
+        # the query uses hyphenated CLI field names, but the returned record
+        # is keyed with underscores (e.g. bkg_firmware_update)
+        query = {
+            'fields': 'node,autoassign,bkg-firmware-update,autocopy,autoassign-policy',
+            'node': self.parameters['node']
+        }
+        record, error = rest_generic.get_one_record(self.rest_api, api, query)
+
+        if error:
+            self.module.fail_json(msg='Error %s' % error)
+        if record is None:
+            self.module.fail_json(msg='Error on GET %s, no record.' % api)
+        return {
+            'node': record['node'],
+            'bkg_firmware_update': self.convert_to_bool(record, 'bkg_firmware_update'),
+            'autocopy': self.convert_to_bool(record, 'autocopy'),
+            'autoassign': self.convert_to_bool(record, 'autoassign'),
+            'autoassign_policy': record['autoassign_policy']
+        }
+
+    def modify_disk_options(self, modify):
+        """
+        Modifies a nodes disk options
+        :param modify: dict of option names/values to change
+        :return: None
+        """
+
+        api = "private/cli/storage/disk/option"
+        query = {
+            'node': self.parameters['node']
+        }
+
+        dummy, error = rest_generic.patch_async(self.rest_api, api, None, modify, query)
+        if error:
+            self.module.fail_json(msg='Error %s' % error)
+
+    def apply(self):
+        """Compare current options with desired ones and patch the difference."""
+        current = self.get_disk_options()
+        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+        if self.na_helper.changed and not self.module.check_mode:
+            self.modify_disk_options(modify)
+
+        result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Module entry point: instantiate NetAppOntapDiskOptions and apply the
+    requested disk-option changes.
+    """
+    command = NetAppOntapDiskOptions()
+    command.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py
new file mode 100644
index 000000000..a4db53319
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+
+# (c) 2018-2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_disks
+
+short_description: NetApp ONTAP Assign disks to nodes
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Assign disks to a node.
+- Disk autoassign must be turned off before using this module to prevent the disks being reassigned automatically by the cluster.
+- This can be done through na_ontap_disk_options or via the cli "disk option modify -node <node_name> -autoassign off".
+- If min_spares is not specified min_spares default is 1 if SSD or 2 for any other disk type.
+- If disk_count is not specified all unassigned disks will be assigned to the node specified.
+
+options:
+ node:
+ required: true
+ type: str
+ description:
+ - The node that we want to assign/unassign disks.
+
+ disk_count:
+ description:
+ - Total number of disks a node should own.
+ type: int
+ version_added: 2.9.0
+
+ disk_type:
+ description:
+ - Assign specified type of disk (or set of disks).
+ type: str
+ choices: ['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD_NVM', 'VMDISK', 'unknown']
+ version_added: 20.6.0
+
+ min_spares:
+ description:
+ - Minimum spares required per type for the node.
+ type: int
+ version_added: 21.7.0
+
+'''
+
+EXAMPLES = """
+ - name: Assign specified total disks to node
+ netapp.ontap.na_ontap_disks:
+ node: node1
+ disk_count: 56
+ disk_type: VMDISK
+ min_spares: 2
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+
+ - name: Assign all unassigned disks to node1
+ netapp.ontap.na_ontap_disks:
+ node: node1
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class NetAppOntapDisks():
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ node=dict(required=True, type='str'),
+ disk_count=dict(required=False, type='int'),
+ disk_type=dict(required=False, type='str', choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD_NVM', 'VMDISK', 'unknown']),
+ min_spares=dict(required=False, type='int')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # If min_spares is not specified min_spares is 1 if SSD, min_spares is 2 for any other disk type.
+ self.parameters['min_spares'] = 1 if self.parameters.get('disk_type') in ('SSD', 'SSD_NVM') else 2
+
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_disks(self, container_type, node=None):
+ """
+ Check for owned disks, unassigned disks or spare disks.
+ Return: list of disks or an empty list
+ """
+ if self.use_rest:
+ api = "storage/disks"
+ if container_type == 'owned':
+ query = {
+ 'home_node.name': node,
+ 'container_type': '!unassigned',
+ 'fields': 'name'
+ }
+ if container_type == 'unassigned':
+ query = {
+ 'container_type': 'unassigned',
+ 'fields': 'name'
+ }
+ if container_type == 'spare':
+ query = {
+ 'home_node.name': node,
+ 'container_type': 'spare',
+ 'fields': 'name'
+ }
+ if 'disk_type' in self.parameters:
+ query['type'] = self.parameters['disk_type']
+
+ message, error = self.rest_api.get(api, query)
+ records, error = rrh.check_for_0_or_more_records(api, message, error)
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ return records if records else list()
+
+ else:
+ disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
+ disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info')
+
+ if container_type == 'owned':
+ disk_ownership_info = netapp_utils.zapi.NaElement('disk-ownership-info')
+ disk_ownership_info.add_new_child('home-node-name', self.parameters['node'])
+ disk_storage_info.add_child_elem(disk_ownership_info)
+ if container_type == 'unassigned':
+ disk_raid_info = netapp_utils.zapi.NaElement('disk-raid-info')
+ disk_raid_info.add_new_child('container-type', 'unassigned')
+ disk_storage_info.add_child_elem(disk_raid_info)
+
+ disk_query = netapp_utils.zapi.NaElement('query')
+
+ if 'disk_type' in self.parameters and container_type in ('unassigned', 'owned'):
+ disk_inventory_info = netapp_utils.zapi.NaElement('disk-inventory-info')
+ disk_inventory_info.add_new_child('disk-type', self.parameters['disk_type'])
+ disk_query.add_child_elem(disk_inventory_info)
+
+ if container_type == 'spare':
+ disk_ownership_info = netapp_utils.zapi.NaElement('disk-ownership-info')
+ disk_raid_info = netapp_utils.zapi.NaElement('disk-raid-info')
+ disk_ownership_info.add_new_child('owner-node-name', node)
+ if 'disk_type' in self.parameters:
+ disk_inventory_info = netapp_utils.zapi.NaElement('disk-inventory-info')
+ disk_inventory_info.add_new_child('disk-type', self.parameters['disk_type'])
+ disk_storage_info.add_child_elem(disk_inventory_info)
+
+ disk_raid_info.add_new_child('container-type', 'spare')
+ disk_storage_info.add_child_elem(disk_ownership_info)
+ disk_storage_info.add_child_elem(disk_raid_info)
+
+ disk_query.add_child_elem(disk_storage_info)
+ disk_iter.add_child_elem(disk_query)
+
+ try:
+ result = self.server.invoke_successfully(disk_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting disk information: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ disks = []
+
+ if result.get_child_by_name('attributes-list'):
+ attributes_list = result.get_child_by_name('attributes-list')
+ storage_disk_info_attributes = attributes_list.get_children()
+
+ for disk in storage_disk_info_attributes:
+ disk_inventory_info = disk.get_child_by_name('disk-inventory-info')
+ disk_name = disk_inventory_info.get_child_content('disk-cluster-name')
+ disks.append(disk_name)
+
+ return disks
+
+ def get_partner_node_name(self):
+ """
+ return: partner_node_name, str
+ """
+ if self.use_rest:
+ api = "/cluster/nodes"
+ query = {
+ 'ha.partners.name': self.parameters['node']
+ }
+ message, error = self.rest_api.get(api, query)
+ records, error = rrh.check_for_0_or_more_records(api, message, error)
+ if error:
+ self.module.fail_json(msg=error)
+
+ return records[0]['name'] if records else None
+
+ else:
+ partner_name = None
+ cf_status = netapp_utils.zapi.NaElement('cf-status')
+ cf_status.add_new_child('node', self.parameters['node'])
+
+ try:
+ result = self.server.invoke_successfully(cf_status, True)
+
+ if result.get_child_by_name('partner-name'):
+ partner_name = result.get_child_content('partner-name')
+
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting partner name for node %s: %s' % (self.parameters['node'], to_native(error)),
+ exception=traceback.format_exc())
+
+ return partner_name
+
+ def disk_assign(self, needed_disks):
+ """
+ Assign disks to node
+ """
+ if self.use_rest:
+ api = "private/cli/storage/disk/assign"
+ if needed_disks > 0:
+ body = {
+ 'owner': self.parameters['node'],
+ 'count': needed_disks
+ }
+ if 'disk_type' in self.parameters:
+ body['type'] = self.parameters['disk_type']
+ else:
+ body = {
+ 'node': self.parameters['node'],
+ 'all': True
+ }
+
+ dummy, error = self.rest_api.post(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ else:
+ if needed_disks > 0:
+ assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'disk-sanown-assign', **{'owner': self.parameters['node'],
+ 'disk-count': str(needed_disks)})
+ if 'disk_type' in self.parameters:
+ assign_disk.add_new_child('disk-type', self.parameters['disk_type'])
+ else:
+ assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'disk-sanown-assign', **{'node-name': self.parameters['node'],
+ 'all': 'true'})
+
+ try:
+ self.server.invoke_successfully(assign_disk,
+ enable_tunneling=True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error assigning disks %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def disk_unassign(self, disks):
+ """
+ Unassign disks.
+ Disk autoassign must be turned off when removing ownership of a disk
+ """
+ if self.use_rest:
+ api = "private/cli/storage/disk/removeowner"
+ for disk in disks: # api requires 1 disk to be removed at a time.
+ body = {
+ 'disk': disk['name']
+ }
+ dummy, error = self.rest_api.post(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ else:
+ unassign_partitions = netapp_utils.zapi.NaElement('disk-sanown-remove-ownership')
+ disk_list = netapp_utils.zapi.NaElement('disk-list')
+
+ for disk in disks:
+ disk_list.add_new_child('disk-name', disk)
+
+ unassign_partitions.add_child_elem(disk_list)
+
+ try:
+ self.server.invoke_successfully(unassign_partitions, enable_tunneling=True)
+
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error unassigning disks %s' % to_native(error))
+ return True
+
+ def apply(self):
+ '''Apply action to disks'''
+ changed = False
+
+ owned_disks = self.get_disks(container_type='owned', node=self.parameters['node'])
+ owned_disks_count = len(owned_disks)
+ unassigned_disks = self.get_disks(container_type='unassigned')
+ owned_spare_disks = self.get_disks(container_type='spare', node=self.parameters['node'])
+
+ needed_disks = None
+ unassign = {
+ 'spare_disks': None,
+ 'unassign_disks': None
+ }
+
+ # unassign disks if more disks are currently owned than requested.
+ if 'disk_count' in self.parameters:
+ if self.parameters['disk_count'] < owned_disks_count:
+ unassign_disks_count = owned_disks_count - self.parameters['disk_count']
+ # check to make sure we will have sufficient spares after the removal.
+ if unassign_disks_count > (len(owned_spare_disks) - self.parameters['min_spares']):
+ self.module.fail_json(msg="disk removal would leave less than %s spares" % self.parameters['min_spares'])
+ # unassign disks.
+ unassign = {
+ 'spare_disks': owned_spare_disks,
+ 'unassign_disks': unassign_disks_count
+ }
+
+ # take spare disks from partner so they can be reassigned to the desired node.
+ elif self.parameters['disk_count'] > (owned_disks_count + len(unassigned_disks)):
+ required_disks_count = self.parameters['disk_count'] - (owned_disks_count + len(unassigned_disks))
+ partner_node_name = self.get_partner_node_name()
+ partner_spare_disks = self.get_disks(container_type='spare', node=partner_node_name)
+
+ if required_disks_count > (len(partner_spare_disks) - self.parameters['min_spares']):
+ self.module.fail_json(msg="not enough disks available")
+ else:
+ unassign = {
+ 'spare_disks': partner_spare_disks,
+ 'unassign_disks': required_disks_count
+ }
+
+ # assign disks to node.
+ if self.parameters['disk_count'] > owned_disks_count:
+ needed_disks = self.parameters['disk_count'] - owned_disks_count
+
+ else:
+ if len(unassigned_disks) >= 1:
+ # assign all unassigned disks to node
+ needed_disks = 0
+
+ # unassign
+ if unassign['spare_disks'] and unassign['unassign_disks']:
+ if not self.module.check_mode:
+ self.disk_unassign(unassign['spare_disks'][0:unassign['unassign_disks']])
+ changed = True
+ # assign
+ if needed_disks is not None:
+ if not self.module.check_mode:
+ self.disk_assign(needed_disks)
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+    ''' Create object and call apply: module entry point for na_ontap_disks. '''
+    obj_aggr = NetAppOntapDisks()
+    obj_aggr.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py
new file mode 100644
index 000000000..67d23cffd
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_dns
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_dns
+short_description: NetApp ONTAP Create, delete, modify DNS servers.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete, modify DNS servers.
+- With REST, the module is currently limited to data vservers for delete or modify operations.
+options:
+ state:
+ description:
+ - Whether the DNS servers should be enabled for the given vserver.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ - With REST, for cluster scoped DNS, omit this option or set it to NULL.
+ - With ZAPI or REST, for cluster scoped DNS, this can also be set to the cluster vserver name.
+ type: str
+
+ domains:
+ description:
+ - List of DNS domains such as 'sales.bar.com'. The first domain is the one that the Vserver belongs to.
+ type: list
+ elements: str
+
+ nameservers:
+ description:
+ - List of IPv4 addresses of name servers such as '123.123.123.123'.
+ type: list
+ elements: str
+
+ skip_validation:
+ type: bool
+ description:
+ - By default, all nameservers are checked to validate they are available to resolve.
+ - If your DNS servers are not yet installed or momentarily not available, you can set this option to 'true'
+ - to bypass the check for all servers specified in nameservers field.
+ - With REST, requires ONTAP 9.9.1 or later and ignored for cluster DNS operations.
+ version_added: 2.8.0
+'''
+
+EXAMPLES = """
+ - name: create or modify DNS
+ netapp.ontap.na_ontap_dns:
+ state: present
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ vserver: "{{vservername}}"
+ domains: sales.bar.com
+ nameservers: 10.193.0.250,10.192.0.250
+ skip_validation: true
+
+ - name: create or modify cluster DNS with REST
+ netapp.ontap.na_ontap_dns:
+ state: present
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ domains: sales.bar.com
+ nameservers: 10.193.0.250,10.192.0.250
+"""
+
+RETURN = """
+
+"""
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
class NetAppOntapDns:
    """
    Enable and Disable dns

    Supports both the REST API (ONTAP 9.6+) and ZAPI back ends.  With REST,
    cluster-scoped DNS and data-vserver DNS use different endpoints, which is
    tracked via the ``is_cluster`` flag.
    """

    def __init__(self):
        # Build the argument spec, create the AnsibleModule, and select the
        # REST or ZAPI transport.  Fails early if ZAPI is selected but the
        # netapp-lib dependency or the mandatory 'vserver' option is missing.
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=False, type='str'),
            domains=dict(required=False, type='list', elements='str'),
            nameservers=dict(required=False, type='list', elements='str'),
            skip_validation=dict(required=False, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # domains and nameservers are only mandatory when creating/updating
            required_if=[('state', 'present', ['domains', 'nameservers'])],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Cluster vserver and data vserver use different REST API.
        self.is_cluster = False

        # REST API should be used for ONTAP 9.6 or higher, ZAPI for lower version
        # skip_validation requires ONTAP 9.9.1 or later with REST.
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, [['skip_validation', (9, 9, 1)]])
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            # ZAPI connections are always scoped to a vserver.
            if not self.parameters.get('vserver'):
                self.module.fail_json(msg="Error: vserver is a required parameter with ZAPI.")
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
        return

    def patch_cluster_dns(self):
        """Create or modify cluster-scoped DNS by PATCHing the 'cluster' endpoint."""
        api = 'cluster'
        body = {
            'dns_domains': self.parameters['domains'],
            'name_servers': self.parameters['nameservers']
        }
        # The cluster endpoint has no skip_config_validation field, so warn
        # rather than silently dropping the option.
        if self.parameters.get('skip_validation'):
            self.module.warn("skip_validation is ignored for cluster DNS operations in REST.")
        dummy, error = rest_generic.patch_async(self.rest_api, api, None, body)
        if error:
            self.module.fail_json(msg="Error updating cluster DNS options: %s" % error)

    def create_dns_rest(self):
        """
        Create DNS server
        :return: none
        """
        if self.is_cluster or not self.parameters.get('vserver'):
            # with 9.13, using scope=cluster with POST on 'name-services/dns' does not work:
            # "svm.uuid" is a required field
            return self.patch_cluster_dns()

        api = 'name-services/dns'
        body = {
            'domains': self.parameters['domains'],
            'servers': self.parameters['nameservers'],
            'svm': {
                'name': self.parameters['vserver']
            }
        }
        if 'skip_validation' in self.parameters:
            body['skip_config_validation'] = self.parameters['skip_validation']
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg="Error creating DNS service: %s" % error)

    def create_dns(self):
        """
        Create DNS server
        :return: none
        """
        if self.use_rest:
            return self.create_dns_rest()

        # ZAPI path: build the net-dns-create request element by element.
        dns = netapp_utils.zapi.NaElement('net-dns-create')
        nameservers = netapp_utils.zapi.NaElement('name-servers')
        domains = netapp_utils.zapi.NaElement('domains')
        for each in self.parameters['nameservers']:
            ip_address = netapp_utils.zapi.NaElement('ip-address')
            ip_address.set_content(each)
            nameservers.add_child_elem(ip_address)
        dns.add_child_elem(nameservers)
        for each in self.parameters['domains']:
            domain = netapp_utils.zapi.NaElement('string')
            domain.set_content(each)
            domains.add_child_elem(domain)
        dns.add_child_elem(domains)
        if self.parameters.get('skip_validation'):
            validation = netapp_utils.zapi.NaElement('skip-config-validation')
            validation.set_content(str(self.parameters['skip_validation']))
            dns.add_child_elem(validation)
        try:
            self.server.invoke_successfully(dns, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating dns: %s' % to_native(error),
                                  exception=traceback.format_exc())

    def destroy_dns_rest(self, dns_attrs):
        """
        Destroys an already created dns
        :return:
        """
        # Cluster DNS cannot be deleted through the name-services/dns endpoint
        # on older releases; fail with an explicit version requirement.
        if self.is_cluster:
            error = 'Error: cluster scope when deleting DNS with REST requires ONTAP 9.9.1 or later.'
            self.module.fail_json(msg=error)
        api = 'name-services/dns'
        dummy, error = rest_generic.delete_async(self.rest_api, api, dns_attrs['uuid'])
        if error:
            self.module.fail_json(msg="Error deleting DNS service: %s" % error)

    def destroy_dns(self, dns_attrs):
        """
        Destroys an already created dns
        :return:
        """
        if self.use_rest:
            return self.destroy_dns_rest(dns_attrs)

        try:
            self.server.invoke_successfully(netapp_utils.zapi.NaElement('net-dns-destroy'), True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error destroying dns: %s' % to_native(error),
                                  exception=traceback.format_exc())

    def get_cluster(self):
        """Return the cluster record (name, uuid, dns_domains, name_servers) via REST."""
        api = "cluster"
        record, error = rest_generic.get_one_record(self.rest_api, api)
        if error:
            self.module.fail_json(msg="Error getting cluster info: %s" % error)
        return record

    def get_cluster_dns(self):
        """
        Read DNS settings from the cluster record.

        Returns a dict with domains/nameservers/uuid when cluster DNS is
        configured (and sets is_cluster), or None when no vserver match or no
        DNS settings exist.
        """
        cluster_attrs = self.get_cluster()
        dns_attrs = None
        # Only treat this as cluster DNS when no vserver was given, or when the
        # given vserver is actually the cluster vserver.
        if not self.parameters.get('vserver') or self.parameters['vserver'] == cluster_attrs['name']:
            dns_attrs = {
                'domains': cluster_attrs.get('dns_domains'),
                'nameservers': cluster_attrs.get('name_servers'),
                'uuid': cluster_attrs['uuid'],
            }
            self.is_cluster = True
            # Both unset means DNS is not configured at the cluster level.
            if dns_attrs['domains'] is None and dns_attrs['nameservers'] is None:
                dns_attrs = None
        return dns_attrs

    def get_dns_rest(self):
        """Fetch current DNS config with REST, falling back to the cluster record when needed."""
        if not self.parameters.get('vserver') and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
            # scope requires 9.9, so revert to cluster API
            return self.get_cluster_dns()

        api = "name-services/dns"
        params = {'fields': 'domains,servers,svm'}
        if self.parameters.get('vserver'):
            # omit scope as vserver may be a cluster vserver
            params['svm.name'] = self.parameters['vserver']
        else:
            params['scope'] = 'cluster'
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg="Error getting DNS service: %s" % error)
        if record:
            return {
                'domains': record['domains'],
                'nameservers': record['servers'],
                'uuid': record['svm']['uuid']
            }
        if self.parameters.get('vserver') and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
            # There is a chance we are working at the cluster level
            return self.get_cluster_dns()
        return None

    def get_dns(self):
        """Return current DNS attributes (dict) or None if DNS is not configured."""
        if self.use_rest:
            return self.get_dns_rest()

        dns_obj = netapp_utils.zapi.NaElement('net-dns-get')
        try:
            result = self.server.invoke_successfully(dns_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) == "15661":
                # 15661 is object not found
                return None
            else:
                self.module.fail_json(msg="Error getting DNS info: %s." % to_native(error), exception=traceback.format_exc())

        # No attributes element means no DNS configuration (implicitly returns None).
        attributes = result.get_child_by_name('attributes')
        if attributes is None:
            return
        dns_info = attributes.get_child_by_name('net-dns-info')
        nameservers = dns_info.get_child_by_name('name-servers')
        attrs = {
            'nameservers': [
                each.get_content() for each in nameservers.get_children()
            ]
        }
        domains = dns_info.get_child_by_name('domains')
        attrs['domains'] = [each.get_content() for each in domains.get_children()]
        return attrs

    def modify_dns_rest(self, dns_attrs):
        """PATCH only the DNS fields that differ from the desired state."""
        if self.is_cluster:
            # Cluster DNS is modified through the cluster endpoint instead.
            return self.patch_cluster_dns()
        body = {}
        if dns_attrs['nameservers'] != self.parameters['nameservers']:
            body['servers'] = self.parameters['nameservers']
        if dns_attrs['domains'] != self.parameters['domains']:
            body['domains'] = self.parameters['domains']
        if 'skip_validation' in self.parameters:
            body['skip_config_validation'] = self.parameters['skip_validation']
        api = "name-services/dns"
        dummy, error = rest_generic.patch_async(self.rest_api, api, dns_attrs['uuid'], body)
        if error:
            self.module.fail_json(msg="Error modifying DNS configuration: %s" % error)

    def modify_dns(self, dns_attrs):
        """Modify DNS settings, sending only the children that changed (ZAPI path)."""
        if self.use_rest:
            return self.modify_dns_rest(dns_attrs)
        dns = netapp_utils.zapi.NaElement('net-dns-modify')
        if dns_attrs['nameservers'] != self.parameters['nameservers']:
            nameservers = netapp_utils.zapi.NaElement('name-servers')
            for each in self.parameters['nameservers']:
                ip_address = netapp_utils.zapi.NaElement('ip-address')
                ip_address.set_content(each)
                nameservers.add_child_elem(ip_address)
            dns.add_child_elem(nameservers)
        if dns_attrs['domains'] != self.parameters['domains']:
            domains = netapp_utils.zapi.NaElement('domains')
            for each in self.parameters['domains']:
                domain = netapp_utils.zapi.NaElement('string')
                domain.set_content(each)
                domains.add_child_elem(domain)
            dns.add_child_elem(domains)
        if self.parameters.get('skip_validation'):
            validation = netapp_utils.zapi.NaElement('skip-config-validation')
            validation.set_content(str(self.parameters['skip_validation']))
            dns.add_child_elem(validation)
        try:
            self.server.invoke_successfully(dns, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying dns: %s' % to_native(error), exception=traceback.format_exc())

    def apply(self):
        """Idempotent driver: diff current vs desired state and create/delete/modify."""
        dns_attrs = self.get_dns()
        cd_action = self.na_helper.get_cd_action(dns_attrs, self.parameters)
        modify = None
        if cd_action is None:
            modify = self.na_helper.get_modified_attributes(dns_attrs, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_dns()
            elif cd_action == 'delete':
                self.destroy_dns(dns_attrs)
            else:
                self.modify_dns(dns_attrs)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Entry point: instantiate the DNS module object and apply the desired state."""
    NetAppOntapDns().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_domain_tunnel.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_domain_tunnel.py
new file mode 100644
index 000000000..67e238794
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_domain_tunnel.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_domain_tunnel
+short_description: NetApp ONTAP domain tunnel
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete or modify the domain tunnel.
+options:
+ state:
+ description:
+ - Whether the domain tunnel should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver that the domain tunnel should be created or deleted on.
+ required: true
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create Domain Tunnel
+ na_ontap_domain_tunnel:
+ state: present
+ vserver: svm1
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapDomainTunnel(object):
    """Manage the ONTAP domain tunnel (AD proxy vserver) through the REST API only."""

    def __init__(self):
        """
        Initialize the ONTAP domain tunnel class
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # This module has no ZAPI fallback: REST (ONTAP 9.7+) is mandatory.
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_domain_tunnel', '9.7'))

    def get_domain_tunnel(self):
        """
        Get the current domain tunnel info

        Returns a dict with the owning 'vserver' name, or None when no domain
        tunnel is configured.
        """
        api = "/security/authentication/cluster/ad-proxy"
        message, error = self.rest_api.get(api)

        if error:
            # NOTE(review): assumes the error is a dict with a 'code' key (REST
            # error format) — confirm against OntapRestAPI.get behavior.
            if int(error['code']) != 4:  # error code 4 is empty table
                self.module.fail_json(msg=error)
        if message:
            # Normalize to the module's parameter name for diffing in apply().
            message = {
                'vserver': message['svm']['name']
            }
            return message
        else:
            return None

    def create_domain_tunnel(self):
        """
        Creates the domain tunnel on the specified vserver
        """
        api = "/security/authentication/cluster/ad-proxy"
        body = {
            "svm": {
                "name": self.parameters['vserver']
            }
        }
        dummy, error = self.rest_api.post(api, body)
        if error:
            self.module.fail_json(msg=error)

    def modify_domain_tunnel(self):
        """
        Modifies the domain tunnel on the specified vserver
        """
        api = "/security/authentication/cluster/ad-proxy"
        body = {
            "svm": {
                "name": self.parameters['vserver']
            }
        }
        dummy, error = self.rest_api.patch(api, body)
        if error:
            self.module.fail_json(msg=error)

    def delete_domain_tunnel(self):
        """
        Deletes the current domain tunnel
        """
        api = "/security/authentication/cluster/ad-proxy"

        dummy, error = self.rest_api.delete(api)
        if error:
            self.module.fail_json(msg=error)

    def apply(self):
        """Idempotent driver: compare current and desired state, then act."""
        current = self.get_domain_tunnel()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed:
            if not self.module.check_mode:
                if cd_action == 'create':
                    self.create_domain_tunnel()
                elif cd_action == 'delete':
                    self.delete_domain_tunnel()
                elif modify:
                    # Only the owning vserver can change; PATCH moves the tunnel.
                    self.modify_domain_tunnel()

        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Entry point: build the domain tunnel module object and run the requested task."""
    NetAppOntapDomainTunnel().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py
new file mode 100644
index 000000000..4e337cb9a
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py
@@ -0,0 +1,414 @@
+#!/usr/bin/python
+
+# (c) 2019-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_efficiency_policy
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_efficiency_policy
+short_description: NetApp ONTAP manage efficiency policies (sis policies)
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Modify/Delete efficiency policies (sis policies)
+options:
+ state:
+ description:
+ - Whether the specified efficiency policy should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ policy_name:
+ description:
+ - the name of the efficiency policy
+ required: true
+ type: str
+
+ comment:
+ description:
+ - A brief description of the policy.
+ type: str
+
+ duration:
+ description:
+ - The duration in hours for which the scheduled efficiency operation should run.
+ After this time expires, the efficiency operation will be stopped even if the operation is incomplete.
+ If '-' is specified as the duration, the efficiency operation will run till it completes. Otherwise, the duration has to be an integer greater than 0.
+ By default, the operation runs till it completes.
+ type: str
+
+ enabled:
+ description:
+ - If the value is true, the efficiency policy is active in this cluster.
+ If the value is false this policy will not be activated by the schedulers and hence will be inactive.
+ type: bool
+
+ policy_type:
+ description:
+ - The policy type reflects the reason a volume using this policy will start processing a changelog.
+ - (Changelog processing is identifying and eliminating duplicate blocks which were written since the changelog was last processed.)
+ - threshold Changelog processing occurs once the changelog reaches a certain percent full.
+ - scheduled Changelog processing will be triggered by time.
+ choices: ['threshold', 'scheduled']
+ type: str
+
+ qos_policy:
+ description:
+ - QoS policy for the efficiency operation.
+ - background efficiency operation will run in background with minimal or no impact on data serving client operations,
+ - best-effort efficiency operations may have some impact on data serving client operations.
+ choices: ['background', 'best_effort']
+ type: str
+
+ schedule:
+ description:
+ - Cron type job schedule name. When the associated policy is set on a volume, the efficiency operation will be triggered for the volume on this schedule.
+ - These schedules can be created using the na_ontap_job_schedule module
+ type: str
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+ changelog_threshold_percent:
+ description:
+ - Specifies the percentage at which the changelog will be processed for a threshold type of policy, tested once each hour.
+ type: int
+ version_added: '19.11.0'
+'''
+
+EXAMPLES = """
+ - name: Create threshold efficiency policy
+ netapp.ontap.na_ontap_efficiency_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ vserver: ansible
+ state: present
+ policy_name: test
+ comment: This policy is for x and y
+ enabled: true
+ policy_type: threshold
+ qos_policy: background
+ changelog_threshold_percent: 20
+
+ - name: Create Scheduled efficiency Policy
+ netapp.ontap.na_ontap_efficiency_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ vserver: ansible
+ state: present
+ policy_name: test2
+ comment: This policy is for x and y
+ enabled: true
+ schedule: new_job_schedule
+ duration: 1
+ policy_type: scheduled
+ qos_policy: background
+"""
+
+RETURN = """
+"""
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapEfficiencyPolicy(object):
    """
    Create, delete and modify efficiency policy

    Uses REST on ONTAP 9.8+ (storage/volume-efficiency-policies) and falls
    back to the ZAPI sis-policy-* APIs on older releases.
    """
    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            policy_name=dict(required=True, type='str'),
            comment=dict(required=False, type='str'),
            duration=dict(required=False, type='str'),
            enabled=dict(required=False, type='bool'),
            policy_type=dict(required=False, choices=['threshold', 'scheduled']),
            qos_policy=dict(required=False, choices=['background', 'best_effort']),
            schedule=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
            changelog_threshold_percent=dict(required=False, type='int')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            # threshold-style and scheduled-style options are mutually exclusive
            mutually_exclusive=[('changelog_threshold_percent', 'duration'), ('changelog_threshold_percent', 'schedule')]
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # Set up Rest API
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # uuid of the existing policy; populated by get_efficiency_policy_rest().
        self.uuid = None

        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0):
            msg = 'REST requires ONTAP 9.8 or later for efficiency_policy APIs.'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)

        # Cross-option validation: a threshold policy cannot carry schedule
        # options and a scheduled policy cannot carry a threshold percent.
        if self.parameters.get('policy_type') and self.parameters['state'] == 'present':
            if self.parameters['policy_type'] == 'threshold':
                if self.parameters.get('duration'):
                    self.module.fail_json(msg="duration cannot be set if policy_type is threshold")
                if self.parameters.get('schedule'):
                    self.module.fail_json(msg='schedule cannot be set if policy_type is threshold')
            # if policy_type is 'scheduled'
            else:
                if self.parameters.get('changelog_threshold_percent'):
                    self.module.fail_json(msg='changelog_threshold_percent cannot be set if policy_type is scheduled')

        # if duration not set for a policy, ZAPI returns "-", whereas REST returns 0.
        # "-" is an invalid value in REST, set to 0 if REST.
        if self.parameters.get('duration') == "-" and self.use_rest:
            self.parameters['duration'] = '0'

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
            self.set_playbook_zapi_key_map()

    def set_playbook_zapi_key_map(self):
        """Map module option names to ZAPI child element names, grouped by value type."""
        self.na_helper.zapi_int_keys = {
            'changelog_threshold_percent': 'changelog-threshold-percent'
        }
        self.na_helper.zapi_str_keys = {
            'policy_name': 'policy-name',
            'comment': 'comment',
            'policy_type': 'policy-type',
            'qos_policy': 'qos-policy',
            'schedule': 'schedule',
            'duration': 'duration'
        }
        self.na_helper.zapi_bool_keys = {
            'enabled': 'enabled'
        }

    def get_efficiency_policy(self):
        """
        Get a efficiency policy
        :return: a efficiency-policy info
        """
        if self.use_rest:
            return self.get_efficiency_policy_rest()
        sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-get-iter")
        query = netapp_utils.zapi.NaElement("query")
        sis_policy_info = netapp_utils.zapi.NaElement("sis-policy-info")
        sis_policy_info.add_new_child("policy-name", self.parameters['policy_name'])
        sis_policy_info.add_new_child("vserver", self.parameters['vserver'])
        query.add_child_elem(sis_policy_info)
        sis_policy_obj.add_child_elem(query)
        try:
            results = self.server.invoke_successfully(sis_policy_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error searching for efficiency policy %s: %s" % (self.parameters['policy_name'], to_native(error)),
                                  exception=traceback.format_exc())
        return_value = {}
        # Expect exactly one match for the (policy_name, vserver) query.
        if results.get_child_by_name('num-records') and int(results.get_child_content('num-records')) == 1:
            attributes_list = results.get_child_by_name('attributes-list')
            sis_info = attributes_list.get_child_by_name('sis-policy-info')
            # Convert each ZAPI string child back to its python type via the key maps.
            for option, zapi_key in self.na_helper.zapi_int_keys.items():
                return_value[option] = self.na_helper.get_value_for_int(from_zapi=True, value=sis_info.get_child_content(zapi_key))
            for option, zapi_key in self.na_helper.zapi_bool_keys.items():
                return_value[option] = self.na_helper.get_value_for_bool(from_zapi=True, value=sis_info.get_child_content(zapi_key))
            for option, zapi_key in self.na_helper.zapi_str_keys.items():
                return_value[option] = sis_info.get_child_content(zapi_key)
            return return_value
        return None

    def get_efficiency_policy_rest(self):
        """Fetch the policy via REST; record its uuid and return normalized attributes or None."""
        api = 'storage/volume-efficiency-policies'
        query = {'name': self.parameters['policy_name'], 'svm.name': self.parameters['vserver']}
        fields = 'name,type,start_threshold_percent,qos_policy,schedule,comment,duration,enabled'
        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
        if error:
            self.module.fail_json(msg="Error searching for efficiency policy %s: %s" % (self.parameters['policy_name'], error))
        if record:
            self.uuid = record['uuid']
            # Normalize REST field names/types to the module's option names.
            current = {
                'policy_name': record['name'],
                'policy_type': record['type'],
                'qos_policy': record['qos_policy'],
                'schedule': record['schedule']['name'] if 'schedule' in record else None,
                'enabled': record['enabled'],
                'duration': str(record['duration']) if 'duration' in record else None,
                'changelog_threshold_percent': record['start_threshold_percent'] if 'start_threshold_percent' in record else None,
                'comment': record['comment']
            }
            return current
        return None

    def create_efficiency_policy(self):
        """
        Creates a efficiency policy
        :return: None
        """
        if self.use_rest:
            return self.create_efficiency_policy_rest()
        sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-create")
        # Serialize each provided option with the type-appropriate converter.
        for option, zapi_key in self.na_helper.zapi_int_keys.items():
            if self.parameters.get(option):
                sis_policy_obj.add_new_child(zapi_key,
                                             self.na_helper.get_value_for_int(from_zapi=False,
                                                                              value=self.parameters[option]))
        for option, zapi_key in self.na_helper.zapi_bool_keys.items():
            if self.parameters.get(option):
                sis_policy_obj.add_new_child(zapi_key,
                                             self.na_helper.get_value_for_bool(from_zapi=False,
                                                                               value=self.parameters[option]))
        for option, zapi_key in self.na_helper.zapi_str_keys.items():
            if self.parameters.get(option):
                sis_policy_obj.add_new_child(zapi_key, str(self.parameters[option]))
        try:
            self.server.invoke_successfully(sis_policy_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error creating efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
                                  exception=traceback.format_exc())

    def create_efficiency_policy_rest(self):
        """POST a new efficiency policy; optional fields come from form_create_or_modify_body()."""
        api = 'storage/volume-efficiency-policies'
        body = {
            'svm.name': self.parameters['vserver'],
            'name': self.parameters['policy_name']
        }
        create_or_modify_body = self.form_create_or_modify_body(self.parameters)
        if create_or_modify_body:
            body.update(create_or_modify_body)
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg="Error creating efficiency policy %s: %s" % (self.parameters["policy_name"], error))

    def form_create_or_modify_body(self, create_or_modify):
        """
        Form body contents for create or modify efficiency policy.

        :param create_or_modify: dict of option values (self.parameters on
            create, the modify diff on modify).
        :return: create or modify body.
        """
        body = {}
        if 'comment' in create_or_modify:
            body['comment'] = create_or_modify['comment']
        if 'duration' in create_or_modify:
            body['duration'] = create_or_modify['duration']
        if 'enabled' in create_or_modify:
            body['enabled'] = create_or_modify['enabled']
        if 'qos_policy' in create_or_modify:
            body['qos_policy'] = create_or_modify['qos_policy']
        if 'schedule' in create_or_modify:
            # REST expects schedule as a nested object, not a plain string.
            body['schedule'] = {'name': create_or_modify['schedule']}
        if 'changelog_threshold_percent' in create_or_modify:
            # REST field name differs from the ZAPI/module option name.
            body['start_threshold_percent'] = create_or_modify['changelog_threshold_percent']
        if 'policy_type' in create_or_modify:
            body['type'] = create_or_modify['policy_type']
        return body

    def delete_efficiency_policy(self):
        """
        Delete a efficiency Policy
        :return: None
        """
        if self.use_rest:
            return self.delete_efficiency_policy_rest()
        sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-delete")
        sis_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
        try:
            self.server.invoke_successfully(sis_policy_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error deleting efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_efficiency_policy_rest(self):
        """DELETE the policy identified by the uuid captured in get_efficiency_policy_rest()."""
        api = 'storage/volume-efficiency-policies'
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid)
        if error:
            self.module.fail_json(msg="Error deleting efficiency policy %s: %s" % (self.parameters["policy_name"], error))

    def modify_efficiency_policy(self, modify):
        """
        Modify a efficiency policy

        :param modify: dict of attributes to change (from get_modified_attributes).
        :return: None
        """
        if self.use_rest:
            return self.modify_efficiency_policy_rest(modify)
        sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-modify")
        sis_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
        for attribute in modify:
            sis_policy_obj.add_new_child(self.attribute_to_name(attribute), str(self.parameters[attribute]))
        try:
            self.server.invoke_successfully(sis_policy_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error modifying efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
                                  exception=traceback.format_exc())

    @staticmethod
    def attribute_to_name(attribute):
        """Translate a snake_case option name to its kebab-case ZAPI element name."""
        return str.replace(attribute, '_', '-')

    def validate_modify(self, current, modify):
        """
        sis-policy-create zapi pre-checks the options and fails if it's not supported.
        sis-policy-modify pre-checks one of the options, but tries to modify the others even it's not supported. And it will mess up the vsim.
        Do the checks before sending to the zapi.
        This checks applicable for REST modify too.
        """
        if current['policy_type'] == 'scheduled' and self.parameters.get('policy_type') != 'threshold':
            if modify.get('changelog_threshold_percent'):
                self.module.fail_json(msg="changelog_threshold_percent cannot be set if policy_type is scheduled")
        elif current['policy_type'] == 'threshold' and self.parameters.get('policy_type') != 'scheduled':
            if modify.get('duration'):
                self.module.fail_json(msg="duration cannot be set if policy_type is threshold")
            elif modify.get('schedule'):
                self.module.fail_json(msg="schedule cannot be set if policy_type is threshold")

    def modify_efficiency_policy_rest(self, modify):
        """PATCH only the changed attributes of the existing policy."""
        api = 'storage/volume-efficiency-policies'
        body = self.form_create_or_modify_body(modify)
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
        if error:
            self.module.fail_json(msg="Error modifying efficiency policy %s: %s" % (self.parameters["policy_name"], error))

    def apply(self):
        """Idempotent driver: diff current vs desired state and create/delete/modify."""
        current = self.get_efficiency_policy()
        modify = None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            if modify:
                # Reject modifications that conflict with the current policy type.
                self.validate_modify(current, modify)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_efficiency_policy()
            elif cd_action == 'delete':
                self.delete_efficiency_policy()
            elif modify:
                self.modify_efficiency_policy(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Entry point: build the efficiency policy module object and apply it."""
    NetAppOntapEfficiencyPolicy().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_destination.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_destination.py
new file mode 100644
index 000000000..76ddfa31b
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_destination.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_ems_destination
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_ems_destination
+short_description: NetApp ONTAP configuration for EMS event destination
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.23.0
+author: Bartosz Bielawski (@bielawb) <bartek.bielawski@live.com>
+description:
+ - Configure EMS destination. Currently certificate authentication for REST is not supported.
+options:
+ state:
+ description:
+ - Whether the destination should be present or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - Name of the EMS destination.
+ required: true
+ type: str
+ type:
+ description:
+ - Type of the EMS destination.
+ choices: ['email', 'syslog', 'rest_api']
+ required: true
+ type: str
+ destination:
+ description:
+ - Destination - content depends on the type.
+ required: true
+ type: str
+ filters:
+ description:
+ - List of filters that destination is linked to.
+ required: true
+ type: list
+ elements: str
+'''
+
+EXAMPLES = """
+ - name: Configure REST EMS destination
+ netapp.ontap.na_ontap_ems_destination:
+ state: present
+ name: rest
+ type: rest_api
+ filters: ['important_events']
+ destination: http://my.rest.api/address
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+
+ - name: Remove email EMS destination
+ netapp.ontap.na_ontap_ems_destination:
+ state: absent
+ name: email_destination
+ type: email
+ filters: ['important_events']
+ destination: netapp@company.com
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+"""
+
+RETURN = """
+
+"""
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapEmsDestination:
+ """Create/Modify/Remove EMS destination"""
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ type=dict(required=True, type='str', choices=['email', 'syslog', 'rest_api']),
+ destination=dict(required=True, type='str'),
+ filters=dict(required=True, type='list', elements='str')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg='na_ontap_ems_destination is only supported with REST API')
+
+ def fail_on_error(self, error, action):
+ if error is None:
+ return
+ self.module.fail_json(msg="Error %s: %s" % (action, error))
+
+ def generate_filters_list(self, filters):
+ return [{'name': filter} for filter in filters]
+
+ def get_ems_destination(self, name):
+ api = 'support/ems/destinations'
+ fields = 'name,type,destination,filters.name'
+ query = dict(name=name, fields=fields)
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ self.fail_on_error(error, 'fetching EMS destination for %s' % name)
+ if record:
+ current = {
+ 'name': self.na_helper.safe_get(record, ['name']),
+ 'type': self.na_helper.safe_get(record, ['type']),
+ 'destination': self.na_helper.safe_get(record, ['destination']),
+ 'filters': None
+ }
+ # 9.9.0 and earlier versions returns rest-api, convert it to rest_api.
+ if current['type'] and '-' in current['type']:
+ current['type'] = current['type'].replace('-', '_')
+ if self.na_helper.safe_get(record, ['filters']):
+ current['filters'] = [filter['name'] for filter in record['filters']]
+ return current
+ return None
+
+ def create_ems_destination(self):
+ api = 'support/ems/destinations'
+ name = self.parameters['name']
+ body = {
+ 'name': name,
+ 'type': self.parameters['type'],
+ 'destination': self.parameters['destination'],
+ 'filters': self.generate_filters_list(self.parameters['filters'])
+ }
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ self.fail_on_error(error, 'creating EMS destinations for %s' % name)
+
+ def delete_ems_destination(self, name):
+ api = 'support/ems/destinations'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, name)
+ self.fail_on_error(error, 'deleting EMS destination for %s' % name)
+
+ def modify_ems_destination(self, name, modify):
+ if 'type' in modify:
+ # changing type is not supported
+ self.delete_ems_destination(name)
+ self.create_ems_destination()
+ else:
+ body = {}
+ for option in modify:
+ if option == 'filters':
+ body[option] = self.generate_filters_list(modify[option])
+ else:
+ body[option] = modify[option]
+ if body:
+ api = 'support/ems/destinations'
+ dummy, error = rest_generic.patch_async(self.rest_api, api, name, body)
+ self.fail_on_error(error, 'modifying EMS destination for %s' % name)
+
+ def apply(self):
+ name = None
+ modify = None
+ current = self.get_ems_destination(self.parameters['name'])
+ name = self.parameters['name']
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ saved_modify = str(modify)
+ if self.na_helper.changed and not self.module.check_mode:
+ if modify:
+ self.modify_ems_destination(name, modify)
+ elif cd_action == 'create':
+ self.create_ems_destination()
+ else:
+ self.delete_ems_destination(name)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, saved_modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ obj = NetAppOntapEmsDestination()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_filter.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_filter.py
new file mode 100644
index 000000000..bdd3a73c3
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_filter.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+
+# (c) 2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_ems_filter
+short_description: NetApp ONTAP EMS Filter
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 22.4.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create, delete, or modify EMS filters on NetApp ONTAP. This module only supports REST.
+notes:
+ - This module only supports REST.
+
+options:
+ state:
+ description:
+      - Whether the specified EMS filter should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - Name of the EMS Filter
+ required: True
+ type: str
+
+ rules:
+ description: List of EMS filter rules
+ type: list
+ elements: dict
+ suboptions:
+ index:
+ description: Index of rule
+ type: int
+ required: True
+ type:
+ description: The type of rule
+ type: str
+ choices: ['include', 'exclude']
+ required: True
+ message_criteria:
+        description: Message criteria for the EMS filter; at least one of severities or name_pattern is required when creating an EMS filter.
+ type: dict
+ suboptions:
+ severities:
+ description: comma separated string of severities this rule applies to
+ type: str
+ name_pattern:
+ description: Name pattern to apply rule to
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create EMS filter
+ netapp.ontap.na_ontap_ems_filter:
+ state: present
+ name: carchi_ems
+ rules:
+ - index: 1
+ type: include
+ message_criteria:
+ severities: "error"
+ name_pattern: "callhome.*"
+ - index: 2
+ type: include
+ message_criteria:
+ severities: "EMERGENCY"
+
+ - name: Modify EMS filter add rule
+ netapp.ontap.na_ontap_ems_filter:
+ state: present
+ name: carchi_ems
+ rules:
+ - index: 1
+ type: include
+ message_criteria:
+ severities: "error"
+ name_pattern: "callhome.*"
+ - index: 2
+ type: include
+ message_criteria:
+ severities: "EMERGENCY"
+ - index: 3
+ type: include
+ message_criteria:
+ severities: "ALERT"
+
+ - name: Delete EMS Filter
+ netapp.ontap.na_ontap_ems_filter:
+ state: absent
+ name: carchi_ems
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapEMSFilters:
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ rules=dict(type='list', elements='dict', options=dict(
+ index=dict(required=True, type="int"),
+ type=dict(required=True, type="str", choices=['include', 'exclude']),
+ message_criteria=dict(type="dict", options=dict(
+ severities=dict(required=False, type="str"),
+ name_pattern=dict(required=False, type="str")
+ ))
+ ))
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule(self.module)
+ self.parameters = self.na_helper.check_and_set_parameters(self.module)
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+ if not self.use_rest:
+ self.module.fail_json(msg="This module require REST with ONTAP 9.6 or higher")
+
+ def get_ems_filter(self):
+ api = 'support/ems/filters'
+ params = {'name': self.parameters['name'],
+ 'fields': "rules"}
+ record, error = rest_generic.get_one_record(self.rest_api, api, params)
+ if error:
+ self.module.fail_json(msg="Error fetching ems filter %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ return record
+
+ def create_ems_filter(self):
+ api = 'support/ems/filters'
+ body = {'name': self.parameters['name']}
+ if self.parameters.get('rules'):
+ body['rules'] = self.na_helper.filter_out_none_entries(self.parameters['rules'])
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg="Error creating EMS filter %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_ems_filter(self):
+ api = 'support/ems/filters'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['name'])
+ if error:
+ self.module.fail_json(msg='Error deleting EMS filter %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_ems_filter(self):
+ # only variable other than name is rules, so if we hit this we know rules has been changed
+ api = 'support/ems/filters'
+ body = {'rules': self.na_helper.filter_out_none_entries(self.parameters['rules'])}
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body)
+ if error:
+ self.module.fail_json(msg='Error modifying EMS filter %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def find_modify(self, current):
+ # The normal modify will not work for 2 reasons
+ # First ems filter will add a new rule at the end that excludes everything that there isn't a rule for
+ # Second Options that are not given are returned as '*' in rest
+ if not current:
+ return False
+ # Modify Current to remove auto added rule, from testing it always appears to be the last element
+ if current.get('rules'):
+ current['rules'].pop()
+ # Next check if both have no rules
+ if current.get('rules') is None and self.parameters.get('rules') is None:
+ return False
+ # Next let check if rules is the same size if not we need to modify
+ if len(current.get('rules')) != len(self.parameters.get('rules')):
+ return True
+ # Next let put the current rules in a dictionary by rule number
+ current_rules = self.dic_of_rules(current)
+ # Now we need to compare each field to see if there is a match
+ modify = False
+ for rule in self.parameters['rules']:
+ # allow modify if a desired rule index may not exist in current rules.
+ # when testing found only index 1, 2 are allowed, if try to set index other than this, let REST throw error.
+ if current_rules.get(rule['index']) is None:
+ modify = True
+ break
+ # Check if types are the same
+ if rule['type'].lower() != current_rules[rule['index']]['type'].lower():
+ modify = True
+ break
+ if rule.get('message_criteria'):
+ if rule['message_criteria'].get('severities') and rule['message_criteria']['severities'].lower() != \
+ current_rules[rule['index']]['message_criteria']['severities'].lower():
+ modify = True
+ break
+ if rule['message_criteria'].get('name_pattern') and rule['message_criteria']['name_pattern'] != \
+ current_rules[rule['index']]['message_criteria']['name_pattern']:
+ modify = True
+ break
+ return modify
+
+ def dic_of_rules(self, current):
+ rules = {}
+ for rule in current['rules']:
+ rules[rule['index']] = rule
+ return rules
+
+ def apply(self):
+ current = self.get_ems_filter()
+ cd_action, modify = None, False
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None:
+ modify = self.find_modify(current)
+ if modify:
+ self.na_helper.changed = True
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_ems_filter()
+ if cd_action == 'delete':
+ self.delete_ems_filter()
+ if modify:
+ self.modify_ems_filter()
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ '''Apply volume operations from playbook'''
+ obj = NetAppOntapEMSFilters()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py
new file mode 100644
index 000000000..3b182e13c
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_export_policy
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_export_policy
+short_description: NetApp ONTAP manage export-policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create or destroy or rename export-policies on ONTAP
+options:
+ state:
+ description:
+ - Whether the specified export policy should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - The name of the export-policy to manage.
+ type: str
+ required: true
+ from_name:
+ description:
+ - The name of the export-policy to be renamed.
+ type: str
+ version_added: 2.7.0
+ vserver:
+ required: true
+ type: str
+ description:
+ - Name of the vserver to use.
+'''
+
+EXAMPLES = """
+ - name: Create Export Policy
+ netapp.ontap.na_ontap_export_policy:
+ state: present
+ name: ansiblePolicyName
+ vserver: vs_hack
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Rename Export Policy
+ netapp.ontap.na_ontap_export_policy:
+ state: present
+ from_name: ansiblePolicyName
+ vserver: vs_hack
+ name: newPolicyName
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete Export Policy
+ netapp.ontap.na_ontap_export_policy:
+ state: absent
+ name: ansiblePolicyName
+ vserver: vs_hack
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPExportPolicy():
+ """
+ Class with export policy methods
+ """
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str', default=None),
+ vserver=dict(required=True, type='str')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ elif HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_export_policy(self, name=None):
+ """
+ Return details about the export-policy
+ :param:
+ name : Name of the export-policy
+ :return: Details about the export-policy. None if not found.
+ :rtype: dict
+ """
+ if name is None:
+ name = self.parameters['name']
+ if self.use_rest:
+ return self.get_export_policy_rest(name)
+ else:
+ export_policy_iter = netapp_utils.zapi.NaElement('export-policy-get-iter')
+ export_policy_info = netapp_utils.zapi.NaElement('export-policy-info')
+ export_policy_info.add_new_child('policy-name', name)
+ export_policy_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(export_policy_info)
+ export_policy_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(export_policy_iter, True)
+ return_value = None
+ # check if query returns the expected export-policy
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+ export_policy = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'export-policy-info').get_child_by_name('policy-name')
+ return_value = {
+ 'policy-name': export_policy
+ }
+ return return_value
+
+ def create_export_policy(self):
+ """
+ Creates an export policy
+ """
+ if self.use_rest:
+ return self.create_export_policy_rest()
+ export_policy_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-policy-create', **{'policy-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(export_policy_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on creating export-policy %s: %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_export_policy(self, current):
+ """
+ Delete export-policy
+ """
+ if self.use_rest:
+ return self.delete_export_policy_rest(current)
+ export_policy_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-policy-destroy', **{'policy-name': self.parameters['name'], })
+ try:
+ self.server.invoke_successfully(export_policy_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on deleting export-policy %s: %s'
+ % (self.parameters['name'],
+ to_native(error)), exception=traceback.format_exc())
+
+ def rename_export_policy(self):
+ """
+ Rename the export-policy.
+ """
+ export_policy_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'export-policy-rename', **{'policy-name': self.parameters['from_name'],
+ 'new-policy-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(export_policy_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on renaming export-policy %s:%s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_export_policy_rest(self, name):
+ options = {'fields': 'name,id',
+ 'svm.name': self.parameters['vserver'],
+ 'name': name}
+ api = 'protocols/nfs/export-policies/'
+ record, error = rest_generic.get_one_record(self.rest_api, api, options)
+ if error:
+ self.module.fail_json(msg="Error on fetching export policy: %s" % error)
+ if record:
+ return {
+ 'name': record['name'],
+ 'id': record['id']
+ }
+ else:
+ return record
+
+ def create_export_policy_rest(self):
+ params = {'name': self.parameters['name'],
+ 'svm.name': self.parameters['vserver']}
+ api = 'protocols/nfs/export-policies'
+ dummy, error = rest_generic.post_async(self.rest_api, api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on creating export policy: %s" % error)
+
+ def delete_export_policy_rest(self, current):
+ policy_id = current['id']
+ api = 'protocols/nfs/export-policies'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, policy_id)
+ if error is not None:
+ self.module.fail_json(msg=" Error on deleting export policy: %s" % error)
+
+ def rename_export_policy_rest(self, current):
+ policy_id = current['id']
+ params = {'name': self.parameters['name']}
+ api = 'protocols/nfs/export-policies'
+ dummy, error = rest_generic.patch_async(self.rest_api, api, policy_id, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on renaming export policy: %s" % error)
+
+ def apply(self):
+ """
+ Apply action to export-policy
+ """
+ modify, rename = None, None
+ current = self.get_export_policy()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action == 'create' and self.parameters.get('from_name'):
+ current = self.get_export_policy(self.parameters['from_name'])
+ if current is None:
+ self.module.fail_json(
+ msg="Error renaming: export policy %s does not exist" % self.parameters['from_name'])
+ rename = True
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if rename:
+ modify = {'name': self.parameters['name']}
+ if self.use_rest:
+ self.rename_export_policy_rest(current)
+ else:
+ self.rename_export_policy()
+ elif cd_action == 'create':
+ self.create_export_policy()
+ elif cd_action == 'delete':
+ self.delete_export_policy(current)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Execute action
+ """
+ export_policy = NetAppONTAPExportPolicy()
+ export_policy.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py
new file mode 100644
index 000000000..8b9414074
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py
@@ -0,0 +1,745 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_export_policy_rule
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_export_policy_rule
+
+short_description: NetApp ONTAP manage export policy rules
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create or delete or modify export rules in ONTAP
+
+options:
+ state:
+ description:
+ - Whether the specified export policy rule should exist or not.
+ required: false
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the export policy this rule will be added to (or modified, or removed from).
+ required: True
+ type: str
+ aliases:
+ - policy_name
+
+ client_match:
+ description:
+ - List of Client Match host names, IP Addresses, Netgroups, or Domains.
+ type: list
+ elements: str
+
+ anonymous_user_id:
+ description:
+ - User name or ID to which anonymous users are mapped. Default value is '65534'.
+ type: str
+
+ ro_rule:
+ description:
+ - List of Read only access specifications for the rule
+ choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys']
+ type: list
+ elements: str
+
+ rw_rule:
+ description:
+ - List of Read Write access specifications for the rule
+ choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys']
+ type: list
+ elements: str
+
+ super_user_security:
+ description:
+      - List of Super User access specifications for the rule
+ choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys']
+ type: list
+ elements: str
+
+ allow_suid:
+ description:
+ - If 'true', NFS server will honor SetUID bits in SETATTR operation. Default value on creation is 'true'
+ type: bool
+
+ protocol:
+ description:
+ - List of Client access protocols.
+ - Default value is set to 'any' during create.
+ choices: [any,nfs,nfs3,nfs4,cifs,flexcache]
+ type: list
+ elements: str
+ aliases:
+ - protocols
+
+ rule_index:
+ description:
+ - Index of the export policy rule.
+ - When rule_index is not set, we try to find a rule with an exact match.
+ If found, no action is taken with state set to present, and the rule is deleted with state set to absent.
+ An error is reported if more than one rule is found.
+ - When rule_index is set and state is present, if a rule cannot be found with this index,
+ we try to find a rule with an exact match and assign the index to this rule if found.
+ If no match is found, a new rule is created.
+ - All attributes that are set are used for an exact match. As a minimum, client_match, ro_rule, and rw_rule are required.
+ type: int
+
+ from_rule_index:
+ description:
+ - index of the export policy rule to be re-indexed
+ type: int
+ version_added: 21.20.0
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+ ntfs_unix_security:
+ description:
+ - NTFS export UNIX security options.
+ - With REST, supported from ONTAP 9.9.1 version.
+ type: str
+ choices: ['fail', 'ignore']
+ version_added: 21.18.0
+
+ force_delete_on_first_match:
+ description:
+ - when rule_index is not set, the default is to report an error on multiple matches.
+ - when this option is set, one of the rules with an exact match is deleted when state is absent.
+ - ignored when state is present.
+ type: bool
+ default: false
+ version_added: 21.23.0
+
+ chown_mode:
+ description:
+ - Specifies who is authorized to change the ownership mode of a file.
+ - With REST, supported from ONTAP 9.9.1 version.
+ type: str
+ choices: ['restricted', 'unrestricted']
+ version_added: 22.0.0
+
+ allow_device_creation:
+ description:
+ - Specifies whether or not device creation is allowed.
+ - default is true.
+ - With REST, supported from ONTAP 9.9.1 version.
+ type: bool
+ version_added: 22.0.0
+'''
+
+EXAMPLES = """
+ - name: Create ExportPolicyRule
+ netapp.ontap.na_ontap_export_policy_rule:
+ state: present
+ name: default123
+ rule_index: 100
+ vserver: ci_dev
+ client_match: 0.0.0.0/0,1.1.1.0/24
+ ro_rule: krb5,krb5i
+ rw_rule: any
+ protocol: nfs,nfs3
+ super_user_security: any
+ anonymous_user_id: 65534
+ allow_suid: true
+ ntfs_unix_security: ignore
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify ExportPolicyRule
+ netapp.ontap.na_ontap_export_policy_rule:
+ state: present
+ name: default123
+ rule_index: 100
+ client_match: 0.0.0.0/0
+ anonymous_user_id: 65521
+ ro_rule: ntlm
+ rw_rule: any
+ protocol: any
+ allow_suid: false
+ ntfs_unix_security: fail
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: rename ExportPolicyRule index
+ netapp.ontap.na_ontap_export_policy_rule:
+ state: present
+ name: default123
+ from_rule_index: 100
+ rule_index: 99
+ client_match: 0.0.0.0/0
+ anonymous_user_id: 65521
+ ro_rule: ntlm
+ rw_rule: any
+ protocol: any
+ allow_suid: false
+ ntfs_unix_security: fail
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete ExportPolicyRule
+ netapp.ontap.na_ontap_export_policy_rule:
+ state: absent
+ name: default123
+ rule_index: 99
+ vserver: ci_dev
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+class NetAppontapExportRule:
+ ''' object initialize and class methods '''
+
+    def __init__(self):
+        """Build the argument spec, select REST or ZAPI, and validate rule-matching prerequisites."""
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            name=dict(required=True, type='str', aliases=['policy_name']),
+            protocol=dict(required=False,
+                          type='list', elements='str', default=None,
+                          choices=['any', 'nfs', 'nfs3', 'nfs4', 'cifs', 'flexcache'],
+                          aliases=['protocols']),
+            client_match=dict(required=False, type='list', elements='str'),
+            ro_rule=dict(required=False,
+                         type='list', elements='str', default=None,
+                         choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']),
+            rw_rule=dict(required=False,
+                         type='list', elements='str', default=None,
+                         choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']),
+            super_user_security=dict(required=False,
+                                     type='list', elements='str', default=None,
+                                     choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']),
+            allow_suid=dict(required=False, type='bool'),
+            from_rule_index=dict(required=False, type='int'),
+            rule_index=dict(required=False, type='int'),
+            anonymous_user_id=dict(required=False, type='str'),
+            vserver=dict(required=True, type='str'),
+            ntfs_unix_security=dict(required=False, type='str', choices=['fail', 'ignore']),
+            force_delete_on_first_match=dict(required=False, type='bool', default=False),
+            chown_mode=dict(required=False, type='str', choices=['restricted', 'unrestricted']),
+            allow_device_creation=dict(required=False, type='bool'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        self.set_playbook_zapi_key_map()
+        # policy id is resolved lazily by the REST code paths
+        self.policy_id = None
+
+        self.rest_api = netapp_utils.OntapRestAPI(self.module)
+        # these options are only available over REST with ONTAP 9.9.1 or later
+        partially_supported_rest_properties = [['ntfs_unix_security', (9, 9, 1)], ['allow_suid', (9, 9, 1)],
+                                               ['allow_device_creation', (9, 9, 1)], ['chown_mode', (9, 9, 1)]]
+        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties)
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+        # without rule_index, an exact match on attributes is required, so the matching keys must be present
+        if 'rule_index' not in self.parameters:
+            self.fail_on_missing_required_params('matching (as rule_index is not specified) or creating')
+
def fail_on_missing_required_params(self, action):
    """Fail the module when any of client_match/ro_rule/rw_rule is absent for the given action."""
    required_options = ('client_match', 'ro_rule', 'rw_rule')
    absent = [option for option in required_options if self.parameters.get(option) is None]
    if not absent:
        return
    suffix = 's' if len(absent) > 1 else ''
    self.module.fail_json(msg='Error: Missing required option%s for %s export policy rule: %s' % (suffix, action, ', '.join(absent)))
+
def set_playbook_zapi_key_map(self):
    """Map playbook option names onto their ZAPI element names, grouped by value type."""
    string_keys = {
        'anonymous_user_id': 'anonymous-user-id',
        'client_match': 'client-match',
        'name': 'policy-name',
        'ntfs_unix_security': 'export-ntfs-unix-security-ops',
        'chown_mode': 'export-chown-mode'
    }
    # list options map to a (parent element, child element) pair
    list_keys = {
        'protocol': ('protocol', 'access-protocol'),
        'ro_rule': ('ro-rule', 'security-flavor'),
        'rw_rule': ('rw-rule', 'security-flavor'),
        'super_user_security': ('super-user-security', 'security-flavor'),
    }
    bool_keys = {
        'allow_suid': 'is-allow-set-uid-enabled',
        'allow_device_creation': 'is-allow-dev-is-enabled'
    }
    int_keys = {
        'rule_index': 'rule-index'
    }
    self.na_helper.zapi_string_keys = string_keys
    self.na_helper.zapi_list_keys = list_keys
    self.na_helper.zapi_bool_keys = bool_keys
    self.na_helper.zapi_int_keys = int_keys
+
+ @staticmethod
+ def set_dict_when_not_none(query, key, value):
+ if value is not None:
+ query[key] = value
+
+ @staticmethod
+ def list_to_string(alist):
+ return ','.join(alist).replace(' ', '') if alist else ''
+
def set_query_parameters(self, rule_index):
    """
    Build the ZAPI query for export-rule-get-iter.
    When rule_index is given, match on it directly; otherwise match on the
    rule attributes provided in the playbook.
    :return: dict suitable for NaElement.translate_struct.
    """
    query = {
        'policy-name': self.parameters['name'],
        'vserver': self.parameters['vserver']
    }
    if rule_index is not None:
        query['rule-index'] = rule_index
    else:
        for item_key, value in self.parameters.items():
            zapi_key = None
            if item_key in self.na_helper.zapi_string_keys and item_key != 'client_match':
                # ignore client_match as ZAPI query is string based and preserves order
                zapi_key = self.na_helper.zapi_string_keys[item_key]
            elif item_key in self.na_helper.zapi_bool_keys:
                zapi_key = self.na_helper.zapi_bool_keys[item_key]
                value = self.na_helper.get_value_for_bool(from_zapi=False, value=value)
            # skipping int keys to not include rule index in query as we're matching on attributes
            elif item_key in self.na_helper.zapi_list_keys:
                # list options become a list of single-entry {child: item} dicts
                zapi_key, child_key = self.na_helper.zapi_list_keys[item_key]
                value = [{child_key: item} for item in value] if value else None
            if zapi_key:
                self.set_dict_when_not_none(query, zapi_key, value)

    return {
        'query': {
            'export-rule-info': query
        }
    }
+
def get_export_policy_rule(self, rule_index):
    """
    Return details about the export policy rule.
    If rule_index is None, the rule is matched on its attributes instead.
    :param rule_index: index of the rule, or None.
    :return: dict of rule details, or None if not found.
    :rtype: dict
    """
    if self.use_rest:
        return self.get_export_policy_rule_rest(rule_index)
    result = None
    rule_iter = netapp_utils.zapi.NaElement('export-rule-get-iter')
    query = self.set_query_parameters(rule_index)
    rule_iter.translate_struct(query)
    try:
        result = self.server.invoke_successfully(rule_iter, True)
    except netapp_utils.zapi.NaApiError as error:
        self.module.fail_json(msg='Error getting export policy rule %s: %s'
                              % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())
    if result is not None and result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
        if rule_index is None:
            # attribute based lookup may return several candidates; keep only an exact match
            return self.match_export_policy_rule_exactly(result.get_child_by_name('attributes-list').get_children(), query, is_rest=False)
        return self.zapi_export_rule_info_to_dict(result.get_child_by_name('attributes-list').get_child_by_name('export-rule-info'))
    return None
+
def zapi_export_rule_info_to_dict(self, rule_info):
    """Convert an export-rule-info ZAPI element into a dict keyed by module option names."""
    current = {}
    for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
        current[item_key] = rule_info.get_child_content(zapi_key)
        if item_key == 'client_match' and current[item_key]:
            # ZAPI returns client-match as a single comma separated string
            current[item_key] = current[item_key].split(',')
    for item_key, zapi_key in self.na_helper.zapi_bool_keys.items():
        current[item_key] = self.na_helper.get_value_for_bool(from_zapi=True,
                                                             value=rule_info[zapi_key])
    for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
        current[item_key] = self.na_helper.get_value_for_int(from_zapi=True,
                                                            value=rule_info[zapi_key])
    for item_key, zapi_key in self.na_helper.zapi_list_keys.items():
        parent, dummy = zapi_key
        current[item_key] = self.na_helper.get_value_for_list(from_zapi=True,
                                                             zapi_parent=rule_info.get_child_by_name(parent))
    return current
+
def set_export_policy_id(self):
    """
    Fetch the export policy id and cache it in self.policy_id.
    No-op when the id is already cached; leaves it None when the policy
    does not exist (the caller may then create the policy).
    """
    if self.policy_id is not None:
        return
    if self.use_rest:
        return self.set_export_policy_id_rest()
    export_policy_iter = netapp_utils.zapi.NaElement('export-policy-get-iter')
    attributes = {
        'query': {
            'export-policy-info': {
                'policy-name': self.parameters['name'],
                'vserver': self.parameters['vserver']
            }
        }
    }

    export_policy_iter.translate_struct(attributes)
    try:
        result = self.server.invoke_successfully(export_policy_iter, True)
    except netapp_utils.zapi.NaApiError as error:
        self.module.fail_json(msg='Error getting export policy %s: %s'
                              % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())

    if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
        self.policy_id = self.na_helper.safe_get(result, ['attributes-list', 'export-policy-info', 'policy-id'])
        # A policy was found but without an id: report the raw response rather than continuing blindly.
        if self.policy_id is None:
            self.module.fail_json(msg='Error getting export policy id for %s: got: %s.'
                                  % (self.parameters['name'], result.to_string()))
+
def add_parameters_for_create_or_modify(self, na_element_object, params):
    """
    Add children nodes for a create or modify NaElement object.
    :param na_element_object: modify or create NaElement object
    :param params: dictionary of export rule option values to be added
    :return: None
    """
    for key, value in params.items():
        if key in self.na_helper.zapi_string_keys:
            zapi_key = self.na_helper.zapi_string_keys.get(key)
            # convert client_match list to comma-separated string
            if value and key == 'client_match':
                value = self.list_to_string(value)
        elif key in self.na_helper.zapi_list_keys:
            # list options are converted to a ZAPI parent element with one child per item
            parent_key, child_key = self.na_helper.zapi_list_keys.get(key)
            value = self.na_helper.get_value_for_list(from_zapi=False, zapi_parent=parent_key, zapi_child=child_key, data=value)
        elif key in self.na_helper.zapi_int_keys:
            zapi_key = self.na_helper.zapi_int_keys.get(key)
            value = self.na_helper.get_value_for_int(from_zapi=False, value=value)
        elif key in self.na_helper.zapi_bool_keys:
            zapi_key = self.na_helper.zapi_bool_keys.get(key)
            value = self.na_helper.get_value_for_bool(from_zapi=False, value=value)
        else:
            # ignore options that are not relevant
            value = None

        if value is not None:
            if key in self.na_helper.zapi_list_keys:
                # list values are already NaElement children
                na_element_object.add_child_elem(value)
            else:
                na_element_object[zapi_key] = value
+
def create_export_policy_rule(self):
    """
    Create a rule in the export policy (REST or ZAPI).
    """
    if self.use_rest:
        return self.create_export_policy_rule_rest()
    rule_create = netapp_utils.zapi.NaElement('export-rule-create')
    self.add_parameters_for_create_or_modify(rule_create, self.parameters)
    try:
        self.server.invoke_successfully(rule_create, enable_tunneling=True)
    except netapp_utils.zapi.NaApiError as error:
        self.module.fail_json(
            msg='Error creating export policy rule %s: %s' % (self.parameters['name'], to_native(error)),
            exception=traceback.format_exc())
+
def create_export_policy(self):
    """
    Create the export policy itself (REST or ZAPI).
    """
    if self.use_rest:
        return self.create_export_policy_rest()
    policy_create = netapp_utils.zapi.NaElement.create_node_with_children(
        'export-policy-create', **{'policy-name': self.parameters['name']})
    try:
        self.server.invoke_successfully(policy_create, enable_tunneling=True)
    except netapp_utils.zapi.NaApiError as error:
        self.module.fail_json(
            msg='Error creating export policy %s: %s' % (self.parameters['name'], to_native(error)),
            exception=traceback.format_exc())
+
def delete_export_policy_rule(self, rule_index):
    """
    Delete the rule at rule_index from the export policy (REST or ZAPI).
    """
    if self.use_rest:
        return self.delete_export_policy_rule_rest(rule_index)
    rule_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
        'export-rule-destroy', **{'policy-name': self.parameters['name'],
                                  'rule-index': str(rule_index)})
    try:
        self.server.invoke_successfully(rule_destroy, enable_tunneling=True)
    except netapp_utils.zapi.NaApiError as error:
        self.module.fail_json(
            msg='Error deleting export policy rule %s: %s' % (self.parameters['name'], to_native(error)),
            exception=traceback.format_exc())
+
def modify_export_policy_rule(self, params, rule_index=None, rename=False):
    '''
    Modify an existing export policy rule.
    :param params: dict() of attributes with desired values
    :param rule_index: index of the rule to modify
    :param rename: when True, move the rule from from_rule_index to rule_index
    :return: None
    '''
    if self.use_rest:
        return self.modify_export_policy_rule_rest(params, rule_index, rename)
    # rule_index identifies the rule; it is not a modifiable attribute.
    params.pop('rule_index', None)
    if params:
        export_rule_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'export-rule-modify', **{'policy-name': self.parameters['name'],
                                     'rule-index': str(rule_index)})
        self.add_parameters_for_create_or_modify(export_rule_modify, params)
        try:
            self.server.invoke_successfully(export_rule_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying export policy rule index %s: %s'
                                  % (rule_index, to_native(error)),
                                  exception=traceback.format_exc())
    if rename:
        # re-index in a second call, using export-rule-set-index
        export_rule_set_index = netapp_utils.zapi.NaElement.create_node_with_children(
            'export-rule-set-index', **{'policy-name': self.parameters['name'],
                                        'rule-index': str(self.parameters['from_rule_index']),
                                        'new-rule-index': str(self.parameters['rule_index'])})
        try:
            self.server.invoke_successfully(export_rule_set_index, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error reindexing export policy rule index %s: %s'
                                  % (self.parameters['from_rule_index'], to_native(error)),
                                  exception=traceback.format_exc())
+
def set_export_policy_id_rest(self):
    """Look up the export policy id via REST and cache it in self.policy_id."""
    if self.policy_id is not None:
        return
    query = {
        'fields': 'name,id',
        'svm.name': self.parameters['vserver'],
        'name': self.parameters['name']
    }
    record, error = rest_generic.get_one_record(self.rest_api, 'protocols/nfs/export-policies', query)
    if error:
        self.module.fail_json(msg="Error on fetching export policy: %s" % error)
    if record:
        self.policy_id = record['id']
+
def get_export_policy_rule_exact_match(self, query):
    """ fetch rules based on attributes
    REST queries only allow for one value at a time in a list, so:
    1. get a short list of matches using a simple query
    2. then look for an exact match
    :return: the matching rule dict, or None when no rule matches.
    """
    api = 'protocols/nfs/export-policies/%s/rules' % self.policy_id
    # narrow the candidate list using the first element of each list option
    query.update(self.create_query(self.parameters))
    records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query)
    if error:
        # If no rule matches the query, return None
        if "entry doesn't exist" in error:
            return None
        self.module.fail_json(msg="Error on fetching export policy rules: %s" % error)
    return self.match_export_policy_rule_exactly(records, query, is_rest=True)
+
def match_export_policy_rule_exactly(self, records, query, is_rest):
    """
    Return the single record whose attributes (ignoring rule_index) fully match
    the requested parameters, or None.
    Fails when several records match, unless deleting with force_delete_on_first_match.
    """
    if not records:
        return None
    founds = []
    for record in records:
        # normalize the record to module option names before comparing
        record = self.filter_get_results(record) if is_rest else self.zapi_export_rule_info_to_dict(record)
        modify = self.na_helper.get_modified_attributes(record, self.parameters)
        # rule_index is an identifier, not an attribute to match on
        modify.pop('rule_index', None)
        if not modify:
            founds.append(record)
    if founds and len(founds) > 1 and not (self.parameters['state'] == 'absent' and self.parameters['force_delete_on_first_match']):
        self.module.fail_json(msg='Error multiple records exist for query: %s. Specify index to modify or delete a rule. Found: %s'
                              % (query, founds))
    return founds[0] if founds else None
+
def get_export_policy_rule_rest(self, rule_index):
    """Fetch a rule via REST: by index when given, else by attribute match."""
    self.set_export_policy_id_rest()
    if not self.policy_id:
        # no policy yet, so no rule either
        return None
    query = {'fields': 'anonymous_user,clients,index,protocols,ro_rule,rw_rule,superuser'}
    if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
        # these fields are only available in the REST API with ONTAP 9.9.1 or later
        query['fields'] += ',ntfs_unix_security,allow_suid,chown_mode,allow_device_creation'
    if rule_index is None:
        return self.get_export_policy_rule_exact_match(query)
    api = 'protocols/nfs/export-policies/%s/rules/%s' % (self.policy_id, rule_index)
    record, error = rest_generic.get_one_record(self.rest_api, api, query)
    if error:
        # If rule index passed in doesn't exist, return None
        if "entry doesn't exist" in error:
            return None
        self.module.fail_json(msg="Error on fetching export policy rule: %s" % error)
    return self.filter_get_results(record) if record else None
+
def filter_get_results(self, record):
    """Rename REST field names to module option names, in place, and return the record."""
    renames = (
        ('index', 'rule_index'),
        ('anonymous_user', 'anonymous_user_id'),
        ('protocols', 'protocol'),
        ('superuser', 'super_user_security'),
    )
    for rest_key, option_key in renames:
        record[option_key] = record.pop(rest_key)
    # flatten the clients list of {'match': ...} dicts into a list of strings
    record['client_match'] = [client['match'] for client in record.pop('clients')]
    return record
+
def create_export_policy_rest(self):
    """Create the export policy via REST."""
    body = {
        'name': self.parameters['name'],
        'svm.name': self.parameters['vserver']
    }
    dummy, error = rest_generic.post_async(self.rest_api, 'protocols/nfs/export-policies', body)
    if error is not None:
        self.module.fail_json(msg="Error on creating export policy: %s" % error)
+
def create_export_policy_rule_rest(self):
    """
    Create the rule via REST.  ONTAP assigns the index on creation; when the
    playbook requested a specific rule_index, the new rule is moved to it.
    """
    api = 'protocols/nfs/export-policies/%s/rules?return_records=true' % self.policy_id
    response, error = rest_generic.post_async(self.rest_api, api, self.create_body(self.parameters))
    if error:
        self.module.fail_json(msg="Error on creating export policy rule: %s" % error)
    # force a 'rename' to set the index
    rule_index = None
    if response and response.get('num_records') == 1:
        rule_index = self.na_helper.safe_get(response, ['records', 0, 'index'])
    if rule_index is None:
        self.module.fail_json(msg="Error on creating export policy rule, returned response is invalid: %s" % response)
    if self.parameters.get('rule_index'):
        self.modify_export_policy_rule_rest({}, rule_index, True)
+
def client_match_format(self, client_match):
    """Convert a list of match strings into the REST 'clients' body format."""
    return [{'match': address} for address in client_match]
+
def delete_export_policy_rule_rest(self, rule_index):
    """Delete the rule with the given index via REST."""
    api = 'protocols/nfs/export-policies/%s/rules' % self.policy_id
    dummy, error = rest_generic.delete_async(self.rest_api, api, rule_index)
    if error:
        self.module.fail_json(msg="Error on deleting export policy Rule: %s" % error)
+
def create_body(self, params):
    """Build the REST request body for create/modify from module parameters."""
    body = self.create_body_or_query_common(params)
    # list options keep all their elements in the body
    if params.get('protocol'):
        body['protocols'] = self.parameters['protocol']
    if params.get('super_user_security'):
        body['superuser'] = self.parameters['super_user_security']
    if params.get('client_match'):
        body['clients'] = self.client_match_format(self.parameters['client_match'])
    for option in ('ro_rule', 'rw_rule'):
        if params.get(option):
            body[option] = self.parameters[option]
    return body
+
def create_query(self, params):
    """Build a REST query dict; list options are narrowed to their first element."""
    query = self.create_body_or_query_common(params)
    # REST queries accept a single value per list option, so use the first element
    option_to_rest_key = (
        ('protocol', 'protocols'),
        ('super_user_security', 'superuser'),
        ('client_match', 'clients.match'),
        ('ro_rule', 'ro_rule'),
        ('rw_rule', 'rw_rule'),
    )
    for option, rest_key in option_to_rest_key:
        if params.get(option):
            query[rest_key] = self.parameters[option][0]
    return query
+
def create_body_or_query_common(self, params):
    """Collect the scalar options shared by the REST body and query builders."""
    option_to_rest_key = (
        ('anonymous_user_id', 'anonymous_user'),
        ('ntfs_unix_security', 'ntfs_unix_security'),
        ('allow_suid', 'allow_suid'),
        ('chown_mode', 'chown_mode'),
        ('allow_device_creation', 'allow_device_creation'),
    )
    result = {}
    # 'is not None' so that explicit False booleans are still included
    for option, rest_key in option_to_rest_key:
        if params.get(option) is not None:
            result[rest_key] = self.parameters[option]
    return result
+
def modify_export_policy_rule_rest(self, params, rule_index, rename=False):
    """PATCH the rule; when rename is True, also move it to parameters['rule_index']."""
    api = 'protocols/nfs/export-policies/%s/rules' % self.policy_id
    query = None
    if rename:
        query = {'new_index': self.parameters['rule_index']}
    body = self.create_body(params)
    dummy, error = rest_generic.patch_async(self.rest_api, api, rule_index, body, query)
    if error:
        self.module.fail_json(msg="Error on modifying export policy Rule: %s" % error)
+
def apply(self):
    ''' Apply required action from the play'''
    # look up by index when provided, otherwise match on attributes
    current = self.get_export_policy_rule(self.parameters.get('rule_index'))
    cd_action, rename, modify = None, None, None
    cd_action = self.na_helper.get_cd_action(current, self.parameters)
    # if rule_index is not None, see if we need to re-index an existing rule
    # the existing rule may be indexed by from_rule_index or we can match the attributes
    if cd_action == 'create' and self.parameters.get('rule_index'):
        from_current = self.get_export_policy_rule(self.parameters.get('from_rule_index'))
        rename = self.na_helper.is_rename_action(from_current, current)
        if rename is None and self.parameters.get('from_rule_index') is not None:
            self.module.fail_json(
                msg="Error reindexing: export policy rule %s does not exist." % self.parameters['from_rule_index'])
        if rename:
            # treat the change as a modify of the source rule rather than a create
            current = from_current
            cd_action = None
            self.parameters['from_rule_index'] = current['rule_index']

    if cd_action is None and self.parameters['state'] == 'present':
        modify = self.na_helper.get_modified_attributes(current, self.parameters)

    if self.na_helper.changed:
        self.set_export_policy_id()
        if cd_action == 'create':
            self.fail_on_missing_required_params('creating')

    if self.na_helper.changed and not self.module.check_mode:
        # create export policy (if policy doesn't exist) only when changed=True
        if rename:
            self.modify_export_policy_rule(modify, self.parameters['from_rule_index'], rename=True)
        elif cd_action == 'create':
            if not self.policy_id:
                self.create_export_policy()
                self.set_export_policy_id()
            self.create_export_policy_rule()
        elif cd_action == 'delete':
            self.delete_export_policy_rule(current['rule_index'])
        elif modify:
            self.modify_export_policy_rule(modify, current['rule_index'])

    self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    ''' Create object and call apply '''
    NetAppontapExportRule().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py
new file mode 100644
index 000000000..7fee744cf
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+
+# (c) 2018-2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_fcp
+short_description: NetApp ONTAP Start, Stop and Enable FCP services.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Start, Stop and Enable FCP services.
+options:
+ state:
+ description:
+ - Whether the FCP should be enabled or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ status:
+ description:
+ - Whether the FCP should be up or down
+ choices: ['up', 'down']
+ type: str
+ default: up
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: create FCP
+ na_ontap_fcp:
+ state: present
+ status: down
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ vserver: "{{vservername}}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapFCP:
    """
    Enable and Disable FCP
    """

    def __init__(self):
        """Define the argument spec, parse parameters, and select the REST or ZAPI backend."""
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            status=dict(required=False, type='str', choices=['up', 'down'], default='up')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = OntapRestAPI(self.module)
        if self.rest_api.is_rest():
            self.use_rest = True
        elif HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def create_fcp(self):
        """
        Creates an FCP service on the vserver (ZAPI).
        :return: none
        """
        try:
            self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-create'), True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating FCP: %s' % (to_native(error)),
                                  exception=traceback.format_exc())

    def start_fcp(self):
        """
        Starts an existing FCP service (ZAPI).
        :return: none
        """
        try:
            self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-start'), True)
        except netapp_utils.zapi.NaApiError as error:
            # Error 13013 denotes fcp service already started.
            if to_native(error.code) == "13013":
                return None
            self.module.fail_json(msg='Error starting FCP %s' % (to_native(error)),
                                  exception=traceback.format_exc())

    def stop_fcp(self):
        """
        Stops an existing FCP service (ZAPI).
        :return: none
        """
        try:
            self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-stop'), True)
        except netapp_utils.zapi.NaApiError as error:
            # fixed typo in error message: was 'Error Stoping FCP'
            self.module.fail_json(msg='Error stopping FCP %s' % (to_native(error)),
                                  exception=traceback.format_exc())

    def destroy_fcp(self):
        """
        Destroys an already stopped FCP service (ZAPI).
        :return: none
        """
        try:
            self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-destroy'), True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error destroying FCP %s' % (to_native(error)),
                                  exception=traceback.format_exc())

    def get_fcp(self):
        """
        Return the current FCP service: a record dict (REST), True/False (ZAPI), or None.
        """
        if self.use_rest:
            return self.get_fcp_rest()
        fcp_obj = netapp_utils.zapi.NaElement('fcp-service-get-iter')
        fcp_info = netapp_utils.zapi.NaElement('fcp-service-info')
        fcp_info.add_new_child('vserver', self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(fcp_info)
        fcp_obj.add_child_elem(query)
        # wrap in try/except for consistency with the other ZAPI calls in this module
        try:
            result = self.server.invoke_successfully(fcp_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting FCP service: %s' % (to_native(error)),
                                  exception=traceback.format_exc())
        # There can only be 1 FCP per vserver. If true, one is set up, else one isn't set up
        return bool(result.get_child_by_name('num-records')
                    and int(result.get_child_content('num-records')) >= 1)

    def current_status(self):
        """
        Return True when the FCP service is currently up (ZAPI).
        """
        try:
            status = self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-status'), True)
            return status.get_child_content('is-available') == 'true'
        except netapp_utils.zapi.NaApiError as error:
            # fixed copy-paste bug: previous message claimed 'Error destroying FCP'
            self.module.fail_json(msg='Error getting FCP status: %s' % (to_native(error)),
                                  exception=traceback.format_exc())

    def status_to_bool(self):
        """Return True when the desired status is 'up'."""
        return self.parameters['status'] == 'up'

    def get_fcp_rest(self):
        """Fetch the FCP service record via REST, mapping 'enabled' onto 'status'."""
        options = {'fields': 'enabled,svm.uuid',
                   'svm.name': self.parameters['vserver']}
        api = 'protocols/san/fcp/services'
        record, error = rest_generic.get_one_record(self.rest_api, api, options)
        if error:
            self.module.fail_json(msg="Error on fetching fcp: %s" % error)
        if record:
            record['status'] = 'up' if record.pop('enabled') else 'down'
        return record

    def create_fcp_rest(self):
        """Create the FCP service via REST, already in the desired status."""
        params = {'svm.name': self.parameters['vserver'],
                  'enabled': self.status_to_bool()}
        api = 'protocols/san/fcp/services'
        dummy, error = rest_generic.post_async(self.rest_api, api, params)
        if error is not None:
            self.module.fail_json(msg="Error on creating fcp: %s" % error)

    def destroy_fcp_rest(self, current):
        """Delete the FCP service via REST; the service must already be stopped."""
        api = 'protocols/san/fcp/services'
        dummy, error = rest_generic.delete_async(self.rest_api, api, current['svm']['uuid'])
        if error is not None:
            self.module.fail_json(msg=" Error on deleting fcp policy: %s" % error)

    def start_stop_fcp_rest(self, enabled, current):
        """Enable (start) or disable (stop) the FCP service via REST."""
        params = {'enabled': enabled}
        api = 'protocols/san/fcp/services'
        dummy, error = rest_generic.patch_async(self.rest_api, api, current['svm']['uuid'], params)
        if error is not None:
            self.module.fail_json(msg="Error on modifying fcp: %s" % error)

    def zapi_apply(self, current):
        """
        Apply state/status changes using ZAPI.
        :param current: truthy when an FCP service already exists on the vserver.
        :return: True when a change was (or, in check mode, would be) made.
        """
        changed = False
        desired_up = self.status_to_bool()
        if self.parameters['state'] == 'present':
            if current:
                # Service exists: only toggle it when the actual status differs.
                if desired_up != self.current_status():
                    if not self.module.check_mode:
                        if desired_up:
                            self.start_fcp()
                        else:
                            self.stop_fcp()
                    changed = True
            else:
                # No service yet: create it, then bring it to the requested status.
                if not self.module.check_mode:
                    self.create_fcp()
                    if desired_up:
                        self.start_fcp()
                    else:
                        self.stop_fcp()
                changed = True
        elif current:
            # state == 'absent': the service must be stopped before it can be destroyed.
            if not self.module.check_mode:
                if self.current_status():
                    self.stop_fcp()
                self.destroy_fcp()
            changed = True
        return changed

    def apply(self):
        """Gather the current state and apply the requested state/status."""
        current = self.get_fcp()
        if not self.use_rest:
            changed = self.zapi_apply(current)
            result = netapp_utils.generate_result(changed)
        else:
            cd_action = self.na_helper.get_cd_action(current, self.parameters)
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            changed = self.na_helper.changed
            if changed and not self.module.check_mode:
                if cd_action == 'create':
                    self.create_fcp_rest()
                elif modify:
                    # 'status' is the only modifiable attribute
                    self.start_stop_fcp_rest(modify['status'] == 'up', current)
                elif cd_action == 'delete':
                    # stop the service first, deletion requires it to be down
                    if current['status'] == 'up':
                        self.start_stop_fcp_rest(False, current)
                    self.destroy_fcp_rest(current)
            result = netapp_utils.generate_result(changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """
    Start, Stop and Enable FCP services.
    """
    NetAppOntapFCP().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsd.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsd.py
new file mode 100644
index 000000000..9cf442185
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsd.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_fdsd
+short_description: NetApp ONTAP create or remove a File Directory security descriptor.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or remove a security descriptor.
+options:
+ state:
+ description:
+ - Whether the specified security descriptor should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ name:
+ description:
+ - Specifies the name of the security descriptor.
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver.
+ required: true
+ type: str
+"""
+
+EXAMPLES = """
+ - name: Create File Directory Security Descriptor
+ netapp.ontap.na_ontap_fdsd:
+ state: present
+ name: "ansible_sdl"
+ vserver: "svm1"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete File Directory Security Descriptor
+ netapp.ontap.na_ontap_fdsd:
+ state: absent
+ vserver: "svm1"
+ name: "ansible_sdl"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
class NetAppOntapFDSD():
    """
    Creates or removes a File Directory Security Descriptor
    """
    def __init__(self):
        """
        Initialize the ONTAP File Directory Security Descriptor class
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            name=dict(required=True, type='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        # module helpers and parsed parameters
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # this module is REST only: fail early when REST is not available
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        if not self.use_rest:
            self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_fdsd', '9.6'))

    def _sd_selector(self):
        """Selector shared by GET/POST/DELETE: descriptor name and its vserver."""
        return {
            'ntfs-sd': self.parameters['name'],
            'vserver': self.parameters['vserver']
        }

    def get_fdsd(self):
        """
        Return the matching File Directory Security Descriptor records, or None.
        """
        api = "private/cli/vserver/security/file-directory/ntfs"
        message, error = self.rest_api.get(api, self._sd_selector())
        records, error = rrh.check_for_0_or_more_records(api, message, error)
        if error:
            self.module.fail_json(msg=error)
        return records or None

    def add_fdsd(self):
        """
        Adds a new File Directory Security Descriptor
        """
        api = "private/cli/vserver/security/file-directory/ntfs"
        dummy, error = self.rest_api.post(api, self._sd_selector())
        if error:
            self.module.fail_json(msg=error)

    def remove_fdsd(self):
        """
        Deletes a File Directory Security Descriptor
        """
        api = "private/cli/vserver/security/file-directory/ntfs"
        dummy, error = self.rest_api.delete(api, self._sd_selector())
        if error:
            self.module.fail_json(msg=error)

    def apply(self):
        """Decide on create/delete, perform it unless in check mode, and report."""
        cd_action = self.na_helper.get_cd_action(self.get_fdsd(), self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.add_fdsd()
            elif cd_action == 'delete':
                self.remove_fdsd()
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
        self.module.exit_json(**result)
+
+
def main():
    """
    Creates and removes File Directory Security Descriptors
    """
    NetAppOntapFDSD().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsp.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsp.py
new file mode 100644
index 000000000..4fac6fc90
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsp.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_fdsp
+short_description: NetApp ONTAP create or delete a file directory security policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create or delete a file directory security policy.
+options:
+ state:
+ description:
+ - Whether the specified policy should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ name:
+ description:
+ - Specifies the name of the policy.
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the security policy.
+ required: true
+ type: str
+"""
+
+EXAMPLES = """
+ - name: Create File Directory Security Policy
+ netapp.ontap.na_ontap_fdsp:
+ state: present
+ name: "ansible_security_policyl"
+ vserver: "svm1"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete File Directory Security Policy
+ netapp.ontap.na_ontap_fdsp:
+ state: absent
+ vserver: "svm1"
+ name: "ansible_security_policyl"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class NetAppOntapFDSP():
+ """
+ Creates or Destroys a File Directory Security Policy
+ """
+ def __init__(self):
+ """
+ Initialize the ONTAP File Directory Security Policy class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ name=dict(required=True, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_fdsp', '9.6'))
+
+ def get_fdsp(self):
+ """
+ Get File Directory Security Policy
+ """
+ api = "private/cli/vserver/security/file-directory/policy"
+ query = {
+ 'policy-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver']
+ }
+
+ message, error = self.rest_api.get(api, query)
+ records, error = rrh.check_for_0_or_more_records(api, message, error)
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ return records if records else None
+
+ def add_fdsp(self):
+ """
+ Adds a new File Directory Security Policy
+ """
+ api = "private/cli/vserver/security/file-directory/policy"
+ body = {
+ 'policy-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver']
+ }
+
+ dummy, error = self.rest_api.post(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def remove_fdsp(self):
+ """
+ Deletes a File Directory Security Policy
+ """
+ api = "private/cli/vserver/security/file-directory/policy"
+ body = {
+ 'policy-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver']
+ }
+
+ dummy, error = self.rest_api.delete(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def apply(self):
+ current = self.get_fdsp()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if self.na_helper.changed:
+ if not self.module.check_mode:
+ if cd_action == 'create':
+ self.add_fdsp()
+ elif cd_action == 'delete':
+ self.remove_fdsp()
+
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Creates or removes File Directory Security Policy
+ """
+ obj = NetAppOntapFDSP()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdspt.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdspt.py
new file mode 100644
index 000000000..9e402d952
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdspt.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_fdspt
+short_description: NetApp ONTAP create, delete or modify File Directory security policy tasks
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, modify or remove file directory security policy tasks.
+
+options:
+ state:
+ description:
+ - Whether the specified Policy Task should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ name:
+ description:
+ - Specifies the name of the policy the task will be associated with.
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the File Directory security policy.
+ required: true
+ type: str
+
+ access_control:
+ description:
+ - Specifies access control of the task.
+ choices: ['file_directory', 'slag']
+ type: str
+
+ ntfs_mode:
+ description:
+ - Specifies NTFS propagation mode.
+ choices: ['propagate', 'ignore', 'replace']
+ type: str
+
+ ntfs_sd:
+ description:
+ - Specifies the NTFS security descriptor name.
+ type: list
+ elements: str
+
+ path:
+ description:
+ - Specifies the file or folder path of the task. In case of SLAG this path specify the volume or qtree mounted path.
+ required: true
+ type: str
+
+ security_type:
+ description:
+ - Specifies the type of security. If not specified ONTAP will default to ntfs.
+ choices: ['ntfs', 'nfsv4']
+ type: str
+
+ index_num:
+ description:
+ - Specifies the index number of a task. Tasks are applied in order. A task with a larger index value is applied after a task with a lower \
+ index number. If you do not specify this optional parameter, new tasks are applied to the end of the index list.
+ type: int
+
+notes:
+- check_mode is supported for this module.
+"""
+
+EXAMPLES = """
+ - name: Create File Directory Security Policy Task
+ netapp.ontap.na_ontap_na_ontap_fdspt:
+ state: present
+ name: "ansible_pl"
+ access_control: "file_directory"
+ ntfs_sd: "ansible1_sd"
+ ntfs_mode: "replace"
+ security_type: "ntfs"
+ path: "/volume1"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Modify File Directory Security Policy Task
+ netapp.ontap.na_ontap_na_ontap_fdspt:
+ state: present
+ name: "ansible_pl"
+ access_control: "file_directory"
+ path: "/volume1"
+ ntfs_sd: "ansible1_sd"
+ ntfs_mode: "replace"
+ security_type: "ntfs"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Remove File Directory Security Policy Task
+ netapp.ontap.na_ontap_na_ontap_fdspt:
+ state: absent
+ vserver: "SVM1"
+ name: "ansible_pl"
+ access_control: "file_directory"
+ path: "/volume1"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class NetAppOntapFDSPT():
+ """
+ Creates, Modifies and removes a File Directory Security Policy Tasks
+ """
+ def __init__(self):
+ """
+ Initialize the Ontap File Directory Security Policy Tasks class
+ """
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ path=dict(required=True, type='str'),
+ access_control=dict(required=False, choices=['file_directory', 'slag'], type='str'),
+ ntfs_sd=dict(required=False, type='list', elements='str'),
+ ntfs_mode=dict(required=False, choices=['propagate', 'ignore', 'replace'], type='str'),
+ security_type=dict(required=False, choices=['ntfs', 'nfsv4'], type='str'),
+ index_num=dict(required=False, type='int')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_fdspt', '9.6'))
+
+ def get_fdspt(self):
+ """
+ Get File Directory Security Policy Task
+ """
+ api = "private/cli/vserver/security/file-directory/policy/task"
+ query = {
+ 'policy-name': self.parameters['name'],
+ 'path': self.parameters['path'],
+ 'fields': 'vserver,ntfs-mode,ntfs-sd,security-type,access-control,index-num'
+ }
+
+ message, error = self.rest_api.get(api, query)
+ records, error = rrh.check_for_0_or_1_records(api, message, error)
+
+ if error:
+ self.module.fail_json(msg=error)
+ if records:
+ if 'ntfs_sd' not in records: # ntfs_sd is not included in the response if there is not an associated value. Required for modify
+ records['ntfs_sd'] = []
+
+ return records if records else None
+
+ def add_fdspt(self):
+ """
+ Adds a new File Directory Security Policy Task
+ """
+ api = "private/cli/vserver/security/file-directory/policy/task/add"
+ body = {
+ 'policy-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver'],
+ 'path': self.parameters['path']
+ }
+
+ for i in ('ntfs_mode', 'ntfs_sd', 'security_type', 'access_control', 'index_num'):
+ if i in self.parameters:
+ body[i.replace('_', '-')] = self.parameters[i]
+
+ dummy, error = self.rest_api.post(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def remove_fdspt(self):
+ """
+ Deletes a File Directory Security Policy Task
+ """
+ api = "private/cli/vserver/security/file-directory/policy/task/remove"
+ body = {
+ 'policy-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver'],
+ 'path': self.parameters['path']
+ }
+
+ dummy, error = self.rest_api.delete(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def modify_fdspt(self):
+ """
+ Modifies a File Directory Security Policy Task
+ """
+ # Modify endpoint is not functional.
+ self.remove_fdspt()
+ self.add_fdspt()
+
+ def apply(self):
+ current, modify = self.get_fdspt(), None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.add_fdspt()
+ elif cd_action == 'delete':
+ self.remove_fdspt()
+ elif modify:
+ self.modify_fdspt()
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Creates, deletes and modifies File Directory Security Policy Tasks
+ """
+ obj = NetAppOntapFDSPT()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdss.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdss.py
new file mode 100644
index 000000000..6c4670221
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdss.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_fdss
+short_description: NetApp ONTAP File Directory Security Set.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Set file directory security information.
+- This module is not idempotent. If re-running this module to apply the currently assigned policy, the policy will be reassigned.
+options:
+ state:
+ description:
+ - Whether the specified Policy Task should exist or not.
+ choices: ['present']
+ default: present
+ type: str
+ name:
+ description:
+ - Specifies the security policy to apply.
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - Specifies the Vserver that contains the path to which the security policy is applied.
+ required: true
+ type: str
+"""
+EXAMPLES = """
+ - name: Set File Directory Security
+ netapp.ontap.na_ontap_fdss:
+ state: present
+ vserver: "svm1"
+ name: "ansible_pl"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class NetAppOntapFDSS():
+ """
+ Applys a File Directory Security Policy
+ """
+ def __init__(self):
+ """
+ Initialize the Ontap File Directory Security class
+ """
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present'], default='present'),
+ name=dict(required=True, type='str'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_fdss', '9.6'))
+
+ def set_fdss(self):
+ """
+ Apply File Directory Security
+ """
+
+ api = "private/cli/vserver/security/file-directory/apply"
+ query = {
+ 'policy_name': self.parameters['name'],
+ 'vserver': self.parameters['vserver'],
+ }
+
+ response, error = self.rest_api.post(api, query) # response will contain the job ID created by the post.
+ response, error = rrh.check_for_error_and_job_results(api, response, error, self.rest_api)
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ def apply(self):
+ self.set_fdss()
+ self.module.exit_json(changed=True)
+
+
+def main():
+ """
+ File Directory Security Policy Tasks
+ """
+ obj = NetAppOntapFDSS()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py
new file mode 100644
index 000000000..d0f89d826
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = """
+
+module: na_ontap_file_directory_policy
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP create, delete, or modify vserver security file-directory policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+version_added: 20.8.0
+description:
+ - Create, modify, or destroy vserver security file-directory policy
+ - Add or remove task from policy.
+ - Each time a policy/task is created/modified, automatically apply policy to vserver.
+ - This module only supports ZAPI and is deprecated.
+ - The final version of ONTAP to support ZAPI is 9.12.1.
+options:
+ state:
+ description:
+ - Whether the specified policy or task should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the policy.
+ required: true
+ type: str
+
+ policy_name:
+ description:
+ - Specifies the name of the policy.
+ type: str
+ required: true
+
+ access_control:
+ description:
+ - Specifies the access control of task to be applied.
+ choices: ['file_directory', 'slag']
+ type: str
+
+ ntfs_mode:
+ description:
+ - Specifies NTFS Propagation Mode.
+ choices: ['propagate', 'ignore', 'replace']
+ type: str
+
+ ntfs_sd:
+ description:
+ - Specifies NTFS security descriptor identifier.
+ type: list
+ elements: str
+
+ path:
+ description:
+ - Specifies the file or folder path of the task.
+ - If path is specified and the policy which the task is adding to, does not exist, it will create the policy first then add the task to it.
+ - If path is specified, delete operation only removes task from policy.
+ type: str
+
+ security_type:
+ description:
+ - Specifies the type of security.
+ type: str
+ choices: ['ntfs', 'nfsv4']
+
+ ignore_broken_symlinks:
+ description:
+ - Skip Broken Symlinks.
+ - Options used when applying the policy to vserver.
+ type: bool
+
+"""
+
+EXAMPLES = """
+
+ - name: create policy
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ vserver: ansible
+ policy_name: file_policy
+ ignore_broken_symlinks: false
+
+ - name: add task to existing file_policy
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: present
+ vserver: ansible
+ policy_name: file_policy
+ path: /vol
+ ntfs_sd: ansible_sd
+ ntfs_mode: propagate
+
+ - name: delete task from file_policy.
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: absent
+ vserver: ansible
+ policy_name: file_policy
+ path: /vol
+
+ - name: delete file_policy along with the tasks.
+ na_ontap_file_directory_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ state: absent
+ vserver: ansible
+ policy_name: file_policy
+
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapFilePolicy(object):
+
+ def __init__(self):
+ """
+ Initialize the Ontap file directory policy class
+ """
+
+ self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ access_control=dict(required=False, type='str', choices=['file_directory', 'slag']),
+ ntfs_mode=dict(required=False, choices=['propagate', 'ignore', 'replace']),
+ ntfs_sd=dict(required=False, type='list', elements='str'),
+ path=dict(required=False, type='str'),
+ security_type=dict(required=False, type='str', choices=['ntfs', 'nfsv4']),
+ ignore_broken_symlinks=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.na_helper.module_deprecated(self.module)
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg='The python NetApp-Lib module is required')
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def create_policy(self):
+ policy_obj = netapp_utils.zapi.NaElement("file-directory-security-policy-create")
+ policy_obj.add_new_child('policy-name', self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error creating file-directory policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_policy_iter(self):
+ policy_get_iter = netapp_utils.zapi.NaElement('file-directory-security-policy-get-iter')
+ policy_info = netapp_utils.zapi.NaElement('file-directory-security-policy')
+ policy_info.add_new_child('vserver', self.parameters['vserver'])
+ policy_info.add_new_child('policy-name', self.parameters['policy_name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(policy_info)
+ policy_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(policy_get_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ policy = attributes_list.get_child_by_name('file-directory-security-policy')
+ return policy.get_child_content('policy-name')
+ return None
+
+ def remove_policy(self):
+ remove_policy = netapp_utils.zapi.NaElement('file-directory-security-policy-delete')
+ remove_policy.add_new_child('policy-name', self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(remove_policy, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error removing file-directory policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_task_iter(self):
+ task_get_iter = netapp_utils.zapi.NaElement('file-directory-security-policy-task-get-iter')
+ task_info = netapp_utils.zapi.NaElement('file-directory-security-policy-task')
+ task_info.add_new_child('vserver', self.parameters['vserver'])
+ task_info.add_new_child('policy-name', self.parameters['policy_name'])
+ task_info.add_new_child('path', self.parameters['path'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(task_info)
+ task_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(task_get_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching task from file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ task = attributes_list.get_child_by_name('file-directory-security-policy-task')
+ task_result = dict()
+ task_result['path'] = task.get_child_content('path')
+ if task.get_child_by_name('ntfs-mode'):
+ task_result['ntfs_mode'] = task.get_child_content('ntfs-mode')
+ if task.get_child_by_name('security-type'):
+ task_result['security_type'] = task.get_child_content('security-type')
+ if task.get_child_by_name('ntfs-sd'):
+ task_result['ntfs_sd'] = [ntfs_sd.get_content() for ntfs_sd in task.get_child_by_name('ntfs-sd').get_children()]
+ return task_result
+ return None
+
+ def add_task_to_policy(self):
+ policy_add_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-add')
+ policy_add_task.add_new_child('path', self.parameters['path'])
+ policy_add_task.add_new_child('policy-name', self.parameters['policy_name'])
+ if self.parameters.get('access_control') is not None:
+ policy_add_task.add_new_child('access-control', self.parameters['access_control'])
+ if self.parameters.get('ntfs_mode') is not None:
+ policy_add_task.add_new_child('ntfs-mode', self.parameters['ntfs_mode'])
+ if self.parameters.get('ntfs_sd') is not None:
+ ntfs_sds = netapp_utils.zapi.NaElement('ntfs-sd')
+ for ntfs_sd in self.parameters['ntfs_sd']:
+ ntfs_sds.add_new_child('file-security-ntfs-sd', ntfs_sd)
+ policy_add_task.add_child_elem(ntfs_sds)
+ if self.parameters.get('security_type') is not None:
+ policy_add_task.add_new_child('security-type', self.parameters['security_type'])
+ try:
+ self.server.invoke_successfully(policy_add_task, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding task to file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_task_from_policy(self):
+ policy_remove_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-remove')
+ policy_remove_task.add_new_child('path', self.parameters['path'])
+ policy_remove_task.add_new_child('policy-name', self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(policy_remove_task, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing task from file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_task(self, modify):
+ policy_modify_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-modify')
+ policy_modify_task.add_new_child('path', self.parameters['path'])
+ policy_modify_task.add_new_child('policy-name', self.parameters['policy_name'])
+ if modify.get('ntfs_mode') is not None:
+ policy_modify_task.add_new_child('ntfs-mode', self.parameters['ntfs_mode'])
+ if modify.get('ntfs_sd') is not None:
+ ntfs_sds = netapp_utils.zapi.NaElement('ntfs-sd')
+ for ntfs_sd in self.parameters['ntfs_sd']:
+ ntfs_sds.add_new_child('file-security-ntfs-sd', ntfs_sd)
+ policy_modify_task.add_child_elem(ntfs_sds)
+ if modify.get('security_type') is not None:
+ policy_modify_task.add_new_child('security-type', self.parameters['security_type'])
+ try:
+ self.server.invoke_successfully(policy_modify_task, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying task in file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def set_sd(self):
+ set_sd = netapp_utils.zapi.NaElement('file-directory-security-set')
+ set_sd.add_new_child('policy-name', self.parameters['policy_name'])
+ if self.parameters.get('ignore-broken-symlinks'):
+ set_sd.add_new_child('ignore-broken-symlinks', str(self.parameters['ignore_broken_symlinks']))
+ try:
+ self.server.invoke_successfully(set_sd, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error applying file-directory policy %s: %s'
+ % (self.parameters['policy_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ current = self.get_policy_iter()
+ cd_action, task_cd_action, task_modify = None, None, None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.parameters.get('path'):
+ current_task = self.get_task_iter()
+ task_cd_action = self.na_helper.get_cd_action(current_task, self.parameters)
+ if task_cd_action is None and self.parameters['state'] == 'present':
+ task_modify = self.na_helper.get_modified_attributes(current_task, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.parameters.get('path'):
+ if task_cd_action == 'create':
+ # if policy doesn't exist, create the policy first.
+ if cd_action == 'create':
+ self.create_policy()
+ self.add_task_to_policy()
+ self.set_sd()
+ elif task_cd_action == 'delete':
+ # delete the task, not the policy.
+ self.remove_task_from_policy()
+ elif task_modify:
+ self.modify_task(task_modify)
+ self.set_sd()
+ else:
+ if cd_action == 'create':
+ self.create_policy()
+ self.set_sd()
+ elif cd_action == 'delete':
+ self.remove_policy()
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, extra_responses={'task action': task_cd_action,
+ 'task modify': task_modify})
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Creates, deletes and modifies file directory policy
+ """
+ obj = NetAppOntapFilePolicy()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions.py
new file mode 100644
index 000000000..2e5b844f0
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions.py
@@ -0,0 +1,760 @@
+#!/usr/bin/python
+
+# (c) 2022-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_file_security_permissions
+short_description: NetApp ONTAP NTFS file security permissions
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 22.0.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create, delete, or modify NTFS file security and audit policies of file or directory on NetApp ONTAP.
+  - Note that ACLs are matched based on ('user', 'access', 'access_control', 'apply_to').
+ In order to modify any of these 4 properties, the module deletes the ACL and creates a new one.
+
+options:
+ state:
+ description:
+ - Whether the specified file security permission should exist or not.
+ - When absent, all ACLs are deleted, irrespective of the contents of C(acls).
+ - See C(access_control) to only delete all SLAG ACLS, or only delete file-directory ACLs.
+ - Inherited ACLs are ignored, they can't be deleted or modified.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ path:
+ description:
+ - The path of the file or directory on which to apply security permissions.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ owner:
+ description:
+ - Specifies the owner of the NTFS security descriptor (SD).
+ - You can specify the owner using either a user name or security identifier (SID).
+ - The owner of the SD can modify the permissions on the file (or folder) or files (or folders) to which the SD is applied and
+ can give other users the right to take ownership of the object or objects to which the SD is applied.
+ type: str
+
+ control_flags:
+ description:
+ - Specifies the control flags in the SD. It is a Hexadecimal Value.
+ type: str
+
+ group:
+ description:
+ - Specifies the owner's primary group.
+ - Specify the owner group using either a group name or SID.
+ type: str
+
+ ignore_paths:
+ description:
+ - For each file or directory in the list, specifies that permissions on this file or directory cannot be replaced.
+ type: list
+ elements: str
+
+ propagation_mode:
+ description:
+ - Specifies how to propagate security settings to child subfolders and files.
+ - Defaults to propagate.
+ choices: ['propagate', 'replace']
+ type: str
+
+ access_control:
+ description:
+ - An Access Control Level specifies the access control of the task to be applied.
+ - Valid values are "file-directory" or "Storage-Level Access Guard (SLAG)".
+ - SLAG is used to apply the specified security descriptors with the task for the volume or qtree.
+ - Otherwise, the security descriptors are applied on files and directories at the specified path.
+ - The value slag is not supported on FlexGroups volumes. The default value is "file-directory".
+ - This field requires ONTAP 9.10.1 or later. This defaults to "file_directory".
+ - When state is present, all ACLs not listed in C(acls) are deleted when this option is absent.
+ If this option is present, only ACLs matching its value are deleted.
+ - When state is absent, all ACLs are deleted when this option is absent.
+ If this option is present, only ACLs matching its value are deleted.
+ choices: ['file_directory', 'slag']
+ type: str
+
+ acls:
+ description:
+ - A discretionary access security list (DACL) identifies the trustees that are allowed or denied access to a securable object.
+ - When a process tries to access a securable object, the system checks the access control entries (ACEs)
+ in the object's DACL to determine whether to grant access to it.
+ type: list
+ elements: dict
+ suboptions:
+ access_control:
+ description:
+ - An Access Control Level specifies the access control of the task to be applied.
+ - Valid values are "file-directory" or "Storage-Level Access Guard (SLAG)".
+ - SLAG is used to apply the specified security descriptors with the task for the volume or qtree.
+ - Otherwise, the security descriptors are applied on files and directories at the specified path.
+ - The value slag is not supported on FlexGroups volumes. The default value is "file-directory".
+ - This field requires ONTAP 9.10.1 or later. This defaults to "file_directory".
+ choices: ['file_directory', 'slag']
+ type: str
+ access:
+ description:
+ - Specifies whether the ACL is for DACL or SACL.
+ - Currently tested with access_allow, access_deny for DACL and audit_failure, audit_success for SACL.
+ choices: [access_allow, access_deny,
+ access_allowed_callback, access_denied_callback, access_allowed_callback_object, access_denied_callback_object,
+ system_audit_callback, system_audit_callback_object, system_resource_attribute, system_scoped_policy_id,
+ audit_failure, audit_success, audit_success_and_failure]
+ type: str
+ required: true
+ user:
+ description:
+ - Specifies the account to which the ACE applies. Specify either name or SID.
+ - As of 21.24.0, the module is not idempotent when using a SID.
+ - To make it easier when also using C(na_ontap_file_security_permissions_acl), this is aliased to C(acl_user).
+ type: str
+ required: true
+ aliases: ['acl_user']
+ rights:
+ description:
+ - Specifies the access right controlled by the ACE for the account specified.
+ - The "rights" parameter is mutually exclusive with the "advanced_rights" parameter.
+ - ONTAP translates rights into advanced_rights and this module is not idempotent when rights are used.
+ - Make sure to use C(advanced_rights) to maintain idempotency. C(rights) can be used to discover the mapping to C(advanced_rights).
+ choices: ['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write']
+ type: str
+ apply_to:
+ description:
+ - Specifies where to apply the DACL or SACL entries.
+ - At least one suboption must be set to true. Suboptions that are not set are assumed to be false.
+ - With SLAGs, ONTAP accepts the three suboptions to be set to true, but creates 2 ACLs.
+ This module requires the 2 ACLs to be present to preserve idempotency.
+ See also C(validate_changes).
+ type: dict
+ required: true
+ suboptions:
+ files:
+ description:
+ - Apply to Files.
+ type: bool
+ default: false
+ sub_folders:
+ description:
+ - Apply to all sub-folders.
+ type: bool
+ default: false
+ this_folder:
+ description:
+ - Apply only to this folder
+ type: bool
+ default: false
+ advanced_rights:
+ description:
+ - Specifies the advanced access right controlled by the ACE for the account specified.
+ type: dict
+ suboptions:
+ append_data:
+ description:
+ - Append Data.
+ type: bool
+ delete:
+ description:
+ - Delete.
+ type: bool
+ delete_child:
+ description:
+ - Delete Child.
+ type: bool
+ execute_file:
+ description:
+ - Execute File.
+ type: bool
+ full_control:
+ description:
+ - Full Control.
+ type: bool
+ read_attr:
+ description:
+ - Read Attributes.
+ type: bool
+ read_data:
+ description:
+ - Read Data.
+ type: bool
+ read_ea:
+ description:
+ - Read Extended Attributes.
+ type: bool
+ read_perm:
+ description:
+ - Read Permissions.
+ type: bool
+ write_attr:
+ description:
+ - Write Attributes.
+ type: bool
+ write_data:
+ description:
+ - Write Data.
+ type: bool
+ write_ea:
+ description:
+ - Write Extended Attributes.
+ type: bool
+ write_owner:
+ description:
+ - Write Owner.
+ type: bool
+ write_perm:
+ description:
+ - Write Permission.
+ type: bool
+ ignore_paths:
+ description:
+ - For each file or directory in the list, specifies that permissions on this file or directory cannot be replaced.
+ type: list
+ elements: str
+ propagation_mode:
+ description:
+ - Specifies how to propagate security settings to child subfolders and files.
+ - Defaults to propagate.
+        - This option is valid only when creating an ACL.
+ choices: ['propagate', 'replace']
+ type: str
+
+ validate_changes:
+ description:
+ - ACLs may not be applied as expected.
+    - For instance, if Everyone is inherited with all permissions, additional users will be granted all permissions, regardless of the request.
+ - For this specific example, you can either delete the top level Everyone, or create a new ACL for Everyone at a lower level.
+ - When using C(rights), ONTAP translates them into C(advanced_rights) so the validation will always fail.
+ - Valid values are C(ignore), no checking; C(warn) to issue a warning; C(error) to fail the module.
+ - With SLAGS, ONTAP may split one ACL into two ACLs depending on the C(apply_to) settings. To maintain idempotency, please provide 2 ACLs as input.
+ choices: ['ignore', 'warn', 'error']
+ type: str
+ default: error
+
+notes:
+ - Supports check_mode.
+  - Only supported with REST and requires ONTAP 9.9.1 or later.
+ - SLAG requires ONTAP 9.10.1 or later.
+ - When state is present, if an ACL is inherited, and a desired ACL matches, a new ACL is created as the inherited cannot be modified.
+ - When state is absent, inherited ACLs are ignored.
+'''
+
+EXAMPLES = """
+ - name: Create file directory security permissions.
+ netapp.ontap.na_ontap_file_security_permissions:
+ state: present
+ vserver: svm1
+ access_control: file_directory
+ path: /vol200/newfile.txt
+ owner: "{{ user }}"
+    # Note, without quotes, use a single backslash in AD user names
+ # with quotes, it needs to be escaped as a double backslash
+ # user: "ANSIBLE_CIFS\\user1"
+ # we can't show an example with a single backslash as this is a python file, but it works in YAML.
+ acls:
+ - access: access_deny
+ user: "{{ user }}"
+ apply_to:
+ files: true
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: "{{ https }}"
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Modify file directory security permissions.
+ netapp.ontap.na_ontap_file_security_permissions:
+ state: present
+ vserver: svm1
+ access_control: file_directory
+ path: /vol200/newfile.txt
+ acls:
+ - access: access_deny
+ user: "{{ user }}"
+ apply_to:
+ files: true
+ - access: access_allow
+ user: "{{ user }}"
+ apply_to:
+ files: true
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: "{{ https }}"
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Delete file directory security ACLs.
+ netapp.ontap.na_ontap_file_security_permissions:
+ state: absent
+ vserver: svm1
+ access_control: file_directory
+ path: /vol200/newfile.txt
+ acls:
+ - access: access_deny
+ user: "{{ user }}"
+ apply_to:
+ files: true
+ - access: access_allow
+ user: "{{ user }}"
+ apply_to:
+ files: true
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: "{{ https }}"
+ validate_certs: "{{ validate_certs }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver
+
+
+class NetAppOntapFileSecurityPermissions:
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ path=dict(required=True, type='str'),
+ owner=dict(required=False, type='str'),
+ control_flags=dict(required=False, type='str'),
+ group=dict(required=False, type='str'),
+ access_control=dict(required=False, type='str', choices=['file_directory', 'slag']),
+ ignore_paths=dict(required=False, type='list', elements='str'),
+ propagation_mode=dict(required=False, type='str', choices=['propagate', 'replace']),
+ acls=dict(type='list', elements='dict', options=dict(
+ access=dict(required=True, type='str', choices=[
+ 'access_allow', 'access_deny',
+ 'access_allowed_callback', 'access_denied_callback', 'access_allowed_callback_object', 'access_denied_callback_object',
+ 'system_audit_callback', 'system_audit_callback_object', 'system_resource_attribute', 'system_scoped_policy_id',
+ 'audit_failure', 'audit_success', 'audit_success_and_failure']),
+ access_control=dict(required=False, type='str', choices=['file_directory', 'slag']),
+ user=dict(required=True, type='str', aliases=['acl_user']),
+ rights=dict(required=False,
+ choices=['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write'],
+ type='str'),
+ apply_to=dict(required=True, type='dict', options=dict(
+ files=dict(required=False, type='bool', default=False),
+ sub_folders=dict(required=False, type='bool', default=False),
+ this_folder=dict(required=False, type='bool', default=False),
+ )),
+ advanced_rights=dict(required=False, type='dict', options=dict(
+ append_data=dict(required=False, type='bool'),
+ delete=dict(required=False, type='bool'),
+ delete_child=dict(required=False, type='bool'),
+ execute_file=dict(required=False, type='bool'),
+ full_control=dict(required=False, type='bool'),
+ read_attr=dict(required=False, type='bool'),
+ read_data=dict(required=False, type='bool'),
+ read_ea=dict(required=False, type='bool'),
+ read_perm=dict(required=False, type='bool'),
+ write_attr=dict(required=False, type='bool'),
+ write_data=dict(required=False, type='bool'),
+ write_ea=dict(required=False, type='bool'),
+ write_owner=dict(required=False, type='bool'),
+ write_perm=dict(required=False, type='bool'),
+ )),
+ ignore_paths=dict(required=False, type='list', elements='str'),
+ propagation_mode=dict(required=False, type='str', choices=['propagate', 'replace']),
+ )),
+ validate_changes=dict(required=False, type='str', choices=['ignore', 'warn', 'error'], default='error'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.svm_uuid = None
+ self.na_helper = NetAppModule(self)
+ self.parameters = self.na_helper.check_and_set_parameters(self.module)
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_file_security_permissions', 9, 9, 1)
+ dummy, error = self.rest_api.is_rest(partially_supported_rest_properties=[['access_control', (9, 10, 1)], ['acls.access_control', (9, 10, 1)]],
+ parameters=self.parameters)
+ if error:
+ self.module.fail_json(msg=error)
+ self.parameters = self.na_helper.filter_out_none_entries(self.parameters)
+ self.apply_to_keys = ['files', 'sub_folders', 'this_folder']
+ # POST at SD level only expects a subset of keys in ACL
+ self.post_acl_keys = ['access', 'advanced_rights', 'apply_to', 'rights', 'user']
+ if self.parameters['state'] == 'present':
+ self.validate_acls()
+
+ def validate_acls(self):
+ if 'acls' not in self.parameters:
+ return
+ self.parameters['acls'] = self.na_helper.filter_out_none_entries(self.parameters['acls'])
+ for acl in self.parameters['acls']:
+ if 'rights' in acl:
+ if 'advanced_rights' in acl:
+ self.module.fail_json(msg="Error: suboptions 'rights' and 'advanced_rights' are mutually exclusive.")
+ self.module.warn('This module is not idempotent when "rights" is used, make sure to use "advanced_rights".')
+ # validate that at least one suboption is true
+ if not any(self.na_helper.safe_get(acl, ['apply_to', key]) for key in self.apply_to_keys):
+ self.module.fail_json(msg="Error: at least one suboption must be true for apply_to. Got: %s" % acl)
+ # error if identical acls are set.
+ self.match_acl_with_acls(acl, self.parameters['acls'])
+ for option in ('access_control', 'ignore_paths', 'propagation_mode'):
+ value = self.parameters.get(option)
+ if value is not None:
+ for acl in self.parameters['acls']:
+ if acl.get(option) not in (None, value):
+ self.module.fail_json(msg="Error: mismatch between top level value and ACL value for %s: %s vs %s"
+ % (option, value, acl.get(option)))
+                # make sure options are set in each ACL, so we can match easily desired ACLs with current ACLs
+ acl[option] = value
+
+ @staticmethod
+ def url_encode(url_fragment):
+ """
+ replace special characters with URL encoding:
+ %2F for /, %5C for backslash
+ """
+ # \ is the escape character in python, so \\ means \
+ return url_fragment.replace("/", "%2F").replace("\\", "%5C")
+
+ def get_svm_uuid(self):
+ self.svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True)
+
+ def get_file_security_permissions(self):
+ api = 'protocols/file-security/permissions/%s/%s' % (self.svm_uuid, self.url_encode(self.parameters['path']))
+ fields = 'acls,control_flags,group,owner'
+ record, error = rest_generic.get_one_record(self.rest_api, api, {'fields': fields})
+ # If we get 655865 the path we gave was not found, so we don't want to fail we want to return None
+ if error:
+ # if path not exists and state absent, return None and changed is False.
+ if '655865' in error and self.parameters['state'] == 'absent':
+ return None
+ self.module.fail_json(msg="Error fetching file security permissions %s: %s" % (self.parameters['path'], to_native(error)),
+ exception=traceback.format_exc())
+ return self.form_current(record) if record else None
+
+ def form_current(self, record):
+ current = {
+ 'group': self.na_helper.safe_get(record, ['group']),
+ 'owner': self.na_helper.safe_get(record, ['owner']),
+ 'control_flags': self.na_helper.safe_get(record, ['control_flags']),
+ 'path': record['path']
+ }
+ acls = []
+
+ def form_acl(acl):
+ advanced_rights_keys = ['append_data', 'delete', 'delete_child', 'execute_file', 'full_control', 'read_attr',
+ 'read_data', 'read_ea', 'read_perm', 'write_attr', 'write_data', 'write_ea', 'write_owner', 'write_perm']
+ advanced_rights = {}
+ apply_to = {}
+ if 'advanced_rights' in acl:
+ for key in advanced_rights_keys:
+ # REST does not return the keys when the value is False
+ advanced_rights[key] = acl['advanced_rights'].get(key, False)
+ if 'apply_to' in acl:
+ for key in self.apply_to_keys:
+ # REST does not return the keys when the value is False
+ apply_to[key] = acl['apply_to'].get(key, False)
+ return {
+ 'advanced_rights': advanced_rights or None,
+ 'apply_to': apply_to or None
+ }
+
+ for acl in record.get('acls', []):
+ each_acl = {
+ 'access': self.na_helper.safe_get(acl, ['access']),
+ 'access_control': self.na_helper.safe_get(acl, ['access_control']),
+ 'inherited': self.na_helper.safe_get(acl, ['inherited']),
+ 'rights': self.na_helper.safe_get(acl, ['rights']),
+ 'user': self.na_helper.safe_get(acl, ['user']),
+ }
+ each_acl.update(form_acl(acl))
+ acls.append(each_acl)
+ current['acls'] = acls or None
+ return current
+
+ @staticmethod
+ def has_acls(current):
+ return bool(current and current.get('acls'))
+
+ def set_option(self, body, option):
+ if self.parameters.get(option) is not None:
+ body[option] = self.parameters[option]
+ return True
+ return False
+
+ def sanitize_acl_for_post(self, acl):
+ ''' some fields like access_control, propagation_mode are not accepted for POST operation '''
+ post_acl = dict(acl)
+ for key in acl:
+ if key not in self.post_acl_keys:
+ post_acl.pop(key)
+ return post_acl
+
+ def sanitize_acls_for_post(self, acls):
+ ''' some fields like access_control, propagation_mode are not accepted for POST operation '''
+ return [self.sanitize_acl_for_post(acl) for acl in acls]
+
+ def create_file_security_permissions(self):
+ api = 'protocols/file-security/permissions/%s/%s' % (self.svm_uuid, self.url_encode(self.parameters['path']))
+ body = {}
+ for option in ('access_control', 'control_flags', 'group', 'owner', 'ignore_paths', 'propagation_mode'):
+ self.set_option(body, option)
+ body['acls'] = self.sanitize_acls_for_post(self.parameters.get('acls', []))
+ dummy, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error creating file security permissions %s: %s' % (self.parameters['path'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def add_file_security_permissions_acl(self, acl):
+ api = 'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path']))
+ for option in ('access_control', 'propagation_mode'):
+            # we already verified these options are consistent when present, so it's OK to override
+ self.set_option(acl, option)
+ dummy, error = rest_generic.post_async(self.rest_api, api, acl, timeout=0)
+ if error:
+ self.module.fail_json(msg='Error adding file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_file_security_permissions_acl(self, acl):
+ api = 'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path']))
+ acl = dict(acl)
+ user = acl.pop('user')
+ for option in ('access_control', 'propagation_mode'):
+            # we already verified these options are consistent when present, so it's OK to override
+ self.set_option(acl, option)
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.url_encode(user), acl, {'return_records': 'true'})
+ if error:
+ self.module.fail_json(msg='Error modifying file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_file_security_permissions_acl(self, acl):
+ api = 'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path']))
+        # some fields are set to None when not present
+ acl = self.na_helper.filter_out_none_entries(acl)
+ # drop keys not accepted in body
+ user = acl.pop('user')
+ acl.pop('advanced_rights', None)
+ acl.pop('rights', None)
+ acl.pop('inherited', None)
+ for option in ('access_control', 'propagation_mode'):
+ # we already verified these options are consistent when present, so it's OK to override
+ self.set_option(acl, option)
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.url_encode(user), {'return_records': 'true'}, acl, timeout=0)
+ if error:
+ self.module.fail_json(msg='Error deleting file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_file_security_permissions(self, modify):
+ api = 'protocols/file-security/permissions/%s/%s' % (self.svm_uuid, self.url_encode(self.parameters['path']))
+ body = {}
+ for option in modify:
+ self.set_option(body, option)
+ dummy, error = rest_generic.patch_async(self.rest_api, api, None, body, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error modifying file security permissions %s: %s' % (self.parameters['path'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def match_acl_with_acls(self, acl, acls):
+ """ return acl if user and access and apply_to are matched, otherwiese None """
+ matches = []
+ for an_acl in acls:
+ # with 9.9.1, access_control is not supported. It will be set to None in received ACLs, and omitted in desired ACLs
+ # but we can assume the user would like to see file_directory.
+ # We can't modify inherited ACLs. But we can create a new one at a lower scope.
+ inherited = an_acl['inherited'] if 'inherited' in an_acl else False and (acl['inherited'] if 'inherited' in acl else False)
+ if (acl['user'] == an_acl['user']
+ and acl['access'] == an_acl['access']
+ and acl.get('access_control', 'file_directory') == an_acl.get('access_control', 'file_directory')
+ and acl['apply_to'] == an_acl['apply_to']
+ and not inherited):
+ matches.append(an_acl)
+ if len(matches) > 1:
+ self.module.fail_json(msg='Error: found more than one desired ACLs with same user, access, access_control and apply_to %s' % matches)
+ return matches[0] if matches else None
+
+ def get_acl_actions_on_modify(self, modify, current):
+ acl_actions = {'patch-acls': [], 'post-acls': [], 'delete-acls': []}
+ if not self.has_acls(current):
+ acl_actions['post-acls'] = modify['acls']
+ return acl_actions
+ for acl in modify['acls']:
+ current_acl = self.match_acl_with_acls(acl, current['acls'])
+ if current_acl:
+ # if exact match of 2 acl found, look for modify in that matched desired and current acl.
+ if self.is_modify_acl_required(acl, current_acl):
+ acl_actions['patch-acls'].append(acl)
+ else:
+ acl_actions['post-acls'].append(acl)
+ # Ignore inherited ACLs
+ for acl in current['acls']:
+ desired_acl = self.match_acl_with_acls(acl, self.parameters['acls'])
+ if not desired_acl and not acl.get('inherited') and self.parameters.get('access_control') in (None, acl.get('access_control')):
+ # only delete ACLs that matches the desired access_control, or all ACLs if not set
+ acl_actions['delete-acls'].append(acl)
+ return acl_actions
+
+ def is_modify_acl_required(self, desired_acl, current_acl):
+ current_acl_copy = current_acl.copy()
+ current_acl_copy.pop('user')
+ modify = self.na_helper.get_modified_attributes(current_acl_copy, desired_acl)
+ return bool(modify)
+
+ def get_acl_actions_on_delete(self, current):
+ acl_actions = {'patch-acls': [], 'post-acls': [], 'delete-acls': []}
+ self.na_helper.changed = False
+ if current.get('acls'):
+ for acl in current['acls']:
+ # only delete ACLs that matches the desired access_control, or all ACLs if not set
+ if not acl.get('inherited') and self.parameters.get('access_control') in (None, acl.get('access_control')):
+ self.na_helper.changed = True
+ acl_actions['delete-acls'].append(acl)
+ return acl_actions
+
+ def get_modify_actions(self, current):
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if 'path' in modify:
+ self.module.fail_json(msg='Error: mismatch on path values: desired: %s, received: %s' % (self.parameters['path'], current['path']))
+ if 'acls' in modify:
+ acl_actions = self.get_acl_actions_on_modify(modify, current)
+ # validate_modify function will check a modify in acl is required or not.
+ # if neither patch-acls or post-acls required and modify None, set changed to False.
+ del modify['acls']
+ else:
+ acl_actions = {'patch-acls': [], 'post-acls': [], 'delete-acls': []}
+ if not any((acl_actions['patch-acls'], acl_actions['post-acls'], acl_actions['delete-acls'], modify)):
+ self.na_helper.changed = False
+ return modify, acl_actions
+
+ def get_acl_actions_on_create(self):
+ """
+ POST does not accept access_control and propagation_mode at the ACL level, these are global values for all ACLs.
+        Since the user could have a list of ACLs with mixed properties, we should use POST to create the SD and 1 group of ACLs,
+        then loop over the remaining ACLs.
+ """
+ # split ACLs into four categories
+ acls_groups = {}
+ preferred_group = (None, None)
+ special_accesses = []
+ for acl in self.parameters.get('acls', []):
+ access_control = acl.get('access_control', 'file_directory')
+ propagation_mode = acl.get('propagation_mode', 'propagate')
+ if access_control not in acls_groups:
+ acls_groups[access_control] = {}
+ if propagation_mode not in acls_groups[access_control]:
+ acls_groups[access_control][propagation_mode] = []
+ acls_groups[access_control][propagation_mode].append(acl)
+ access = acl.get('access')
+ if access not in ('access_allow', 'access_deny', 'audit_success', 'audit_failure'):
+ if preferred_group == (None, None):
+ preferred_group = (access_control, propagation_mode)
+ if preferred_group != (access_control, propagation_mode):
+ self.module.fail_json(msg="Error: acl %s with access %s conflicts with other ACLs using accesses: %s "
+ "with different access_control or propagation_mode: %s."
+ % (acl, access, special_accesses, preferred_group))
+ special_accesses.append(access)
+
+ if preferred_group == (None, None):
+ # find a non empty list of ACLs
+ # use sorted to make this deterministic
+ for acc_key, acc_value in sorted(acls_groups.items()):
+ for prop_key, prop_value in sorted(acc_value.items()):
+ if prop_value:
+ preferred_group = (acc_key, prop_key)
+ break
+ if preferred_group != (None, None):
+ break
+
+ # keep one category for create, and move the remaining ACLs into post-acls
+ create_acls = []
+ acl_actions = {'patch-acls': [], 'post-acls': [], 'delete-acls': []}
+ # use sorted to make this deterministic
+ for acc_key, acc_value in sorted(acls_groups.items()):
+ for prop_key, prop_value in sorted(acc_value.items()):
+ if (acc_key, prop_key) == preferred_group:
+ create_acls = prop_value
+ self.parameters['access_control'] = acc_key
+ self.parameters['propagation_mode'] = prop_key
+ elif prop_value:
+ acl_actions['post-acls'].extend(prop_value)
+ self.parameters['acls'] = create_acls
+ return acl_actions
+
+ def get_actions(self):
+ current = self.get_file_security_permissions()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify, acl_actions = self.get_modify_actions(current) if cd_action is None else (None, {})
+ if cd_action == 'create' and self.parameters.get('access_control') is None:
+ acl_actions = self.get_acl_actions_on_create()
+ if cd_action == 'delete':
+ # delete is not supported by the API, or rather a DELETE will only delete the SLAG ACLs and nothing else.
+ # so we just loop through all the ACLs
+ acl_actions = self.get_acl_actions_on_delete(current)
+ cd_action = None
+ return cd_action, modify, acl_actions
+
+ def apply(self):
+
+ self.get_svm_uuid()
+ cd_action, modify, acl_actions = self.get_actions()
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_file_security_permissions()
+ if modify:
+ self.modify_file_security_permissions(modify)
+ # delete ACLs first, to avoid conflicts with new or modified rules
+ for delete_acl in acl_actions.get('delete-acls', []):
+ self.delete_file_security_permissions_acl(delete_acl)
+ # PATCH call succeeds, but its not working: changes are not reflecting
+ # modify before adding new rules to avoid conflicts
+ for patch_acl in acl_actions.get('patch-acls', []):
+ self.modify_file_security_permissions_acl(patch_acl)
+ for post_acl in acl_actions.get('post-acls', []):
+ self.add_file_security_permissions_acl(post_acl)
+ changed = self.na_helper.changed
+ self.validate_changes(cd_action, modify)
+ self.na_helper.changed = changed
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+ def validate_changes(self, cd_action, modify):
+ if self.parameters['validate_changes'] == 'ignore':
+ return
+ new_cd_action, new_modify, acl_actions = self.get_actions()
+ errors = []
+ if new_cd_action is not None:
+ errors.append('%s still required after %s (with modify: %s)' % (new_cd_action, cd_action, modify))
+ if new_modify:
+ errors.append('modify: %s still required after %s' % (new_modify, modify))
+ # changes in ACLs
+ errors.extend('%s still required for %s' % (key, value) for key, value in acl_actions.items() if value)
+ if errors:
+ msg = 'Error - %s' % ' - '.join(errors)
+ if self.parameters['validate_changes'] == 'error':
+ self.module.fail_json(msg=msg)
+ if self.parameters['validate_changes'] == 'warn':
+ self.module.warn(msg)
+
+
+def main():
+ """Apply volume operations from playbook"""
+ obj = NetAppOntapFileSecurityPermissions()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions_acl.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions_acl.py
new file mode 100644
index 000000000..277986466
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions_acl.py
@@ -0,0 +1,495 @@
+#!/usr/bin/python
+
+# (c) 2022-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_file_security_permissions_acl
+short_description: NetApp ONTAP file security permissions ACL
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 22.0.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Add, delete, or modify a file_security_permissions ACL on NetApp ONTAP.
+  - Note that ACLs are matched based on ('user', 'access', 'access_control', 'apply_to').
+ To modify any of these 4 properties, you would need to delete the ACL and create a new one.
+ Or use C(netapp.ontap.na_ontap_file_security_permissions).
+
+options:
+ state:
+ description:
+ - Whether the specified file security permissions ACL should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ path:
+ description:
+ - The path of the file or directory on which to apply security permissions.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ access_control:
+ description:
+ - An Access Control Level specifies the access control of the task to be applied.
+ - Valid values are "file-directory" or "Storage-Level Access Guard (SLAG)".
+ - SLAG is used to apply the specified security descriptors with the task for the volume or qtree.
+ - Otherwise, the security descriptors are applied on files and directories at the specified path.
+ - The value slag is not supported on FlexGroups volumes. The default value is "file-directory".
+ - This field requires ONTAP 9.10.1 or later. This defaults to "file_directory".
+ choices: ['file_directory', 'slag']
+ type: str
+ access:
+ description:
+ - An ACE is an element in an access control list (ACL). An ACL can have zero or more ACEs.
+ - Each ACE controls or monitors access to an object by a specified trustee.
+ choices: ['access_allow', 'access_deny', 'audit_failure', 'audit_success']
+ type: str
+ required: true
+ acl_user:
+ description:
+ - Specifies the account to which the ACE applies. Specify either name or SID.
+ - As of 22.0.0, the module is not idempotent when using a SID.
+      - Note - we cannot use C(user) as it conflicts with the option for the admin user.
+ type: str
+ required: true
+ rights:
+ description:
+ - Specifies the access right controlled by the ACE for the account specified.
+ - The "rights" parameter is mutually exclusive with the "advanced_rights" parameter.
+ - ONTAP translates rights into advanced_rights and this module is not idempotent when rights are used.
+ - Make sure to use C(advanced_rights) to maintain idempotency. C(rights) can be used to discover the mapping to C(advanced_rights).
+ choices: ['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write']
+ type: str
+ apply_to:
+ description:
+ - Specifies where to apply the DACL or SACL entries.
+ - With SLAGs, ONTAP accepts the three suboptions to be set to true, but creates 2 ACLs.
+ This module requires the 2 ACLs to be present to preserve idempotency.
+ See also C(validate_changes).
+ type: dict
+ required: true
+ suboptions:
+ files:
+ description:
+ - Apply to Files.
+ type: bool
+ default: false
+ sub_folders:
+ description:
+ - Apply to all sub-folders.
+ type: bool
+ default: false
+ this_folder:
+ description:
+ - Apply only to this folder
+ type: bool
+ default: false
+ advanced_rights:
+ description:
+ - Specifies the advanced access right controlled by the ACE for the account specified.
+ type: dict
+ suboptions:
+ append_data:
+ description:
+ - Append Data.
+ type: bool
+ required: false
+ delete:
+ description:
+ - Delete.
+ type: bool
+ required: false
+ delete_child:
+ description:
+ - Delete Child.
+ type: bool
+ required: false
+ execute_file:
+ description:
+ - Execute File.
+ type: bool
+ required: false
+ full_control:
+ description:
+ - Full Control.
+ type: bool
+ required: false
+ read_attr:
+ description:
+ - Read Attributes.
+ type: bool
+ required: false
+ read_data:
+ description:
+ - Read Data.
+ type: bool
+ required: false
+ read_ea:
+ description:
+ - Read Extended Attributes.
+ type: bool
+ required: false
+ read_perm:
+ description:
+ - Read Permissions.
+ type: bool
+ required: false
+ write_attr:
+ description:
+ - Write Attributes.
+ type: bool
+ required: false
+ write_data:
+ description:
+ - Write Data.
+ type: bool
+ required: false
+ write_ea:
+ description:
+ - Write Extended Attributes.
+ type: bool
+ required: false
+ write_owner:
+ description:
+ - Write Owner.
+ type: bool
+ required: false
+ write_perm:
+ description:
+ - Write Permission.
+ type: bool
+ required: false
+ ignore_paths:
+ description:
+ - For each file or directory in the list, specifies that permissions on this file or directory cannot be replaced.
+ type: list
+ elements: str
+ propagation_mode:
+ description:
+ - Specifies how to propagate security settings to child subfolders and files.
+ - Defaults to propagate.
+ - This option is valid in create, but cannot modify.
+ choices: ['propagate', 'replace']
+ type: str
+
+ validate_changes:
+ description:
+ - ACLs may not be applied as expected.
+      - For instance, if Everyone is inherited with all permissions, additional users will be granted all permissions, regardless of the request.
+ - For this specific example, you can either delete the top level Everyone, or create a new ACL for Everyone at a lower level.
+ - When using C(rights), ONTAP translates them into C(advanced_rights) so the validation will always fail.
+ - Valid values are C(ignore), no checking; C(warn) to issue a warning; C(error) to fail the module.
+ - With SLAGS, ONTAP may split one ACL into two ACLs depending on the C(apply_to) settings. To maintain idempotency, please provide 2 ACLs as input.
+ choices: ['ignore', 'warn', 'error']
+ type: str
+ default: error
+
+notes:
+ - Supports check_mode.
+ - Only supported with REST and requires ONTAP 9.9.1 or later.
+ - SLAG requires ONTAP 9.10.1 or later.
+'''
+
+EXAMPLES = """
+ - name: Add ACL for file or directory security permissions.
+ netapp.ontap.na_ontap_file_security_permissions_acl:
+ vserver: "{{ vserver_name }}"
+ access_control: file_directory
+ path: "{{ file_mount_path }}"
+ validate_changes: warn
+ access: access_allow
+      # Note, without quotes, use a single backslash in AD user names
+ # with quotes, it needs to be escaped as a double backslash
+ # user: "ANSIBLE_CIFS\\user1"
+ # we can't show an example with a single backslash as this is a python file, but it works in YAML.
+ acl_user: "user1"
+ apply_to:
+ this_folder: true
+ advanced_rights:
+ append_data: true
+ delete: false
+
+ - name: Modify ACL for file or directory security permissions.
+ netapp.ontap.na_ontap_file_security_permissions_acl:
+ vserver: "{{ vserver_name }}"
+ access_control: file_directory
+ path: "{{ file_mount_path }}"
+ validate_changes: warn
+ access: access_allow
+ acl_user: "user1"
+ apply_to:
+ this_folder: true
+ advanced_rights:
+ append_data: false
+ delete: true
+
+ - name: Delete ACL for file or directory security permissions.
+ netapp.ontap.na_ontap_file_security_permissions_acl:
+ vserver: "{{ vserver_name }}"
+ access_control: file_directory
+ path: "{{ file_mount_path }}"
+ validate_changes: warn
+ access: access_allow
+ acl_user: "user1"
+ apply_to:
+ this_folder: true
+ state: absent
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver
+
+
+class NetAppOntapFileSecurityPermissionsACL:
+    """Manage a single file-security-permissions ACL entry on ONTAP, using REST only.
+
+    ACLs are matched on ('user', 'access', 'access_control', 'apply_to'); those
+    fields cannot be modified in place - delete and re-create the ACL instead.
+    """
+
+    def __init__(self):
+        """Build the argument spec, validate parameters and set up the REST connection."""
+        # standard ONTAP connection options plus this module's own options
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            path=dict(required=True, type='str'),
+            access_control=dict(required=False, type='str', choices=['file_directory', 'slag']),
+            access=dict(required=True, choices=['access_allow', 'access_deny', 'audit_failure', 'audit_success'], type='str'),
+            apply_to=dict(required=True, type='dict', options=dict(
+                files=dict(required=False, type='bool', default=False),
+                sub_folders=dict(required=False, type='bool', default=False),
+                this_folder=dict(required=False, type='bool', default=False),
+            )),
+            advanced_rights=dict(required=False, type='dict', options=dict(
+                append_data=dict(required=False, type='bool'),
+                delete=dict(required=False, type='bool'),
+                delete_child=dict(required=False, type='bool'),
+                execute_file=dict(required=False, type='bool'),
+                full_control=dict(required=False, type='bool'),
+                read_attr=dict(required=False, type='bool'),
+                read_data=dict(required=False, type='bool'),
+                read_ea=dict(required=False, type='bool'),
+                read_perm=dict(required=False, type='bool'),
+                write_attr=dict(required=False, type='bool'),
+                write_data=dict(required=False, type='bool'),
+                write_ea=dict(required=False, type='bool'),
+                write_owner=dict(required=False, type='bool'),
+                write_perm=dict(required=False, type='bool'),
+            )),
+            ignore_paths=dict(required=False, type='list', elements='str'),
+            propagation_mode=dict(required=False, type='str', choices=['propagate', 'replace']),
+            rights=dict(required=False,
+                        choices=['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write'],
+                        type='str'),
+            acl_user=dict(required=True, type='str'),
+            validate_changes=dict(required=False, type='str', choices=['ignore', 'warn', 'error'], default='error'),
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        self.svm_uuid = None
+        self.na_helper = NetAppModule(self.module)
+        self.parameters = self.na_helper.check_and_set_parameters(self.module)
+        self.rest_api = netapp_utils.OntapRestAPI(self.module)
+        # REST only: ONTAP 9.9.1 minimum; 'access_control' additionally requires 9.10.1
+        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_file_security_permissions_acl', 9, 9, 1)
+        # NOTE(review): 'error' is not checked here - confirm is_rest() itself reports
+        # a violation when access_control is used on ONTAP older than 9.10.1.
+        dummy, error = self.rest_api.is_rest(partially_supported_rest_properties=[['access_control', (9, 10, 1)]], parameters=self.parameters)
+        self.apply_to_keys = ['files', 'sub_folders', 'this_folder']
+        if self.parameters['state'] == 'present':
+            self.validate_acl()
+        # REST uses 'user'; the module option is 'acl_user' to avoid clashing with the admin 'user' option
+        self.parameters['user'] = self.parameters['acl_user']
+
+    def validate_acl(self):
+        """Validate option combinations when state is present."""
+        # drop None entries so the 'rights'/'advanced_rights' presence checks below are meaningful
+        self.parameters = self.na_helper.filter_out_none_entries(self.parameters)
+        if 'rights' in self.parameters:
+            if 'advanced_rights' in self.parameters:
+                self.module.fail_json(msg="Error: suboptions 'rights' and 'advanced_rights' are mutually exclusive.")
+            self.module.warn('This module is not idempotent when "rights" is used, make sure to use "advanced_rights".')
+        # validate that at least one suboption is true
+        if not any(self.na_helper.safe_get(self.parameters, ['apply_to', key]) for key in self.apply_to_keys):
+            self.module.fail_json(msg="Error: at least one suboption must be true for apply_to. Got: %s" % self.parameters.get('apply_to'))
+
+    @staticmethod
+    def url_encode(url_fragment):
+        """
+        replace special characters with URL encoding:
+        %2F for /, %5C for backslash
+        """
+        # \ is the escape character in python, so \\ means \
+        return url_fragment.replace("/", "%2F").replace("\\", "%5C")
+
+    def get_svm_uuid(self):
+        """Resolve the vserver name into its UUID (the helper fails the module if not found)."""
+        self.svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True)
+
+    def get_file_security_permissions_acl(self):
+        """ we cannot get a single ACL - get a list, and find ours"""
+        api = 'protocols/file-security/permissions/%s/%s' % (self.svm_uuid, self.url_encode(self.parameters['path']))
+        fields = 'acls'
+        record, error = rest_generic.get_one_record(self.rest_api, api, fields=fields)
+        # If we get 655865 the path we gave was not found, so we don't want to fail we want to return None
+        if error:
+            if '655865' in error and self.parameters['state'] == 'absent':
+                return None
+            self.module.fail_json(msg="Error fetching file security permissions %s: %s" % (self.parameters['path'], to_native(error)),
+                                  exception=traceback.format_exc())
+        if record and 'acls' in record:
+            record = self.form_current(record)
+            return self.match_acl_with_acls(self.parameters, record['acls'])
+        return None
+
+    def form_current(self, record):
+        """Normalize the REST record: REST omits boolean keys whose value is False,
+        so fill them in explicitly to allow dict comparison with the desired state."""
+        current = {
+            'group': self.na_helper.safe_get(record, ['group']),
+            'owner': self.na_helper.safe_get(record, ['owner']),
+            'control_flags': self.na_helper.safe_get(record, ['control_flags']),
+            'path': record['path']
+        }
+        acls = []
+
+        def form_acl(acl):
+            # normalize one ACL: expand omitted-false keys in advanced_rights and apply_to
+            advanced_rights_keys = ['append_data', 'delete', 'delete_child', 'execute_file', 'full_control', 'read_attr',
+                                    'read_data', 'read_ea', 'read_perm', 'write_attr', 'write_data', 'write_ea', 'write_owner', 'write_perm']
+            advanced_rights = {}
+            apply_to = {}
+            if 'advanced_rights' in acl:
+                for key in advanced_rights_keys:
+                    # REST does not return the keys when the value is False
+                    advanced_rights[key] = acl['advanced_rights'].get(key, False)
+            if 'apply_to' in acl:
+                for key in self.apply_to_keys:
+                    # REST does not return the keys when the value is False
+                    apply_to[key] = acl['apply_to'].get(key, False)
+            return {
+                'advanced_rights': advanced_rights or None,
+                'apply_to': apply_to or None
+            }
+
+        for acl in record.get('acls', []):
+            each_acl = {
+                'access': self.na_helper.safe_get(acl, ['access']),
+                'access_control': self.na_helper.safe_get(acl, ['access_control']),
+                'inherited': self.na_helper.safe_get(acl, ['inherited']),
+                'rights': self.na_helper.safe_get(acl, ['rights']),
+                'user': self.na_helper.safe_get(acl, ['user']),
+            }
+            each_acl.update(form_acl(acl))
+            acls.append(each_acl)
+        current['acls'] = acls or None
+        return current
+
+    def build_body(self, action):
+        """Build the REST request body for 'create', 'modify' or 'delete'.
+
+        Only the keys supported by each REST operation are copied from the
+        module parameters; anything else is left out of the body.
+        """
+        keys = {
+            'create': ['access', 'access_control', 'advanced_rights', 'apply_to', 'ignore_paths', 'propagation_mode', 'rights', 'user'],
+            'modify': ['access', 'access_control', 'advanced_rights', 'apply_to', 'ignore_paths', 'rights'],
+            'delete': ['access', 'access_control', 'apply_to', 'ignore_paths', 'propagation_mode'],
+            # 'delete': ['access', 'access_control', 'ignore_paths', 'propagation_mode'],
+        }
+        if action not in keys:
+            self.module.fail_json(msg='Internal error - unexpected action %s' % action)
+        body = {}
+        for key in keys[action]:
+            if key in self.parameters:
+                body[key] = self.parameters[key]
+        return body
+
+    def create_file_security_permissions_acl(self):
+        """POST a new ACL for the path (timeout=0: do not wait on the async job)."""
+        api = 'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path']))
+        body = self.build_body('create')
+        dummy, error = rest_generic.post_async(self.rest_api, api, body, timeout=0)
+        if error:
+            self.module.fail_json(msg='Error creating file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def modify_file_security_permissions_acl(self):
+        """PATCH the ACL identified by the (URL-encoded) user name."""
+        api = 'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path']))
+        body = self.build_body('modify')
+        dummy, error = rest_generic.patch_async(self.rest_api, api, self.url_encode(self.parameters['user']), body)
+        if error:
+            self.module.fail_json(msg='Error modifying file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_file_security_permissions_acl(self):
+        """DELETE the ACL identified by the (URL-encoded) user name; a body is sent to select the ACE."""
+        api = 'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path']))
+        body = self.build_body('delete')
+        dummy, error = rest_generic.delete_async(self.rest_api, api, self.url_encode(self.parameters['user']), body=body, timeout=0)
+        if error:
+            self.module.fail_json(msg='Error deleting file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def match_acl_with_acls(self, acl, acls):
+        """ return acl if user and access and apply_to are matched, otherwise None """
+        matches = []
+        for an_acl in acls:
+            # with 9.9.1, access_control is not supported. It will be set to None in received ACLs, and omitted in desired ACLs
+            # but we can assume the user would like to see file_directory.
+            # We can't modify inherited ACLs. But we can create a new one at a lower scope.
+            # NOTE(review): because of operator precedence, the next line evaluates as
+            #     an_acl['inherited'] if 'inherited' in an_acl else (False and ...)
+            # so the desired ACL's own 'inherited' flag is never taken into account
+            # (the 'and' chain only applies to the else branch, which is always False).
+            # Confirm whether both flags were meant to be combined.
+            inherited = an_acl['inherited'] if 'inherited' in an_acl else False and (acl['inherited'] if 'inherited' in acl else False)
+            if (acl['user'] == an_acl['user']
+                    and acl['access'] == an_acl['access']
+                    and acl.get('access_control', 'file_directory') == an_acl.get('access_control', 'file_directory')
+                    and acl['apply_to'] == an_acl['apply_to']
+                    and not inherited):
+                matches.append(an_acl)
+        if len(matches) > 1:
+            self.module.fail_json(msg='Error matching ACLs, found more than one match. Found %s' % matches)
+        return matches[0] if matches else None
+
+    def get_actions(self):
+        """Compare current vs desired state; return (cd_action, modify)."""
+        current = self.get_file_security_permissions_acl()
+        modify = None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None and self.parameters['state'] == 'present':
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        return cd_action, modify
+
+    def apply(self):
+        """Main flow: read current state, apply changes (honoring check_mode), validate, exit."""
+        self.get_svm_uuid()
+        cd_action, modify = self.get_actions()
+
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.create_file_security_permissions_acl()
+            if cd_action == 'delete':
+                self.delete_file_security_permissions_acl()
+            if modify:
+                self.modify_file_security_permissions_acl()
+            self.validate_changes(cd_action, modify)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+    def validate_changes(self, cd_action, modify):
+        """Re-read the state after changes and report anything still pending,
+        according to the 'validate_changes' option ('ignore', 'warn', 'error')."""
+        if self.parameters['validate_changes'] == 'ignore':
+            return
+        new_cd_action, new_modify = self.get_actions()
+        errors = []
+        if new_cd_action is not None:
+            errors.append('%s still required after %s (with modify: %s)' % (new_cd_action, cd_action, modify))
+        if new_modify:
+            errors.append('modify: %s still required after %s' % (new_modify, modify))
+        if errors:
+            msg = 'Error - %s' % ' - '.join(errors)
+            if self.parameters['validate_changes'] == 'error':
+                self.module.fail_json(msg=msg)
+            if self.parameters['validate_changes'] == 'warn':
+                self.module.warn(msg)
+
+
+def main():
+ """Apply volume operations from playbook"""
+ obj = NetAppOntapFileSecurityPermissionsACL()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py
new file mode 100644
index 000000000..7addf9c02
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py
@@ -0,0 +1,325 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_firewall_policy
+short_description: NetApp ONTAP Manage a firewall policy
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Configure firewall on an ONTAP node and manage firewall policy for an ONTAP SVM
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+requirements:
+ - Python package ipaddress. Install using 'pip install ipaddress'
+options:
+ state:
+ description:
+ - Whether to set up a firewall policy or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ allow_list:
+ description:
+ - A list of IPs and masks to use.
+ - The host bits of the IP addresses used in this list must be set to 0.
+ type: list
+ elements: str
+ policy:
+ description:
+ - A policy name for the firewall policy
+ type: str
+ service:
+ description:
+ - The service to apply the policy to
+ - https and ssh are not supported starting with ONTAP 9.6
+ - portmap is supported for ONTAP 9.4, 9.5 and 9.6
+ - none is supported for ONTAP 9.8 onwards.
+ choices: ['dns', 'http', 'https', 'ndmp', 'ndmps', 'ntp', 'portmap', 'rsh', 'snmp', 'ssh', 'telnet', 'none']
+ type: str
+ vserver:
+ description:
+ - The Vserver to apply the policy to.
+ type: str
+ enable:
+ description:
+ - enable firewall on a node
+ choices: ['enable', 'disable']
+ type: str
+ logging:
+ description:
+ - enable logging for firewall on a node
+ choices: ['enable', 'disable']
+ type: str
+ node:
+ description:
+ - The node to run the firewall configuration on
+ type: str
+'''
+
+EXAMPLES = """
+ - name: create firewall Policy
+ netapp.ontap.na_ontap_firewall_policy:
+ state: present
+ allow_list: [1.2.3.0/24,1.3.0.0/16]
+ policy: pizza
+ service: http
+ vserver: ci_dev
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+ - name: Modify firewall Policy
+ netapp.ontap.na_ontap_firewall_policy:
+ state: present
+ allow_list: [1.5.3.0/24]
+ policy: pizza
+ service: http
+ vserver: ci_dev
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+  - name: Destroy firewall Policy
+ netapp.ontap.na_ontap_firewall_policy:
+ state: absent
+ policy: pizza
+ service: http
+ vserver: ci_dev
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+ - name: Enable firewall and logging on a node
+ netapp.ontap.na_ontap_firewall_policy:
+ node: test-vsim1
+ enable: enable
+ logging: enable
+ hostname: "{{ netapp hostname }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import netapp_ipaddress
+
+
+class NetAppONTAPFirewallPolicy:
+    """Manage ONTAP firewall policies (per vserver) and the node firewall configuration, using ZAPI."""
+
+    def __init__(self):
+        """Build the argument spec, validate parameters and set up the ZAPI connection."""
+        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            allow_list=dict(required=False, type='list', elements='str'),
+            policy=dict(required=False, type='str'),
+            service=dict(required=False, type='str', choices=['dns', 'http', 'https', 'ndmp', 'ndmps',
+                                                              'ntp', 'portmap', 'rsh', 'snmp', 'ssh', 'telnet', 'none']),
+            vserver=dict(required=False, type="str"),
+            enable=dict(required=False, type="str", choices=['enable', 'disable']),
+            logging=dict(required=False, type="str", choices=['enable', 'disable']),
+            node=dict(required=False, type="str")
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            # policy management needs (policy, service, vserver); node config needs (enable, node)
+            required_together=(['policy', 'service', 'vserver'],
+                               ['enable', 'node']
+                               ),
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # warn users that service policies replace firewall policies in newer ONTAP
+        self.na_helper.module_replaces('na_ontap_service_policy', self.module)
+        if not netapp_utils.has_netapp_lib():
+            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def validate_ip_addresses(self):
+        '''
+        Validate if the given IP address is a network address (i.e. it's host bits are set to 0)
+        ONTAP doesn't validate if the host bits are set,
+        and hence doesn't add a new address unless the IP is from a different network.
+        So this validation allows the module to be idempotent.
+        :return: None
+        '''
+        for ip in self.parameters['allow_list']:
+            netapp_ipaddress.validate_ip_address_is_network_address(ip, self.module)
+
+    def get_firewall_policy(self):
+        """
+        Get a firewall policy
+        :return: dict with 'service' and 'allow_list' for the matching policy, or None if not found
+        """
+        net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-get-iter")
+        # query by policy/service/vserver so at most the matching policies are returned
+        attributes = {
+            'query': {
+                'net-firewall-policy-info': self.firewall_policy_attributes()
+            }
+        }
+        net_firewall_policy_obj.translate_struct(attributes)
+
+        try:
+            result = self.server.invoke_successfully(net_firewall_policy_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error getting firewall policy %s:%s" % (self.parameters['policy'],
+                                                                               to_native(error)),
+                                  exception=traceback.format_exc())
+
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            attributes_list = result.get_child_by_name('attributes-list')
+            policy_info = attributes_list.get_child_by_name('net-firewall-policy-info')
+            # convert the ZAPI 'allow-list' children into a plain python list
+            ips = self.na_helper.get_value_for_list(from_zapi=True,
+                                                    zapi_parent=policy_info.get_child_by_name('allow-list'))
+            return {
+                'service': policy_info['service'],
+                'allow_list': ips}
+        return None
+
+    def create_firewall_policy(self):
+        """
+        Create a firewall policy for given vserver
+        :return: None
+        """
+        net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-create")
+        net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes())
+        if self.parameters.get('allow_list'):
+            # reject IPs with host bits set, otherwise ONTAP would silently not add them
+            self.validate_ip_addresses()
+            net_firewall_policy_obj.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
+                                                                                     zapi_parent='allow-list',
+                                                                                     zapi_child='ip-and-mask',
+                                                                                     data=self.parameters['allow_list'])
+                                                   )
+        try:
+            self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error creating Firewall Policy: %s" % (to_native(error)), exception=traceback.format_exc())
+
+    def destroy_firewall_policy(self):
+        """
+        Destroy a Firewall Policy from a vserver
+        :return: None
+        """
+        net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-destroy")
+        net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes())
+        try:
+            self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error destroying Firewall Policy: %s" % (to_native(error)), exception=traceback.format_exc())
+
+    def modify_firewall_policy(self, modify):
+        """
+        Modify a firewall Policy on a vserver
+        :param modify: dict of attributes to change; only 'allow_list' can be modified here
+        :return: none
+        """
+        self.validate_ip_addresses()
+        net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-modify")
+        net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes())
+        net_firewall_policy_obj.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
+                                                                                 zapi_parent='allow-list',
+                                                                                 zapi_child='ip-and-mask',
+                                                                                 data=modify['allow_list']))
+        try:
+            self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error modifying Firewall Policy: %s" % (to_native(error)), exception=traceback.format_exc())
+
+    def firewall_policy_attributes(self):
+        """Return the policy identification triplet shared by all policy ZAPI calls."""
+        return {
+            'policy': self.parameters['policy'],
+            'service': self.parameters['service'],
+            'vserver': self.parameters['vserver'],
+        }
+
+    def get_firewall_config_for_node(self):
+        """
+        Get firewall configuration on the node
+        :return: dict() with firewall config details ('enable'/'logging' as 'enable'/'disable' strings), or None
+        """
+        if self.parameters.get('logging') and self.parameters.get('node') is None:
+            self.module.fail_json(msg='Error: Missing parameter \'node\' to modify firewall logging')
+        net_firewall_config_obj = netapp_utils.zapi.NaElement("net-firewall-config-get")
+        net_firewall_config_obj.add_new_child('node-name', self.parameters['node'])
+        try:
+            result = self.server.invoke_successfully(net_firewall_config_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error getting Firewall Configuration: %s" % (to_native(error)),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('attributes'):
+            firewall_info = result['attributes'].get_child_by_name('net-firewall-config-info')
+            # map ZAPI 'true'/'false' strings back to 'enable'/'disable' to match module options
+            return {'enable': self.change_status_to_bool(firewall_info.get_child_content('is-enabled'), to_zapi=False),
+                    'logging': self.change_status_to_bool(firewall_info.get_child_content('is-logging'), to_zapi=False)}
+        return None
+
+    def modify_firewall_config(self, modify):
+        """
+        Modify the configuration of a firewall on node
+        :param modify: dict of node config attributes to change ('enable' and/or 'logging')
+        :return: None
+        """
+        net_firewall_config_obj = netapp_utils.zapi.NaElement("net-firewall-config-modify")
+        net_firewall_config_obj.add_new_child('node-name', self.parameters['node'])
+        if modify.get('enable'):
+            net_firewall_config_obj.add_new_child('is-enabled', self.change_status_to_bool(self.parameters['enable']))
+        if modify.get('logging'):
+            net_firewall_config_obj.add_new_child('is-logging', self.change_status_to_bool(self.parameters['logging']))
+        try:
+            self.server.invoke_successfully(net_firewall_config_obj, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error modifying Firewall Config: %s" % (to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def change_status_to_bool(self, input, to_zapi=True):
+        # Translate between the module's 'enable'/'disable' strings and ZAPI's 'true'/'false' strings.
+        # NOTE(review): the parameter name 'input' shadows the python builtin; consider renaming.
+        if to_zapi:
+            return 'true' if input == 'enable' else 'false'
+        else:
+            return 'enable' if input == 'true' else 'disable'
+
+    def apply(self):
+        """Main flow: compare current vs desired for policy and/or node config, apply and exit."""
+        cd_action, modify, modify_config = None, None, None
+        if self.parameters.get('policy'):
+            current = self.get_firewall_policy()
+            cd_action = self.na_helper.get_cd_action(current, self.parameters)
+            if cd_action is None and self.parameters['state'] == 'present':
+                modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if self.parameters.get('node'):
+            current_config = self.get_firewall_config_for_node()
+            # firewall config for a node is always present, we cannot create or delete a firewall on a node
+            modify_config = self.na_helper.get_modified_attributes(current_config, self.parameters)
+
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.create_firewall_policy()
+            elif cd_action == 'delete':
+                self.destroy_firewall_policy()
+            else:
+                if modify:
+                    self.modify_firewall_policy(modify)
+                if modify_config:
+                    self.modify_firewall_config(modify_config)
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify, extra_responses={'modify_config': modify_config})
+        self.module.exit_json(**result)
+
+
+def main():
+ """
+ Execute action from playbook
+ :return: nothing
+ """
+ cg_obj = NetAppONTAPFirewallPolicy()
+ cg_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py
new file mode 100644
index 000000000..63966d4e8
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py
@@ -0,0 +1,873 @@
+#!/usr/bin/python
+
+# (c) 2019-2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+  - Update ONTAP service-processor firmware
+  - The recommended procedure is to
+ 1. download the firmware package from the NetApp Support site
+ 2. copy the package to a web server
+ 3. download the package from the web server using this module
+ - Once a disk qualification, disk, shelf, or ACP firmware package is downloaded, ONTAP will automatically update the related resources in background.
+ - It may take some time to complete.
+ - For service processor, the update requires a node reboot to take effect.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_firmware_upgrade
+options:
+ state:
+ description:
+ - Whether the specified ONTAP firmware should be upgraded or not.
+ default: present
+ type: str
+ node:
+ description:
+ - Node on which the device is located.
+ - Not required if package_url is present and force_disruptive_update is False.
+ - If this option is not given, the firmware will be downloaded on all nodes in the cluster,
+ - and the resources will be updated in background on all nodes, except for service processor.
+ - For service processor, the upgrade will happen automatically when each node is rebooted.
+ type: str
+ clear_logs:
+ description:
+ - Clear logs on the device after update. Default value is true.
+ - Not used if force_disruptive_update is False.
+ - Not supported with REST when set to false.
+ type: bool
+ default: true
+ package:
+ description:
+ - Name of the package file containing the firmware to be installed. Not required when -baseline is true.
+ - Not used if force_disruptive_update is False.
+ - Not supported with REST.
+ type: str
+ package_url:
+ description:
+ - URL of the package file containing the firmware to be downloaded.
+ - Once the package file is downloaded to a node, the firmware update will happen automatically in background.
+ - For SP, the upgrade will happen automatically when a node is rebooted.
+ - For SP, the upgrade will happen automatically if autoupdate is enabled (which is the recommended setting).
+ version_added: "20.5.0"
+ type: str
+ force_disruptive_update:
+ description:
+ - If set to C(False), and URL is given, the upgrade is non disruptive. If URL is not given, no operation is performed.
+ - Do not set this to C(True), unless directed by NetApp Tech Support.
+ - It will force an update even if the resource is not ready for it, and can be disruptive.
+ - Not supported with REST when set to true.
+ type: bool
+ version_added: "20.5.0"
+ default: false
+ shelf_module_fw:
+ description:
+ - Shelf module firmware to be updated to.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ - Not supported with REST.
+ type: str
+ disk_fw:
+ description:
+ - disk firmware to be updated to.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ - Not supported with REST.
+ type: str
+ update_type:
+ description:
+ - Type of firmware update to be performed. Options include serial_full, serial_differential, network_full.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ - Not supported with REST.
+ type: str
+ install_baseline_image:
+ description:
+ - Install the version packaged with ONTAP if this parameter is set to true. Otherwise, package must be used to specify the package to install.
+ - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware)
+ - Not supported with REST when set to true.
+ type: bool
+ default: false
+ firmware_type:
+ description:
+ - Type of firmware to be upgraded. Options include shelf, ACP, service-processor, and disk.
+ - For shelf firmware upgrade the operation is asynchronous, and therefore returns no errors that might occur during the download process.
+      - Shelf firmware upgrade is idempotent if shelf_module_fw is provided.
+      - disk firmware upgrade is idempotent if disk_fw is provided.
+ - With check mode, SP, ACP, disk, and shelf firmware upgrade is not idempotent.
+ - This operation will only update firmware on shelves/disk that do not have the latest firmware-revision.
+ - For normal operations, choose one of storage or service-processor.
+ - Type storage includes acp, shelf and disk and ONTAP will automatically determine what to do.
+ - With REST, the module does not validate that the package matches the firmware type. ONTAP determines the type automatically.
+ - With REST, C(storage) downloads any firmware, including service-processor firmware.
+ - With REST, C(service-processor) unlocks SP reboot options.
+ choices: ['storage', 'service-processor', 'shelf', 'acp', 'disk']
+ type: str
+ default: storage
+ fail_on_502_error:
+ description:
+ - The firmware download may take time if the web server is slow and if there are many nodes in the cluster.
+ - ONTAP will break the ZAPI connection after 5 minutes with a 502 Bad Gateway error, even though the download
+ is still happening.
+ - By default, this module ignores this error and assumes the download is progressing as ONTAP does not
+ provide a way to check the status.
+ - When setting this option to true, the module will report 502 as an error.
+ - Not supported with REST when set to true.
+ type: bool
+ default: false
+ version_added: "20.6.0"
+ rename_package:
+ description:
+ - Rename the package.
+ - Only available if 'firmware_type' is 'service-processor'.
+ - Not supported with REST.
+ type: str
+ version_added: "20.7.0"
+ replace_package:
+ description:
+ - Replace the local package.
+ - Only available if 'firmware_type' is 'service-processor'.
+ - Not supported with REST when set to false.
+ type: bool
+ version_added: "20.7.0"
+ reboot_sp:
+ description:
+ - Reboot service processor before downloading package.
+ - Only available if 'firmware_type' is 'service-processor'.
+ - Defaults to True if not set when 'firmware_type' is 'service-processor'.
+      - Set this explicitly to true to avoid a warning, and to false to not reboot the SP.
+ - Rebooting the SP before download is strongly recommended.
+ type: bool
+ version_added: "20.7.0"
+ reboot_sp_after_download:
+ description:
+ - Reboot service processor after downloading package.
+ - Only available if 'firmware_type' is 'service-processor'.
+ type: bool
+ version_added: "21.15.0"
+ server_username:
+ description:
+ - username to authenticate with the firmware package server.
+ - Ignored with ZAPI.
+ type: str
+ version_added: "21.15.0"
+ server_password:
+ description:
+ - password to authenticate with the firmware package server.
+ - Ignored with ZAPI.
+ type: str
+ version_added: "21.15.0"
+short_description: NetApp ONTAP firmware upgrade for SP, shelf, ACP, and disk.
+version_added: 2.9.0
+'''
+
+EXAMPLES = """
+
+ - name: firmware upgrade
+ netapp.ontap.na_ontap_firmware_upgrade:
+ state: present
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: firmware upgrade, confirm successful download
+ netapp.ontap.na_ontap_firmware_upgrade:
+ state: present
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ fail_on_502_error: true
+ - name: SP firmware upgrade
+ netapp.ontap.na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ package: "{{ file name }}"
+ package_url: "{{ web_link }}"
+ clear_logs: True
+ install_baseline_image: False
+ update_type: serial_full
+ force_disruptive_update: False
+ firmware_type: service-processor
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: SP firmware download replace package
+ tags:
+ - sp_download
+ netapp.ontap.na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ package_url: "{{ web_link }}"
+ firmware_type: service-processor
+ replace_package: true
+ reboot_sp: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ - name: SP firmware download rename package
+ tags:
+ - sp_download
+ netapp.ontap.na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ package_url: "{{ web_link }}"
+ firmware_type: service-processor
+ rename_package: SP_FW.zip
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ - name: ACP firmware download and upgrade
+ netapp.ontap.na_ontap_firmware_upgrade:
+ state: present
+ node: vsim1
+ firmware_type: acp
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: shelf firmware upgrade
+ netapp.ontap.na_ontap_firmware_upgrade:
+ state: present
+ firmware_type: shelf
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: disk firmware upgrade
+ netapp.ontap.na_ontap_firmware_upgrade:
+ state: present
+ firmware_type: disk
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: any firmware upgrade (REST)
+ netapp.ontap.na_ontap_firmware_upgrade:
+ state: present
+ package_url: "{{ web_link }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: SP firmware upgrade with reboots (REST)
+ netapp.ontap.na_ontap_firmware_upgrade:
+ state: present
+ package_url: "{{ web_link }}"
+ firmware_type: service-processor
+      reboot_sp: true
+ reboot_sp_after_download: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+msg:
+ description: Returns additional information in case of success.
+ returned: always
+ type: str
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+# Status messages reported back to the user.  A download may legitimately
+# outlive the ZAPI connection, hence the "slowly" / "in progress" variants.
+MSGS = dict(
+    no_action='No action taken.',
+    dl_completed='Firmware download completed.',
+    dl_completed_slowly='Firmware download completed, slowly.',
+    dl_in_progress='Firmware download still in progress.'
+)
+
+
+class NetAppONTAPFirmwareUpgrade:
+    """
+    Download and/or upgrade ONTAP firmware (service-processor, shelf, ACP, disk),
+    using REST when supported and falling back to ZAPI otherwise.
+    """
+
+    def __init__(self):
+        """Build the argument spec, validate option combinations, and select REST vs ZAPI."""
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', default='present'),
+            node=dict(required=False, type='str'),
+            firmware_type=dict(type='str', choices=['storage', 'service-processor', 'shelf', 'acp', 'disk'], default='storage'),
+            clear_logs=dict(required=False, type='bool', default=True),
+            package=dict(required=False, type='str'),
+            install_baseline_image=dict(required=False, type='bool', default=False),
+            update_type=dict(required=False, type='str'),
+            shelf_module_fw=dict(required=False, type='str'),
+            disk_fw=dict(required=False, type='str'),
+            package_url=dict(required=False, type='str'),
+            force_disruptive_update=dict(required=False, type='bool', default=False),
+            fail_on_502_error=dict(required=False, type='bool', default=False),
+            rename_package=dict(required=False, type='str'),
+            replace_package=dict(required=False, type='bool'),
+            reboot_sp=dict(required=False, type='bool'),
+            reboot_sp_after_download=dict(required=False, type='bool'),
+            server_username=dict(required=False, type='str'),
+            server_password=dict(required=False, type='str', no_log=True),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            required_if=[
+                ('firmware_type', 'acp', ['node']),
+                ('firmware_type', 'disk', ['node']),
+                ('firmware_type', 'service-processor', ['node']),
+                ('force_disruptive_update', True, ['firmware_type']),
+                ('reboot_sp', True, ['node']),
+                ('reboot_sp_after_download', True, ['node']),
+            ],
+            required_together=[['server_username', 'server_password']],
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        self._node_uuid = None  # to cache calls to get_node_uuid
+
+        self.rest_api = OntapRestAPI(self.module)
+        # options that have no REST equivalent at all
+        unsupported_rest_properties = ['package', 'update_type', 'rename_package', 'shelf_module_fw', 'disk_fw']
+        # only accept default value for these 5 options (2 True and 3 False)
+        # accept the default value (for replace_package, this is implicit for REST) but switch to ZAPI or error out if set to False
+        unsupported_rest_properties.extend(option for option in ('clear_logs', 'replace_package') if self.parameters.get(option) is False)
+        # accept the default value of False, but switch to ZAPI or error out if set to True
+        unsupported_rest_properties.extend(option for option in ('install_baseline_image', 'force_disruptive_update', 'fail_on_502_error')
+                                           if self.parameters[option])
+
+        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
+
+        if self.parameters.get('firmware_type') == 'storage' and self.parameters.get('force_disruptive_update'):
+            self.module.fail_json(msg='Do not set force_disruptive_update to True, unless directed by NetApp Tech Support')
+
+        # reboot options only make sense for SP firmware
+        for option in ('reboot_sp', 'reboot_sp_after_download'):
+            if self.parameters.get('firmware_type') != 'service-processor' and self.parameters.get(option):
+                self.module.warn('%s is ignored when firmware_type is not set to service-processor' % option)
+        # default documented behavior: reboot the SP before download unless explicitly disabled
+        if self.parameters.get('firmware_type') == 'service-processor' and self.parameters.get('reboot_sp') is None:
+            self.module.warn('Forcing a reboot of SP before download - set reboot_sp: true to disable this warning.')
+            self.parameters['reboot_sp'] = True
+        if not self.use_rest and self.parameters.get('firmware_type') == 'service-processor':
+            msg = 'With ZAPI and firmware_type set to service-processor: '
+            if 'node' not in self.parameters:
+                self.module.fail_json(msg=msg + 'parameter node should be present.')
+            if self.parameters.get('install_baseline_image') and self.parameters.get('package') is not None:
+                self.module.fail_json(msg=msg + 'do not specify both package and install_baseline_image: true.')
+            if self.parameters.get('force_disruptive_update') \
+                    and self.parameters.get('install_baseline_image') is False \
+                    and self.parameters.get('package') is None:
+                self.module.fail_json(msg=msg + 'specify at least one of package or install_baseline_image: true.')
+
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True)
+
+    def firmware_image_get_iter(self):
+        """
+        Compose NaElement object to query current firmware version
+        :return: NaElement object for firmware_image_get_iter with query
+        """
+        firmware_image_get = netapp_utils.zapi.NaElement('service-processor-get-iter')
+        # restrict the query to the requested node
+        query = netapp_utils.zapi.NaElement('query')
+        firmware_image_info = netapp_utils.zapi.NaElement('service-processor-info')
+        firmware_image_info.add_new_child('node', self.parameters['node'])
+        query.add_child_elem(firmware_image_info)
+        firmware_image_get.add_child_elem(query)
+        return firmware_image_get
+
+    def firmware_image_get(self, node_name):
+        """
+        Get current firmware image info
+        :return: the SP firmware version string if the node is found, else None
+        """
+        # NOTE(review): node_name is unused; the query is built from self.parameters['node'].
+        firmware_image_get_iter = self.firmware_image_get_iter()
+        try:
+            result = self.server.invoke_successfully(firmware_image_get_iter, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching firmware image details: %s: %s'
+                                      % (self.parameters['node'], to_native(error)),
+                                  exception=traceback.format_exc())
+        # return firmware image details
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+            sp_info = result.get_child_by_name('attributes-list').get_child_by_name('service-processor-info')
+            return sp_info.get_child_content('firmware-version')
+        return None
+
+ def acp_firmware_update_required(self):
+ """
+ where acp firmware upgrade is required
+ :return: True is firmware upgrade is required else return None
+ """
+ acp_firmware_get_iter = netapp_utils.zapi.NaElement('storage-shelf-acp-module-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ acp_info = netapp_utils.zapi.NaElement('storage-shelf-acp-module')
+ query.add_child_elem(acp_info)
+ acp_firmware_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(acp_firmware_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching acp firmware details details: %s'
+ % (to_native(error)), exception=traceback.format_exc())
+ acp_module_info = self.na_helper.safe_get(result, ['attributes-list', 'storage-shelf-acp-module'])
+ if acp_module_info:
+ state = acp_module_info.get_child_content('state')
+ if state == 'firmware_update_required':
+ # acp firmware version upgrade required
+ return True
+ return False
+
+    def sp_firmware_image_update_progress_get(self, node_name):
+        """
+        Get current firmware image update progress info
+        :return: Dictionary of firmware image update progress if query successful, else return None
+        """
+        # NOTE(review): node_name is unused; the request is built from self.parameters['node'].
+        firmware_update_progress_get = netapp_utils.zapi.NaElement('service-processor-image-update-progress-get')
+        firmware_update_progress_get.add_new_child('node', self.parameters['node'])
+
+        firmware_update_progress_info = {}
+        try:
+            result = self.server.invoke_successfully(firmware_update_progress_get, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching firmware image upgrade progress details: %s'
+                                  % (to_native(error)), exception=traceback.format_exc())
+        # return firmware image update progress details
+        # NOTE(review): assumes 'attributes' is always present in the reply — verify; a missing
+        # 'attributes' element would raise AttributeError here.
+        if result.get_child_by_name('attributes').get_child_by_name('service-processor-image-update-progress-info'):
+            update_progress_info = result.get_child_by_name('attributes').get_child_by_name('service-processor-image-update-progress-info')
+            firmware_update_progress_info['is-in-progress'] = update_progress_info.get_child_content('is-in-progress')
+            firmware_update_progress_info['node'] = update_progress_info.get_child_content('node')
+        return firmware_update_progress_info
+
+    def shelf_firmware_info_get(self):
+        """
+        Get the current firmware of shelf module
+        :return: dict mapping shelf module-id to its firmware revision (empty if no records)
+        """
+        shelf_id_fw_info = {}
+        shelf_firmware_info_get = netapp_utils.zapi.NaElement('storage-shelf-info-get-iter')
+        # only ask ONTAP for the shelf-module firmware fields we need
+        desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
+        storage_shelf_info = netapp_utils.zapi.NaElement('storage-shelf-info')
+        shelf_module = netapp_utils.zapi.NaElement('shelf-modules')
+        shelf_module_info = netapp_utils.zapi.NaElement('storage-shelf-module-info')
+        shelf_module.add_child_elem(shelf_module_info)
+        storage_shelf_info.add_child_elem(shelf_module)
+        desired_attributes.add_child_elem(storage_shelf_info)
+        shelf_firmware_info_get.add_child_elem(desired_attributes)
+
+        try:
+            result = self.server.invoke_successfully(shelf_firmware_info_get, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching shelf module firmware details: %s'
+                                  % (to_native(error)), exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+            shelf_info = result.get_child_by_name('attributes-list').get_child_by_name('storage-shelf-info')
+            if (shelf_info.get_child_by_name('shelf-modules') and
+                    shelf_info.get_child_by_name('shelf-modules').get_child_by_name('storage-shelf-module-info')):
+                shelves = shelf_info['shelf-modules'].get_children()
+                for shelf in shelves:
+                    shelf_id_fw_info[shelf.get_child_content('module-id')] = shelf.get_child_content('module-fw-revision')
+        return shelf_id_fw_info
+
+    def disk_firmware_info_get(self):
+        """
+        Get the current firmware of disks module
+        :return: dict mapping disk UID to its firmware revision (empty if no records)
+        """
+        disk_id_fw_info = {}
+        disk_firmware_info_get = netapp_utils.zapi.NaElement('storage-disk-get-iter')
+        # only ask ONTAP for the disk inventory fields we need
+        desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
+        storage_disk_info = netapp_utils.zapi.NaElement('storage-disk-info')
+        disk_inv = netapp_utils.zapi.NaElement('disk-inventory-info')
+        storage_disk_info.add_child_elem(disk_inv)
+        desired_attributes.add_child_elem(storage_disk_info)
+        disk_firmware_info_get.add_child_elem(desired_attributes)
+        try:
+            result = self.server.invoke_successfully(disk_firmware_info_get, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching disk module firmware details: %s'
+                                  % (to_native(error)), exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+            disk_info = result.get_child_by_name('attributes-list')
+            disks = disk_info.get_children()
+            for disk in disks:
+                disk_id_fw_info[disk.get_child_content('disk-uid')] = disk.get_child_by_name('disk-inventory-info').get_child_content('firmware-revision')
+        return disk_id_fw_info
+
+    def disk_firmware_update_required(self):
+        """
+        Check whether disk firmware upgrade is required or not
+        :return: True if any disk is not already at the requested disk_fw revision
+        """
+        disk_firmware_info = self.disk_firmware_info_get()
+        return any(
+            disk_firmware_info[disk] != self.parameters['disk_fw']
+            for disk in disk_firmware_info
+        )
+
+    def shelf_firmware_update_required(self):
+        """
+        Check whether shelf firmware upgrade is required or not
+        :return: True if any shelf module is not already at the requested shelf_module_fw revision
+        """
+        shelf_firmware_info = self.shelf_firmware_info_get()
+        return any(
+            shelf_firmware_info[module] != self.parameters['shelf_module_fw']
+            for module in shelf_firmware_info
+        )
+
+    def sp_firmware_image_update(self):
+        """
+        Update current firmware image
+        :return: True if an update was started, False if ONTAP skipped it because the
+            installed version already matches (ZAPI error 13001).
+        """
+        firmware_update_info = netapp_utils.zapi.NaElement('service-processor-image-update')
+        if self.parameters.get('package') is not None:
+            firmware_update_info.add_new_child('package', self.parameters['package'])
+        if self.parameters.get('clear_logs') is not None:
+            firmware_update_info.add_new_child('clear-logs', str(self.parameters['clear_logs']))
+        if self.parameters.get('install_baseline_image') is not None:
+            firmware_update_info.add_new_child('install-baseline-image', str(self.parameters['install_baseline_image']))
+        firmware_update_info.add_new_child('node', self.parameters['node'])
+        # NOTE(review): 'update_type' is read unconditionally, unlike the guarded options
+        # above — presumably it is always set by callers; confirm a missing value cannot occur.
+        firmware_update_info.add_new_child('update-type', self.parameters['update_type'])
+
+        try:
+            self.server.invoke_successfully(firmware_update_info, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            # Current firmware version matches the version to be installed
+            if to_native(error.code) == '13001' and (error.message.startswith('Service Processor update skipped')):
+                return False
+            self.module.fail_json(msg='Error updating firmware image for %s: %s'
+                                      % (self.parameters['node'], to_native(error)),
+                                  exception=traceback.format_exc())
+        return True
+
+    def shelf_firmware_upgrade(self):
+        """
+        Upgrade shelf firmware image
+        :return: True on success (the ZAPI call is asynchronous; success means the update was accepted)
+        """
+        shelf_firmware_update_info = netapp_utils.zapi.NaElement('storage-shelf-firmware-update')
+        try:
+            self.server.invoke_successfully(shelf_firmware_update_info, enable_tunneling=True)
+            return True
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error updating shelf firmware image : %s'
+                                  % (to_native(error)), exception=traceback.format_exc())
+
+    def acp_firmware_upgrade(self):
+
+        """
+        Upgrade ACP firmware image on the given node
+        """
+        acp_firmware_update_info = netapp_utils.zapi.NaElement('storage-shelf-acp-firmware-update')
+        acp_firmware_update_info.add_new_child('node-name', self.parameters['node'])
+        try:
+            self.server.invoke_successfully(acp_firmware_update_info, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error updating acp firmware image : %s'
+                                  % (to_native(error)), exception=traceback.format_exc())
+
+    def disk_firmware_upgrade(self):
+
+        """
+        Upgrade disk firmware on the given node
+        :return: True on success
+        """
+        disk_firmware_update_info = netapp_utils.zapi.NaElement('disk-update-disk-fw')
+        disk_firmware_update_info.add_new_child('node-name', self.parameters['node'])
+        try:
+            self.server.invoke_successfully(disk_firmware_update_info, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error updating disk firmware image : %s'
+                                  % (to_native(error)), exception=traceback.format_exc())
+        return True
+
+    def download_firmware(self):
+        """Download the firmware package from package_url onto the node(s).
+
+        Uses REST when available, otherwise the 'system-cli' ZAPI passthrough.
+        :return: one of the MSGS strings describing the download outcome.
+        """
+        if self.use_rest:
+            return self.download_software_rest()
+
+        ''' calls the system-cli ZAPI as there is no ZAPI for this feature '''
+        msg = MSGS['dl_completed']
+        # '*' targets all nodes when no node is given
+        command = ['storage', 'firmware', 'download', '-node', self.parameters['node'] if self.parameters.get('node') else '*',
+                   '-package-url', self.parameters['package_url']]
+        command_obj = netapp_utils.zapi.NaElement("system-cli")
+
+        args_obj = netapp_utils.zapi.NaElement("args")
+        for arg in command:
+            args_obj.add_new_child('arg', arg)
+        command_obj.add_child_elem(args_obj)
+        command_obj.add_new_child('priv', 'advanced')
+
+        output = None
+        try:
+            output = self.server.invoke_successfully(command_obj, True)
+
+        except netapp_utils.zapi.NaApiError as error:
+            # with netapp_lib, error.code may be a number or a string
+            try:
+                err_num = int(error.code)
+            except ValueError:
+                err_num = -1
+            if err_num == 60:   # API did not finish on time
+                # even if the ZAPI reports a timeout error, it does it after the command completed
+                msg = MSGS['dl_completed_slowly']
+            elif err_num == 502 and not self.parameters['fail_on_502_error']:   # Bad Gateway
+                # ONTAP proxy breaks the connection after 5 minutes, we can assume the download is progressing slowly
+                msg = MSGS['dl_in_progress']
+            else:
+                self.module.fail_json(msg='Error running command %s: %s' % (command, to_native(error)),
+                                      exception=traceback.format_exc())
+        except netapp_utils.zapi.etree.XMLSyntaxError as error:
+            self.module.fail_json(msg='Error decoding output from command %s: %s' % (command, to_native(error)),
+                                  exception=traceback.format_exc())
+
+        if output is not None:
+            # command completed, check for success
+            status = output.get_attr('status')
+            cli_output = output.get_child_content('cli-output')
+            if status is None or status != 'passed' or cli_output is None or cli_output == "":
+                if status is None:
+                    extra_info = "'status' attribute missing"
+                elif status != 'passed':
+                    extra_info = "check 'status' value"
+                else:
+                    extra_info = 'check console permissions'
+                self.module.fail_json(msg='unable to download package from %s: %s.  Received: %s' %
+                                          (self.parameters['package_url'], extra_info, output.to_string()))
+
+            if cli_output is not None:
+                # the CLI reports download failures in its text output rather than via status
+                if cli_output.startswith('Error:') or \
+                   'Failed to download package from' in cli_output:
+                    self.module.fail_json(msg='failed to download package from %s: %s' % (self.parameters['package_url'], cli_output))
+                msg += "  Extra info: %s" % cli_output
+
+        return msg
+
+    def download_sp_image(self):
+        """Fetch the SP firmware package from package_url onto the node (ZAPI),
+        optionally renaming or replacing an existing local package.
+        """
+        fetch_package = netapp_utils.zapi.NaElement('system-image-fetch-package')
+        fetch_package.add_new_child('node', self.parameters['node'])
+        fetch_package.add_new_child('package', self.parameters['package_url'])
+        if self.parameters.get('rename_package'):
+            fetch_package.add_new_child('rename-package', self.parameters['rename_package'])
+        if self.parameters.get('replace_package'):
+            fetch_package.add_new_child('replace-package', str(self.parameters['replace_package']))
+        try:
+            self.server.invoke_successfully(fetch_package, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching system image package from %s: %s'
+                                      % (self.parameters['package_url'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def download_sp_image_progress(self):
+        """Return a dict describing the current (or most recent) SP image operation.
+
+        Keys: phase, exit_message, exit_status, last_message, run_status;
+        each value is None when the corresponding element is absent from the reply.
+        """
+        progress = netapp_utils.zapi.NaElement('system-image-update-progress-get')
+        progress.add_new_child('node', self.parameters['node'])
+        progress_info = {}
+        try:
+            result = self.server.invoke_successfully(progress, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching system image package download progress: %s'
+                                  % (to_native(error)), exception=traceback.format_exc())
+        if result.get_child_by_name('phase'):
+            progress_info['phase'] = result.get_child_content('phase')
+        else:
+            progress_info['phase'] = None
+        if result.get_child_by_name('exit-message'):
+            progress_info['exit_message'] = result.get_child_content('exit-message')
+        else:
+            progress_info['exit_message'] = None
+        if result.get_child_by_name('exit-status'):
+            progress_info['exit_status'] = result.get_child_content('exit-status')
+        else:
+            progress_info['exit_status'] = None
+        if result.get_child_by_name('last-message'):
+            progress_info['last_message'] = result.get_child_content('last-message')
+        else:
+            progress_info['last_message'] = None
+        if result.get_child_by_name('run-status'):
+            progress_info['run_status'] = result.get_child_content('run-status')
+        else:
+            progress_info['run_status'] = None
+        return progress_info
+
+    def reboot_sp(self):
+        """Reboot the node's service processor (REST when supported, else ZAPI)."""
+        if self.use_rest:
+            return self.reboot_sp_rest()
+        reboot = netapp_utils.zapi.NaElement('service-processor-reboot')
+        reboot.add_new_child('node', self.parameters['node'])
+        try:
+            self.server.invoke_successfully(reboot, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error rebooting service processor: %s'
+                                      % (to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def get_node_uuid(self):
+        """Look up (and cache in self._node_uuid) the UUID of self.parameters['node'].
+
+        Fails the module if the node does not exist, listing the known node names.
+        """
+        if self._node_uuid is not None:
+            return self._node_uuid
+        api = 'cluster/nodes'
+        query = {'name': self.parameters['node']}
+        node, error = rest_generic.get_one_record(self.rest_api, api, query, fields='uuid')
+        if error:
+            self.module.fail_json(msg='Error reading node UUID: %s' % error)
+        if not node:
+            self.module.fail_json(msg='Error: node not found %s, current nodes: %s.' % (self.parameters['node'], ', '.join(self.get_node_names())))
+        self._node_uuid = node['uuid']
+        return node['uuid']
+
+    def get_node_names(self):
+        """Return the names of all cluster nodes (used to build a helpful error message)."""
+        api = 'cluster/nodes'
+        nodes, error = rest_generic.get_0_or_more_records(self.rest_api, api, fields='name')
+        if error:
+            self.module.fail_json(msg='Error reading nodes: %s' % error)
+        return [node['name'] for node in nodes]
+
+    def reboot_sp_rest_cli(self):
+        """ for older versions of ONTAP, use the REST CLI passthrough
+        :return: the error string from the PATCH call, or None on success
+        """
+        api = 'private/cli/sp/reboot-sp'
+        query = {'node': self.parameters['node']}
+        dummy, error = rest_generic.patch_async(self.rest_api, api, None, None, query)
+        return error
+
+    def get_sp_state(self):
+        """Return the SP state reported for the target node (e.g. 'rebooting'),
+        or None when the node record has no service_processor.state.
+        """
+        api = 'cluster/nodes/%s' % self.get_node_uuid()
+        node, error = rest_generic.get_one_record(self.rest_api, api, fields='service_processor.state')
+        if error:
+            self.module.fail_json(msg='Error getting node SP state: %s' % error)
+        if node:
+            return self.na_helper.safe_get(node, ['service_processor', 'state'])
+
+    def wait_for_sp_reboot(self):
+        """Poll SP state every 15s, for up to 5 minutes (20 * 15s); warn if still rebooting."""
+        for dummy in range(20):
+            time.sleep(15)
+            state = self.get_sp_state()
+            if state != 'rebooting':
+                break
+        else:
+            # for/else: only reached when the loop completed without break
+            self.module.warn('node did not finish up booting in 5 minutes!')
+
    def reboot_sp_rest(self):
        """Reboot the node's service processor through PATCH cluster/nodes.

        'service_processor.action' was added in ONTAP 9.10.1; on older releases
        the PATCH fails with 'Unexpected argument' and we fall back to the REST
        CLI passthrough.  Fails the module if both attempts error out.
        """
        uuid = self.get_node_uuid()
        api = 'cluster/nodes'
        body = {'service_processor.action': 'reboot'}
        dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
        if error and 'Unexpected argument "service_processor.action"' in error:
            # pre-9.10.1 cluster: retry through the CLI passthrough
            error = self.reboot_sp_rest_cli()
            if error:
                error = 'reboot_sp requires ONTAP 9.10.1 or newer, falling back to CLI passthrough failed: ' + error
        if error:
            self.module.fail_json(msg='Error rebooting node SP: %s' % error)
+
    def download_sp_firmware(self):
        """Download the SP firmware package, optionally rebooting the SP first.

        With REST, delegates to download_software_rest and returns its message.
        With ZAPI, starts the download then polls the progress every 10 seconds
        until the run status leaves the download phase; fails the module when the
        download does not exit with 'Success'.  Returns one of the MSGS values.
        """
        if self.parameters.get('reboot_sp'):
            self.reboot_sp()
        if self.use_rest:
            return self.download_software_rest()
        self.download_sp_image()
        progress = self.download_sp_image_progress()
        # progress only show the current or most recent update/install operation.
        if progress['phase'] == 'Download':
            # poll until the download phase finishes (run_status 'Exited' or cleared)
            while progress['run_status'] is not None and progress['run_status'] != 'Exited':
                time.sleep(10)
                progress = self.download_sp_image_progress()
            if progress['exit_status'] != 'Success':
                self.module.fail_json(msg=progress['exit_message'], exception=traceback.format_exc())
            return MSGS['dl_completed']
        return MSGS['no_action']
+
+ def download_software_rest(self):
+ body = {'url': self.parameters['package_url']}
+ for attr in ('username', 'password'):
+ value = self.parameters.get('server_%s' % attr)
+ if value:
+ body[attr] = value
+ api = 'cluster/software/download'
+ # burt 1442080 - when timeout is 30, the API may return a 500 error, though the job says download completed!
+ message, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=self.parameters.get('time_out', 180), timeout=0)
+ if error:
+ self.module.fail_json(msg='Error downloading software: %s' % error)
+ return message
+
    def apply(self):
        """
        Apply action to upgrade firmware

        Downloads the package when package_url is set (SP firmware takes a
        dedicated path, optionally waiting for the SP reboot with REST), then
        either exits or continues into a forced disruptive update.
        """
        changed = False
        msg = MSGS['no_action']
        if self.parameters.get('package_url'):
            if not self.module.check_mode:
                if self.parameters.get('firmware_type') == 'service-processor':
                    msg = self.download_sp_firmware()
                    if self.parameters.get('reboot_sp') and self.use_rest:
                        self.wait_for_sp_reboot()
                else:
                    msg = self.download_firmware()
            # a download is always reported as a change, even in check mode
            changed = True
        # NOTE(review): 'reboot_sp_after update' contains a space - Ansible option
        # names cannot contain spaces, so this .get() presumably always returns
        # None; confirm the intended key against the argument spec (perhaps
        # reboot_sp_after_download?).
        if not self.parameters['force_disruptive_update'] and not self.parameters.get('reboot_sp_after update'):
            # disk_qual, disk, shelf, and ACP are automatically updated in background
            # The SP firmware is automatically updated on reboot
            self.module.exit_json(changed=changed, msg=msg)
        if msg == MSGS['dl_in_progress']:
            # can't force an update if the software is still downloading
            self.module.fail_json(msg="Cannot force update: %s" % msg)
        self.disruptive_update(changed)
+
    def disruptive_update(self, changed):
        """Force a firmware update for the selected firmware_type and exit the module.

        Each branch below handles one firmware type (service-processor, shelf,
        acp, disk).  All paths terminate with module.exit_json.
        """
        if self.parameters.get('firmware_type') == 'service-processor':
            # NOTE(review): 'reboot_sp_after update' contains a space - Ansible
            # option names cannot contain spaces, so this .get() presumably always
            # returns None; confirm the intended key against the argument spec.
            if self.parameters.get('reboot_sp_after update'):
                self.reboot_sp()
            if not self.parameters['force_disruptive_update']:
                return
            # service-processor firmware upgrade
            current = self.firmware_image_get(self.parameters['node'])

            if self.parameters.get('state') == 'present' and current:
                if not self.module.check_mode:
                    if self.sp_firmware_image_update():
                        changed = True
                    # poll every 25 seconds until the SP update is no longer in progress
                    firmware_update_progress = self.sp_firmware_image_update_progress_get(self.parameters['node'])
                    while firmware_update_progress.get('is-in-progress') == 'true':
                        time.sleep(25)
                        firmware_update_progress = self.sp_firmware_image_update_progress_get(self.parameters['node'])
                else:
                    # we don't know until we try the upgrade
                    changed = True

        elif self.parameters.get('firmware_type') == 'shelf':
            # shelf firmware upgrade
            if self.parameters.get('shelf_module_fw'):
                if self.shelf_firmware_update_required():
                    changed = True if self.module.check_mode else self.shelf_firmware_upgrade()
            else:
                # with check_mode, we don't know until we try the upgrade -- assuming the worst
                changed = True if self.module.check_mode else self.shelf_firmware_upgrade()
        elif self.parameters.get('firmware_type') == 'acp' and self.acp_firmware_update_required():
            # acp firmware upgrade
            if not self.module.check_mode:
                self.acp_firmware_upgrade()
            changed = True
        elif self.parameters.get('firmware_type') == 'disk':
            # Disk firmware upgrade
            if self.parameters.get('disk_fw'):
                if self.disk_firmware_update_required():
                    changed = True if self.module.check_mode else self.disk_firmware_upgrade()
            else:
                # with check_mode, we don't know until we try the upgrade -- assuming the worst
                changed = True if self.module.check_mode else self.disk_firmware_upgrade()
        self.module.exit_json(changed=changed, msg='forced update for %s' % self.parameters.get('firmware_type'))
+
+
def main():
    """Execute action"""
    NetAppONTAPFirmwareUpgrade().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py
new file mode 100644
index 000000000..3abdec524
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py
@@ -0,0 +1,672 @@
+#!/usr/bin/python
+
+# (c) 2019-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+na_ontap_flexcache
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP FlexCache - create/delete relationship
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete FlexCache volume relationships.
+ - This module does not modify an existing FlexCache volume with two exceptions.
+ - When using REST, a prepopulate can be started on an existing FlexCache volume.
+ - When using REST, the volume can be mounted or unmounted. Set path to '' to unmount it.
+ - It is required the volume is mounted to prepopulate it.
+ - Some actions are also available through the na_ontap_volume.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_flexcache
+version_added: 2.8.0
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified relationship should exist or not.
+ default: present
+ type: str
+ origin_volume:
+ description:
+ - Name of the origin volume for the FlexCache.
+ - Required for creation.
+ type: str
+ origin_vserver:
+ description:
+ - Name of the origin vserver for the FlexCache.
+ - Required for creation.
+ type: str
+ origin_cluster:
+ description:
+ - Name of the origin cluster for the FlexCache.
+ - Defaults to cluster associated with target vserver if absent.
+ - Not used for creation.
+ type: str
+ name:
+ description:
+ - Name of the target volume for the FlexCache.
+ required: true
+ type: str
+ aliases: ['volume']
+ version_added: 21.3.0
+ junction_path:
+ description:
+ - Junction path of the cache volume.
+ type: str
+ aliases: ['path']
+ auto_provision_as:
+ description:
+ - Use this parameter to automatically select existing aggregates for volume provisioning, e.g. flexgroup.
+ - Note that the fastest aggregate type with at least one aggregate on each node of the cluster will be selected.
+ - Ignored when using REST - omit aggr_list for automatic selection.
+ type: str
+ size:
+ description:
+ - Size of cache volume.
+ type: int
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ default: gb
+ vserver:
+ description:
+ - Name of the target vserver for the FlexCache.
+ - Note that hostname, username, password are intended for the target vserver.
+ required: true
+ type: str
+ aggr_list:
+ description:
+ - List of aggregates to host target FlexCache volume.
+ type: list
+ elements: str
+ aliases: ['aggregates']
+ aggr_list_multiplier:
+ description:
+ - Aggregate list repeat count.
+ - REST - Number of FlexCache constituents per aggregate when the C(aggregates) field is mentioned.
+ type: int
+ aliases: ['constituents_per_aggregate']
+ force_unmount:
+ description:
+ - Unmount FlexCache volume. Delete the junction path at which the volume is mounted before deleting the FlexCache relationship.
+ type: bool
+ default: false
+ force_offline:
+ description:
+ - Offline FlexCache volume before deleting the FlexCache relationship.
+ - The volume will be destroyed and data can be lost.
+ type: bool
+ default: false
+ time_out:
+ description:
+ - time to wait for flexcache creation or deletion in seconds
+ - if 0, the request is asynchronous
+ - default is set to 3 minutes
+ type: int
+ default: 180
+ prepopulate:
+ version_added: 21.3.0
+ description:
+ - prepopulate FlexCache with data from origin volume.
+ - requires ONTAP 9.8 or later, and REST support.
+ - dir_paths must be set for this option to be effective.
+ type: dict
+ suboptions:
+ dir_paths:
+ description:
+ - List of directory paths in the owning SVM's namespace at which the FlexCache volume is mounted.
+ - Path must begin with '/'.
+ type: list
+ elements: str
+ required: true
+ exclude_dir_paths:
+ description:
+ - Directory path which needs to be excluded from prepopulation.
+ - Path must begin with '/'.
+ - Requires ONTAP 9.9 or later.
+ type: list
+ elements: str
+ recurse:
+ description:
+ - Specifies whether or not the prepopulate action should search through the directory-path recursively.
+ - If not set, the default value 'true' is used.
+ type: bool
+ force_prepopulate_if_already_created:
+ description:
+ - by default, this module will start a prepopulate task each time it is called, and is not idempotent.
+ - if set to false, the prepopulate task is not started if the FlexCache already exists.
+ type: bool
+ default: true
+'''
+
+EXAMPLES = """
+
+ - name: Create FlexCache
+ netapp.ontap.na_ontap_flexcache:
+ state: present
+ origin_volume: test_src
+ name: test_dest
+ origin_vserver: ansible_src
+ vserver: ansible_dest
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete FlexCache
+ netapp.ontap.na_ontap_flexcache:
+ state: absent
+ name: test_dest
+ vserver: ansible_dest
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_volume
+
+
class NetAppONTAPFlexCache:
    """
    Class with FlexCache methods
    """

    def __init__(self):
        """Build the argument spec, parse options, and select REST vs ZAPI.

        Fails early when prepopulate options are used without the required
        ONTAP/REST support, or when ZAPI is needed but netapp-lib is missing.
        """

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'],
                       default='present'),
            origin_volume=dict(required=False, type='str'),  # origins[0]
            origin_vserver=dict(required=False, type='str'),  # origins[0]
            origin_cluster=dict(required=False, type='str'),  # origins[0]
            auto_provision_as=dict(required=False, type='str'),  # ignored with REST
            name=dict(required=True, type='str', aliases=['volume']),
            junction_path=dict(required=False, type='str', aliases=['path']),
            size=dict(required=False, type='int'),
            size_unit=dict(default='gb',
                           choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
                                    'pb', 'eb', 'zb', 'yb'], type='str'),
            vserver=dict(required=True, type='str'),
            aggr_list=dict(required=False, type='list', elements='str', aliases=['aggregates']),
            aggr_list_multiplier=dict(required=False, type='int', aliases=['constituents_per_aggregate']),
            force_offline=dict(required=False, type='bool', default=False),
            force_unmount=dict(required=False, type='bool', default=False),
            time_out=dict(required=False, type='int', default=180),
            prepopulate=dict(required=False, type='dict', options=dict(
                dir_paths=dict(required=True, type='list', elements='str'),
                exclude_dir_paths=dict(required=False, type='list', elements='str'),
                recurse=dict(required=False, type='bool'),
                force_prepopulate_if_already_created=dict(required=False, type='bool', default=True),
            ))
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            mutually_exclusive=[
                ('aggr_list', 'auto_provision_as'),
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule(self.module)
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # normalize size to bytes using the requested unit
        if self.parameters.get('size'):
            self.parameters['size'] = self.parameters['size'] * netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']]
        # setup later if required
        self.origin_server = None

        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        # prepopulate requires ONTAP 9.8 or later with REST
        ontap_98_options = ['prepopulate']
        if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8) and any(x in self.parameters for x in ontap_98_options):
            self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_98_options, version='9.8'))

        if 'prepopulate' in self.parameters:
            # sanitize the dictionary, as Ansible fills everything with None values
            self.parameters['prepopulate'] = self.na_helper.filter_out_none_entries(self.parameters['prepopulate'])
            # exclude_dir_paths requires ONTAP 9.9 or later
            ontap_99_options = ['exclude_dir_paths']
            if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9) and any(x in self.parameters['prepopulate'] for x in ontap_99_options):
                options = ['prepopulate: ' + x for x in ontap_99_options]
                self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(options, version='9.9'))

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def add_parameter_to_dict(self, adict, name, key, tostr=False):
+ ''' add defined parameter (not None) to adict using key '''
+ value = self.parameters.get(name)
+ if value is not None:
+ adict[key] = str(value) if tostr else value
+
    def get_job(self, jobid, server):
        """
        Get job details by id

        Returns None when the job is unknown to this vserver (ZAPI error 15661),
        otherwise a dict with 'job-progress', 'job-state' and 'job-completion'
        (the latter None when the element is absent).  Fails the module on any
        other ZAPI error.
        """
        job_get = netapp_utils.zapi.NaElement('job-get')
        job_get.add_new_child('job-id', jobid)
        try:
            result = server.invoke_successfully(job_get, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) == "15661":
                # Not found
                return None
            self.module.fail_json(msg='Error fetching job info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        job_info = result.get_child_by_name('attributes').get_child_by_name('job-info')
        return {
            'job-progress': job_info['job-progress'],
            'job-state': job_info['job-state'],
            'job-completion': job_info['job-completion'] if job_info.get_child_by_name('job-completion') is not None else None
        }
+
    def check_job_status(self, jobid):
        """
        Loop until job is complete

        Polls every 5 seconds until the job reaches a terminal state or
        self.parameters['time_out'] seconds have elapsed.
        Returns None on success, or an error message string otherwise.
        """
        server = self.server
        sleep_time = 5
        time_out = self.parameters['time_out']
        while time_out > 0:
            results = self.get_job(jobid, server)
            # If running as cluster admin, the job is owned by cluster vserver
            # rather than the target vserver.
            if results is None and server == self.server:
                # retry once against the cluster vserver; note that 'results' is
                # temporarily reused to hold the cserver name before the next poll
                results = netapp_utils.get_cserver(self.server)
                server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
                continue
            if results is None:
                error = 'cannot locate job with id: %s' % jobid
                break
            if results['job-state'] in ('queued', 'running'):
                time.sleep(sleep_time)
                time_out -= sleep_time
                continue
            if results['job-state'] in ('success', 'failure'):
                break
            else:
                self.module.fail_json(msg='Unexpected job status in: %s' % repr(results))

        # derive the error message from the final job record (None means the loop
        # broke with 'cannot locate job', where error is already set)
        if results is not None:
            if results['job-state'] == 'success':
                error = None
            elif results['job-state'] in ('queued', 'running'):
                # loop exhausted time_out while the job was still active
                error = 'job completion exceeded expected timer of: %s seconds' % self.parameters['time_out']
            elif results['job-completion'] is not None:
                error = results['job-completion']
            else:
                error = results['job-progress']
        return error
+
+ def flexcache_get_iter(self):
+ """
+ Compose NaElement object to query current FlexCache relation
+ """
+ options = {'volume': self.parameters['name']}
+ self.add_parameter_to_dict(options, 'origin_volume', 'origin-volume')
+ self.add_parameter_to_dict(options, 'origin_vserver', 'origin-vserver')
+ self.add_parameter_to_dict(options, 'origin_cluster', 'origin-cluster')
+ flexcache_info = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'flexcache-info', **options)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(flexcache_info)
+ flexcache_get_iter = netapp_utils.zapi.NaElement('flexcache-get-iter')
+ flexcache_get_iter.add_child_elem(query)
+ return flexcache_get_iter
+
    def flexcache_get(self):
        """
        Get current FlexCache relations
        :return: Dictionary of current FlexCache details if query successful, else None

        NOTE: the returned dictionary shape differs between the two paths:
        REST returns vserver/name/uuid/junction_path, ZAPI returns
        origin_cluster/origin_volume/origin_vserver/size/name/vserver.
        """
        if self.use_rest:
            api = 'storage/flexcache/flexcaches'
            query = {
                'name': self.parameters['name'],
                'svm.name': self.parameters['vserver']
            }
            if 'origin_cluster' in self.parameters:
                query['origin.cluster.name'] = self.parameters['origin_cluster']
            fields = 'svm,name,uuid,path'
            flexcache, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
            self.na_helper.fail_on_error(error)
            if flexcache is None:
                return None
            return dict(
                vserver=flexcache['svm']['name'],
                name=flexcache['name'],
                uuid=flexcache['uuid'],
                junction_path=flexcache.get('path')
            )

        flexcache_get_iter = self.flexcache_get_iter()
        flex_info = {}
        try:
            result = self.server.invoke_successfully(flexcache_get_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching FlexCache info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        # exactly one record: extract its attributes
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            flexcache_info = result.get_child_by_name('attributes-list') \
                                   .get_child_by_name('flexcache-info')
            flex_info['origin_cluster'] = flexcache_info.get_child_content('origin-cluster')
            flex_info['origin_volume'] = flexcache_info.get_child_content('origin-volume')
            flex_info['origin_vserver'] = flexcache_info.get_child_content('origin-vserver')
            flex_info['size'] = flexcache_info.get_child_content('size')
            flex_info['name'] = flexcache_info.get_child_content('volume')
            flex_info['vserver'] = flexcache_info.get_child_content('vserver')

            return flex_info
        # more than one match is ambiguous - refuse to guess
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) > 1:
            msg = 'Multiple records found for %s:' % self.parameters['name']
            self.module.fail_json(msg='Error fetching FlexCache info: %s' % msg)
        return None
+
+ def flexcache_rest_create_body(self, mappings):
+ ''' maps self.parameters to REST API body attributes, using mappings to identify fields to add '''
+ body = {}
+ for key, value in mappings.items():
+ if key in self.parameters:
+ if key == 'aggr_list':
+ body[value] = [dict(name=aggr) for aggr in self.parameters[key]]
+ else:
+ body[value] = self.parameters[key]
+ elif key == 'origins':
+ # this is an artificial key, to match the REST list of dict structure
+ origin = dict(
+ volume=dict(name=self.parameters['origin_volume']),
+ svm=dict(name=self.parameters['origin_vserver'])
+ )
+ body[value] = [origin]
+ return body
+
+ def flexcache_rest_create(self):
+ ''' use POST to create a FlexCache '''
+ mappings = dict(
+ name='name',
+ vserver='svm.name',
+ junction_path='path',
+ size='size',
+ aggr_list='aggregates',
+ aggr_list_multiplier='constituents_per_aggregate',
+ origins='origins',
+ prepopulate='prepopulate'
+ )
+ body = self.flexcache_rest_create_body(mappings)
+ api = 'storage/flexcache/flexcaches'
+ response, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=self.parameters['time_out'])
+ self.na_helper.fail_on_error(error)
+ return response
+
    def flexcache_rest_modify(self, uuid):
        ''' use PATCH to start prepopulating a FlexCache '''
        mappings = dict(  # name cannot be set, though swagger example shows it
            prepopulate='prepopulate'
        )
        body = self.flexcache_rest_create_body(mappings)
        api = 'storage/flexcache/flexcaches'
        # PATCH is asynchronous; wait up to time_out seconds for the job
        response, error = rest_generic.patch_async(self.rest_api, api, uuid, body, job_timeout=self.parameters['time_out'])
        self.na_helper.fail_on_error(error)
        return response
+
    def flexcache_create_async(self):
        """
        Create a FlexCache relationship

        Issues the ZAPI flexcache-create-async call; returns a dict holding
        'result-status' and 'result-jobid' when the response contains them.
        """
        options = {'origin-volume': self.parameters['origin_volume'],
                   'origin-vserver': self.parameters['origin_vserver'],
                   'volume': self.parameters['name']}
        self.add_parameter_to_dict(options, 'junction_path', 'junction-path')
        self.add_parameter_to_dict(options, 'auto_provision_as', 'auto-provision-as')
        self.add_parameter_to_dict(options, 'size', 'size', tostr=True)
        # the multiplier is only meaningful when an aggregate list is also given
        if self.parameters.get('aggr_list') and self.parameters.get('aggr_list_multiplier'):
            self.add_parameter_to_dict(options, 'aggr_list_multiplier', 'aggr-list-multiplier', tostr=True)
        flexcache_create = netapp_utils.zapi.NaElement.create_node_with_children('flexcache-create-async', **options)
        if self.parameters.get('aggr_list'):
            aggregates = netapp_utils.zapi.NaElement('aggr-list')
            for aggregate in self.parameters['aggr_list']:
                aggregates.add_new_child('aggr-name', aggregate)
            flexcache_create.add_child_elem(aggregates)
        try:
            result = self.server.invoke_successfully(flexcache_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating FlexCache: %s' % to_native(error),
                                  exception=traceback.format_exc())
        results = {}
        for key in ('result-status', 'result-jobid'):
            if result.get_child_by_name(key):
                results[key] = result[key]
        return results
+
+ def flexcache_create(self):
+ """
+ Create a FlexCache relationship
+ Check job status
+ """
+ if self.use_rest:
+ return self.flexcache_rest_create()
+
+ results = self.flexcache_create_async()
+ status = results.get('result-status')
+ if status == 'in_progress' and 'result-jobid' in results:
+ if self.parameters['time_out'] == 0:
+ # asynchronous call, assuming success!
+ return
+ error = self.check_job_status(results['result-jobid'])
+ if error is None:
+ return
+ else:
+ self.module.fail_json(msg='Error when creating flexcache: %s' % error)
+ self.module.fail_json(msg='Unexpected error when creating flexcache: results is: %s' % repr(results))
+
+ def flexcache_delete_async(self):
+ """
+ Delete FlexCache relationship at destination cluster
+ """
+ options = {'volume': self.parameters['name']}
+ flexcache_delete = netapp_utils.zapi.NaElement.create_node_with_children('flexcache-destroy-async', **options)
+ try:
+ result = self.server.invoke_successfully(flexcache_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting FlexCache: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+ results = {}
+ for key in ('result-status', 'result-jobid'):
+ if result.get_child_by_name(key):
+ results[key] = result[key]
+ return results
+
    def rest_offline_volume(self, current):
        """
        Offline the volume using REST PATCH method.

        'current' must carry the volume uuid (as returned by flexcache_get with REST).
        """
        uuid = current.get('uuid')
        if uuid is None:
            error = 'Error, no uuid in current: %s' % str(current)
            self.na_helper.fail_on_error(error)
        body = dict(state='offline')
        return self.patch_volume_rest(uuid, body)
+
    def volume_offline(self, current):
        """
        Offline FlexCache volume at destination cluster

        Dispatches to REST or the ZAPI volume-offline call; 'current' is only
        used on the REST path (for the uuid).
        """
        if self.use_rest:
            self.rest_offline_volume(current)
        else:
            options = {'name': self.parameters['name']}
            xml = netapp_utils.zapi.NaElement.create_node_with_children(
                'volume-offline', **options)
            try:
                self.server.invoke_successfully(xml, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error offlining FlexCache volume: %s'
                                          % (to_native(error)),
                                      exception=traceback.format_exc())
+
    def rest_mount_volume(self, current, path):
        """
        Mount the volume using REST PATCH method.
        If path is empty string, unmount the volume.

        'current' must carry the volume uuid (as returned by flexcache_get with REST).
        """
        uuid = current.get('uuid')
        if uuid is None:
            error = 'Error, no uuid in current: %s' % str(current)
            self.na_helper.fail_on_error(error)
        body = dict(nas=dict(path=path))
        return self.patch_volume_rest(uuid, body)
+
+ def rest_unmount_volume(self, current):
+ """
+ Unmount the volume using REST PATCH method.
+ """
+ self.rest_mount_volume(current, '') if current.get('junction_path') else None
+
    def volume_unmount(self, current):
        """
        Unmount FlexCache volume at destination cluster

        Dispatches to REST or the ZAPI volume-unmount call; 'current' is only
        used on the REST path (for the uuid and junction path).
        """
        if self.use_rest:
            self.rest_unmount_volume(current)
        else:
            options = {'volume-name': self.parameters['name']}
            xml = netapp_utils.zapi.NaElement.create_node_with_children(
                'volume-unmount', **options)
            try:
                self.server.invoke_successfully(xml, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error unmounting FlexCache volume: %s'
                                          % (to_native(error)),
                                      exception=traceback.format_exc())
+
+ def patch_volume_rest(self, uuid, body):
+ response, error = rest_volume.patch_volume(self.rest_api, uuid, body)
+ self.na_helper.fail_on_error(error)
+ return response
+
+ def flexcache_rest_delete(self, current):
+ """
+ Delete the flexcache using REST DELETE method.
+ """
+ response = None
+ uuid = current.get('uuid')
+ if uuid is None:
+ error = 'Error, no uuid in current: %s' % str(current)
+ self.na_helper.fail_on_error(error)
+ api = 'storage/flexcache/flexcaches'
+ # There may be a bug in ONTAP. If return_timeout is >= 15, the call fails with uuid not found!
+ # With 5, a job is queued, and completes with success. With a big enough value, no job is
+ # queued, and the API returns in around 15 seconds with a not found error.
+ rto = netapp_utils.get_feature(self.module, 'flexcache_delete_return_timeout')
+ response, error = rest_generic.delete_async(self.rest_api, api, uuid, timeout=rto, job_timeout=self.parameters['time_out'])
+ self.na_helper.fail_on_error(error)
+ return response
+
    def flexcache_delete(self, current):
        """
        Delete FlexCache relationship at destination cluster
        Check job status

        Optionally unmounts and/or offlines the volume first, per the
        force_unmount/force_offline options.
        """
        if self.parameters['force_unmount']:
            self.volume_unmount(current)
        if self.parameters['force_offline']:
            self.volume_offline(current)
        if self.use_rest:
            return self.flexcache_rest_delete(current)
        results = self.flexcache_delete_async()
        status = results.get('result-status')
        if status == 'in_progress' and 'result-jobid' in results:
            if self.parameters['time_out'] == 0:
                # asynchronous call, assuming success!
                return None
            error = self.check_job_status(results['result-jobid'])
            if error is not None:
                self.module.fail_json(msg='Error when deleting flexcache: %s' % error)
            return None
        # any other status (or missing job id) is unexpected
        self.module.fail_json(msg='Unexpected error when deleting flexcache: results is: %s' % repr(results))
+
+ def check_parameters(self, cd_action):
+ """
+ Validate parameters and fail if one or more required params are missing
+ """
+ if cd_action != 'create':
+ return
+ if self.parameters['state'] == 'present':
+ expected = 'origin_volume', 'origin_vserver'
+ missings = [param for param in expected if not self.parameters.get(param)]
+ if missings:
+ plural = 's' if len(missings) > 1 else ''
+ msg = 'Missing parameter%s: %s' % (plural, ', '.join(missings))
+ self.module.fail_json(msg=msg)
+
    def apply(self):
        """
        Apply action to FlexCache

        Creates or deletes the FlexCache; on an existing FlexCache (REST only),
        supports mount/unmount via junction_path and forcing a prepopulate.
        """
        current = self.flexcache_get()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify, mount_unmount = None, None
        prepopulate_if_already_created = None

        if self.parameters['state'] == 'present' and 'prepopulate' in self.parameters:
            # pop the option so it is not treated as a modifiable attribute below
            prepopulate_if_already_created = self.parameters['prepopulate'].pop('force_prepopulate_if_already_created')

        if cd_action is None:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            if modify and self.use_rest:
                # junction_path (mount/unmount) is the only supported modification, and only with REST
                mount_unmount = modify.pop('junction_path', None)
            if modify:
                self.module.fail_json(msg='FlexCache properties cannot be modified by this module.  modify: %s' % str(modify))
            if current and prepopulate_if_already_created:
                # force a prepopulate action
                modify = dict(prepopulate=self.parameters['prepopulate'])
                self.na_helper.changed = True
                self.module.warn('na_ontap_flexcache is not idempotent when prepopulate is present and force_prepopulate_if_already_created=true')
                # NOTE(review): the ZAPI 'current' dict has no 'junction_path' key, so this
                # would raise KeyError; prepopulate is REST-only (enforced in __init__),
                # which presumably protects this path - confirm.
                if mount_unmount == '' or current['junction_path'] == '':
                    self.module.warn('prepopulate requires the FlexCache volume to be mounted')
        self.check_parameters(cd_action)
        response = None
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                response = self.flexcache_create()
            elif cd_action == 'delete':
                response = self.flexcache_delete(current)
            else:
                if mount_unmount is not None:
                    # mount first, as this is required for prepopulate to succeed (or fail for unmount)
                    self.rest_mount_volume(current, mount_unmount)
                if modify:
                    response = self.flexcache_rest_modify(current['uuid'])
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify, response)
        self.module.exit_json(**result)
+
+
def main():
    """Execute action"""
    NetAppONTAPFlexCache().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_event.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_event.py
new file mode 100644
index 000000000..fa7629fdb
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_event.py
@@ -0,0 +1,444 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_fpolicy_event
+short_description: NetApp ONTAP FPolicy policy event configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete or modify an FPolicy policy event.
+options:
+ state:
+ description:
+ - Whether the FPolicy policy event is present or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver to create the event on.
+ required: true
+ type: str
+
+ name:
+ description:
+ - Name of the Event.
+ required: true
+ type: str
+
+ file_operations:
+ description:
+ - Name of file operations to be applied to the event. By default no operations are monitored.
+ type: list
+ elements: 'str'
+ choices: ['close', 'create', 'create_dir', 'delete', 'delete_dir', 'getattr', 'link', 'lookup',
+ 'open', 'read', 'write', 'rename', 'rename_dir', 'setattr', 'symlink']
+
+ filters:
+ description:
+ - Name of filters to be applied to the event. It is notification filtering parameters. By default no filters are selected.
+ type: list
+ elements: 'str'
+ choices: ['monitor_ads', 'close_with_modification', 'close_without_modification', 'first_read', 'first_write', 'offline_bit', 'open_with_delete_intent',
+ 'open_with_write_intent', 'write_with_size_change', 'close_with_read', 'setattr_with_owner_change', 'setattr_with_group_change',
+ 'setattr_with_sacl_change', 'setattr_with_dacl_change', 'setattr_with_modify_time_change', 'setattr_with_access_time_change',
+ 'setattr_with_creation_time_change', 'setattr_with_mode_change', 'setattr_with_size_change', 'setattr_with_allocation_size_change', 'exclude_directory']
+
+ protocol:
+ description:
+ - Name of protocol for which event is created. By default no protocol is selected.
+ choices: ['cifs', 'nfsv3', 'nfsv4']
+ type: str
+
+ volume_monitoring:
+ description:
+    - Indicates whether monitoring of volume operations is required for the event. If not specified, the default value is false.
+ type: bool
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = """
+- name: Create FPolicy Event
+ na_ontap_fpolicy_event:
+ state: present
+ vserver: svm1
+ name: fpolicy_event
+ file_operations: ['create', 'create_dir', 'delete', 'delete_dir', 'read', 'close', 'rename', 'rename_dir']
+ filters: ['first_read', 'close_with_modification']
+ protocol: cifs
+ volume_monitoring: false
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+- name: Modify FPolicy Event
+ na_ontap_fpolicy_event:
+ state: present
+ vserver: svm1
+ name: fpolicy_event
+ volume_monitoring: true
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+- name: Delete FPolicy Event
+ na_ontap_fpolicy_event:
+ state: absent
+ vserver: svm1
+ name: fpolicy_event
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+"""
+
+RETURN = """ # """
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
class NetAppOntapFpolicyEvent():
    """Create, modify or delete an FPolicy policy event on a vserver.

    Uses the REST API when available, and falls back to ZAPI otherwise.
    """

    def __init__(self):
        """Build the argument spec, validate parameters and set up the REST or ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(choices=['present', 'absent'], type='str', default='present'),
            vserver=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            file_operations=dict(
                required=False,
                type='list',
                elements='str',
                choices=['close', 'create', 'create_dir', 'delete', 'delete_dir', 'getattr', 'link',
                         'lookup', 'open', 'read', 'write', 'rename', 'rename_dir', 'setattr', 'symlink']),
            filters=dict(
                required=False,
                type='list',
                elements='str',
                choices=['monitor_ads', 'close_with_modification', 'close_without_modification', 'first_read',
                         'first_write', 'offline_bit', 'open_with_delete_intent', 'open_with_write_intent', 'write_with_size_change', 'close_with_read',
                         'setattr_with_owner_change', 'setattr_with_group_change', 'setattr_with_sacl_change', 'setattr_with_dacl_change',
                         'setattr_with_modify_time_change', 'setattr_with_access_time_change', 'setattr_with_creation_time_change', 'setattr_with_mode_change',
                         'setattr_with_size_change', 'setattr_with_allocation_size_change', 'exclude_directory']),
            protocol=dict(required=False, type='str', choices=['cifs', 'nfsv3', 'nfsv4']),
            volume_monitoring=dict(required=False, type='bool')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            required_together=[
                ('protocol', 'file_operations')]
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # vserver UUID is only needed for REST calls; it is resolved lazily in apply().
        self.vserver_uuid = None

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                # Bug fix: the original passed netapp_utils.netapp (the module object itself) as
                # the failure message.  Use the standard "netapp-lib is required" message instead,
                # consistent with the other fpolicy modules in this collection.
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_vserver_uuid(self):
        """Return the UUID of the configured vserver, failing the module if it does not exist."""
        api = "/svm/svms"
        query = {
            'name': self.parameters['vserver']
        }
        message, error = self.rest_api.get(api, query)
        if error:
            self.module.fail_json(msg=error)
        # if vserver does not exist we expect message to be a dict containing 'records': []
        if not message['records']:
            self.module.fail_json(msg="vserver does not exist")

        return message['records'][0]['uuid']

    def list_to_dict(self, params):
        """
        Converts a list of entries to a dictionary with the key as the parameter name and the value as True as expected by the REST API
        """
        return dict((param, True) for param in params)

    def get_fpolicy_event(self):
        """
        Get FPolicy event configuration if an event matching the parameters exists.

        :return: dict of event properties, or None if the event does not exist
        """
        return_value = None
        if self.use_rest:
            api = "/protocols/fpolicy/%s/events" % (self.vserver_uuid)
            query = {
                'fields': 'protocol,filters,file_operations,volume_monitoring'
            }
            message, error = self.rest_api.get(api, query)
            records, error = rrh.check_for_0_or_more_records(api, message, error)
            if error:
                self.module.fail_json(msg=error)
            if records is not None:
                for record in records:
                    if record['name'] == self.parameters['name']:
                        return_value = {}
                        for parameter in ('protocol', 'volume_monitoring'):
                            return_value[parameter] = []
                            if parameter in record:
                                return_value[parameter] = record[parameter]
                        # file_operations and filters contains a dict all possible choices as the keys and True/False as the values.
                        # Return a list of the choices that are True.
                        return_value['file_operations'] = []
                        if 'file_operations' in record:
                            file_operation_list = [file_operation for file_operation, enabled in record['file_operations'].items() if enabled]
                            return_value['file_operations'] = file_operation_list

                        return_value['filters'] = []
                        if 'filters' in record:
                            # avoid shadowing the builtin 'filter'
                            filters_list = [filter_name for filter_name, enabled in record['filters'].items() if enabled]
                            return_value['filters'] = filters_list

            return return_value

        else:
            fpolicy_event_obj = netapp_utils.zapi.NaElement('fpolicy-policy-event-get-iter')
            fpolicy_event_config = netapp_utils.zapi.NaElement('fpolicy-event-options-config')
            fpolicy_event_config.add_new_child('event-name', self.parameters['name'])
            fpolicy_event_config.add_new_child('vserver', self.parameters['vserver'])
            query = netapp_utils.zapi.NaElement('query')
            query.add_child_elem(fpolicy_event_config)
            fpolicy_event_obj.add_child_elem(query)

            try:
                result = self.server.invoke_successfully(fpolicy_event_obj, True)

            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error searching for FPolicy policy event %s on vserver %s: %s' % (
                    self.parameters['name'], self.parameters['vserver'], to_native(error)),
                    exception=traceback.format_exc()
                )

            if result.get_child_by_name('attributes-list'):
                fpolicy_event_attributes = result['attributes-list']['fpolicy-event-options-config']

                # get file operations as list
                file_operations = []
                if fpolicy_event_attributes.get_child_by_name('file-operations'):
                    for file_operation in fpolicy_event_attributes.get_child_by_name('file-operations').get_children():
                        file_operations.append(file_operation.get_content())

                # get filter string as list
                filters = []
                if fpolicy_event_attributes.get_child_by_name('filter-string'):
                    for filter_elem in fpolicy_event_attributes.get_child_by_name('filter-string').get_children():
                        filters.append(filter_elem.get_content())

                protocol = ""
                if fpolicy_event_attributes.get_child_by_name('protocol'):
                    protocol = fpolicy_event_attributes.get_child_content('protocol')

                return_value = {
                    'vserver': fpolicy_event_attributes.get_child_content('vserver'),
                    'name': fpolicy_event_attributes.get_child_content('event-name'),
                    'file_operations': file_operations,
                    'filters': filters,
                    'protocol': protocol,
                    'volume_monitoring': self.na_helper.get_value_for_bool(
                        from_zapi=True, value=fpolicy_event_attributes.get_child_content('volume-operation')
                    )
                }

            return return_value

    def create_fpolicy_event(self):
        """
        Create an FPolicy policy event
        :return: nothing
        """
        if self.use_rest:
            api = "/protocols/fpolicy/%s/events" % (self.vserver_uuid)
            body = {
                'name': self.parameters['name']
            }

            if 'protocol' in self.parameters:
                body['protocol'] = self.parameters['protocol']
            if 'volume_monitoring' in self.parameters:
                body['volume_monitoring'] = self.parameters['volume_monitoring']

            # REST expects filters/file_operations as {choice: True} maps rather than lists
            if 'filters' in self.parameters:
                body['filters'] = self.list_to_dict(self.parameters['filters'])
            if 'file_operations' in self.parameters:
                body['file_operations'] = self.list_to_dict(self.parameters['file_operations'])

            dummy, error = self.rest_api.post(api, body)

            if error:
                self.module.fail_json(msg=error)

        else:
            fpolicy_event_obj = netapp_utils.zapi.NaElement('fpolicy-policy-event-create')
            fpolicy_event_obj.add_new_child('event-name', self.parameters['name'])

            if 'file_operations' in self.parameters:

                file_operation_obj = netapp_utils.zapi.NaElement('file-operations')

                for file_operation in self.parameters['file_operations']:
                    file_operation_obj.add_new_child('fpolicy-operation', file_operation)
                fpolicy_event_obj.add_child_elem(file_operation_obj)

            if 'filters' in self.parameters:

                filter_string_obj = netapp_utils.zapi.NaElement('filter-string')

                for filter_name in self.parameters['filters']:
                    filter_string_obj.add_new_child('fpolicy-filter', filter_name)
                fpolicy_event_obj.add_child_elem(filter_string_obj)

            if 'protocol' in self.parameters:
                fpolicy_event_obj.add_new_child('protocol', self.parameters['protocol'])

            if 'volume_monitoring' in self.parameters:
                fpolicy_event_obj.add_new_child(
                    'volume-operation', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['volume_monitoring'])
                )

            try:
                self.server.invoke_successfully(fpolicy_event_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error creating fPolicy policy event %s on vserver %s: %s' % (
                    self.parameters['name'], self.parameters['vserver'], to_native(error)),
                    exception=traceback.format_exc())

    def modify_fpolicy_event(self, modify):
        """
        Modify an FPolicy policy event
        :param modify: dict of attributes to change, as computed by get_modified_attributes
        :return: nothing
        """
        if self.use_rest:
            api = "/private/cli/vserver/fpolicy/policy/event"
            query = {
                'vserver': self.parameters['vserver'],
                'event-name': self.parameters['name']
            }
            body = {}
            # protocol and file_operations must be parsed into the API together
            # if filters exists filters,protocol and file_operations must be parsed together.
            for parameter in 'protocol', 'filters', 'file_operations':
                if parameter in modify:
                    body[parameter] = modify[parameter]
                elif parameter in self.parameters:
                    body[parameter] = self.parameters[parameter]
            if 'volume_monitoring' in modify:
                body['volume-operation'] = modify['volume_monitoring']

            dummy, error = self.rest_api.patch(api, body, query)
            if error:
                self.module.fail_json(msg=error)

        else:
            fpolicy_event_obj = netapp_utils.zapi.NaElement('fpolicy-policy-event-modify')
            fpolicy_event_obj.add_new_child('event-name', self.parameters['name'])

            if 'file_operations' in self.parameters:
                file_operation_obj = netapp_utils.zapi.NaElement('file-operations')
                for file_operation in self.parameters['file_operations']:
                    file_operation_obj.add_new_child('fpolicy-operation', file_operation)
                fpolicy_event_obj.add_child_elem(file_operation_obj)

            if 'filters' in self.parameters:
                filter_string_obj = netapp_utils.zapi.NaElement('filter-string')
                for filter_name in self.parameters['filters']:
                    filter_string_obj.add_new_child('fpolicy-filter', filter_name)
                fpolicy_event_obj.add_child_elem(filter_string_obj)

            if 'protocol' in self.parameters:
                fpolicy_event_obj.add_new_child('protocol', self.parameters['protocol'])

            if 'volume_monitoring' in self.parameters:
                fpolicy_event_obj.add_new_child(
                    'volume-operation', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['volume_monitoring'])
                )

            try:
                self.server.invoke_successfully(fpolicy_event_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying fPolicy policy event %s on vserver %s: %s' % (
                    self.parameters['name'], self.parameters['vserver'], to_native(error)),
                    exception=traceback.format_exc())

    def delete_fpolicy_event(self):
        """
        Delete an FPolicy policy event
        :return: nothing
        """
        if self.use_rest:
            api = "/protocols/fpolicy/%s/events/%s" % (self.vserver_uuid, self.parameters['name'])

            dummy, error = self.rest_api.delete(api)
            if error:
                self.module.fail_json(msg=error)
        else:
            fpolicy_event_obj = netapp_utils.zapi.NaElement('fpolicy-policy-event-delete')
            fpolicy_event_obj.add_new_child('event-name', self.parameters['name'])

            try:
                self.server.invoke_successfully(fpolicy_event_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error deleting fPolicy policy event %s on vserver %s: %s' % (
                    self.parameters['name'], self.parameters['vserver'],
                    to_native(error)), exception=traceback.format_exc()
                )

    def apply(self):
        """Determine the required action (create/modify/delete) and execute it, honoring check_mode."""
        if self.use_rest:
            self.vserver_uuid = self.get_vserver_uuid()

        current, modify = self.get_fpolicy_event(), None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)

        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed:
            if not self.module.check_mode:
                if cd_action == 'create':
                    self.create_fpolicy_event()
                elif cd_action == 'delete':
                    self.delete_fpolicy_event()
                elif modify:
                    self.modify_fpolicy_event(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Instantiate the FPolicy event module object and run the requested action."""
    module_obj = NetAppOntapFpolicyEvent()
    module_obj.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_ext_engine.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_ext_engine.py
new file mode 100644
index 000000000..ff059096f
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_ext_engine.py
@@ -0,0 +1,520 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_fpolicy_ext_engine
+short_description: NetApp ONTAP fPolicy external engine configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete or modify fpolicy external engine.
+options:
+ state:
+ description:
+ - Whether the fPolicy external engine is present or not
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - the name of the vserver to create the external engine on
+ required: true
+ type: str
+
+ name:
+ description:
+ - Name of the external engine.
+ required: true
+ type: str
+
+ certificate_ca:
+ description:
+ - Certificate authority name. No default value is set for this field.
+ type: str
+
+ certificate_common_name:
+ description:
+ - FQDN or custom common name of certificate. No default value is set for this field.
+ type: str
+
+ certificate_serial:
+ description:
+ - Serial number of certificate. No default value is set for this field.
+ type: str
+
+ extern_engine_type:
+ description:
+ - External engine type. If the engine is asynchronous, no reply is sent from FPolicy servers. Default value set for this field is synchronous.
+ choices: ['synchronous', 'asynchronous']
+ type: str
+
+ is_resiliency_enabled:
+ description:
+ - Indicates if the resiliency with this engine is required.
+ - If set to true, the notifications will be stored in a path as resiliency_directory_path
+ - If it is false, the notifications will not be stored. Default value is false.
+ type: bool
+
+ max_connection_retries:
+ description:
+ - Number of times storage appliance will attempt to establish a broken connection to FPolicy server. Default value set for this field is 5.
+ type: int
+
+ max_server_reqs:
+ description:
+ - Maximum number of outstanding screen requests that will be queued for an FPolicy Server. Default value set for this field is 50.
+ type: int
+
+ port:
+ description:
+ - Port number of the FPolicy server application.
+ type: int
+
+ primary_servers:
+ description:
+ - Primary FPolicy servers.
+ type: list
+ elements: str
+
+ recv_buffer_size:
+ description:
+ - Receive buffer size of connected socket for FPolicy Server. Default value set for this field is 256 kilobytes (256Kb).
+ type: int
+
+ resiliency_directory_path:
+ description:
+ - Directory path under Vserver for storing file access notifications. File access notifications will be stored in a generated file during the outage time.
+ - The path is the full, user visible path relative to the Vserver root, and it might be crossing junction mount points.
+ type: str
+
+ secondary_servers:
+ description:
+ - Secondary FPolicy servers. No default value is set for this field.
+ type: list
+ elements: str
+
+ send_buffer_size:
+ description:
+ - Send buffer size of connected socket for FPolicy Server. Default value set for this field is 256 kilobytes (256Kb).
+ type: int
+
+ ssl_option:
+ description:
+    - SSL option for external communication. No default value is set for this field.
+ choices: ['no_auth', 'server_auth', 'mutual_auth']
+ type: str
+
+'''
+
+EXAMPLES = """
+- name: Create fPolicy external engine
+ na_ontap_fpolicy_ext_engine:
+ state: present
+ vserver: svm1
+ name: fpolicy_ext_engine
+ port: 8787
+ extern_engine_type: asynchronous
+ primary_servers: ['10.11.12.13', '10.11.12.14']
+ ssl_option: no_auth
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+- name: Modify fPolicy external engine
+ na_ontap_fpolicy_ext_engine:
+ state: present
+ vserver: svm1
+ name: fpolicy_ext_engine
+ port: 7878
+ extern_engine_type: synchronous
+ primary_servers: ['10.11.12.15', '10.11.12.16']
+ ssl_option: server_auth
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+- name: Delete fPolicy external engine
+ na_ontap_fpolicy_ext_engine:
+ state: absent
+ vserver: svm1
+ name: fpolicy_engine
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
class NetAppOntapFpolicyExtEngine():
    """Create, modify or delete an fPolicy external engine on a vserver.

    Uses the REST API (private CLI passthrough) when available, and falls back to ZAPI otherwise.
    """

    def __init__(self):
        """Build the argument spec, validate parameters and set up the REST or ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            certificate_ca=dict(required=False, type='str'),
            certificate_common_name=dict(required=False, type='str'),
            certificate_serial=dict(required=False, type='str'),
            extern_engine_type=dict(required=False, type='str', choices=['synchronous', 'asynchronous']),
            is_resiliency_enabled=dict(required=False, type='bool'),
            max_connection_retries=dict(required=False, type='int'),
            max_server_reqs=dict(required=False, type='int'),
            port=dict(required=False, type='int'),
            primary_servers=dict(required=False, type='list', elements='str'),
            recv_buffer_size=dict(required=False, type='int'),
            resiliency_directory_path=dict(required=False, type='str'),
            secondary_servers=dict(required=False, type='list', elements='str'),
            send_buffer_size=dict(required=False, type='int'),
            ssl_option=dict(required=False, type='str', choices=['no_auth', 'server_auth', 'mutual_auth']),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[('state', 'present', ['ssl_option', 'primary_servers', 'port'])],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def create_rest_body(self):
        """
        Create an fPolicy body for a create operation
        :return: body as dict
        """

        # required parameters (enforced by required_if) always go in the body
        body = {
            'vserver': self.parameters['vserver'],
            'engine-name': self.parameters['name'],
            'primary_servers': self.parameters['primary_servers'],
            'port': self.parameters['port'],
            'ssl_option': self.parameters['ssl_option']
        }

        list_of_options = ['secondary_servers', 'is_resiliency_enabled', 'resiliency_directory_path',
                           'max_connection_retries', 'max_server_reqs', 'recv_buffer_size', 'send_buffer_size',
                           'certificate_ca', 'certificate_common_name', 'certificate_serial', 'extern_engine_type']

        for option in list_of_options:
            if option in self.parameters:
                body[option] = self.parameters[option]

        return body

    def create_zapi_api(self, api):
        """
        Create an the ZAPI API request for fpolicy modify and create
        :param api: ZAPI call name ('fpolicy-policy-external-engine-create' or '...-modify')
        :return: ZAPI API object
        """
        fpolicy_ext_engine_obj = netapp_utils.zapi.NaElement(api)
        fpolicy_ext_engine_obj.add_new_child('engine-name', self.parameters['name'])
        fpolicy_ext_engine_obj.add_new_child('port-number', self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters['port']))
        fpolicy_ext_engine_obj.add_new_child('ssl-option', self.parameters['ssl_option'])

        primary_servers_obj = netapp_utils.zapi.NaElement('primary-servers')

        for primary_server in self.parameters['primary_servers']:
            primary_servers_obj.add_new_child('ip-address', primary_server)
        fpolicy_ext_engine_obj.add_child_elem(primary_servers_obj)

        if 'secondary_servers' in self.parameters:
            secondary_servers_obj = netapp_utils.zapi.NaElement('secondary-servers')

            for secondary_server in self.parameters['secondary_servers']:
                # Bug fix: secondary servers were previously appended to the primary-servers
                # element, leaving the secondary-servers element empty.
                secondary_servers_obj.add_new_child('ip-address', secondary_server)
            fpolicy_ext_engine_obj.add_child_elem(secondary_servers_obj)

        if 'is_resiliency_enabled' in self.parameters:
            fpolicy_ext_engine_obj.add_new_child(
                'is-resiliency-enabled',
                self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_resiliency_enabled'])
            )
        if 'resiliency_directory_path' in self.parameters:
            fpolicy_ext_engine_obj.add_new_child('resiliency-directory-path', self.parameters['resiliency_directory_path'])
        if 'max_connection_retries' in self.parameters:
            fpolicy_ext_engine_obj.add_new_child(
                'max-connection-retries',
                self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters['max_connection_retries'])
            )
        if 'max_server_reqs' in self.parameters:
            fpolicy_ext_engine_obj.add_new_child(
                'max-server-requests',
                self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters['max_server_reqs'])
            )
        if 'recv_buffer_size' in self.parameters:
            fpolicy_ext_engine_obj.add_new_child(
                'recv-buffer-size',
                self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters['recv_buffer_size'])
            )
        if 'send_buffer_size' in self.parameters:
            fpolicy_ext_engine_obj.add_new_child(
                'send-buffer-size',
                self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters['send_buffer_size'])
            )
        if 'certificate_ca' in self.parameters:
            fpolicy_ext_engine_obj.add_new_child('certificate-ca', self.parameters['certificate_ca'])
        if 'certificate_common_name' in self.parameters:
            fpolicy_ext_engine_obj.add_new_child('certificate-common-name', self.parameters['certificate_common_name'])
        if 'certificate_serial' in self.parameters:
            fpolicy_ext_engine_obj.add_new_child('certificate-serial', self.parameters['certificate_serial'])
        if 'extern_engine_type' in self.parameters:
            fpolicy_ext_engine_obj.add_new_child('extern-engine-type', self.parameters['extern_engine_type'])

        return fpolicy_ext_engine_obj

    def create_fpolicy_ext_engine(self):
        """
        Create an fPolicy external engine
        :return: nothing
        """

        if self.use_rest:
            api = "/private/cli/vserver/fpolicy/policy/external-engine"
            body = self.create_rest_body()

            dummy, error = self.rest_api.post(api, body)
            if error:
                self.module.fail_json(msg=error)
        else:
            fpolicy_ext_engine_obj = self.create_zapi_api('fpolicy-policy-external-engine-create')

            try:
                self.server.invoke_successfully(fpolicy_ext_engine_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(
                    msg='Error creating fPolicy external engine %s on vserver %s: %s' %
                    (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
                )

    def modify_fpolicy_ext_engine(self, modify):
        """
        Modify an fPolicy external engine
        :param modify: dict of attributes to change, as computed by get_modified_attributes
        :return: nothing
        """

        if self.use_rest:
            api = "/private/cli/vserver/fpolicy/policy/external-engine"
            query = {
                'vserver': self.parameters['vserver'],
                'engine-name': self.parameters['name']
            }

            dummy, error = self.rest_api.patch(api, modify, query)
            if error:
                self.module.fail_json(msg=error)
        else:
            # ZAPI modify requires the full desired configuration, not just the delta
            fpolicy_ext_engine_obj = self.create_zapi_api('fpolicy-policy-external-engine-modify')

            try:
                self.server.invoke_successfully(fpolicy_ext_engine_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(
                    msg='Error modifying fPolicy external engine %s on vserver %s: %s' %
                    (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
                )

    def delete_fpolicy_ext_engine(self):
        """
        Delete an fPolicy external engine
        :return: nothing
        """

        if self.use_rest:
            api = "/private/cli/vserver/fpolicy/policy/external-engine"
            query = {
                'vserver': self.parameters['vserver'],
                'engine-name': self.parameters['name']
            }

            dummy, error = self.rest_api.delete(api, query)

            if error:
                self.module.fail_json(msg=error)
        else:

            fpolicy_ext_engine_obj = netapp_utils.zapi.NaElement('fpolicy-policy-external-engine-delete')
            fpolicy_ext_engine_obj.add_new_child('engine-name', self.parameters['name'])

            try:
                self.server.invoke_successfully(fpolicy_ext_engine_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(
                    msg='Error deleting fPolicy external engine %s on vserver %s: %s' %
                    (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
                )

    def get_fpolicy_ext_engine(self):
        """
        Check to see if the fPolicy external engine exists or not
        :return: dict of engine properties if exist, None if not
        """
        return_value = None

        if self.use_rest:
            fields = [
                "vserver",
                "engine-name",
                "primary-servers",
                "port",
                "secondary-servers",
                "extern-engine-type",
                "ssl-option",
                "max-connection-retries",
                "max-server-reqs",
                "certificate-common-name",
                "certificate-serial",
                "certificate-ca",
                "recv-buffer-size",
                "send-buffer-size",
                "is-resiliency-enabled",
                "resiliency-directory-path"
            ]

            api = "private/cli/vserver/fpolicy/policy/external-engine"
            query = {
                'fields': ','.join(fields),
                'engine-name': self.parameters['name'],
                'vserver': self.parameters['vserver']
            }
            message, error = self.rest_api.get(api, query)

            return_info, error = rrh.check_for_0_or_1_records(api, message, error)
            # Bug fix: the error returned by check_for_0_or_1_records was previously ignored.
            if error:
                self.module.fail_json(msg=error)
            if return_info is None:
                return None

            return_value = message['records'][0]
            return return_value
        else:

            fpolicy_ext_engine_obj = netapp_utils.zapi.NaElement('fpolicy-policy-external-engine-get-iter')
            fpolicy_ext_engine_config = netapp_utils.zapi.NaElement('fpolicy-external-engine-info')
            fpolicy_ext_engine_config.add_new_child('engine-name', self.parameters['name'])
            fpolicy_ext_engine_config.add_new_child('vserver', self.parameters['vserver'])
            query = netapp_utils.zapi.NaElement('query')
            query.add_child_elem(fpolicy_ext_engine_config)
            fpolicy_ext_engine_obj.add_child_elem(query)

            try:
                result = self.server.invoke_successfully(fpolicy_ext_engine_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(
                    msg='Error searching for fPolicy engine %s on vserver %s: %s' %
                    (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
                )

            if result.get_child_by_name('attributes-list'):
                fpolicy_ext_engine_attributes = result['attributes-list']['fpolicy-external-engine-info']

                primary_servers = []
                primary_servers_elem = fpolicy_ext_engine_attributes.get_child_by_name('primary-servers')
                for primary_server in primary_servers_elem.get_children():
                    primary_servers.append(primary_server.get_content())

                secondary_servers = []
                if fpolicy_ext_engine_attributes.get_child_by_name('secondary-servers'):
                    secondary_servers_elem = fpolicy_ext_engine_attributes.get_child_by_name('secondary-servers')

                    for secondary_server in secondary_servers_elem.get_children():
                        secondary_servers.append(secondary_server.get_content())

                return_value = {
                    'vserver': fpolicy_ext_engine_attributes.get_child_content('vserver'),
                    'name': fpolicy_ext_engine_attributes.get_child_content('engine-name'),
                    'certificate_ca': fpolicy_ext_engine_attributes.get_child_content('certificate-ca'),
                    'certificate_common_name': fpolicy_ext_engine_attributes.get_child_content('certificate-common-name'),
                    'certificate_serial': fpolicy_ext_engine_attributes.get_child_content('certificate-serial'),
                    'extern_engine_type': fpolicy_ext_engine_attributes.get_child_content('extern-engine-type'),
                    'is_resiliency_enabled': self.na_helper.get_value_for_bool(
                        from_zapi=True,
                        value=fpolicy_ext_engine_attributes.get_child_content('is-resiliency-enabled')
                    ),
                    'max_connection_retries': self.na_helper.get_value_for_int(
                        from_zapi=True,
                        value=fpolicy_ext_engine_attributes.get_child_content('max-connection-retries')
                    ),
                    'max_server_reqs': self.na_helper.get_value_for_int(
                        from_zapi=True,
                        value=fpolicy_ext_engine_attributes.get_child_content('max-server-requests')
                    ),
                    'port': self.na_helper.get_value_for_int(
                        from_zapi=True,
                        value=fpolicy_ext_engine_attributes.get_child_content('port-number')
                    ),
                    'primary_servers': primary_servers,
                    'secondary_servers': secondary_servers,
                    'recv_buffer_size': self.na_helper.get_value_for_int(
                        from_zapi=True,
                        value=fpolicy_ext_engine_attributes.get_child_content('recv-buffer-size')
                    ),
                    'resiliency_directory_path': fpolicy_ext_engine_attributes.get_child_content('resiliency-directory-path'),
                    'send_buffer_size': self.na_helper.get_value_for_int(
                        from_zapi=True,
                        value=fpolicy_ext_engine_attributes.get_child_content('send-buffer-size')
                    ),
                    'ssl_option': fpolicy_ext_engine_attributes.get_child_content('ssl-option'),
                }

            return return_value

    def apply(self):
        """Determine the required action (create/modify/delete) and execute it, honoring check_mode."""
        current, modify = self.get_fpolicy_ext_engine(), None

        cd_action = self.na_helper.get_cd_action(current, self.parameters)

        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed:
            if not self.module.check_mode:
                if cd_action == 'create':
                    self.create_fpolicy_ext_engine()
                elif cd_action == 'delete':
                    self.delete_fpolicy_ext_engine()
                elif modify:
                    self.modify_fpolicy_ext_engine(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Instantiate the fPolicy external engine module object and run the requested action."""
    module_obj = NetAppOntapFpolicyExtEngine()
    module_obj.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_policy.py
new file mode 100644
index 000000000..47d9143cb
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_policy.py
@@ -0,0 +1,378 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_fpolicy_policy
+short_description: NetApp ONTAP - Create, delete or modify an FPolicy policy.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete or modify an FPolicy policy. Fpolicy scope must exist before running this module.
+- FPolicy is a file access notification framework that enables an administrator to monitor file and directory access in storage configured for CIFS and NFS.
+options:
+ state:
+ description:
+ - Whether the fPolicy policy should exist or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+    - The name of the vserver to create the policy on.
+ type: str
+ required: True
+
+ name:
+ description:
+ - Name of the policy.
+ type: str
+ required: True
+
+ allow_privileged_access:
+ description:
+ - Specifies if privileged access should be given to FPolicy servers registered for the policy.
+ type: bool
+
+ engine:
+ description:
+ - Name of the Engine. External engines must be created prior to running this task.
+ type: str
+
+ events:
+ description:
+ - Events for file access monitoring.
+ type: list
+ elements: str
+ required: True
+
+ is_mandatory:
+ description:
+ - Specifies the action to take on a file access event in the case when all primary and secondary servers are down or no response is received from the
+ - FPolicy servers within a given timeout period. When True, file access events will be denied under these circumstances
+ type: bool
+
+ is_passthrough_read_enabled:
+ description:
+ - Specifies if passthrough-read should be allowed to FPolicy servers registered for the policy.
+ type: bool
+
+ privileged_user_name:
+ description:
+ - User name for privileged access.
+ type: str
+
+'''
+
+EXAMPLES = """
+- name: Create FPolicy policy
+ na_ontap_fpolicy_policy:
+ state: present
+ vserver: svm1
+ name: fpolicy_policy
+ events: fcpolicy_event
+ engine: fpolicy_ext_engine
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+- name: Modify FPolicy policy
+ na_ontap_fpolicy_policy:
+ state: present
+ vserver: svm1
+ name: fpolicy_policy
+ events: fcpolicy_event
+ is_mandatory: false
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+- name: Delete FPolicy policy
+ na_ontap_fpolicy_policy:
+ state: absent
+ vserver: svm1
+ name: fpolicy_policy
+ events: fcpolicy_event
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppOntapFpolicyPolicy():
+    """
+    Create, delete or modify an FPolicy policy on an ONTAP vserver.
+
+    Uses the REST private CLI passthrough endpoint when the cluster
+    supports REST, otherwise falls back to ZAPI.
+    """
+
+    def __init__(self):
+        # Start from the shared ONTAP connection options (hostname, auth, ...)
+        # and add the module-specific options.
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            name=dict(required=True, type='str'),
+            allow_privileged_access=dict(required=False, type='bool'),
+            engine=dict(required=False, type='str'),
+            events=dict(required=True, type='list', elements='str'),
+            is_mandatory=dict(required=False, type='bool'),
+            is_passthrough_read_enabled=dict(required=False, type='bool'),
+            privileged_user_name=dict(required=False, type='str')
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        # Only options the user actually supplied end up in self.parameters.
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+
+        if not self.use_rest:
+            # The ZAPI fallback requires the netapp-lib python package.
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_fpolicy_policy(self):
+        """
+        Check if FPolicy policy exists, if it exists get the current state of the policy.
+
+        :return: dict of policy properties, or None if the policy does not exist.
+        """
+        if self.use_rest:
+            # REST access goes through the private CLI passthrough endpoint.
+            api = "/private/cli/vserver/fpolicy/policy"
+            query = {
+                'vserver': self.parameters['vserver'],
+                'policy-name': self.parameters['name'],
+                'fields': 'events,engine,allow-privileged-access,is-mandatory,is-passthrough-read-enabled,privileged-user-name'
+            }
+
+            message, error = self.rest_api.get(api, query)
+            if error:
+                self.module.fail_json(msg=error)
+            # An empty body or an empty record list both mean "not found".
+            if len(message.keys()) == 0:
+                return None
+            if 'records' in message and len(message['records']) == 0:
+                return None
+            if 'records' not in message:
+                error = "Unexpected response in get_fpolicy_policy from %s: %s" % (api, repr(message))
+                self.module.fail_json(msg=error)
+            # vserver + policy-name identify a single policy, so only the
+            # first record is inspected.
+            return_value = {
+                'vserver': message['records'][0]['vserver'],
+                'name': message['records'][0]['policy_name'],
+                'events': message['records'][0]['events'],
+                'allow_privileged_access': message['records'][0]['allow_privileged_access'],
+                'engine': message['records'][0]['engine'],
+                'is_mandatory': message['records'][0]['is_mandatory'],
+                'is_passthrough_read_enabled': message['records'][0]['is_passthrough_read_enabled']
+            }
+            # privileged_user_name is only present when privileged access is configured.
+            if 'privileged_user_name' in message['records'][0]:
+                return_value['privileged_user_name'] = message['records'][0]['privileged_user_name']
+
+            return return_value
+
+        else:
+            return_value = None
+
+            # ZAPI: query fpolicy-policy-get-iter filtered on policy name and vserver.
+            fpolicy_policy_obj = netapp_utils.zapi.NaElement('fpolicy-policy-get-iter')
+            fpolicy_policy_config = netapp_utils.zapi.NaElement('fpolicy-policy-info')
+            fpolicy_policy_config.add_new_child('policy-name', self.parameters['name'])
+            fpolicy_policy_config.add_new_child('vserver', self.parameters['vserver'])
+            query = netapp_utils.zapi.NaElement('query')
+            query.add_child_elem(fpolicy_policy_config)
+            fpolicy_policy_obj.add_child_elem(query)
+
+            try:
+                result = self.server.invoke_successfully(fpolicy_policy_obj, True)
+
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg='Error searching for fPolicy policy %s on vserver %s: %s' % (self.parameters['name'], self.parameters['vserver'], to_native(error)),
+                    exception=traceback.format_exc())
+            if result.get_child_by_name('attributes-list'):
+                fpolicy_policy_attributes = result['attributes-list']['fpolicy-policy-info']
+                # Flatten the nested <events> element into a plain list of names.
+                events = []
+                if fpolicy_policy_attributes.get_child_by_name('events'):
+                    for event in fpolicy_policy_attributes.get_child_by_name('events').get_children():
+                        events.append(event.get_content())
+
+                return_value = {
+                    'vserver': fpolicy_policy_attributes.get_child_content('vserver'),
+                    'name': fpolicy_policy_attributes.get_child_content('policy-name'),
+                    'events': events,
+                    'allow_privileged_access': self.na_helper.get_value_for_bool(
+                        from_zapi=True, value=fpolicy_policy_attributes.get_child_content('allow-privileged-access')),
+                    'engine': fpolicy_policy_attributes.get_child_content('engine-name'),
+                    'is_mandatory': self.na_helper.get_value_for_bool(
+                        from_zapi=True, value=fpolicy_policy_attributes.get_child_content('is-mandatory')),
+                    'is_passthrough_read_enabled': self.na_helper.get_value_for_bool(
+                        from_zapi=True, value=fpolicy_policy_attributes.get_child_content('is-passthrough-read-enabled')),
+                    'privileged_user_name': fpolicy_policy_attributes.get_child_content('privileged-user-name')
+                }
+
+            return return_value
+
+    def create_fpolicy_policy(self):
+        """
+        Create an FPolicy policy.
+
+        Fails the module via fail_json on any REST or ZAPI error.
+        """
+        if self.use_rest:
+            api = "/private/cli/vserver/fpolicy/policy"
+            body = {
+                'vserver': self.parameters['vserver'],
+                'policy-name': self.parameters['name'],
+                'events': self.parameters['events']
+            }
+            # Optional options are passed through, translating the module's
+            # underscore names to the dashed CLI field names.
+            for parameter in ('engine', 'allow_privileged_access', 'is_mandatory', 'is_passthrough_read_enabled', 'privileged_user_name'):
+                if parameter in self.parameters:
+                    body[parameter.replace('_', '-')] = self.parameters[parameter]
+
+            dummy, error = self.rest_api.post(api, body)
+            if error:
+                self.module.fail_json(msg=error)
+
+        else:
+            fpolicy_policy_obj = netapp_utils.zapi.NaElement('fpolicy-policy-create')
+            fpolicy_policy_obj.add_new_child('policy-name', self.parameters['name'])
+            if 'is_mandatory' in self.parameters:
+                fpolicy_policy_obj.add_new_child('is-mandatory', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_mandatory']))
+            if 'engine' in self.parameters:
+                fpolicy_policy_obj.add_new_child('engine-name', self.parameters['engine'])
+            if 'allow_privileged_access' in self.parameters:
+                fpolicy_policy_obj.add_new_child(
+                    'allow-privileged-access', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['allow_privileged_access'])
+                )
+            if 'is_passthrough_read_enabled' in self.parameters:
+                fpolicy_policy_obj.add_new_child(
+                    'is-passthrough-read-enabled', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_passthrough_read_enabled'])
+                )
+            # 'events' is a required module option, so it is always sent.
+            events_obj = netapp_utils.zapi.NaElement('events')
+            for event in self.parameters['events']:
+                events_obj.add_new_child('event-name', event)
+            fpolicy_policy_obj.add_child_elem(events_obj)
+
+            if 'privileged_user_name' in self.parameters:
+                fpolicy_policy_obj.add_new_child('privileged-user-name', self.parameters['privileged_user_name'])
+            try:
+                self.server.invoke_successfully(fpolicy_policy_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg='Error creating fPolicy policy %s on vserver %s: %s' % (self.parameters['name'], self.parameters['vserver'], to_native(error)),
+                    exception=traceback.format_exc()
+                )
+
+    def modify_fpolicy_policy(self, modify):
+        """
+        Modify an FPolicy policy.
+
+        :param modify: dict of attributes that differ from the current state
+            (as computed by get_modified_attributes).
+        """
+        if self.use_rest:
+            # REST: patch only the changed attributes, addressing the policy
+            # through the query parameters.
+            api = "/private/cli/vserver/fpolicy/policy"
+            query = {'vserver': self.parameters['vserver']}
+            query['policy-name'] = self.parameters['name']
+            dummy, error = self.rest_api.patch(api, modify, query)
+            if error:
+                self.module.fail_json(msg=error)
+        else:
+            # NOTE(review): unlike the REST branch, the ZAPI branch ignores the
+            # computed 'modify' dict and re-sends every supplied option.
+            fpolicy_policy_obj = netapp_utils.zapi.NaElement('fpolicy-policy-modify')
+            fpolicy_policy_obj.add_new_child('policy-name', self.parameters['name'])
+            if 'is_mandatory' in self.parameters:
+                fpolicy_policy_obj.add_new_child('is-mandatory', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_mandatory']))
+            if 'engine' in self.parameters:
+                fpolicy_policy_obj.add_new_child('engine-name', self.parameters['engine'])
+            if 'allow_privileged_access' in self.parameters:
+                fpolicy_policy_obj.add_new_child(
+                    'allow-privileged-access', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['allow_privileged_access'])
+                )
+            if 'is_passthrough_read_enabled' in self.parameters:
+                fpolicy_policy_obj.add_new_child(
+                    'is-passthrough-read-enabled', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_passthrough_read_enabled'])
+                )
+            events_obj = netapp_utils.zapi.NaElement('events')
+            for event in self.parameters['events']:
+                events_obj.add_new_child('event-name', event)
+            fpolicy_policy_obj.add_child_elem(events_obj)
+
+            if 'privileged_user_name' in self.parameters:
+                fpolicy_policy_obj.add_new_child('privileged-user-name', self.parameters['privileged_user_name'])
+            try:
+                self.server.invoke_successfully(fpolicy_policy_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg='Error modifying fPolicy policy %s on vserver %s: %s' %
+                    (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+    def delete_fpolicy_policy(self):
+        """
+        Delete an FPolicy policy.
+        """
+        if self.use_rest:
+            api = "/private/cli/vserver/fpolicy/policy"
+            body = {
+                'vserver': self.parameters['vserver'],
+                'policy-name': self.parameters['name']
+            }
+            dummy, error = self.rest_api.delete(api, body)
+            if error:
+                self.module.fail_json(msg=error)
+
+        else:
+            fpolicy_policy_obj = netapp_utils.zapi.NaElement('fpolicy-policy-delete')
+            fpolicy_policy_obj.add_new_child('policy-name', self.parameters['name'])
+
+            try:
+                self.server.invoke_successfully(fpolicy_policy_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg='Error deleting fPolicy policy %s on vserver %s: %s' %
+                    (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+    def apply(self):
+        """
+        Apply the requested state: create, delete or modify the policy based
+        on the difference between current state and parameters. Honors check_mode.
+        """
+        current = self.get_fpolicy_policy()
+        modify = None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None and self.parameters['state'] == 'present':
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if self.na_helper.changed:
+            if not self.module.check_mode:
+                if cd_action == 'create':
+                    self.create_fpolicy_policy()
+                elif cd_action == 'delete':
+                    self.delete_fpolicy_policy()
+                elif modify:
+                    self.modify_fpolicy_policy(modify)
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Module entry point: instantiate the module object and apply the
+    state requested by the playbook.
+    """
+    command = NetAppOntapFpolicyPolicy()
+    command.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_scope.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_scope.py
new file mode 100644
index 000000000..a547282a3
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_scope.py
@@ -0,0 +1,516 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_fpolicy_scope
+short_description: NetApp ONTAP - Create, delete or modify an FPolicy policy scope configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete or modify an FPolicy policy scope.
+options:
+ state:
+ description:
+ - Whether the FPolicy policy scope is present or not
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+    - The name of the vserver to create the scope on.
+ required: true
+ type: str
+
+ name:
+ description:
+ - Name of the policy. The FPolicy policy must exist for the scope to be created.
+ required: true
+ type: str
+
+ check_extensions_on_directories:
+ description:
+ - Indicates whether directory names are also subjected to extensions check, similar to file names.
+ - By default, the value is true if policy is configured with Native engine, false otherwise.
+ type: bool
+
+ export_policies_to_exclude:
+ description:
+ - Export Policies to exclude for file access monitoring. By default no export policy is selected.
+ type: list
+ elements: str
+
+ export_policies_to_include:
+ description:
+ - Export policies to include for file access monitoring. By default no export policy is selected.
+ type: list
+ elements: str
+
+ file_extensions_to_exclude:
+ description:
+ - File extensions excluded for screening. By default no file extension is selected.
+ type: list
+ elements: str
+
+ file_extensions_to_include:
+ description:
+ - File extensions included for screening. By default no file extension is selected.
+ type: list
+ elements: str
+
+ is_monitoring_of_objects_with_no_extension_enabled:
+ description:
+ - Indicates whether monitoring of objects with no extension is required. By default, the value is false.
+ type: bool
+
+ shares_to_exclude:
+ description:
+ - Shares to exclude for file access monitoring. By default no share is selected.
+ type: list
+ elements: str
+
+ shares_to_include:
+ description:
+ - Shares to include for file access monitoring. By default no share is selected.
+ type: list
+ elements: str
+
+ volumes_to_exclude:
+ description:
+ - Volumes that are inactive for the file policy. The list can include items which are regular expressions, such as 'vol*' or 'user?'.
+ - Note that if a policy has both an exclude list and an include list, the include list is ignored by the filer when processing user requests.
+ - By default no volume is selected.
+ type: list
+ elements: str
+
+ volumes_to_include:
+ description:
+ - Volumes that are active for the file policy. The list can include items which are regular expressions, such as 'vol*' or 'user?'.
+ - By default no volume is selected.
+ type: list
+ elements: str
+
+'''
+
+EXAMPLES = """
+ - name: Create FPolicy scope
+ na_ontap_fpolicy_scope:
+ state: present
+ vserver: GBSMNAS80LD
+ name: policy1
+ export_policies_to_include: export1
+ shares_to_include: share1
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+ use_rest: "{{ use_rest }}"
+
+ - name: Modify FPolicy scope
+ na_ontap_fpolicy_scope:
+ state: present
+ vserver: GBSMNAS80LD
+ name: policy1
+ export_policies_to_include: export1,export2
+ shares_to_include: share1,share2
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+ use_rest: "{{ use_rest }}"
+
+ - name: Delete FPolicy scope
+ na_ontap_fpolicy_scope:
+ state: absent
+ vserver: GBSMNAS80LD
+ name: policy1
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+ use_rest: "{{ use_rest }}"
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class NetAppOntapFpolicyScope():
+    """
+    Create, delete or modify an FPolicy policy scope on an ONTAP vserver.
+
+    Uses the REST private CLI passthrough endpoint when the cluster
+    supports REST, otherwise falls back to ZAPI.
+    """
+
+    def __init__(self):
+        # Start from the shared ONTAP connection options (hostname, auth, ...)
+        # and add the module-specific options.
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            name=dict(required=True, type='str'),
+            check_extensions_on_directories=dict(required=False, type='bool'),
+            export_policies_to_exclude=dict(required=False, type='list', elements='str'),
+            export_policies_to_include=dict(required=False, type='list', elements='str'),
+            file_extensions_to_exclude=dict(required=False, type='list', elements='str'),
+            file_extensions_to_include=dict(required=False, type='list', elements='str'),
+            is_monitoring_of_objects_with_no_extension_enabled=dict(required=False, type='bool'),
+            shares_to_exclude=dict(required=False, type='list', elements='str'),
+            shares_to_include=dict(required=False, type='list', elements='str'),
+            volumes_to_exclude=dict(required=False, type='list', elements='str'),
+            volumes_to_include=dict(required=False, type='list', elements='str')
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        # Only options the user actually supplied end up in self.parameters.
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+
+        if not self.use_rest:
+            # The ZAPI fallback requires the netapp-lib python package.
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_fpolicy_scope(self):
+        """
+        Check to see if the fPolicy scope exists or not
+        :return: dict of scope properties if exist, None if not
+        """
+        return_value = None
+
+        if self.use_rest:
+            # REST access goes through the private CLI passthrough endpoint.
+            api = "/private/cli/vserver/fpolicy/policy/scope"
+            query = {
+                'vserver': self.parameters['vserver'],
+                'policy-name': self.parameters['name'],
+                'fields': 'shares-to-include,shares-to-exclude,volumes-to-include,volumes-to-exclude,export-policies-to-include,\
+export-policies-to-exclude,file-extensions-to-include,file-extensions-to-exclude,\
+is-file-extension-check-on-directories-enabled,is-monitoring-of-objects-with-no-extension-enabled'
+            }
+            message, error = self.rest_api.get(api, query)
+            # records is None when the scope does not exist.
+            records, error = rrh.check_for_0_or_more_records(api, message, error)
+            if error:
+                self.module.fail_json(msg=error)
+
+            if records is not None:
+                return_value = {
+                    'name': records[0]['policy_name'],
+                    'check_extensions_on_directories': records[0]['is_file_extension_check_on_directories_enabled'],
+                    'is_monitoring_of_objects_with_no_extension_enabled': records[0]['is_monitoring_of_objects_with_no_extension_enabled']
+                }
+
+                # List fields default to [] when absent from the record.
+                # NOTE(review): 'export_policies_to_include' appears twice in
+                # this tuple - harmless duplicate, but should be removed.
+                for field in (
+                    'export_policies_to_exclude', 'export_policies_to_include', 'export_policies_to_include', 'file_extensions_to_exclude',
+                    'file_extensions_to_include', 'shares_to_exclude', 'shares_to_include', 'volumes_to_exclude', 'volumes_to_include'
+                ):
+                    return_value[field] = []
+                    if field in records[0]:
+                        return_value[field] = records[0][field]
+
+            return return_value
+
+        else:
+            # ZAPI: query fpolicy-policy-scope-get-iter filtered on policy name and vserver.
+            fpolicy_scope_obj = netapp_utils.zapi.NaElement('fpolicy-policy-scope-get-iter')
+            fpolicy_scope_config = netapp_utils.zapi.NaElement('fpolicy-scope-config')
+            fpolicy_scope_config.add_new_child('policy-name', self.parameters['name'])
+            fpolicy_scope_config.add_new_child('vserver', self.parameters['vserver'])
+            query = netapp_utils.zapi.NaElement('query')
+            query.add_child_elem(fpolicy_scope_config)
+            fpolicy_scope_obj.add_child_elem(query)
+
+            try:
+                result = self.server.invoke_successfully(fpolicy_scope_obj, True)
+
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg='Error searching for FPolicy policy scope %s on vserver %s: %s' % (
+                        self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+            if result.get_child_by_name('attributes-list'):
+                fpolicy_scope_attributes = result['attributes-list']['fpolicy-scope-config']
+                # Each list option defaults to [] and is filled from the
+                # matching dashed ZAPI element when present.
+                param_dict = {
+                    'export_policies_to_exclude': [],
+                    'export_policies_to_include': [],
+                    'file_extensions_to_exclude': [],
+                    'file_extensions_to_include': [],
+                    'shares_to_exclude': [],
+                    'shares_to_include': [],
+                    'volumes_to_exclude': [],
+                    'volumes_to_include': []
+                }
+
+                for param in param_dict.keys():
+                    if fpolicy_scope_attributes.get_child_by_name(param.replace('_', '-')):
+                        param_dict[param] = [
+                            child_name.get_content() for child_name in fpolicy_scope_attributes.get_child_by_name((param.replace('_', '-'))).get_children()
+                        ]
+
+                return_value = {
+                    'name': fpolicy_scope_attributes.get_child_content('policy-name'),
+                    'check_extensions_on_directories': self.na_helper.get_value_for_bool(
+                        from_zapi=True, value=fpolicy_scope_attributes.get_child_content('check-extensions-on-directories')),
+                    'is_monitoring_of_objects_with_no_extension_enabled': self.na_helper.get_value_for_bool(
+                        from_zapi=True, value=fpolicy_scope_attributes.get_child_content('is-monitoring-of-objects-with-no-extension-enabled')),
+                }
+                return_value.update(param_dict)
+            return return_value
+
+    def create_fpolicy_scope(self):
+        """
+        Create an FPolicy policy scope
+        :return: nothing
+        """
+        if self.use_rest:
+            api = "/private/cli/vserver/fpolicy/policy/scope"
+            body = {
+                'vserver': self.parameters['vserver'],
+                'policy-name': self.parameters['name']
+            }
+            # NOTE(review): 'export_policies_to_include' is duplicated, and the
+            # two dashed entries can never match the underscore keys held in
+            # self.parameters (the module options are named
+            # 'check_extensions_on_directories' and
+            # 'is_monitoring_of_objects_with_no_extension_enabled'), so those
+            # boolean options are silently dropped on REST create - confirm
+            # and fix the tuple entries.
+            for parameter in (
+                'export_policies_to_exclude', 'export_policies_to_include', 'export_policies_to_include', 'file_extensions_to_exclude',
+                'file_extensions_to_include', 'shares_to_exclude', 'shares_to_include', 'volumes_to_exclude', 'volumes_to_include',
+                'is-file-extension-check-on-directories-enabled', 'is-monitoring-of-objects-with-no-extension-enabled'
+            ):
+                if parameter in self.parameters:
+                    body[parameter.replace('_', '-')] = self.parameters[parameter]
+
+            dummy, error = self.rest_api.post(api, body)
+            if error:
+                self.module.fail_json(msg=error)
+        else:
+            fpolicy_scope_obj = netapp_utils.zapi.NaElement('fpolicy-policy-scope-create')
+            fpolicy_scope_obj.add_new_child('policy-name', self.parameters['name'])
+
+            if 'check_extensions_on_directories' in self.parameters:
+                fpolicy_scope_obj.add_new_child(
+                    'check-extensions-on-directories', self.na_helper.get_value_for_bool(
+                        from_zapi=False, value=self.parameters['check_extensions_on_directories']
+                    )
+                )
+
+            if 'is_monitoring_of_objects_with_no_extension_enabled' in self.parameters:
+                fpolicy_scope_obj.add_new_child(
+                    'is-monitoring-of-objects-with-no-extension-enabled', self.na_helper.get_value_for_bool(
+                        from_zapi=False, value=self.parameters['is_monitoring_of_objects_with_no_extension_enabled']
+                    )
+                )
+
+            # Each list option becomes a dashed container element holding one
+            # <string> child per value.
+            if 'export_policies_to_exclude' in self.parameters:
+                export_policies_to_exclude_obj = netapp_utils.zapi.NaElement('export-policies-to-exclude')
+                for export_policies_to_exclude in self.parameters['export_policies_to_exclude']:
+                    export_policies_to_exclude_obj.add_new_child('string', export_policies_to_exclude)
+                fpolicy_scope_obj.add_child_elem(export_policies_to_exclude_obj)
+
+            if 'export_policies_to_include' in self.parameters:
+                export_policies_to_include_obj = netapp_utils.zapi.NaElement('export-policies-to-include')
+                for export_policies_to_include in self.parameters['export_policies_to_include']:
+                    export_policies_to_include_obj.add_new_child('string', export_policies_to_include)
+                fpolicy_scope_obj.add_child_elem(export_policies_to_include_obj)
+
+            if 'file_extensions_to_exclude' in self.parameters:
+                file_extensions_to_exclude_obj = netapp_utils.zapi.NaElement('file-extensions-to-exclude')
+                for file_extensions_to_exclude in self.parameters['file_extensions_to_exclude']:
+                    file_extensions_to_exclude_obj.add_new_child('string', file_extensions_to_exclude)
+                fpolicy_scope_obj.add_child_elem(file_extensions_to_exclude_obj)
+
+            if 'file_extensions_to_include' in self.parameters:
+                file_extensions_to_include_obj = netapp_utils.zapi.NaElement('file-extensions-to-include')
+                for file_extensions_to_include in self.parameters['file_extensions_to_include']:
+                    file_extensions_to_include_obj.add_new_child('string', file_extensions_to_include)
+                fpolicy_scope_obj.add_child_elem(file_extensions_to_include_obj)
+
+            if 'shares_to_exclude' in self.parameters:
+                shares_to_exclude_obj = netapp_utils.zapi.NaElement('shares-to-exclude')
+                for shares_to_exclude in self.parameters['shares_to_exclude']:
+                    shares_to_exclude_obj.add_new_child('string', shares_to_exclude)
+                    fpolicy_scope_obj.add_child_elem(shares_to_exclude_obj)
+
+            if 'volumes_to_exclude' in self.parameters:
+                volumes_to_exclude_obj = netapp_utils.zapi.NaElement('volumes-to-exclude')
+                for volumes_to_exclude in self.parameters['volumes_to_exclude']:
+                    volumes_to_exclude_obj.add_new_child('string', volumes_to_exclude)
+                fpolicy_scope_obj.add_child_elem(volumes_to_exclude_obj)
+
+            if 'shares_to_include' in self.parameters:
+                shares_to_include_obj = netapp_utils.zapi.NaElement('shares-to-include')
+                for shares_to_include in self.parameters['shares_to_include']:
+                    shares_to_include_obj.add_new_child('string', shares_to_include)
+                fpolicy_scope_obj.add_child_elem(shares_to_include_obj)
+
+            if 'volumes_to_include' in self.parameters:
+                volumes_to_include_obj = netapp_utils.zapi.NaElement('volumes-to-include')
+                for volumes_to_include in self.parameters['volumes_to_include']:
+                    volumes_to_include_obj.add_new_child('string', volumes_to_include)
+                fpolicy_scope_obj.add_child_elem(volumes_to_include_obj)
+
+            try:
+                self.server.invoke_successfully(fpolicy_scope_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg='Error creating fPolicy policy scope %s on vserver %s: %s' % (
+                        self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+    def modify_fpolicy_scope(self, modify):
+        """
+        Modify an FPolicy policy scope
+        :param modify: dict of attributes that differ from the current state.
+        :return: nothing
+        """
+        if self.use_rest:
+            # REST: patch only the changed attributes, addressing the scope
+            # through the query parameters.
+            api = "/private/cli/vserver/fpolicy/policy/scope"
+            query = {'vserver': self.parameters['vserver']}
+            query['policy-name'] = self.parameters['name']
+            dummy, error = self.rest_api.patch(api, modify, query)
+            if error:
+                self.module.fail_json(msg=error)
+
+        else:
+            # NOTE(review): unlike the REST branch, the ZAPI branch ignores the
+            # computed 'modify' dict and re-sends every supplied option.
+            fpolicy_scope_obj = netapp_utils.zapi.NaElement('fpolicy-policy-scope-modify')
+            fpolicy_scope_obj.add_new_child('policy-name', self.parameters['name'])
+
+            if 'check_extensions_on_directories' in self.parameters:
+                fpolicy_scope_obj.add_new_child(
+                    'check-extensions-on-directories', self.na_helper.get_value_for_bool(
+                        from_zapi=False, value=self.parameters['check_extensions_on_directories']
+                    )
+                )
+
+            if 'is_monitoring_of_objects_with_no_extension_enabled' in self.parameters:
+                fpolicy_scope_obj.add_new_child(
+                    'is-monitoring-of-objects-with-no-extension-enabled',
+                    self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_monitoring_of_objects_with_no_extension_enabled'])
+                )
+
+            # Each list option becomes a dashed container element holding one
+            # <string> child per value.
+            if 'export_policies_to_exclude' in self.parameters:
+                export_policies_to_exclude_obj = netapp_utils.zapi.NaElement('export-policies-to-exclude')
+                for export_policies_to_exclude in self.parameters['export_policies_to_exclude']:
+                    export_policies_to_exclude_obj.add_new_child('string', export_policies_to_exclude)
+                fpolicy_scope_obj.add_child_elem(export_policies_to_exclude_obj)
+
+            if 'export_policies_to_include' in self.parameters:
+                export_policies_to_include_obj = netapp_utils.zapi.NaElement('export-policies-to-include')
+
+                for export_policies_to_include in self.parameters['export_policies_to_include']:
+                    export_policies_to_include_obj.add_new_child('string', export_policies_to_include)
+                fpolicy_scope_obj.add_child_elem(export_policies_to_include_obj)
+
+            if 'file_extensions_to_exclude' in self.parameters:
+                file_extensions_to_exclude_obj = netapp_utils.zapi.NaElement('file-extensions-to-exclude')
+
+                for file_extensions_to_exclude in self.parameters['file_extensions_to_exclude']:
+                    file_extensions_to_exclude_obj.add_new_child('string', file_extensions_to_exclude)
+                fpolicy_scope_obj.add_child_elem(file_extensions_to_exclude_obj)
+
+            if 'file_extensions_to_include' in self.parameters:
+                file_extensions_to_include_obj = netapp_utils.zapi.NaElement('file-extensions-to-include')
+
+                for file_extensions_to_include in self.parameters['file_extensions_to_include']:
+                    file_extensions_to_include_obj.add_new_child('string', file_extensions_to_include)
+                fpolicy_scope_obj.add_child_elem(file_extensions_to_include_obj)
+
+            if 'shares_to_exclude' in self.parameters:
+                shares_to_exclude_obj = netapp_utils.zapi.NaElement('shares-to-exclude')
+
+                for shares_to_exclude in self.parameters['shares_to_exclude']:
+                    shares_to_exclude_obj.add_new_child('string', shares_to_exclude)
+                fpolicy_scope_obj.add_child_elem(shares_to_exclude_obj)
+
+            if 'volumes_to_exclude' in self.parameters:
+                volumes_to_exclude_obj = netapp_utils.zapi.NaElement('volumes-to-exclude')
+
+                for volumes_to_exclude in self.parameters['volumes_to_exclude']:
+                    volumes_to_exclude_obj.add_new_child('string', volumes_to_exclude)
+                fpolicy_scope_obj.add_child_elem(volumes_to_exclude_obj)
+
+            if 'shares_to_include' in self.parameters:
+                shares_to_include_obj = netapp_utils.zapi.NaElement('shares-to-include')
+
+                for shares_to_include in self.parameters['shares_to_include']:
+                    shares_to_include_obj.add_new_child('string', shares_to_include)
+                fpolicy_scope_obj.add_child_elem(shares_to_include_obj)
+
+            if 'volumes_to_include' in self.parameters:
+                volumes_to_include_obj = netapp_utils.zapi.NaElement('volumes-to-include')
+
+                for volumes_to_include in self.parameters['volumes_to_include']:
+                    volumes_to_include_obj.add_new_child('string', volumes_to_include)
+                fpolicy_scope_obj.add_child_elem(volumes_to_include_obj)
+
+            try:
+                self.server.invoke_successfully(fpolicy_scope_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error modifying fPolicy policy scope %s on vserver %s: %s' % (
+                    self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+    def delete_fpolicy_scope(self):
+        """
+        Delete an FPolicy policy scope
+        :return: nothing
+        """
+
+        if self.use_rest:
+            api = "/private/cli/vserver/fpolicy/policy/scope"
+            body = {
+                'vserver': self.parameters['vserver'],
+                'policy-name': self.parameters['name']
+            }
+            dummy, error = self.rest_api.delete(api, body)
+            if error:
+                self.module.fail_json(msg=error)
+        else:
+            fpolicy_scope_obj = netapp_utils.zapi.NaElement('fpolicy-policy-scope-delete')
+            fpolicy_scope_obj.add_new_child('policy-name', self.parameters['name'])
+
+            try:
+                self.server.invoke_successfully(fpolicy_scope_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(
+                    msg='Error deleting fPolicy policy scope %s on vserver %s: %s' % (
+                        self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+
+    def apply(self):
+        """
+        Apply the requested state: create, delete or modify the scope based
+        on the difference between current state and parameters. Honors check_mode.
+        """
+        current, modify = self.get_fpolicy_scope(), None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+        if cd_action is None and self.parameters['state'] == 'present':
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.create_fpolicy_scope()
+            elif cd_action == 'delete':
+                self.delete_fpolicy_scope()
+            elif modify:
+                self.modify_fpolicy_scope(modify)
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Module entry point: instantiate the module object and apply the
+    state requested by the playbook.
+    """
+    command = NetAppOntapFpolicyScope()
+    command.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_status.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_status.py
new file mode 100644
index 000000000..8d55683ba
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_status.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_fpolicy_status
+short_description: NetApp ONTAP - Enables or disables the specified fPolicy policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Enable or disable fPolicy policy.
+options:
+ state:
+ description:
+ - Whether the fPolicy policy is enabled or disabled.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Name of the vserver to enable fPolicy on.
+ type: str
+ required: true
+
+ policy_name:
+ description:
+ - Name of the policy.
+ type: str
+ required: true
+
+ sequence_number:
+ description:
+ - Policy Sequence Number.
+ type: int
+
+notes:
+- Support check_mode.
+"""
+
+EXAMPLES = """
+ - name: Enable fPolicy policy
+ na_ontap_fpolicy_status:
+ state: present
+ vserver: svm1
+ policy_name: fpolicy_policy
+ sequence_number: 10
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+ https: true
+ validate_certs: false
+
+ - name: Disable fPolicy policy
+ na_ontap_fpolicy_status:
+ state: absent
+ vserver: svm1
+ policy_name: fpolicy_policy
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+ https: true
+ validate_certs: false
+
+"""
+
+RETURN = """ # """
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapFpolicyStatus(object):
+    """
+    Enables or disables NetApp ONTAP fPolicy
+    """
+    def __init__(self):
+        """
+        Initialize the ONTAP fPolicy status class
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            policy_name=dict(required=True, type='str'),
+            sequence_number=dict(required=False, type='int')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            # sequence_number is mandatory when enabling (state=present)
+            required_if=[('state', 'present', ['sequence_number'])],
+            supports_check_mode=True
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # map the declarative state onto the boolean 'status' used by the API calls
+        if self.parameters['state'] == 'present':
+            self.parameters['status'] = True
+        else:
+            self.parameters['status'] = False
+
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+
+        if not self.use_rest:
+            if HAS_NETAPP_LIB is False:
+                self.module.fail_json(msg="the python NetApp-Lib module is required")
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_fpolicy_policy_status(self):
+        """
+        Check to see the status of the fPolicy policy
+        :return: dict of status properties
+        """
+        return_value = None
+
+        if self.use_rest:
+            api = '/protocols/fpolicy'
+            query = {
+                'svm.name': self.parameters['vserver'],
+                'fields': 'policies'
+            }
+
+            message, error = self.rest_api.get(api, query)
+            if error:
+                self.module.fail_json(msg=error)
+            records, error = rrh.check_for_0_or_more_records(api, message, error)
+            if records is not None:
+                # scan the vserver's policies for the requested one
+                for policy in records[0]['policies']:
+                    if policy['name'] == self.parameters['policy_name']:
+                        return_value = {}
+                        return_value['vserver'] = records[0]['svm']['name']
+                        return_value['policy_name'] = policy['name']
+                        return_value['status'] = policy['enabled']
+                        break
+            # a missing policy is an error: this module only toggles existing policies
+            if not return_value:
+                self.module.fail_json(msg='Error getting fPolicy policy %s for vserver %s as policy does not exist' %
+                                      (self.parameters['policy_name'], self.parameters['vserver']))
+            return return_value
+
+        else:
+
+            fpolicy_status_obj = netapp_utils.zapi.NaElement('fpolicy-policy-status-get-iter')
+            fpolicy_status_info = netapp_utils.zapi.NaElement('fpolicy-policy-status-info')
+            fpolicy_status_info.add_new_child('policy-name', self.parameters['policy_name'])
+            fpolicy_status_info.add_new_child('vserver', self.parameters['vserver'])
+            query = netapp_utils.zapi.NaElement('query')
+            query.add_child_elem(fpolicy_status_info)
+            fpolicy_status_obj.add_child_elem(query)
+
+            try:
+                result = self.server.invoke_successfully(fpolicy_status_obj, True)
+
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error getting status for fPolicy policy %s for vserver %s: %s' %
+                                      (self.parameters['policy_name'], self.parameters['vserver'], to_native(error)),
+                                      exception=traceback.format_exc())
+
+            if result.get_child_by_name('attributes-list'):
+                fpolicy_status_attributes = result['attributes-list']['fpolicy-policy-status-info']
+
+                return_value = {
+                    'vserver': fpolicy_status_attributes.get_child_content('vserver'),
+                    'policy_name': fpolicy_status_attributes.get_child_content('policy-name'),
+                    # ZAPI returns status as a string; convert to bool for comparison
+                    'status': self.na_helper.get_value_for_bool(True, fpolicy_status_attributes.get_child_content('status')),
+                }
+            # NOTE(review): returns None when the policy is not found via ZAPI,
+            # unlike the REST branch which fails the module -- confirm intended
+            return return_value
+
+    def get_svm_uuid(self):
+        """
+        Gets SVM uuid based on name
+        :return: string of uuid
+        """
+        api = '/svm/svms'
+        query = {
+            'name': self.parameters['vserver']
+        }
+        message, error = self.rest_api.get(api, query)
+
+        if error:
+            self.module.fail_json(msg=error)
+
+        # NOTE(review): assumes the vserver exists -- records[0] raises IndexError
+        # when the lookup returns no records; TODO confirm callers guarantee this
+        return message['records'][0]['uuid']
+
+    def enable_fpolicy_policy(self):
+        """
+        Enables fPolicy policy
+        :return: nothing
+        """
+
+        if self.use_rest:
+            api = '/protocols/fpolicy/%s/policies/%s' % (self.svm_uuid, self.parameters['policy_name'])
+            body = {
+                'enabled': self.parameters['status'],
+                # REST calls the sequence number 'priority'
+                'priority': self.parameters['sequence_number']
+            }
+
+            dummy, error = self.rest_api.patch(api, body)
+            if error:
+                self.module.fail_json(msg=error)
+
+        else:
+            fpolicy_enable_obj = netapp_utils.zapi.NaElement('fpolicy-enable-policy')
+            fpolicy_enable_obj.add_new_child('policy-name', self.parameters['policy_name'])
+            # ZAPI expects the sequence number as a string
+            fpolicy_enable_obj.add_new_child('sequence-number', self.na_helper.get_value_for_int(False, self.parameters['sequence_number']))
+            try:
+                self.server.invoke_successfully(fpolicy_enable_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error enabling fPolicy policy %s on vserver %s: %s' %
+                                      (self.parameters['policy_name'], self.parameters['vserver'], to_native(error)),
+                                      exception=traceback.format_exc())
+
+    def disable_fpolicy_policy(self):
+        """
+        Disables fPolicy policy
+        :return: nothing
+        """
+
+        if self.use_rest:
+            api = '/protocols/fpolicy/%s/policies/%s' % (self.svm_uuid, self.parameters['policy_name'])
+            body = {
+                'enabled': self.parameters['status']
+            }
+
+            dummy, error = self.rest_api.patch(api, body)
+            if error:
+                self.module.fail_json(msg=error)
+
+        else:
+
+            fpolicy_disable_obj = netapp_utils.zapi.NaElement('fpolicy-disable-policy')
+            fpolicy_disable_obj.add_new_child('policy-name', self.parameters['policy_name'])
+
+            try:
+                self.server.invoke_successfully(fpolicy_disable_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error disabling fPolicy policy %s on vserver %s: %s' %
+                                      (self.parameters['policy_name'], self.parameters['vserver'], to_native(error)),
+                                      exception=traceback.format_exc())
+
+    def apply(self):
+        """Toggle the fPolicy policy to match the desired state and exit."""
+        if self.use_rest:
+            self.svm_uuid = self.get_svm_uuid()
+
+        current = self.get_fpolicy_policy_status()
+        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+        if self.na_helper.changed:
+            if not self.module.check_mode:
+                # na_helper.changed implies 'status' is present in modify
+                if modify['status']:
+                    self.enable_fpolicy_policy()
+                elif not modify['status']:
+                    self.disable_fpolicy_policy()
+        result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Enables or disables NetApp ONTAP fPolicy
+    """
+    # instantiate the module object and run the state machine
+    command = NetAppOntapFpolicyStatus()
+    command.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py
new file mode 100644
index 000000000..3093537ef
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py
@@ -0,0 +1,697 @@
+#!/usr/bin/python
+''' na_ontap_igroup
+
+ (c) 2018-2022, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_igroup
+short_description: NetApp ONTAP iSCSI or FC igroup configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create/Delete/Rename Igroups and Modify initiators belonging to an igroup
+
+options:
+ state:
+ description:
+ - Whether the specified Igroup should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the igroup to manage.
+ required: true
+ type: str
+
+ initiator_group_type:
+ description:
+ - Type of the initiator group.
+ - Required when C(state=present).
+ choices: ['fcp', 'iscsi', 'mixed']
+ type: str
+ aliases: ['protocol']
+
+ from_name:
+ description:
+ - Name of igroup to rename to name.
+ version_added: 2.7.0
+ type: str
+
+ os_type:
+ description:
+ - OS type of the initiators within the group.
+ type: str
+ aliases: ['ostype']
+
+ igroups:
+ description:
+ - List of igroups to be mapped to the igroup.
+ - For a modify operation, this list replaces the existing igroups, or existing initiators.
+ - This module does not add or remove specific igroup(s) in an igroup.
+ - Mutually exclusive with initiator_names (initiators) and initiator_objects.
+ - Requires ONTAP 9.9 or newer.
+ type: list
+ elements: str
+ version_added: 21.3.0
+
+ initiator_names:
+ description:
+ - List of initiators to be mapped to the igroup.
+ - WWPN, WWPN Alias, or iSCSI name of Initiator to add or remove.
+ - For a modify operation, this list replaces the existing initiators, or existing igroups.
+ - This module does not add or remove specific initiator(s) in an igroup.
+ - Mutually exclusive with igroups and initiator_objects.
+ - This serves the same purpose as initiator_objects, but without the comment suboption.
+ aliases:
+ - initiator
+ - initiators
+ type: list
+ elements: str
+ version_added: 21.4.0
+
+ initiator_objects:
+ description:
+ - List of initiators to be mapped to the igroup, with an optional comment field.
+ - WWPN, WWPN Alias, or iSCSI name of Initiator to add or remove.
+ - For a modify operation, this list replaces the existing initiators, or existing igroups.
+ - This module does not add or remove specific initiator(s) in an igroup.
+ - Mutually exclusive with initiator_names (initiators) and igroups.
+ - Requires ONTAP 9.9 with REST support.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: name of the initiator.
+ type: str
+ required: true
+ comment:
+ description: a more descriptive comment as the WWPN can be quite opaque.
+ type: str
+ version_added: 21.4.0
+
+ bind_portset:
+ description:
+ - Name of a current portset to bind to the newly created igroup.
+ type: str
+
+ force_remove_initiator:
+ description:
+ - Forcibly remove the initiator even if there are existing LUNs mapped to this initiator group.
+ - This parameter should be used with caution.
+ type: bool
+ default: false
+ aliases: ['allow_delete_while_mapped']
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+notes:
+ - supports check mode.
+ - supports ZAPI and REST.
+'''
+
+EXAMPLES = '''
+ - name: Create iSCSI Igroup
+ netapp.ontap.na_ontap_igroup:
+ state: present
+ name: ansibleIgroup3
+ initiator_group_type: iscsi
+ os_type: linux
+ initiator_names: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com,abc.com:redhat.com
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create iSCSI Igroup - ONTAP 9.9
+ netapp.ontap.na_ontap_igroup:
+ state: present
+ name: ansibleIgroup3
+ initiator_group_type: iscsi
+ os_type: linux
+ initiator_objects:
+ - name: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com
+ comment: for test only
+ - name: abc.com:redhat.com
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create FC Igroup
+ netapp.ontap.na_ontap_igroup:
+ state: present
+ name: ansibleIgroup4
+ initiator_group_type: fcp
+ os_type: linux
+ initiator_names: 20:00:00:50:56:9f:19:82
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: rename Igroup
+ netapp.ontap.na_ontap_igroup:
+ state: present
+ from_name: ansibleIgroup3
+ name: testexamplenewname
+ initiator_group_type: iscsi
+ os_type: linux
+ initiator_names: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+  - name: Modify Igroup Initiators (replaces existing initiator_names)
+ netapp.ontap.na_ontap_igroup:
+ state: present
+ name: ansibleIgroup3
+ initiator_group_type: iscsi
+ os_type: linux
+ initiator: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete Igroup
+ netapp.ontap.na_ontap_igroup:
+ state: absent
+ name: ansibleIgroup3
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapIgroup:
+    """Create/Delete/Rename Igroups and Modify initiators list"""
+    def __init__(self):
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            name=dict(required=True, type='str'),
+            from_name=dict(required=False, type='str', default=None),
+            os_type=dict(required=False, type='str', aliases=['ostype']),
+            igroups=dict(required=False, type='list', elements='str'),
+            initiator_group_type=dict(required=False, type='str',
+                                      choices=['fcp', 'iscsi', 'mixed'],
+                                      aliases=['protocol']),
+            initiator_names=dict(required=False, type='list', elements='str', aliases=['initiator', 'initiators']),
+            initiator_objects=dict(required=False, type='list', elements='dict', options=dict(
+                name=dict(required=True, type='str'),
+                comment=dict(type='str'),
+            )),
+            vserver=dict(required=True, type='str'),
+            force_remove_initiator=dict(required=False, type='bool', default=False, aliases=['allow_delete_while_mapped']),
+            bind_portset=dict(required=False, type='str')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True,
+            # the three membership options are alternative representations of the same thing
+            mutually_exclusive=[('igroups', 'initiator_names'), ('igroups', 'initiator_objects'), ('initiator_objects', 'initiator_names'), ]
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # maps module option name -> REST attribute name for PATCH-able fields
+        self.rest_modify_zapi_to_rest = dict(
+            # initiator_group_type (protocol) cannot be changed after create
+            bind_portset='portset',
+            name='name',
+            os_type='os_type'
+        )
+
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+
+        # use the new structure, a list of dict with name/comment as keys.
+        if self.parameters.get('initiator_names') is not None:
+            self.parameters['initiator_objects'] = [
+                dict(name=initiator, comment=None)
+                for initiator in self.parameters['initiator_names']]
+
+        if self.parameters.get('initiator_objects') is not None:
+            ontap_99_option = 'comment'
+            # initiator comments require ONTAP 9.9 with REST
+            if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9) and\
+               any(x[ontap_99_option] is not None for x in self.parameters['initiator_objects']):
+                msg = 'Error: in initiator_objects: %s' % self.rest_api.options_require_ontap_version(ontap_99_option, version='9.9', use_rest=self.use_rest)
+                self.module.fail_json(msg=msg)
+            # sanitize WWNs and convert to lowercase for idempotency
+            self.parameters['initiator_objects'] = [
+                dict(name=self.na_helper.sanitize_wwn(initiator['name']), comment=initiator['comment'])
+                for initiator in self.parameters['initiator_objects']]
+            # keep a list of names as it is convenient for add and remove computations
+            self.parameters['initiator_names'] = [initiator['name'] for initiator in self.parameters['initiator_objects']]
+
+        def too_old_for_rest(minimum_generation, minimum_major):
+            # True when REST is selected but the cluster is older than the given version
+            return self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, minimum_generation, minimum_major, 0)
+
+        ontap_99_options = ['bind_portset']
+        if too_old_for_rest(9, 9) and any(x in self.parameters for x in ontap_99_options):
+            self.module.warn('Warning: falling back to ZAPI: %s' % self.rest_api.options_require_ontap_version(ontap_99_options, version='9.9'))
+            self.use_rest = False
+
+        ontap_99_options = ['igroups']
+        if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1) and any(x in self.parameters for x in ontap_99_options):
+            self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_99_options, version='9.9.1'))
+
+        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+            if 'igroups' in self.parameters:
+                # we may need to remove existing initiators
+                self.parameters['initiator_names'] = list()
+            elif 'initiator_names' in self.parameters:
+                # we may need to remove existing igroups
+                self.parameters['igroups'] = list()
+
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def fail_on_error(self, error, stack=False):
+        """Fail the module when error is set; optionally attach the current stack."""
+        if error is None:
+            return
+        elements = dict(msg="Error: %s" % error)
+        if stack:
+            elements['stack'] = traceback.format_stack()
+        self.module.fail_json(**elements)
+
+    def get_igroup_rest(self, name):
+        """Return igroup details from REST as a dict, or None if not found."""
+        api = "protocols/san/igroups"
+        fields = 'name,uuid,svm,initiators,os_type,protocol'
+        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+            # nested igroups only exist on 9.9.1+
+            fields += ',igroups'
+        query = dict(name=name, fields=fields)
+        query['svm.name'] = self.parameters['vserver']
+        record, error = rest_generic.get_one_record(self.rest_api, api, query)
+        self.fail_on_error(error)
+        if record:
+            try:
+                igroup_details = dict(
+                    name=record['name'],
+                    uuid=record['uuid'],
+                    vserver=record['svm']['name'],
+                    os_type=record['os_type'],
+                    initiator_group_type=record['protocol'],
+                    name_to_uuid=dict()
+                )
+            except KeyError as exc:
+                self.module.fail_json(msg='Error: unexpected igroup body: %s, KeyError on %s' % (str(record), str(exc)))
+            igroup_details['name_to_key'] = {}
+            for attr in ('igroups', 'initiators'):
+                option = 'initiator_names' if attr == 'initiators' else attr
+                if attr in record:
+                    igroup_details[option] = [item['name'] for item in record[attr]]
+                    if attr == 'initiators':
+                        igroup_details['initiator_objects'] = [dict(name=item['name'], comment=item.get('comment')) for item in record[attr]]
+                    # for initiators, there is no uuid, so we're using name as the key
+                    igroup_details['name_to_uuid'][option] = dict((item['name'], item.get('uuid', item['name'])) for item in record[attr])
+                else:
+                    igroup_details[option] = []
+                    igroup_details['name_to_uuid'][option] = {}
+            return igroup_details
+        return None
+
+    def get_igroup(self, name):
+        """
+        Return details about the igroup
+        :param:
+            name : Name of the igroup
+
+        :return: Details about the igroup. None if not found.
+        :rtype: dict
+        """
+        if self.use_rest:
+            return self.get_igroup_rest(name)
+
+        igroup_info = netapp_utils.zapi.NaElement('igroup-get-iter')
+        attributes = dict(query={'initiator-group-info': {'initiator-group-name': name,
+                                                          'vserver': self.parameters['vserver']}})
+        igroup_info.translate_struct(attributes)
+        current = None
+
+        try:
+            result = self.server.invoke_successfully(igroup_info, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching igroup info %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            igroup_info = result.get_child_by_name('attributes-list')
+            initiator_group_info = igroup_info.get_child_by_name('initiator-group-info')
+            initiator_names = []
+            initiator_objects = []
+            if initiator_group_info.get_child_by_name('initiators'):
+                current_initiators = initiator_group_info['initiators'].get_children()
+                initiator_names = [initiator['initiator-name'] for initiator in current_initiators]
+                # ZAPI does not expose initiator comments, hence comment=None
+                initiator_objects = [dict(name=initiator['initiator-name'], comment=None) for initiator in current_initiators]
+            current = {
+                'initiator_names': initiator_names,
+                'initiator_objects': initiator_objects,
+                # place holder, not used for ZAPI
+                'name_to_uuid': dict(initiator_names=dict())
+            }
+            zapi_to_params = {
+                'vserver': 'vserver',
+                'initiator-group-os-type': 'os_type',
+                'initiator-group-portset-name': 'bind_portset',
+                'initiator-group-type': 'initiator_group_type'
+            }
+            for attr in zapi_to_params:
+                value = igroup_info.get_child_content(attr)
+                if value is not None:
+                    current[zapi_to_params[attr]] = value
+        return current
+
+    def check_option_is_valid(self, option):
+        """Guard: membership helpers accept 'igroups' (REST only) and 'initiator_names'."""
+        if self.use_rest and option in ('igroups', 'initiator_names'):
+            return
+        if option == 'initiator_names':
+            return
+        raise KeyError('check_option_is_valid: option=%s' % option)
+
+    @staticmethod
+    def get_rest_name_for_option(option):
+        """Translate a module option name into its REST sub-resource name."""
+        if option == 'initiator_names':
+            return 'initiators'
+        if option == 'igroups':
+            return option
+        raise KeyError('get_rest_name_for_option: option=%s' % option)
+
+    def add_initiators_or_igroups_rest(self, uuid, option, names):
+        """POST the given initiator or nested-igroup names into the igroup via REST."""
+        self.check_option_is_valid(option)
+        api = "protocols/san/igroups/%s/%s" % (uuid, self.get_rest_name_for_option(option))
+        if option == 'initiator_names' and self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+            # 9.9.1+ accepts full initiator objects, so comments can be set at add time
+            in_objects = self.parameters['initiator_objects']
+            records = [self.na_helper.filter_out_none_entries(item) for item in in_objects if item['name'] in names]
+        else:
+            records = [dict(name=name) for name in names]
+        body = dict(records=records)
+        dummy, error = rest_generic.post_async(self.rest_api, api, body)
+        self.fail_on_error(error)
+
+    def modify_initiators_rest(self, uuid, initiator_objects):
+        """PATCH the comment of each given initiator via REST."""
+        for initiator in initiator_objects:
+            if 'comment' in initiator:
+                api = "protocols/san/igroups/%s/initiators" % uuid
+                body = dict(comment=initiator['comment'])
+                dummy, error = rest_generic.patch_async(self.rest_api, api, initiator['name'], body)
+                self.fail_on_error(error)
+
+    def add_initiators_or_igroups(self, uuid, option, current_names):
+        """
+        Add the list of desired initiators to igroup unless they are already set
+        :return: None
+        """
+        self.check_option_is_valid(option)
+        # don't add if initiator_names/igroups is empty string
+        if self.parameters.get(option) == [''] or self.parameters.get(option) is None:
+            return
+        names_to_add = [name for name in self.parameters[option] if name not in current_names]
+        if self.use_rest and names_to_add:
+            self.add_initiators_or_igroups_rest(uuid, option, names_to_add)
+        else:
+            for name in names_to_add:
+                self.modify_initiator(name, 'igroup-add')
+
+    def delete_initiator_or_igroup_rest(self, uuid, option, name_or_uuid):
+        """DELETE a single initiator or nested igroup from the igroup via REST."""
+        self.check_option_is_valid(option)
+        api = "protocols/san/igroups/%s/%s" % (uuid, self.get_rest_name_for_option(option))
+        query = {'allow_delete_while_mapped': True} if self.parameters['force_remove_initiator'] else None
+        dummy, error = rest_generic.delete_async(self.rest_api, api, name_or_uuid, query=query)
+        self.fail_on_error(error)
+
+    def remove_initiators_or_igroups(self, uuid, option, current_names, mapping):
+        """
+        Removes current names from igroup unless they are still desired
+        :return: None
+        """
+        self.check_option_is_valid(option)
+        for name in current_names:
+            if name not in self.parameters.get(option, list()):
+                if self.use_rest:
+                    # mapping translates name -> uuid (or name itself for initiators)
+                    self.delete_initiator_or_igroup_rest(uuid, option, mapping[name])
+                else:
+                    self.modify_initiator(name, 'igroup-remove')
+
+    def modify_initiator(self, initiator, zapi):
+        """
+        Add or remove an initiator to/from an igroup
+        """
+        options = {'initiator-group-name': self.parameters['name'],
+                   'initiator': initiator}
+        if zapi == 'igroup-remove' and self.parameters.get('force_remove_initiator'):
+            options['force'] = 'true'
+
+        igroup_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+
+        try:
+            self.server.invoke_successfully(igroup_modify, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying igroup initiator %s: %s' % (self.parameters['name'],
+                                                                                   to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def create_igroup_rest(self):
+        """Create the igroup, including membership and comments, in one REST POST."""
+        api = "protocols/san/igroups"
+        body = dict(
+            name=self.parameters['name'],
+            os_type=self.parameters['os_type'])
+        body['svm'] = dict(name=self.parameters['vserver'])
+        # maps module option -> REST body attribute
+        mapping = dict(
+            initiator_group_type='protocol',
+            bind_portset='portset',
+            igroups='igroups',
+            initiator_objects='initiators'
+        )
+        for option in mapping:
+            value = self.parameters.get(option)
+            if value is not None:
+                if option in ('igroups', 'initiator_objects'):
+                    # we may have an empty list, ignore it
+                    if option == 'initiator_objects':
+                        value = [self.na_helper.filter_out_none_entries(item) for item in value] if value else None
+                    else:
+                        value = [dict(name=name) for name in value] if value else None
+                if value is not None:
+                    body[mapping[option]] = value
+        dummy, error = rest_generic.post_async(self.rest_api, api, body)
+        self.fail_on_error(error)
+
+    def create_igroup(self):
+        """
+        Create the igroup.
+        """
+        if self.use_rest:
+            return self.create_igroup_rest()
+
+        options = {'initiator-group-name': self.parameters['name']}
+        if self.parameters.get('os_type') is not None:
+            options['os-type'] = self.parameters['os_type']
+        if self.parameters.get('initiator_group_type') is not None:
+            options['initiator-group-type'] = self.parameters['initiator_group_type']
+        if self.parameters.get('bind_portset') is not None:
+            options['bind-portset'] = self.parameters['bind_portset']
+
+        igroup_create = netapp_utils.zapi.NaElement.create_node_with_children(
+            'igroup-create', **options)
+
+        try:
+            self.server.invoke_successfully(igroup_create,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error provisioning igroup %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+        # ZAPI create does not take initiators; add them one by one afterwards
+        self.add_initiators_or_igroups(None, 'initiator_names', [])
+
+    @staticmethod
+    def change_in_initiator_comments(modify, current):
+        """Return the initiator objects from modify whose comment differs from current."""
+
+        if 'initiator_objects' not in current:
+            return list()
+
+        # for python 2.6
+        comments = dict((item['name'], item['comment']) for item in current['initiator_objects'])
+
+        def has_changed_comment(item):
+            return item['name'] in comments and item['comment'] is not None and item['comment'] != comments[item['name']]
+
+        return [item for item in modify['initiator_objects'] if has_changed_comment(item)]
+
+    def modify_igroup_rest(self, uuid, modify):
+        """PATCH the igroup's simple attributes (name, os_type, portset) via REST."""
+        api = "protocols/san/igroups"
+        body = dict()
+        for option in modify:
+            if option not in self.rest_modify_zapi_to_rest:
+                self.module.fail_json(msg='Error: modifying %s is not supported in REST' % option)
+            body[self.rest_modify_zapi_to_rest[option]] = modify[option]
+        if body:
+            dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
+            self.fail_on_error(error)
+
+    def delete_igroup_rest(self, uuid):
+        """DELETE the igroup via REST, optionally forcing removal while mapped."""
+        api = "protocols/san/igroups"
+        query = {'allow_delete_while_mapped': True} if self.parameters['force_remove_initiator'] else None
+        dummy, error = rest_generic.delete_async(self.rest_api, api, uuid, query=query)
+        self.fail_on_error(error)
+
+    def delete_igroup(self, uuid):
+        """
+        Delete the igroup.
+        """
+        if self.use_rest:
+            return self.delete_igroup_rest(uuid)
+
+        igroup_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+            'igroup-destroy', **{'initiator-group-name': self.parameters['name'],
+                                 'force': 'true' if self.parameters['force_remove_initiator'] else 'false'})
+
+        try:
+            self.server.invoke_successfully(igroup_delete,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting igroup %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def rename_igroup(self):
+        """
+        Rename the igroup.
+        """
+        if self.use_rest:
+            # REST renames through a PATCH on 'name', never through this method
+            self.module.fail_json(msg='Internal error, should not call rename, but use modify')
+
+        igroup_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+            'igroup-rename', **{'initiator-group-name': self.parameters['from_name'],
+                                'initiator-group-new-name': str(self.parameters['name'])})
+        try:
+            self.server.invoke_successfully(igroup_rename,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error renaming igroup %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def report_error_in_modify(self, modify, context):
+        """Fail when modify still contains attributes not supported in the given context."""
+        if modify:
+            if len(modify) > 1:
+                tag = 'any of '
+            else:
+                tag = ''
+            self.module.fail_json(msg='Error: modifying %s %s is not supported in %s' % (tag, str(modify), context))
+
+    def validate_modify(self, modify):
+        """Identify options that cannot be modified for REST or ZAPI
+        """
+        if not modify:
+            return
+        modify_local = dict(modify)
+        # membership changes are always handled separately, so remove them
+        modify_local.pop('igroups', None)
+        modify_local.pop('initiator_names', None)
+        modify_local.pop('initiator_objects', None)
+        if not self.use_rest:
+            # ZAPI supports no other modifications
+            self.report_error_in_modify(modify_local, 'ZAPI')
+            return
+        for option in modify:
+            if option in self.rest_modify_zapi_to_rest:
+                modify_local.pop(option)
+        self.report_error_in_modify(modify_local, 'REST')
+
+    def is_rename_action(self, cd_action, current):
+        """Decide whether a missing igroup with from_name set is really a rename."""
+        old = self.get_igroup(self.parameters['from_name'])
+        rename = self.na_helper.is_rename_action(old, current)
+        if rename is None:
+            self.module.fail_json(msg='Error: igroup with from_name=%s not found' % self.parameters.get('from_name'))
+        if rename:
+            # operate on the old igroup and cancel the create
+            current = old
+            cd_action = None
+        return cd_action, rename, current
+
+    def modify_igroup(self, uuid, current, modify):
+        """Reconcile membership, initiator comments, then remaining attributes."""
+        for attr in ('igroups', 'initiator_names'):
+            if attr in current:
+                # we need to remove everything first
+                self.remove_initiators_or_igroups(uuid, attr, current[attr], current['name_to_uuid'][attr])
+        for attr in ('igroups', 'initiator_names'):
+            if attr in current:
+                self.add_initiators_or_igroups(uuid, attr, current[attr])
+            modify.pop(attr, None)
+        if 'initiator_objects' in modify:
+            if self.use_rest:
+                # comments are not supported in ZAPI, we already checked for that in validate changes
+                changed_initiator_objects = self.change_in_initiator_comments(modify, current)
+                self.modify_initiators_rest(uuid, changed_initiator_objects)
+            modify.pop('initiator_objects',)
+        if modify:
+            # validate_modify ensured modify is empty with ZAPI
+            self.modify_igroup_rest(uuid, modify)
+
+    def apply(self):
+        """Apply create/delete/rename/modify actions to reach the desired igroup state."""
+        uuid = None
+        rename, modify = None, None
+        current = self.get_igroup(self.parameters['name'])
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action == 'create' and self.parameters.get('from_name'):
+            cd_action, rename, current = self.is_rename_action(cd_action, current)
+        if cd_action is None and self.parameters['state'] == 'present':
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+            # a change in name is handled in rename for ZAPI, but REST can use modify
+            if self.use_rest:
+                rename = False
+            else:
+                modify.pop('name', None)
+        if current and self.use_rest:
+            uuid = current['uuid']
+        if cd_action == 'create' and self.use_rest and 'os_type' not in self.parameters:
+            self.module.fail_json(msg='Error: os_type is a required parameter when creating an igroup with REST')
+        # snapshot modify for reporting: modify_igroup mutates the dict in place
+        saved_modify = str(modify)
+        self.validate_modify(modify)
+
+        if self.na_helper.changed and not self.module.check_mode:
+            if rename:
+                self.rename_igroup()
+            elif cd_action == 'create':
+                self.create_igroup()
+            elif cd_action == 'delete':
+                self.delete_igroup(uuid)
+            if modify:
+                self.modify_igroup(uuid, current, modify)
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify=saved_modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """Module entry point: instantiate the igroup object and apply the state."""
+    obj = NetAppOntapIgroup()
+    obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py
new file mode 100644
index 000000000..7280eb181
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+''' This is an Ansible module for ONTAP, to manage initiators in an Igroup
+
+ (c) 2019-2022, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_igroup_initiator
+short_description: NetApp ONTAP igroup initiator configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Add/Remove initiators from an igroup
+
+options:
+ state:
+ description:
+ - Whether the specified initiator should exist or not in an igroup.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ names:
+ description:
+ - List of initiators to manage.
+ required: true
+ aliases:
+ - name
+ type: list
+ elements: str
+
+ initiator_group:
+ description:
+ - Name of the initiator group to which the initiator belongs.
+ required: true
+ type: str
+
+ force_remove:
+ description:
+ - Forcibly remove the initiators even if there are existing LUNs mapped to the initiator group.
+ type: bool
+ default: false
+ version_added: '20.1.0'
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Add initiators to an igroup
+ netapp.ontap.na_ontap_igroup_initiator:
+ names: abc.test:def.com,def.test:efg.com
+ initiator_group: test_group
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Remove an initiator from an igroup
+ netapp.ontap.na_ontap_igroup_initiator:
+ state: absent
+ names: abc.test:def.com
+ initiator_group: test_group
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapIgroupInitiator(object):
+    """Add or remove initiators in an ONTAP igroup.
+
+    Uses the REST API when the cluster supports it, otherwise falls back to ZAPI.
+    """
+
+    def __init__(self):
+        # Start from the shared ONTAP connection options (hostname, auth, ...).
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            names=dict(required=True, type='list', elements='str', aliases=['name']),
+            initiator_group=dict(required=True, type='str'),
+            force_remove=dict(required=False, type='bool', default=False),
+            vserver=dict(required=True, type='str'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+        # igroup UUID: populated by get_initiators_rest(), required by modify_initiator_rest().
+        self.uuid = None
+
+        if not self.use_rest:
+            # The ZAPI path needs the netapp_lib python package.
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_initiators(self):
+        """
+        Get the existing list of initiators from an igroup
+        :rtype: list (empty when the igroup does not exist or has no initiators)
+        """
+        if self.use_rest:
+            return self.get_initiators_rest()
+        # ZAPI: query igroup-get-iter filtered by igroup name and vserver.
+        igroup_info = netapp_utils.zapi.NaElement('igroup-get-iter')
+        attributes = dict(query={'initiator-group-info': {'initiator-group-name': self.parameters['initiator_group'],
+                                                          'vserver': self.parameters['vserver']}})
+        igroup_info.translate_struct(attributes)
+        result, current = None, []
+
+        try:
+            result = self.server.invoke_successfully(igroup_info, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching igroup info %s: %s' % (self.parameters['initiator_group'],
+                                                                             to_native(error)),
+                                  exception=traceback.format_exc())
+
+        # (vserver, igroup name) identifies a single igroup, so only the first record is read.
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            igroup_info = result.get_child_by_name('attributes-list').get_child_by_name('initiator-group-info')
+            # An igroup may legitimately have no initiators element at all.
+            if igroup_info.get_child_by_name('initiators') is not None:
+                current = [initiator['initiator-name'] for initiator in igroup_info['initiators'].get_children()]
+        return current
+
+    def modify_initiator(self, initiator_name, zapi):
+        """
+        Add or remove an initiator to/from an igroup
+
+        :param initiator_name: initiator to add or remove.
+        :param zapi: ZAPI call name, 'igroup-add' or 'igroup-remove'; also selects
+                     the REST action when REST is in use.
+        """
+        if self.use_rest:
+            return self.modify_initiator_rest(initiator_name, zapi)
+        # force applies only to removal, and only when the user asked for it.
+        options = {'initiator-group-name': self.parameters['initiator_group'],
+                   'initiator': initiator_name,
+                   'force': 'true' if zapi == 'igroup-remove' and self.parameters['force_remove'] else 'false'}
+        initiator_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+
+        try:
+            self.server.invoke_successfully(initiator_modify, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying igroup initiator %s: %s' % (initiator_name,
+                                                                                   to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def get_initiators_rest(self):
+        """Return the igroup's initiator names via REST, caching the igroup UUID on self."""
+        api = 'protocols/san/igroups'
+        query = {'name': self.parameters['initiator_group'], 'svm.name': self.parameters['vserver']}
+        fields = 'initiators,uuid'
+        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+        if error:
+            self.module.fail_json(msg="Error fetching igroup info %s: %s" % (self.parameters['initiator_group'], error))
+        current = []
+        if record:
+            self.uuid = record['uuid']
+            # igroup may have 0 initiators.
+            if 'initiators' in record:
+                current = [initiator['name'] for initiator in record['initiators']]
+        return current
+
+    def modify_initiator_rest(self, initiator_name, modify_action):
+        """Add (POST) or remove (DELETE) an initiator via REST; requires self.uuid to be set."""
+        if self.uuid is None:
+            self.module.fail_json(msg="Error modifying igroup initiator %s: igroup not found" % initiator_name)
+        api = 'protocols/san/igroups/%s/initiators' % self.uuid
+        if modify_action == 'igroup-add':
+            body = {"name": initiator_name}
+            dummy, error = rest_generic.post_async(self.rest_api, api, body)
+        else:
+            # DELETE: allow_delete_while_mapped mirrors the ZAPI 'force' flag.
+            query = {'allow_delete_while_mapped': self.parameters['force_remove']}
+            dummy, error = rest_generic.delete_async(self.rest_api, api, initiator_name, query)
+        if error:
+            self.module.fail_json(msg="Error modifying igroup initiator %s: %s" % (initiator_name, error))
+
+    def apply(self):
+        """Reconcile each requested initiator against the igroup's current membership."""
+        initiators = self.get_initiators()
+        for initiator in self.parameters['names']:
+            present = None
+            # Normalize WWN formatting so comparisons are not defeated by case/punctuation.
+            initiator = self.na_helper.sanitize_wwn(initiator)
+            if initiator in initiators:
+                present = True
+            # get_cd_action flags na_helper.changed; changes accumulate across the loop.
+            cd_action = self.na_helper.get_cd_action(present, self.parameters)
+            if self.na_helper.changed and not self.module.check_mode:
+                if cd_action == 'create':
+                    self.modify_initiator(initiator, 'igroup-add')
+                elif cd_action == 'delete':
+                    self.modify_initiator(initiator, 'igroup-remove')
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ obj = NetAppOntapIgroupInitiator()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py
new file mode 100644
index 000000000..6591cc9cd
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py
@@ -0,0 +1,1825 @@
+#!/usr/bin/python
+
+# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_info
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_info
+author: Piotr Olczak (@dprts) <polczak@redhat.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+short_description: NetApp information gatherer
+description:
+ - This module allows you to gather various information about ONTAP configuration
+version_added: 2.9.0
+requirements:
+ - netapp_lib
+options:
+ state:
+ type: str
+ description:
+ - deprecated as of 21.1.0.
+ - this option was ignored and continues to be ignored.
+ vserver:
+ type: str
+ description:
+ - If present, 'vserver tunneling' will limit the output to the vserver scope.
+ - Note that not all subsets are supported on a vserver, and 'all' will trigger an error.
+ version_added: '19.11.0'
+ gather_subset:
+ type: list
+ elements: str
+ description:
+ - When supplied, this argument will restrict the information collected to a given subset. Possible values for this argument include
+ - "active_directory_account_info"
+ - "aggregate_info"
+ - "aggr_efficiency_info"
+ - "autosupport_check_info"
+ - "cifs_options_info"
+ - "cifs_server_info"
+ - "cifs_share_info"
+ - "cifs_vserver_security_info"
+ - "cluster_identity_info"
+ - "cluster_image_info"
+ - "cluster_log_forwarding_info"
+ - "cluster_node_info"
+ - "cluster_peer_info"
+ - "cluster_switch_info"
+ - "clock_info"
+ - "disk_info"
+ - "env_sensors_info"
+ - "event_notification_destination_info"
+ - "event_notification_info"
+ - "export_policy_info"
+ - "export_rule_info"
+ - "fcp_adapter_info"
+ - "fcp_alias_info"
+ - "fcp_service_info"
+ - "igroup_info"
+ - "iscsi_service_info"
+ - "job_schedule_cron_info"
+ - "kerberos_realm_info"
+ - "ldap_client"
+ - "ldap_config"
+ - "license_info"
+ - "lun_info"
+ - "lun_map_info"
+ - "metrocluster_check_info"
+ - "metrocluster_info"
+ - "metrocluster_node_info"
+ - "net_dev_discovery_info"
+ - "net_dns_info"
+ - "net_failover_group_info"
+ - "net_firewall_info"
+ - "net_ifgrp_info"
+ - "net_interface_info"
+ - "net_interface_service_policy_info"
+ - "net_ipspaces_info"
+ - "net_port_info"
+ - "net_port_broadcast_domain_info"
+ - "net_routes_info"
+ - "net_vlan_info"
+ - "nfs_info"
+ - "ntfs_dacl_info"
+ - "ntfs_sd_info"
+ - "ntp_server_info"
+ - "nvme_info"
+ - "nvme_interface_info"
+ - "nvme_namespace_info"
+ - "nvme_subsystem_info"
+ - "ontap_system_version"
+ - "ontap_version"
+ - "ontapi_version"
+ - "qos_adaptive_policy_info"
+ - "qos_policy_info"
+ - "qtree_info"
+ - "quota_policy_info"
+ - "quota_report_info"
+ - "role_info"
+ - "security_key_manager_key_info"
+ - "security_login_account_info"
+ - "security_login_role_config_info"
+ - "security_login_role_info"
+ - "service_processor_info"
+ - "service_processor_network_info"
+ - "shelf_info"
+ - "sis_info"
+ - "sis_policy_info"
+ - "snapmirror_info"
+ - "snapmirror_destination_info"
+ - "snapmirror_policy_info"
+ - "snapshot_info"
+ - "snapshot_policy_info"
+ - "storage_failover_info"
+ - "storage_bridge_info"
+ - "subsys_health_info"
+ - "sysconfig_info"
+ - "sys_cluster_alerts"
+ - "volume_info"
+ - "volume_space_info"
+ - "vscan_info"
+ - "vscan_status_info"
+ - "vscan_scanner_pool_info"
+ - "vscan_connection_status_all_info"
+ - "vscan_connection_extended_stats_info"
+ - "vserver_info"
+ - "vserver_login_banner_info"
+ - "vserver_motd_info"
+ - "vserver_nfs_info"
+ - "vserver_peer_info"
+ - Can specify a list of values to include a larger subset.
+ - Values can also be used with an initial C(!) to specify that a specific subset should not be collected.
+ - nvme is supported with ONTAP 9.4 onwards.
+ - use "help" to get a list of supported information for your system.
+ - with lun_info, serial_hex and naa_id are computed when serial_number is present.
+ default: "all"
+ max_records:
+ type: int
+ description:
+ - Maximum number of records returned in a single ZAPI call. Valid range is [1..2^32-1].
+ This parameter controls internal behavior of this module.
+ default: 1024
+ version_added: '20.2.0'
+ summary:
+ description:
+ - Boolean flag to control return all attributes of the module info or only the names.
+ - If true, only names are returned.
+ default: false
+ type: bool
+ version_added: '20.4.0'
+ volume_move_target_aggr_info:
+ description:
+ - Required options for volume_move_target_aggr_info
+ type: dict
+ version_added: '20.5.0'
+ suboptions:
+ volume_name:
+ description:
+ - Volume name to get target aggr info for
+ required: true
+ type: str
+ version_added: '20.5.0'
+ vserver:
+ description:
+ - vserver the Volume lives on
+ required: true
+ type: str
+ version_added: '20.5.0'
+ desired_attributes:
+ description:
+ - Advanced feature requiring to understand ZAPI internals.
+ - Allows to request a specific attribute that is not returned by default, or to limit the returned attributes.
+ - A dictionary for the zapi desired-attributes element.
+ - An XML tag I(<tag>value</tag>) is a dictionary with tag as the key.
+ - Value can be another dictionary, a list of dictionaries, a string, or nothing.
+ - eg I(<tag/>) is represented as I(tag:)
+ - Only a single subset can be called at a time if this option is set.
+      - It is the caller's responsibility to make sure key attributes are present in the right position.
+ - The module will error out if any key attribute is missing.
+ type: dict
+ version_added: '20.6.0'
+ query:
+ description:
+ - Advanced feature requiring to understand ZAPI internals.
+ - Allows to specify which objects to return.
+ - A dictionary for the zapi query element.
+ - An XML tag I(<tag>value</tag>) is a dictionary with tag as the key.
+ - Value can be another dictionary, a list of dictionaries, a string, or nothing.
+ - eg I(<tag/>) is represented as I(tag:)
+ - Only a single subset can be called at a time if this option is set.
+ type: dict
+ version_added: '20.7.0'
+ use_native_zapi_tags:
+ description:
+ - By default, I(-) in the returned dictionary keys are translated to I(_).
+ - If set to true, the translation is disabled.
+ type: bool
+ default: false
+ version_added: '20.6.0'
+ continue_on_error:
+ description:
+ - By default, this module fails on the first error.
+ - This option allows to provide a list of errors that are not failing the module.
+ - Errors in the list are reported in the output, under the related info element, as an "error" entry.
+      - Possible values are always, never, missing_vserver_api_error, rpc_error, key_error, other_error.
+ - missing_vserver_api_error - most likely the API is available at cluster level but not vserver level.
+ - rpc_error - some queries are failing because the node cannot reach another node in the cluster.
+ - key_error - a query is failing because the returned data does not contain an expected key.
+ - for key errors, make sure to report this in Slack. It may be a change in a new ONTAP version.
+ - other_error - anything not in the above list.
+ - always will continue on any error, never will fail on any error, they cannot be used with any other keyword.
+ type: list
+ elements: str
+ default: never
+'''
+
+EXAMPLES = '''
+- name: Get NetApp info as Cluster Admin (Password Authentication)
+ netapp.ontap.na_ontap_info:
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ register: ontap_info
+- debug:
+ msg: "{{ ontap_info.ontap_info }}"
+
+- name: Get NetApp version as Vserver admin
+ netapp.ontap.na_ontap_info:
+ hostname: "na-vsim"
+ username: "vsadmin"
+ vserver: trident_svm
+ password: "vsadmins_password"
+
+- name: run ontap info module using vserver tunneling and ignoring errors
+ netapp.ontap.na_ontap_info:
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ vserver: trident_svm
+ summary: true
+ continue_on_error:
+ - missing_vserver_api_error
+ - rpc_error
+
+- name: Limit Info Gathering to Aggregate Information as Cluster Admin
+ netapp.ontap.na_ontap_info:
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset: "aggregate_info"
+ register: ontap_info
+
+- name: Limit Info Gathering to Volume and Lun Information as Cluster Admin
+ netapp.ontap.na_ontap_info:
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - volume_info
+ - lun_info
+ register: ontap_info
+
+- name: Gather all info except for volume and lun information as Cluster Admin
+ netapp.ontap.na_ontap_info:
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - "!volume_info"
+ - "!lun_info"
+ register: ontap_info
+
+- name: Gather Volume move information for a specific volume
+ netapp.ontap.na_ontap_info:
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset: volume_move_target_aggr_info
+ volume_move_target_aggr_info:
+ volume_name: carchitest
+ vserver: ansible
+
+- name: run ontap info module for aggregate module, requesting specific fields
+ netapp.ontap.na_ontap_info:
+ # <<: *login
+ gather_subset: aggregate_info
+ desired_attributes:
+ aggr-attributes:
+ aggr-inode-attributes:
+ files-private-used:
+ aggr-raid-attributes:
+ aggregate-type:
+ use_native_zapi_tags: true
+ register: ontap
+- debug: var=ontap
+
+- name: run ontap info to get offline volumes with dp in the name
+ netapp.ontap.na_ontap_info:
+ # <<: *cert_login
+ gather_subset: volume_info
+ query:
+ volume-attributes:
+ volume-id-attributes:
+ name: '*dp*'
+ volume-state-attributes:
+ state: offline
+ desired_attributes:
+ volume-attributes:
+ volume-id-attributes:
+ name:
+ volume-state-attributes:
+ state:
+ register: ontap
+- debug: var=ontap
+'''
+
+RETURN = '''
+ontap_info:
+ description: Returns various information about NetApp cluster configuration
+ returned: always
+ type: dict
+ sample: '{
+ "ontap_info": {
+ "active_directory_account_info": {...},
+ "aggregate_info": {...},
+ "autosupport_check_info": {...},
+ "cluster_identity_info": {...},
+ "cluster_image_info": {...},
+ "cluster_node_info": {...},
+ "igroup_info": {...},
+ "iscsi_service_info": {...},
+ "license_info": {...},
+ "lun_info": {...},
+ "metrocluster_check_info": {...},
+ "metrocluster_info": {...},
+ "metrocluster_node_info": {...},
+ "net_dns_info": {...},
+ "net_ifgrp_info": {...},
+ "net_interface_info": {...},
+ "net_interface_service_policy_info": {...},
+ "net_port_info": {...},
+ "ontap_system_version": {...},
+ "ontap_version": {...},
+ "ontapi_version": {...},
+ "qos_policy_info": {...},
+ "qos_adaptive_policy_info": {...},
+ "qtree_info": {...},
+ "quota_policy_info": {..},
+ "quota_report_info": {...},
+ "security_key_manager_key_info": {...},
+ "security_login_account_info": {...},
+ "snapmirror_info": {...}
+ "snapmirror_destination_info": {...}
+ "storage_bridge_info": {...}
+ "storage_failover_info": {...},
+ "volume_info": {...},
+ "vserver_login_banner_info": {...},
+ "vserver_motd_info": {...},
+ "vserver_info": {...},
+ "vserver_nfs_info": {...},
+ "vscan_status_info": {...},
+ "vscan_scanner_pool_info": {...},
+ "vscan_connection_status_all_info": {...},
+ "vscan_connection_extended_stats_info": {...}
+ }'
+'''
+
+import codecs
+import copy
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native, to_text
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+IMPORT_ERRORS = []
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+except ImportError as exc:
+ HAS_XMLTODICT = False
+ IMPORT_ERRORS.append(str(exc))
+
+try:
+ import json
+ HAS_JSON = True
+except ImportError as exc:
+ HAS_JSON = False
+ IMPORT_ERRORS.append(str(exc))
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPGatherInfo:
+ '''Class with gather info methods'''
+
+ def __init__(self):
+ ''' create module, set up context'''
+ argument_spec = netapp_utils.na_ontap_zapi_only_spec()
+ argument_spec.update(dict(
+ state=dict(type='str'),
+ gather_subset=dict(default=['all'], type='list', elements='str'),
+ vserver=dict(type='str', required=False),
+ max_records=dict(type='int', default=1024, required=False),
+ summary=dict(type='bool', default=False, required=False),
+ volume_move_target_aggr_info=dict(
+ type="dict",
+ required=False,
+ options=dict(
+ volume_name=dict(type='str', required=True),
+ vserver=dict(type='str', required=True)
+ )
+ ),
+ desired_attributes=dict(type='dict', required=False),
+ use_native_zapi_tags=dict(type='bool', required=False, default=False),
+ continue_on_error=dict(type='list', required=False, elements='str', default=['never']),
+ query=dict(type='dict', required=False),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not HAS_NETAPP_LIB:
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ if not HAS_XMLTODICT:
+ self.module.fail_json(msg="the python xmltodict module is required. Import error: %s" % str(IMPORT_ERRORS))
+ if not HAS_JSON:
+ self.module.fail_json(msg="the python json module is required. Import error: %s" % str(IMPORT_ERRORS))
+
+ self.max_records = str(self.module.params['max_records'])
+ volume_move_target_aggr_info = self.module.params.get('volume_move_target_aggr_info', dict())
+ if volume_move_target_aggr_info is None:
+ volume_move_target_aggr_info = {}
+ self.netapp_info = {}
+ self.desired_attributes = self.module.params['desired_attributes']
+ self.query = self.module.params['query']
+ self.translate_keys = not self.module.params['use_native_zapi_tags']
+ self.warnings = [] # warnings will be added to the info results, if any
+ self.set_error_flags()
+ self.module.warn('The module only supports ZAPI and is deprecated, and will no longer work with newer versions '
+ 'of ONTAP when ONTAPI is deprecated in CY22-Q4')
+ self.module.warn('netapp.ontap.na_ontap_rest_info should be used instead.')
+
+ # thanks to coreywan (https://github.com/ansible/ansible/pull/47016)
+ # for starting this
+ # min_version identifies the ontapi version which supports this ZAPI
+ # use 0 if it is supported since 9.1
+ self.info_subsets = {
+ 'cluster_identity_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-identity-get',
+ 'attributes_list_tag': 'attributes',
+ 'attribute': 'cluster-identity-info',
+ 'key_fields': 'cluster-name',
+ },
+ 'min_version': '0',
+ },
+ 'cluster_image_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-image-get-iter',
+ 'attribute': 'cluster-image-info',
+ 'key_fields': 'node-id',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_log_forwarding_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-log-forward-get-iter',
+ 'attribute': 'cluster-log-forward-info',
+ 'key_fields': ('destination', 'port'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-node-get-iter',
+ 'attribute': 'cluster-node-info',
+ 'key_fields': 'node-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_account_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-get-iter',
+ 'attribute': 'security-login-account-info',
+ 'key_fields': ('vserver', 'user-name', 'application', 'authentication-method'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_role_config_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-role-config-get-iter',
+ 'attribute': 'security-login-role-config-info',
+ 'key_fields': ('vserver', 'role-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_role_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-role-get-iter',
+ 'attribute': 'security-login-role-info',
+ 'key_fields': ('vserver', 'role-name', 'command-directory-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'active_directory_account_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'active-directory-account-get-iter',
+ 'attribute': 'active-directory-account-config',
+ 'key_fields': ('vserver', 'account-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'aggregate_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'aggr-get-iter',
+ 'attribute': 'aggr-attributes',
+ 'key_fields': 'aggregate-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'volume_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-get-iter',
+ 'attribute': 'volume-attributes',
+ 'key_fields': ('name', 'owning-vserver-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'license_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'license-v2-list-info',
+ 'attributes_list_tag': None,
+ 'attribute': 'licenses',
+ },
+ 'min_version': '0',
+ },
+ 'lun_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'lun-get-iter',
+ 'attribute': 'lun-info',
+ 'key_fields': ('vserver', 'path'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'metrocluster_check_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'metrocluster-check-get-iter',
+ 'attribute': 'metrocluster-check-info',
+ 'fail_on_error': False,
+ },
+ 'min_version': '0',
+ },
+ 'metrocluster_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'metrocluster-get',
+ 'attribute': 'metrocluster-info',
+ 'attributes_list_tag': 'attributes',
+ },
+ 'min_version': '0',
+ },
+ 'metrocluster_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'metrocluster-node-get-iter',
+ 'attribute': 'metrocluster-node-info',
+ 'key_fields': ('cluster-name', 'node-name'),
+ },
+ 'min_version': '0',
+ },
+ 'net_dns_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-dns-get-iter',
+ 'attribute': 'net-dns-info',
+ 'key_fields': 'vserver-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-interface-get-iter',
+ 'attribute': 'net-interface-info',
+ 'key_fields': ('interface-name', 'vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_interface_service_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-interface-service-policy-get-iter',
+ 'attribute': 'net-interface-service-policy-info',
+ 'key_fields': ('vserver', 'policy'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '150',
+ },
+ 'net_port_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-port-get-iter',
+ 'attribute': 'net-port-info',
+ 'key_fields': ('node', 'port'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'security_key_manager_key_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-key-manager-key-get-iter',
+ 'attribute': 'security-key-manager-key-info',
+ 'key_fields': ('node', 'key-id'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'storage_failover_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cf-get-iter',
+ 'attribute': 'storage-failover-info',
+ 'key_fields': 'node',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_motd_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-motd-get-iter',
+ 'attribute': 'vserver-motd-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_login_banner_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-login-banner-get-iter',
+ 'attribute': 'vserver-login-banner-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-get-iter',
+ 'attribute': 'vserver-info',
+ 'key_fields': 'vserver-name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_nfs_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nfs-service-get-iter',
+ 'attribute': 'nfs-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_ifgrp_info': {
+ 'method': self.get_ifgrp_info,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'ontap_system_version': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-get-version',
+ 'attributes_list_tag': None,
+ },
+ 'min_version': '0',
+ },
+ 'ontap_version': {
+ 'method': self.ontapi,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'ontapi_version': {
+ 'method': self.ontapi,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'clock_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'clock-get-clock',
+ 'attributes_list_tag': None,
+ },
+ 'min_version': '0'
+ },
+ 'system_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-node-get-iter',
+ 'attribute': 'node-details-info',
+ 'key_fields': 'node',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'igroup_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'igroup-get-iter',
+ 'attribute': 'initiator-group-info',
+ 'key_fields': ('vserver', 'initiator-group-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'iscsi_service_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'iscsi-service-get-iter',
+ 'attribute': 'iscsi-service-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'qos_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-policy-group-get-iter',
+ 'attribute': 'qos-policy-group-info',
+ 'key_fields': 'policy-group',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'qtree_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qtree-list-iter',
+ 'attribute': 'qtree-info',
+ 'key_fields': ('vserver', 'volume', 'id'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'quota_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'quota-policy-get-iter',
+ 'attribute': 'quota-policy-info',
+ 'key_fields': ('vserver', 'policy-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'quota_report_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'quota-report-iter',
+ 'attribute': 'quota',
+ 'key_fields': ('vserver', 'volume', 'tree', 'quota-type', 'quota-target'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_status_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-status-get-iter',
+ 'attribute': 'vscan-status-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_scanner_pool_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-scanner-pool-get-iter',
+ 'attribute': 'vscan-scanner-pool-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_connection_status_all_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-connection-status-all-get-iter',
+ 'attribute': 'vscan-connection-status-all-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_connection_extended_stats_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-connection-extended-stats-get-iter',
+ 'attribute': 'vscan-connection-extended-stats-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'snapshot_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapshot-get-iter',
+ 'attribute': 'snapshot-info',
+ 'key_fields': ('vserver', 'volume', 'name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'storage_bridge_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'storage-bridge-get-iter',
+ 'attribute': 'storage-bridge-info',
+ 'key_fields': 'name',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ # supported in ONTAP 9.3 and onwards
+ 'qos_adaptive_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-adaptive-policy-group-get-iter',
+ 'attribute': 'qos-adaptive-policy-group-info',
+ 'key_fields': 'policy-group',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '130',
+ },
+ # supported in ONTAP 9.4 and onwards
+ 'nvme_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-get-iter',
+ 'attribute': 'nvme-target-service-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-interface-get-iter',
+ 'attribute': 'nvme-interface-info',
+ 'key_fields': 'vserver',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_subsystem_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-subsystem-get-iter',
+ 'attribute': 'nvme-subsystem-info',
+ 'key_fields': 'subsystem',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_namespace_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-namespace-get-iter',
+ 'attribute': 'nvme-namespace-info',
+ 'key_fields': 'path',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+
+ # Alpha Order
+
+ 'aggr_efficiency_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'aggr-efficiency-get-iter',
+ 'attribute': 'aggr-efficiency-info',
+ # the preferred key is node_name:aggregate_name
+ # but node is not present with MCC
+ 'key_fields': (('node', None), 'aggregate'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'autosupport_check_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'autosupport-check-iter',
+ 'attribute': 'autosupport-check-info',
+ 'key_fields': ('node-name', 'check-type', 'error-detail'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cifs_options_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-options-get-iter',
+ 'attribute': 'cifs-options',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cifs_server_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-server-get-iter',
+ 'attribute': 'cifs-server-config',
+ # preferred key is <vserver>:<domain>:<cifs-server>
+ # alternate key is <vserver>:<domain-workgroup>:<cifs-server>
+ 'key_fields': ('vserver', ('domain', 'domain-workgroup'), 'cifs-server'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cifs_share_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-share-get-iter',
+ 'attribute': 'cifs-share',
+ 'key_fields': ('share-name', 'path', 'cifs-server'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cifs_vserver_security_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cifs-security-get-iter',
+ 'attribute': 'cifs-security',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_peer_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-peer-get-iter',
+ 'attribute': 'cluster-peer-info',
+ 'key_fields': ('cluster-name', 'remote-cluster-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_switch_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-switch-get-iter',
+ 'attribute': 'cluster-switch-info',
+ 'key_fields': ('device', 'model', 'serial-number'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '160',
+ },
+ 'disk_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'storage-disk-get-iter',
+ 'attribute': 'storage-disk-info',
+ 'key_fields': ('disk-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'env_sensors_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'environment-sensors-get-iter',
+ 'attribute': 'environment-sensors-info',
+ 'key_fields': ('node-name', 'sensor-name'),
+ 'query': {'max-records': self.max_records},
+ 'fail_on_error': False,
+ },
+ 'min_version': '0',
+ },
+ 'event_notification_destination_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ems-event-notification-destination-get-iter',
+ 'attribute': 'event-notification-destination-info',
+ 'key_fields': ('name', 'type'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'event_notification_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ems-event-notification-get-iter',
+ 'attribute': 'event-notification',
+ 'key_fields': ('id'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'export_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'export-policy-get-iter',
+ 'attribute': 'export-policy-info',
+ 'key_fields': ('vserver', 'policy-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'export_rule_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'export-rule-get-iter',
+ 'attribute': 'export-rule-info',
+ 'key_fields': ('vserver-name', 'policy-name', 'rule-index'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'fcp_adapter_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ucm-adapter-get-iter',
+ 'attribute': 'uc-adapter-info',
+ 'key_fields': ('adapter-name', 'node-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'fcp_alias_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'fcp-wwpnalias-get-iter',
+ 'attribute': 'aliases-info',
+ 'key_fields': ('aliases-alias', 'vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'fcp_service_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'fcp-service-get-iter',
+ 'attribute': 'fcp-service-info',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'job_schedule_cron_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'job-schedule-cron-get-iter',
+ 'attribute': 'job-schedule-cron-info',
+ 'key_fields': ('job-schedule-name', ('job-schedule-cluster', None)),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'kerberos_realm_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'kerberos-realm-get-iter',
+ 'attribute': 'kerberos-realm',
+ 'key_fields': ('vserver-name', 'realm'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ldap_client': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ldap-client-get-iter',
+ 'attribute': 'ldap-client',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ldap_config': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ldap-config-get-iter',
+ 'attribute': 'ldap-config',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'lun_map_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'lun-map-get-iter',
+ 'attribute': 'lun-map-info',
+ 'key_fields': ('initiator-group', 'lun-id', 'node', 'path', 'vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_dev_discovery_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-device-discovery-get-iter',
+ 'attribute': 'net-device-discovery-info',
+ 'key_fields': ('port'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_failover_group_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-failover-group-get-iter',
+ 'attribute': 'net-failover-group-info',
+ 'key_fields': ('vserver', 'failover-group'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_firewall_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-firewall-policy-get-iter',
+ 'attribute': 'net-firewall-policy-info',
+ 'key_fields': ('policy', 'vserver', 'service'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_ipspaces_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-ipspaces-get-iter',
+ 'attribute': 'net-ipspaces-info',
+ 'key_fields': ('ipspace'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_port_broadcast_domain_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-port-broadcast-domain-get-iter',
+ 'attribute': 'net-port-broadcast-domain-info',
+ 'key_fields': ('broadcast-domain', 'ipspace'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_routes_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-routes-get-iter',
+ 'attribute': 'net-vs-routes-info',
+ 'key_fields': ('vserver', 'destination', 'gateway'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'net_vlan_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-vlan-get-iter',
+ 'attribute': 'vlan-info',
+ 'key_fields': ('interface-name', 'node'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'nfs_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nfs-service-get-iter',
+ 'attribute': 'nfs-info',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ntfs_dacl_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'file-directory-security-ntfs-dacl-get-iter',
+ 'attribute': 'file-directory-security-ntfs-dacl',
+ 'key_fields': ('vserver', 'ntfs-sd', 'account', 'access-type'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ntfs_sd_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'file-directory-security-ntfs-get-iter',
+ 'attribute': 'file-directory-security-ntfs',
+ 'key_fields': ('vserver', 'ntfs-sd'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'ntp_server_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'ntp-server-get-iter',
+ 'attribute': 'ntp-server-info',
+ 'key_fields': ('server-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'role_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-role-get-iter',
+ 'attribute': 'security-login-role-info',
+ 'key_fields': ('vserver', 'role-name', 'access-level', 'command-directory-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'service_processor_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'service-processor-get-iter',
+ 'attribute': 'service-processor-info',
+ 'key_fields': ('node'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'service_processor_network_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'service-processor-network-get-iter',
+ 'attribute': 'service-processor-network-info',
+                # don't use key_fields, as we cannot build a key with optional key_fields
+ # without a key, we'll get a list of dictionaries
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'shelf_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'storage-shelf-info-get-iter',
+ 'attribute': 'storage-shelf-info',
+ 'key_fields': ('shelf-id', 'serial-number'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sis_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'sis-get-iter',
+ 'attribute': 'sis-status-info',
+ 'key_fields': 'path',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sis_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'sis-policy-get-iter',
+ 'attribute': 'sis-policy-info',
+ 'key_fields': ('vserver', 'policy-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'snapmirror_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapmirror-get-iter',
+ 'attribute': 'snapmirror-info',
+ 'key_fields': 'destination-location',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'snapmirror_destination_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapmirror-get-destination-iter',
+ 'attribute': 'snapmirror-destination-info',
+ 'key_fields': 'destination-location',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '140',
+ },
+ 'snapmirror_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapmirror-policy-get-iter',
+ 'attribute': 'snapmirror-policy-info',
+ 'key_fields': ('vserver-name', 'policy-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'snapshot_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'snapshot-policy-get-iter',
+ 'attribute': 'snapshot-policy-info',
+ 'key_fields': ('vserver-name', 'policy'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'subsys_health_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'diagnosis-subsystem-config-get-iter',
+ 'attribute': 'diagnosis-subsystem-config-info',
+ 'key_fields': 'subsystem',
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sys_cluster_alerts': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'diagnosis-alert-get-iter',
+ 'attribute': 'diagnosis-alert-info',
+ 'key_fields': ('node', 'alerting-resource'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'sysconfig_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-get-node-info-iter',
+ 'attribute': 'system-info',
+ 'key_fields': ('system-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'volume_move_target_aggr_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-move-target-aggr-get-iter',
+ 'attribute': 'volume-move-target-aggr-info',
+ 'query': {'max-records': self.max_records,
+ 'volume-name': volume_move_target_aggr_info.get('volume_name', None),
+ 'vserver': volume_move_target_aggr_info.get('vserver', None)},
+ 'fail_on_error': False,
+ },
+ 'min_version': '0',
+ },
+ 'volume_space_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-space-get-iter',
+ 'attribute': 'space-info',
+ 'key_fields': ('vserver', 'volume'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vscan_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vscan-status-get-iter',
+ 'attribute': 'vscan-status-info',
+ 'key_fields': ('vserver'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_peer_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-peer-get-iter',
+ 'attribute': 'vserver-peer-info',
+ 'key_fields': ('vserver', 'remote-vserver-name'),
+ 'query': {'max-records': self.max_records},
+ },
+ 'min_version': '0',
+ },
+ }
+
+ # use vserver tunneling if vserver is present (not None)
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.module.params['vserver'])
+
+ def ontapi(self):
+ '''Method to get ontapi version'''
+
+ api = 'system-get-ontapi-version'
+ api_call = netapp_utils.zapi.NaElement(api)
+ try:
+ results = self.server.invoke_successfully(api_call, enable_tunneling=True)
+ ontapi_version = results.get_child_content('minor-version')
+ return ontapi_version if ontapi_version is not None else '0'
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error calling API %s: %s" %
+ (api, to_native(error)), exception=traceback.format_exc())
+
+    def call_api(self, call, attributes_list_tag='attributes-list', query=None, fail_on_error=True):
+        '''Invoke a ZAPI call, following 'next-tag' pagination, and aggregate the pages.
+
+        :param call: name of the ZAPI to invoke (e.g. 'volume-get-iter').
+        :param attributes_list_tag: tag holding the records; None when the API is not an iterator.
+        :param query: optional flat dict of child elements added to the request (e.g. max-records).
+        :param fail_on_error: when False, tolerated errors are returned instead of failing the module.
+        :return: tuple (aggregated result NaElement or None, error message or None).
+        '''
+        api_call = netapp_utils.zapi.NaElement(call)
+        initial_result = None
+        result = None
+
+        if query:
+            for key, val in query.items():
+                # Can val be nested?
+                api_call.add_new_child(key, val)
+
+        # merge user-provided desired-attributes and query structures into the request
+        if self.desired_attributes is not None:
+            api_call.translate_struct(self.desired_attributes)
+        if self.query is not None:
+            api_call.translate_struct(self.query)
+        try:
+            initial_result = self.server.invoke_successfully(api_call, enable_tunneling=True)
+            next_tag = initial_result.get_child_by_name('next-tag')
+            # shallow copy: records from follow-up pages are appended into this element
+            result = copy.copy(initial_result)
+
+            while next_tag:
+                # rebuild the same request for the next page, adding the pagination tag
+                next_tag_call = netapp_utils.zapi.NaElement(call)
+                if query:
+                    for key, val in query.items():
+                        next_tag_call.add_new_child(key, val)
+
+                next_tag_call.add_new_child("tag", next_tag.get_content(), True)
+                next_result = self.server.invoke_successfully(next_tag_call, enable_tunneling=True)
+
+                next_tag = next_result.get_child_by_name('next-tag')
+                if attributes_list_tag is None:
+                    # non-iterator APIs must not paginate; something is wrong
+                    self.module.fail_json(msg="Error calling API %s: %s" %
+                                          (api_call.to_string(), "'next-tag' is not expected for this API"))
+
+                # fold this page's records into the aggregated result
+                result_attr = result.get_child_by_name(attributes_list_tag)
+                new_records = next_result.get_child_by_name(attributes_list_tag)
+                if new_records:
+                    for record in new_records.get_children():
+                        result_attr.add_child_elem(record)
+
+            return result, None
+
+        except netapp_utils.zapi.NaApiError as error:
+            # this API raises when the feature is absent; treat it as "no data"
+            if call in ['security-key-manager-key-get-iter']:
+                return result, None
+            kind, error_message = netapp_utils.classify_zapi_exception(error)
+            if kind == 'missing_vserver_api_error':
+                # for missing_vserver_api_error, the API is already in error_message
+                error_message = "Error invalid API. %s" % error_message
+            else:
+                error_message = "Error calling API %s: %s" % (call, error_message)
+            # error_flags (built from continue_on_error) decide whether this category is fatal
+            if self.error_flags[kind] and fail_on_error:
+                self.module.fail_json(msg=error_message, exception=traceback.format_exc())
+            return None, error_message
+
+ def get_ifgrp_info(self):
+ '''Method to get network port ifgroups info'''
+
+ try:
+ net_port_info = self.netapp_info['net_port_info']
+ except KeyError:
+ net_port_info_calls = self.info_subsets['net_port_info']
+ net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs'])
+ interfaces = net_port_info.keys()
+
+ ifgrps = []
+ for ifn in interfaces:
+ if net_port_info[ifn]['port_type'] == 'if_group':
+ ifgrps.append(ifn)
+
+ net_ifgrp_info = dict()
+ for ifgrp in ifgrps:
+ query = dict()
+ query['node'], query['ifgrp-name'] = ifgrp.split(':')
+
+ tmp = self.get_generic_get_iter('net-port-ifgrp-get', key_fields=('node', 'ifgrp-name'),
+ attribute='net-ifgrp-info', query=query,
+ attributes_list_tag='attributes')
+ net_ifgrp_info = net_ifgrp_info.copy()
+ net_ifgrp_info.update(tmp)
+ return net_ifgrp_info
+
+    def get_generic_get_iter(self, call, attribute=None, key_fields=None, query=None, attributes_list_tag='attributes-list', fail_on_error=True):
+        '''Run a generic get-iter call and shape the records into a dict or list.
+
+        :param call: ZAPI name to invoke.
+        :param attribute: child tag holding each record; when None the raw record is used.
+        :param key_fields: str or tuple of tags used to build each record's unique key;
+                           when None, records are returned as a list instead of a dict.
+        :param query: optional dict of request children (e.g. max-records).
+        :param attributes_list_tag: tag containing the records, or None for non-iterator APIs.
+        :param fail_on_error: forwarded to call_api.
+        :return: dict keyed by key_fields, list when key_fields is None,
+                 {'error': message} on a tolerated API error, or None when no data.
+        '''
+        generic_call, error = self.call_api(call, attributes_list_tag, query, fail_on_error=fail_on_error)
+
+        if error is not None:
+            return {'error': error}
+
+        if generic_call is None:
+            return None
+
+        if attributes_list_tag is None:
+            attributes_list = generic_call
+        else:
+            attributes_list = generic_call.get_child_by_name(attributes_list_tag)
+
+        if attributes_list is None:
+            return None
+
+        # with key_fields we build a dict keyed on those fields, otherwise a plain list
+        if key_fields is None:
+            out = []
+        else:
+            out = {}
+
+        iteration = 0
+        for child in attributes_list.get_children():
+            iteration += 1
+            # convert the XML record into a plain dict
+            dic = xmltodict.parse(child.to_string(), xml_attribs=False)
+
+            if attribute is not None:
+                try:
+                    dic = dic[attribute]
+                except KeyError as exc:
+                    error_message = 'Error: attribute %s not found for %s, got: %s' % (str(exc), call, dic)
+                    self.module.fail_json(msg=error_message, exception=traceback.format_exc())
+
+            # round-trip through JSON to normalize xmltodict's OrderedDicts
+            info = json.loads(json.dumps(dic))
+            if self.translate_keys:
+                info = convert_keys(info)
+            if isinstance(key_fields, str):
+                try:
+                    unique_key = _finditem(dic, key_fields)
+                except KeyError as exc:
+                    error_message = 'Error: key %s not found for %s, got: %s' % (str(exc), call, repr(info))
+                    if self.error_flags['key_error']:
+                        self.module.fail_json(msg=error_message, exception=traceback.format_exc())
+                    # key_error tolerated: keep the record under a synthetic, unique key
+                    unique_key = 'Error_%d_key_not_found_%s' % (iteration, exc.args[0])
+            elif isinstance(key_fields, tuple):
+                try:
+                    # composite key: 'field1:field2:...'
+                    unique_key = ':'.join([_finditem(dic, el) for el in key_fields])
+                except KeyError as exc:
+                    error_message = 'Error: key %s not found for %s, got: %s' % (str(exc), call, repr(info))
+                    if self.error_flags['key_error']:
+                        self.module.fail_json(msg=error_message, exception=traceback.format_exc())
+                    unique_key = 'Error_%d_key_not_found_%s' % (iteration, exc.args[0])
+            else:
+                unique_key = None
+            if unique_key is not None:
+                out = out.copy()
+                out.update({unique_key: info})
+            else:
+                out.append(info)
+
+        if attributes_list_tag is None and key_fields is None:
+            if len(out) == 1:
+                # flatten the list as only 1 element is expected
+                out = out[0]
+            elif len(out) > 1:
+                # aggregate a list of dictionaries into a single dict
+                # make sure we only have dicts and no key duplication
+                dic = dict()
+                key_count = 0
+                for item in out:
+                    if not isinstance(item, dict):
+                        # abort if we don't see a dict - not sure this can happen with ZAPI
+                        key_count = -1
+                        break
+                    dic.update(item)
+                    key_count += len(item)
+                if key_count == len(dic):
+                    # no duplicates!
+                    out = dic
+
+        return out
+
+ def augment_subset(self, subset, info):
+ if subset == 'lun_info' and info:
+ for lun_info in info.values():
+ # the keys may have been converted, or not
+ serial = lun_info.get('serial_number') or lun_info.get('serial-number')
+ if serial:
+ hexlify = codecs.getencoder('hex')
+ # dictionaries are mutable
+ lun_info['serial_hex'] = to_text(hexlify(to_bytes(lun_info['serial_number']))[0])
+ lun_info['naa_id'] = 'naa.600a0980' + lun_info['serial_hex']
+ return info
+
+    def get_all(self, gather_subset):
+        '''Collect every requested subset and return the aggregated info dict.'''
+
+        self.netapp_info['ontapi_version'] = self.ontapi()
+        # ontap_version is a historical alias of ontapi_version (see deprecation below)
+        self.netapp_info['ontap_version'] = self.netapp_info['ontapi_version']
+
+        run_subset = self.get_subset(gather_subset, self.netapp_info['ontapi_version'])
+        if 'ontap_version' in gather_subset:
+            if netapp_utils.has_feature(self.module, 'deprecation_warning'):
+                self.netapp_info['deprecation_warning'] = 'ontap_version is deprecated, please use ontapi_version'
+        if 'help' in gather_subset:
+            # 'help' reports the supported subsets instead of collecting them
+            self.netapp_info['help'] = sorted(run_subset)
+        else:
+            # desired_attributes and query only make sense against a single ZAPI
+            if self.desired_attributes is not None:
+                if len(run_subset) > 1:
+                    self.module.fail_json(msg="desired_attributes option is only supported with a single subset")
+                self.sanitize_desired_attributes()
+            if self.query is not None:
+                if len(run_subset) > 1:
+                    self.module.fail_json(msg="query option is only supported with a single subset")
+                self.sanitize_query()
+            for subset in run_subset:
+                call = self.info_subsets[subset]
+                self.netapp_info[subset] = call['method'](**call['kwargs'])
+                # post-processing (e.g. LUN serial_hex/naa_id) mutates the data in place
+                self.augment_subset(subset, self.netapp_info[subset])
+
+        if self.warnings:
+            self.netapp_info['module_warnings'] = self.warnings
+
+        return self.netapp_info
+
+    def get_subset(self, gather_subset, version):
+        '''Compute the set of subsets to collect from the user selection.
+
+        :param gather_subset: list of subset names; supports 'all', 'help' and '!name' exclusion.
+        :param version: ONTAPI minor version string used to filter unsupported subsets.
+        :return: set of runnable subset names (a list when 'help' is requested).
+        '''
+        runable_subsets = set()
+        exclude_subsets = set()
+        # NOTE(review): version and min_version are compared as strings; lexicographic
+        # order matches numeric order only while both have the same digit count
+        # (e.g. '130' < '140') - confirm behavior for short version strings.
+        usable_subsets = [key for key in self.info_subsets if version >= self.info_subsets[key]['min_version']]
+        if 'help' in gather_subset:
+            return usable_subsets
+        for subset in gather_subset:
+            if subset == 'all':
+                runable_subsets.update(usable_subsets)
+                return runable_subsets
+            # a leading '!' requests exclusion of the subset
+            if subset.startswith('!'):
+                subset = subset[1:]
+                if subset == 'all':
+                    # '!all' means collect nothing
+                    return set()
+                exclude = True
+            else:
+                exclude = False
+
+            if subset not in usable_subsets:
+                if subset not in self.info_subsets.keys():
+                    self.module.fail_json(msg='Bad subset: %s' % subset)
+                self.module.fail_json(msg='Remote system at version %s does not support %s' %
+                                      (version, subset))
+
+            if exclude:
+                exclude_subsets.add(subset)
+            else:
+                runable_subsets.add(subset)
+
+        # only exclusions were given: start from everything usable
+        if not runable_subsets:
+            runable_subsets.update(usable_subsets)
+
+        runable_subsets.difference_update(exclude_subsets)
+
+        return runable_subsets
+
+ def get_summary(self, ontap_info):
+ for info in ontap_info:
+ if '_info' in info and ontap_info[info] is not None and isinstance(ontap_info[info], dict):
+ # don't summarize errors
+ if 'error' not in ontap_info[info]:
+ ontap_info[info] = ontap_info[info].keys()
+ return ontap_info
+
+ def sanitize_desired_attributes(self):
+ ''' add top 'desired-attributes' if absent
+ check for _ as more likely ZAPI does not take them
+ '''
+ da_key = 'desired-attributes'
+ if da_key not in self.desired_attributes:
+ desired_attributes = dict()
+ desired_attributes[da_key] = self.desired_attributes
+ self.desired_attributes = desired_attributes
+ self.check_for___in_keys(self.desired_attributes)
+
+ def sanitize_query(self):
+ ''' add top 'query' if absent
+ check for _ as more likely ZAPI does not take them
+ '''
+ key = 'query'
+ if key not in self.query:
+ query = dict()
+ query[key] = self.query
+ self.query = query
+ self.check_for___in_keys(self.query)
+
+ def check_for___in_keys(self, d_param):
+ '''Method to warn on underscore in a ZAPI tag'''
+ if isinstance(d_param, dict):
+ for key, val in d_param.items():
+ self.check_for___in_keys(val)
+ if '_' in key:
+ self.warnings.append("Underscore in ZAPI tag: %s, do you mean '-'?" % key)
+ elif isinstance(d_param, list):
+ for val in d_param:
+ self.check_for___in_keys(val)
+
+    def set_error_flags(self):
+        '''Validate the continue_on_error option and build self.error_flags.
+
+        self.error_flags maps each specific error category to True (fail the
+        module on that error) or False (continue and report the error inline).
+        '''
+        error_flags = self.module.params['continue_on_error']
+        generic_flags = ('always', 'never')
+        # 'always' and 'never' are exclusive of any other keyword
+        if len(error_flags) > 1:
+            for key in generic_flags:
+                if key in error_flags:
+                    self.module.fail_json(msg="%s needs to be the only keyword in 'continue_on_error' option." % key)
+        specific_flags = ('rpc_error', 'missing_vserver_api_error', 'key_error', 'other_error')
+        for key in error_flags:
+            if key not in generic_flags and key not in specific_flags:
+                self.module.fail_json(msg="%s is not a valid keyword in 'continue_on_error' option." % key)
+        self.error_flags = dict()
+        for flag in specific_flags:
+            # default: fail on every error category ('never' keeps this default)
+            self.error_flags[flag] = True
+            for key in error_flags:
+                if key == 'always' or key == flag:
+                    # continue on this category instead of failing
+                    self.error_flags[flag] = False
+
+    def apply(self):
+        '''Gather the requested subsets and exit the module with the results.'''
+        gather_subset = self.module.params['gather_subset']
+        if gather_subset is None:
+            gather_subset = ['all']
+        gf_all = self.get_all(gather_subset)
+        if self.module.params['summary']:
+            # replace each subset's data with only its keys
+            gf_all = self.get_summary(gf_all)
+        results = {'changed': False, 'ontap_info': gf_all}
+        if self.module.params['state'] is not None:
+            # 'state' is accepted for backward compatibility but has no effect
+            results['state'] = self.module.params['state']
+            results['warnings'] = "option 'state' is deprecated."
+            self.module.warn("option 'state' is deprecated.")
+        self.module.exit_json(**results)
+
+
+# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary
+def __finditem(obj, key):
+
+ if key is None:
+ # allows for a key not to be present
+ return "key_not_present"
+ if key in obj:
+ if obj[key] is None:
+ return "None"
+ return obj[key]
+ for dummy, val in obj.items():
+ if isinstance(val, dict):
+ item = __finditem(val, key)
+ if item is not None:
+ return item
+ return None
+
+
+def _finditem(obj, keys):
+ ''' if keys is a string, use it as a key
+ if keys is a tuple, stop on the first valid key
+ if no valid key is found, raise a KeyError '''
+
+ value = None
+ if isinstance(keys, str):
+ value = __finditem(obj, keys)
+ elif isinstance(keys, tuple):
+ for key in keys:
+ value = __finditem(obj, key)
+ if value is not None:
+ break
+ if value is not None:
+ return value
+ raise KeyError(str(keys))
+
+
+def convert_keys(d_param):
+ '''Method to convert hyphen to underscore'''
+
+ if isinstance(d_param, dict):
+ out = {}
+ for key, val in d_param.items():
+ val = convert_keys(val)
+ out[key.replace('-', '_')] = val
+ return out
+ elif isinstance(d_param, list):
+ return [convert_keys(val) for val in d_param]
+ return d_param
+
+
+def main():
+ '''Execute action'''
+ gf_obj = NetAppONTAPGatherInfo()
+ gf_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py
new file mode 100644
index 000000000..4f859adfd
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py
@@ -0,0 +1,1457 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_interface
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_interface
+short_description: NetApp ONTAP LIF configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Creating / deleting and modifying the LIF.
+
+options:
+ state:
+ description:
+ - Whether the specified interface should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ interface_name:
+ description:
+ - Specifies the logical interface (LIF) name.
+ required: true
+ type: str
+
+ home_node:
+ description:
+ - Specifies the LIF's home node.
+ - By default, the first node from the cluster is considered as home node.
+ type: str
+
+ current_node:
+ description:
+ - Specifies the LIF's current node.
+ - By default, this is home_node
+ type: str
+
+ home_port:
+ description:
+ - Specifies the LIF's home port.
+ - Requires ONTAP 9.8 or later with FC interfaces when using REST.
+ - With REST, at least one of home_port, home_node, or broadcast_domain is required to create IP interfaces.
+ - With REST, either home_port or current_port is required to create FC interfaces.
+ - With ZAPI, home_port is required to create IP and FC interfaces.
+ - home_port and broadcast_domain are mutually exclusive (REST and IP interfaces).
+ type: str
+
+ current_port:
+ description:
+ - Specifies the LIF's current port.
+ type: str
+
+ role:
+ description:
+ - Specifies the role of the LIF.
+ - When setting role as "intercluster" or "cluster", setting protocol is not supported.
+ - When creating a "cluster" role, the node name will appear as the prefix in the name of LIF.
+ - For example, if the specified name is clif and node name is node1, the LIF name appears in the ONTAP as node1_clif.
+ - Possible values are 'undef', 'cluster', 'data', 'node-mgmt', 'intercluster', 'cluster-mgmt'.
+ - Required when C(state=present) unless service_policy is present and ONTAP version is 9.8 or better.
+ - This option is deprecated in REST.
+ - With REST, the module tries to derive a service_policy and may error out.
+ type: str
+
+ address:
+ description:
+ - Specifies the LIF's IP address.
+ - ZAPI - Required when C(state=present) and is_ipv4_link_local if false and subnet_name is not set.
+ - REST - Required when C(state=present) and C(interface_type) is IP.
+ type: str
+
+ netmask:
+ description:
+ - Specifies the LIF's netmask.
+ - ZAPI - Required when C(state=present) and is_ipv4_link_local if false and subnet_name is not set.
+ - REST - Required when C(state=present) and C(interface_type) is IP.
+ type: str
+
+ is_ipv4_link_local:
+ description:
+      - Specifies that the LIFs are to acquire an ipv4 link local address.
+ - Use case for this is when creating Cluster LIFs to allow for auto assignment of ipv4 link local address.
+ - Not supported in REST
+ version_added: '20.1.0'
+ type: bool
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ - Required with ZAPI.
+ - Required with REST for FC interfaces (data vservers).
+ - Required with REST for SVM-scoped IP interfaces (data vservers).
+ - Invalid with REST for cluster-scoped IP interfaces.
+ - To help with transition from ZAPI to REST, vserver is ignored when the role is set to 'cluster', 'node-mgmt', 'intercluster', 'cluster-mgmt'.
+ - Remove this option to suppress the warning.
+ required: false
+ type: str
+
+ firewall_policy:
+ description:
+ - Specifies the firewall policy for the LIF.
+ - This option is deprecated in REST.
+ - With REST, the module tries to derive a service_policy and may error out.
+ type: str
+
+ failover_policy:
+ description:
+ - Specifies the failover policy for the LIF.
+      - When using REST, these values are mapped to 'home_port_only', 'default', 'home_node_only', 'sfo_partners_only', 'broadcast_domain_only'.
+ choices: ['disabled', 'system-defined', 'local-only', 'sfo-partner-only', 'broadcast-domain-wide']
+ type: str
+
+ failover_scope:
+ description:
+ - Specifies the failover scope for the LIF.
+ - REST only, and only for IP interfaces. Not supported for FC interfaces.
+ choices: ['home_port_only', 'default', 'home_node_only', 'sfo_partners_only', 'broadcast_domain_only']
+ type: str
+ version_added: '21.13.0'
+
+ failover_group:
+ description:
+ - Specifies the failover group for the LIF.
+ - Not supported with REST.
+ version_added: '20.1.0'
+ type: str
+
+ subnet_name:
+ description:
+ - Subnet where the IP interface address is allocated from.
+ - If the option is not used, the IP address and netmask need to be provided.
+ - With REST, ONTAP 9.11.1 or later is required.
+ - With REST, ipspace must be set.
+ version_added: 2.8.0
+ type: str
+
+ fail_if_subnet_conflicts:
+ description:
+ - Creating or updating an IP Interface fails if the specified IP address falls within the address range of a named subnet.
+ - Set this value to false to use the specified IP address and to assign the subnet owning that address to the interface.
+ - This option is only supported with REST and requires ONTAP 9.11.1 or later.
+ version_added: 22.2.0
+ type: bool
+
+ admin_status:
+ choices: ['up', 'down']
+ description:
+ - Specifies the administrative status of the LIF.
+ type: str
+
+ is_auto_revert:
+ description:
+ - If true, data LIF will revert to its home node under certain circumstances such as startup,
+ - and load balancing migration capability is disabled automatically
+ type: bool
+
+ force_subnet_association:
+ description:
+ - Set this to true to acquire the address from the named subnet and assign the subnet to the LIF.
+ - not supported with REST.
+ version_added: 2.9.0
+ type: bool
+
+ protocols:
+ description:
+ - Specifies the list of data protocols configured on the LIF. By default, the values in this element are nfs, cifs and fcache.
+ - Other supported protocols are iscsi and fcp. A LIF can be configured to not support any data protocols by specifying 'none'.
+ - Protocol values of none, iscsi, fc-nvme or fcp can't be combined with any other data protocol(s).
+ - address, netmask and firewall_policy parameters are not supported for 'fc-nvme' option.
+ - This option is ignored with REST, though it can be used to derive C(interface_type) or C(data_protocol).
+ type: list
+ elements: str
+
+ data_protocol:
+ description:
+ - The data protocol for which the FC interface is configured.
+ - Ignored with ZAPI or for IP interfaces.
+ - Required to create a FC type interface.
+ type: str
+ choices: ['fcp', 'fc_nvme']
+
+ dns_domain_name:
+ description:
+ - Specifies the unique, fully qualified domain name of the DNS zone of this LIF.
+ - Supported from ONTAP 9.9.0 or later in REST.
+ - Not supported for FC interfaces.
+ version_added: 2.9.0
+ type: str
+
+ listen_for_dns_query:
+ description:
+ - If True, this IP address will listen for DNS queries for the dnszone specified.
+ - Not supported with REST.
+ version_added: 2.9.0
+ type: bool
+
+ is_dns_update_enabled:
+ description:
+ - Specifies if DNS update is enabled for this LIF. Dynamic updates will be sent for this LIF if updates are enabled at Vserver level.
+ - Supported from ONTAP 9.9.1 or later in REST.
+ - Not supported for FC interfaces.
+ version_added: 2.9.0
+ type: bool
+
+ service_policy:
+ description:
+ - Starting with ONTAP 9.5, you can configure LIF service policies to identify a single service or a list of services that will use a LIF.
+ - In ONTAP 9.5, you can assign service policies only for LIFs in the admin SVM.
+ - In ONTAP 9.6, you can additionally assign service policies for LIFs in the data SVMs.
+ - When you specify a service policy for a LIF, you need not specify the data protocol and role for the LIF.
+ - NOTE that role is still required because of a ZAPI issue. This limitation is removed in ONTAP 9.8.
+ - Creating LIFs by specifying the role and data protocols is also supported.
+ version_added: '20.4.0'
+ type: str
+
+ from_name:
+ description: name of the interface to be renamed
+ type: str
+ version_added: 21.11.0
+
+ interface_type:
+ description:
+ - type of the interface.
+ - IP is assumed if address or netmask are present.
+ - IP interfaces includes cluster, intercluster, management, and NFS, CIFS, iSCSI interfaces.
+ - FC interfaces includes FCP and NVME-FC interfaces.
+ - ignored with ZAPI.
+    - required with REST, but may be derived from deprecated options like C(role), C(protocols), and C(firewall_policy).
+ type: str
+ choices: ['fc', 'ip']
+ version_added: 21.13.0
+
+ ipspace:
+ description:
+ - IPspace name is required with REST for cluster-scoped interfaces. It is optional with SVM scope.
+ - ignored with ZAPI.
+ - ignored for FC interface.
+ type: str
+ version_added: 21.13.0
+
+ broadcast_domain:
+ description:
+ - broadcast_domain name can be used to specify the location on an IP interface with REST, as an alternative to node or port.
+ - only used when creating an IP interface to select a node, ignored if the interface already exists.
+ - if the broadcast domain is not found, make sure to check the ipspace value.
+ - home_port and broadcast_domain are mutually exclusive. home_node may or may not be present.
+ - not supported for FC interface.
+ - ignored with ZAPI.
+ type: str
+ version_added: 21.21.0
+
+ ignore_zapi_options:
+ description:
+ - ignore unsupported options that should not be relevant.
+ - ignored with ZAPI.
+ choices: ['failover_group', 'force_subnet_association', 'listen_for_dns_query']
+ type: list
+ elements: str
+ default: ['force_subnet_association']
+ version_added: 21.13.0
+
+ probe_port:
+ description:
+ - Probe port for Cloud load balancer - only valid in the Azure environment.
+ - Not supported with ZAPI or with FC interfaces.
+ - Requires ONTAP 9.10.1 or later.
+ type: int
+ version_added: 22.1.0
+notes:
+ - REST support requires ONTAP 9.7 or later.
+ - Support check_mode.
+'''
+
+EXAMPLES = '''
+ - name: Create interface - ZAPI
+ netapp.ontap.na_ontap_interface:
+ state: present
+ interface_name: data2
+ home_port: e0d
+ home_node: laurentn-vsim1
+ role: data
+ protocols:
+ - nfs
+ - cifs
+ admin_status: up
+ failover_policy: local-only
+ firewall_policy: mgmt
+ is_auto_revert: true
+ address: 10.10.10.10
+ netmask: 255.255.255.0
+ force_subnet_association: false
+ dns_domain_name: test.com
+ listen_for_dns_query: true
+ is_dns_update_enabled: true
+ vserver: svm1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create data interface - REST - NAS
+ netapp.ontap.na_ontap_interface:
+ state: present
+ interface_name: data2
+ home_port: e0d
+ home_node: laurentn-vsim1
+ admin_status: up
+ failover_scope: home_node_only
+ service_policy: default-data-files
+ is_auto_revert: true
+ interface_type: ip
+ address: 10.10.10.10
+ netmask: 255.255.255.0
+ vserver: svm1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create cluster interface - ZAPI
+ netapp.ontap.na_ontap_interface:
+ state: present
+ interface_name: cluster_lif
+ home_port: e0a
+ home_node: cluster1-01
+ role: cluster
+ admin_status: up
+ is_auto_revert: true
+ is_ipv4_link_local: true
+ vserver: Cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create cluster interface - REST
+ netapp.ontap.na_ontap_interface:
+ state: present
+ interface_name: cluster_lif
+ home_port: e0a
+ home_node: cluster1-01
+ service_policy: default-cluster
+ admin_status: up
+ is_auto_revert: true
+ vserver: Cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Rename interface
+ netapp.ontap.na_ontap_interface:
+ state: present
+ from_name: ansibleSVM_lif
+ interface_name: ansibleSVM_lif01
+ vserver: ansibleSVM
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Migrate an interface
+ netapp.ontap.na_ontap_interface:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ vserver: ansible
+ https: true
+ validate_certs: false
+ state: present
+ interface_name: carchi_interface3
+ home_port: e0d
+ home_node: ansdev-stor-1
+ current_node: ansdev-stor-2
+ role: data
+ failover_policy: local-only
+ firewall_policy: mgmt
+ is_auto_revert: true
+ address: 10.10.10.12
+ netmask: 255.255.255.0
+ force_subnet_association: false
+ admin_status: up
+
+ - name: Delete interface
+ netapp.ontap.na_ontap_interface:
+ state: absent
+ interface_name: data2
+ vserver: svm1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = """
+
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, netapp_ipaddress
+
# ZAPI failover policy names and their REST failover scope equivalents.
# The two lists are index-aligned so that zip() can build the translation table.
FAILOVER_POLICIES = ['disabled', 'system-defined', 'local-only', 'sfo-partner-only', 'broadcast-domain-wide']
FAILOVER_SCOPES = ['home_port_only', 'default', 'home_node_only', 'sfo_partners_only', 'broadcast_domain_only']
# options that can never be used with REST
REST_UNSUPPORTED_OPTIONS = ['is_ipv4_link_local']
# options that are silently dropped with REST when listed in ignore_zapi_options
REST_IGNORABLE_OPTIONS = ['failover_group', 'force_subnet_association', 'listen_for_dns_query']
+
+
+class NetAppOntapInterface:
+ ''' object to describe interface info '''
+
    def __init__(self):
        """Build the argument spec, create the AnsibleModule, select REST vs ZAPI,
        and normalize address/netmask for the selected transport."""

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=[
                'present', 'absent'], default='present'),
            interface_name=dict(required=True, type='str'),
            interface_type=dict(type='str', choices=['fc', 'ip']),
            ipspace=dict(type='str'),
            broadcast_domain=dict(type='str'),
            home_node=dict(required=False, type='str', default=None),
            current_node=dict(required=False, type='str'),
            home_port=dict(required=False, type='str'),
            current_port=dict(required=False, type='str'),
            role=dict(required=False, type='str'),
            is_ipv4_link_local=dict(required=False, type='bool', default=None),
            address=dict(required=False, type='str'),
            netmask=dict(required=False, type='str'),
            vserver=dict(required=False, type='str'),
            firewall_policy=dict(required=False, type='str', default=None),
            failover_policy=dict(required=False, type='str', default=None,
                                 choices=['disabled', 'system-defined',
                                          'local-only', 'sfo-partner-only', 'broadcast-domain-wide']),
            failover_scope=dict(required=False, type='str', default=None,
                                choices=['home_port_only', 'default',
                                         'home_node_only', 'sfo_partners_only', 'broadcast_domain_only']),
            failover_group=dict(required=False, type='str'),
            admin_status=dict(required=False, choices=['up', 'down']),
            subnet_name=dict(required=False, type='str'),
            is_auto_revert=dict(required=False, type='bool', default=None),
            protocols=dict(required=False, type='list', elements='str'),
            data_protocol=dict(required=False, type='str', choices=['fc_nvme', 'fcp']),
            force_subnet_association=dict(required=False, type='bool', default=None),
            dns_domain_name=dict(required=False, type='str'),
            listen_for_dns_query=dict(required=False, type='bool'),
            is_dns_update_enabled=dict(required=False, type='bool'),
            service_policy=dict(required=False, type='str', default=None),
            from_name=dict(required=False, type='str'),
            ignore_zapi_options=dict(required=False, type='list', elements='str', default=['force_subnet_association'], choices=REST_IGNORABLE_OPTIONS),
            probe_port=dict(required=False, type='int'),
            fail_if_subnet_conflicts=dict(required=False, type='bool'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            mutually_exclusive=[
                ['subnet_name', 'address'],
                ['subnet_name', 'netmask'],
                ['is_ipv4_link_local', 'address'],
                ['is_ipv4_link_local', 'netmask'],
                ['is_ipv4_link_local', 'subnet_name'],
                ['failover_policy', 'failover_scope'],
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = OntapRestAPI(self.module)
        # ignorable options force a fallback to ZAPI unless explicitly listed in ignore_zapi_options
        unsupported_rest_properties = [key for key in REST_IGNORABLE_OPTIONS if key not in self.parameters['ignore_zapi_options']]
        unsupported_rest_properties.extend(REST_UNSUPPORTED_OPTIONS)
        if self.na_helper.safe_get(self.parameters, ['address']):
            self.parameters['address'] = netapp_ipaddress.validate_and_compress_ip_address(self.parameters['address'], self.module)
        # options that additionally require a minimum ONTAP version when used with REST
        partially_supported_rest_properties = [['dns_domain_name', (9, 9, 0)], ['is_dns_update_enabled', (9, 9, 1)], ['probe_port', (9, 10, 1)],
                                               ['subnet_name', (9, 11, 1)], ['fail_if_subnet_conflicts', (9, 11, 1)]]
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 7, 0):
            msg = 'REST requires ONTAP 9.7 or later for interface APIs.'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)

        if self.use_rest:
            self.cluster_nodes = None   # cached value to limit number of API calls.
            self.home_node = None       # cached value to limit number of API calls.
            self.map_failover_policy()
            self.validate_rest_input_parameters()
            # REST supports both netmask and cidr for ipv4 but cidr only for ipv6.
            if self.parameters.get('netmask'):
                self.parameters['netmask'] = str(netapp_ipaddress.netmask_to_netmask_length(self.parameters.get('address'),
                                                                                            self.parameters['netmask'], self.module))
        elif netapp_utils.has_netapp_lib() is False:
            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
        else:
            # ZAPI path: reject REST-only options and require vserver
            for option in ('probe_port', 'fail_if_subnet_conflicts'):
                if self.parameters.get(option) is not None:
                    self.module.fail_json(msg='Error option %s requires REST.' % option)
            if 'vserver' not in self.parameters:
                self.module.fail_json(msg='missing required argument with ZAPI: vserver')
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
            # ZAPI supports only netmask.
            if self.parameters.get('netmask'):
                self.parameters['netmask'] = netapp_ipaddress.netmask_length_to_netmask(self.parameters.get('address'),
                                                                                        self.parameters['netmask'], self.module)
+
+ def map_failover_policy(self):
+ if self.use_rest and 'failover_policy' in self.parameters:
+ mapping = dict(zip(FAILOVER_POLICIES, FAILOVER_SCOPES))
+ self.parameters['failover_scope'] = mapping[self.parameters['failover_policy']]
+
+ def set_interface_type(self, interface_type):
+ if 'interface_type' in self.parameters:
+ if self.parameters['interface_type'] != interface_type:
+ self.module.fail_json(msg="Error: mismatch between configured interface_type: %s and derived interface_type: %s."
+ % (self.parameters['interface_type'], interface_type))
+ else:
+ self.parameters['interface_type'] = interface_type
+
+ def derive_fc_data_protocol(self):
+ protocols = self.parameters.get('protocols')
+ if not protocols:
+ return
+ if len(protocols) > 1:
+ self.module.fail_json(msg="A single protocol entry is expected for FC interface, got %s." % protocols)
+ mapping = {'fc-nvme': 'fc_nvme', 'fc_nvme': 'fc_nvme', 'fcp': 'fcp'}
+ if protocols[0] not in mapping:
+ self.module.fail_json(msg="Unexpected protocol value %s." % protocols[0])
+ data_protocol = mapping[protocols[0]]
+ if 'data_protocol' in self.parameters and self.parameters['data_protocol'] != data_protocol:
+ self.module.fail_json(msg="Error: mismatch between configured data_protocol: %s and data_protocols: %s"
+ % (self.parameters['data_protocol'], protocols))
+ self.parameters['data_protocol'] = data_protocol
+
+ def derive_interface_type(self):
+ protocols = self.parameters.get('protocols')
+ if protocols in (None, ["none"]):
+ if self.parameters.get('role') in ('cluster', 'intercluster') or any(x in self.parameters for x in ('address', 'netmask', 'subnet_name')):
+ self.set_interface_type('ip')
+ return
+ protocol_types = set()
+ unknown_protocols = []
+ for protocol in protocols:
+ if protocol.lower() in ['fc-nvme', 'fcp']:
+ protocol_types.add('fc')
+ elif protocol.lower() in ['nfs', 'cifs', 'iscsi']:
+ protocol_types.add('ip')
+ elif protocol.lower() != 'none':
+ # none is an allowed value with ZAPI
+ unknown_protocols.append(protocol)
+ errors = []
+ if unknown_protocols:
+ errors.append('unexpected value(s) for protocols: %s' % unknown_protocols)
+ if len(protocol_types) > 1:
+ errors.append('incompatible value(s) for protocols: %s' % protocols)
+ if errors:
+ self.module.fail_json(msg='Error: unable to determine interface type, please set interface_type: %s' % (' - '.join(errors)))
+ if protocol_types:
+ self.set_interface_type(protocol_types.pop())
+ return
+
+ def derive_block_file_type(self, protocols):
+ block_p, file_p, fcp = False, False, False
+ if protocols is None:
+ fcp = self.parameters.get('interface_type') == 'fc'
+ return fcp, file_p, fcp
+ block_values, file_values = [], []
+ for protocol in protocols:
+ if protocol.lower() in ['fc-nvme', 'fcp', 'iscsi']:
+ block_p = True
+ block_values.append(protocol)
+ if protocol.lower() in ['fc-nvme', 'fcp']:
+ fcp = True
+ elif protocol.lower() in ['nfs', 'cifs']:
+ file_p = True
+ file_values.append(protocol)
+ if block_p and file_p:
+ self.module.fail_json(msg="Cannot use any of %s with %s" % (block_values, file_values))
+ return block_p, file_p, fcp
+
+ def get_interface_record_rest(self, if_type, query, fields):
+ if 'ipspace' in self.parameters and if_type == 'ip':
+ query['ipspace.name'] = self.parameters['ipspace']
+ return rest_generic.get_one_record(self.rest_api, self.get_net_int_api(if_type), query, fields)
+
+ def get_interface_records_rest(self, if_type, query, fields):
+ if 'ipspace' in self.parameters:
+ if if_type == 'ip':
+ query['ipspace.name'] = self.parameters['ipspace']
+ else:
+ self.module.warn("ipspace is ignored for FC interfaces.")
+ records, error = rest_generic.get_0_or_more_records(self.rest_api, self.get_net_int_api(if_type), query, fields)
+ if error and 'are available in precluster.' in error:
+ # in precluster mode, network APIs are not available!
+ self.module.fail_json(msg="This module cannot use REST in precluster mode, ZAPI can be forced with use_rest: never. Error: %s"
+ % error)
+ return records, error
+
+ def get_net_int_api(self, if_type=None):
+ if if_type is None:
+ if_type = self.parameters.get('interface_type')
+ if if_type is None:
+ self.module.fail_json(msg='Error: missing option "interface_type (or could not be derived)')
+ return 'network/%s/interfaces' % if_type
+
+ def find_interface_record(self, records, home_node, name):
+ full_name = "%s_%s" % (home_node, name) if home_node is not None else name
+ full_name_records = [record for record in records if record['name'] == full_name]
+ if len(full_name_records) > 1:
+ self.module.fail_json(msg='Error: multiple records for: %s - %s' % (full_name, full_name_records))
+ return full_name_records[0] if full_name_records else None
+
    def find_exact_match(self, records, name):
        """ with vserver, we expect an exact match
            but ONTAP transforms cluster interface names by prepending the home_node
        """
        if 'vserver' in self.parameters:
            # SVM-scoped query used an exact name, so at most one record is expected
            if len(records) > 1:
                self.module.fail_json(msg='Error: unexpected records for name: %s, vserver: %s - %s'
                                      % (name, self.parameters['vserver'], records))
            return records[0] if records else None
        # since our queries included a '*', we expect multiple records
        # an exact match is <home_node>_<name> or <name>.
        # is there an exact match on name only?
        record = self.find_interface_record(records, None, name)
        # now matching with home_node as a prefix
        if 'home_node' in self.parameters and self.parameters['home_node'] != 'localhost':
            home_record = self.find_interface_record(records, self.parameters['home_node'], name)
            if record and home_record:
                # both forms matched - prefer the node-prefixed record but tell the user
                self.module.warn('Found both %s, selecting %s' % ([record['name'] for record in (record, home_record)], home_record['name']))
        else:
            # home_node not usable - try every known cluster node as a prefix
            home_node_records = []
            for home_node in self.get_cluster_node_names_rest():
                home_record = self.find_interface_record(records, home_node, name)
                if home_record:
                    home_node_records.append(home_record)
            if len(home_node_records) > 1:
                self.module.fail_json(msg='Error: multiple matches for name: %s: %s. Set home_node parameter.'
                                      % (name, [record['name'] for record in home_node_records]))
            home_record = home_node_records[0] if home_node_records else None
            if record and home_node_records:
                # ambiguous: a plain-name match and a node-prefixed match both exist
                self.module.fail_json(msg='Error: multiple matches for name: %s: %s. Set home_node parameter.'
                                      % (name, [record['name'] for record in (record, home_record)]))
        if home_record:
            record = home_record
        if record and name == self.parameters['interface_name'] and name != record['name']:
            # fix name, otherwise we'll attempt a rename :(
            self.parameters['interface_name'] = record['name']
            self.module.warn('adjusting name from %s to %s' % (name, record['name']))
        return record
+
    def get_interface_rest(self, name):
        """
        Return details about the interface
        :param:
            name : Name of the interface

        :return: Details about the interface. None if not found.
        :rtype: dict
        """
        self.derive_interface_type()
        if_type = self.parameters.get('interface_type')
        # with a vserver the same exact-name query works for IP and FC;
        # without one (cluster scope) only IP interfaces are searched, with a wildcard
        if 'vserver' in self.parameters:
            query_ip = {
                'name': name,
                'svm.name': self.parameters['vserver']
            }
            query_fc = query_ip
        else:
            query_ip = {
                # ONTAP renames cluster interfaces, use a * to find them
                'name': '*%s' % name,
                'scope': 'cluster'
            }
            query_fc = None
        fields = 'name,location,uuid,enabled,svm.name'
        fields_fc = fields + ',data_protocol'
        fields_ip = fields + ',ip,service_policy'
        # only request optional fields the user cares about (they may not be supported on older ONTAP)
        if self.parameters.get('dns_domain_name'):
            fields_ip += ',dns_zone'
        if self.parameters.get('probe_port') is not None:
            fields_ip += ',probe_port'
        if self.parameters.get('is_dns_update_enabled') is not None:
            fields_ip += ',ddns_enabled'
        if self.parameters.get('subnet_name') is not None:
            fields_ip += ',subnet'
        records, error, records2, error2 = None, None, None, None
        if if_type in [None, 'ip']:
            records, error = self.get_interface_records_rest('ip', query_ip, fields_ip)
        if if_type in [None, 'fc'] and query_fc:
            records2, error2 = self.get_interface_records_rest('fc', query_fc, fields_fc)
        if records and records2:
            # cannot decide between an IP and a FC interface with the same name
            msg = 'Error fetching interface %s - found duplicate entries, please indicate interface_type.' % name
            msg += ' - ip interfaces: %s' % records
            msg += ' - fc interfaces: %s' % records2
            self.module.fail_json(msg=msg)
        if error is None and error2 is not None and records:
            # ignore error on fc if ip interface is found
            error2 = None
        if error2 is None and error is not None and records2:
            # ignore error on ip if fc interface is found
            error = None
        if error or error2:
            errors = [to_native(err) for err in (error, error2) if err]
            self.module.fail_json(msg='Error fetching interface details for %s: %s' % (name, ' - '.join(errors)),
                                  exception=traceback.format_exc())
        # record which type actually matched so later calls use the right endpoint
        if records:
            self.set_interface_type('ip')
        if records2:
            self.set_interface_type('fc')
            records = records2

        record = self.find_exact_match(records, name) if records else None
        return self.dict_from_record(record)
+
+ def dict_from_record(self, record):
+ if not record:
+ return None
+ # Note: broadcast_domain is CreateOnly
+ return_value = {
+ 'interface_name': record['name'],
+ 'interface_type': self.parameters['interface_type'],
+ 'uuid': record['uuid'],
+ 'admin_status': 'up' if record['enabled'] else 'down',
+ }
+ # home_node/home_port not present for FC on ONTAP 9.7.
+ if self.na_helper.safe_get(record, ['location', 'home_node', 'name']):
+ return_value['home_node'] = record['location']['home_node']['name']
+ if self.na_helper.safe_get(record, ['location', 'home_port', 'name']):
+ return_value['home_port'] = record['location']['home_port']['name']
+ if self.na_helper.safe_get(record, ['svm', 'name']):
+ return_value['vserver'] = record['svm']['name']
+ if 'data_protocol' in record:
+ return_value['data_protocol'] = record['data_protocol']
+ if 'auto_revert' in record['location']:
+ return_value['is_auto_revert'] = record['location']['auto_revert']
+ if 'failover' in record['location']:
+ return_value['failover_scope'] = record['location']['failover']
+ # if interface_attributes.get_child_by_name('failover-group'):
+ # return_value['failover_group'] = interface_attributes['failover-group']
+ if self.na_helper.safe_get(record, ['ip', 'address']):
+ return_value['address'] = netapp_ipaddress.validate_and_compress_ip_address(record['ip']['address'], self.module)
+ if self.na_helper.safe_get(record, ['ip', 'netmask']) is not None:
+ return_value['netmask'] = record['ip']['netmask']
+ if self.na_helper.safe_get(record, ['service_policy', 'name']):
+ return_value['service_policy'] = record['service_policy']['name']
+ if self.na_helper.safe_get(record, ['location', 'node', 'name']):
+ return_value['current_node'] = record['location']['node']['name']
+ if self.na_helper.safe_get(record, ['location', 'port', 'name']):
+ return_value['current_port'] = record['location']['port']['name']
+ if self.na_helper.safe_get(record, ['dns_zone']):
+ return_value['dns_domain_name'] = record['dns_zone']
+ if self.na_helper.safe_get(record, ['probe_port']) is not None:
+ return_value['probe_port'] = record['probe_port']
+ if 'ddns_enabled' in record:
+ return_value['is_dns_update_enabled'] = record['ddns_enabled']
+ if self.na_helper.safe_get(record, ['subnet', 'name']):
+ return_value['subnet_name'] = record['subnet']['name']
+ return return_value
+
+ def get_node_port(self, uuid):
+ record, error = self.get_interface_record_rest(self.parameters['interface_type'], {'uuid': uuid}, 'location')
+ if error or not record:
+ return None, None, error
+ node = self.na_helper.safe_get(record, ['location', 'node', 'name'])
+ port = self.na_helper.safe_get(record, ['location', 'port', 'name'])
+ return node, port, None
+
    def get_interface(self, name=None):
        """
        Return details about the interface.

        With REST, delegates to get_interface_rest; otherwise queries
        net-interface-get-iter over ZAPI, filtered on name and vserver.

        :param name: Name of the interface (defaults to self.parameters['interface_name']).
        :return: Details about the interface. None if not found.
        :rtype: dict
        """
        if name is None:
            name = self.parameters['interface_name']
        if self.use_rest:
            return self.get_interface_rest(name)

        # ZAPI path: build a query on interface-name + vserver
        interface_info = netapp_utils.zapi.NaElement('net-interface-get-iter')
        interface_attributes = netapp_utils.zapi.NaElement('net-interface-info')
        interface_attributes.add_new_child('interface-name', name)
        interface_attributes.add_new_child('vserver', self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(interface_attributes)
        interface_info.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(interface_info, True)
        except netapp_utils.zapi.NaApiError as exc:
            self.module.fail_json(msg='Error fetching interface details for %s: %s' %
                                  (name, to_native(exc)),
                                  exception=traceback.format_exc())
        return_value = None
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) >= 1:

            interface_attributes = result.get_child_by_name('attributes-list'). \
                get_child_by_name('net-interface-info')
            # attributes always reported; failover-policy is normalized to use dashes
            return_value = {
                'interface_name': name,
                'admin_status': interface_attributes['administrative-status'],
                'home_port': interface_attributes['home-port'],
                'home_node': interface_attributes['home-node'],
                'failover_policy': interface_attributes['failover-policy'].replace('_', '-'),
            }
            # optional attributes, only copied when present in the ZAPI answer
            if interface_attributes.get_child_by_name('is-auto-revert'):
                return_value['is_auto_revert'] = (interface_attributes['is-auto-revert'] == 'true')
            if interface_attributes.get_child_by_name('failover-group'):
                return_value['failover_group'] = interface_attributes['failover-group']
            if interface_attributes.get_child_by_name('address'):
                return_value['address'] = netapp_ipaddress.validate_and_compress_ip_address(interface_attributes['address'], self.module)
            if interface_attributes.get_child_by_name('netmask'):
                return_value['netmask'] = interface_attributes['netmask']
            if interface_attributes.get_child_by_name('firewall-policy'):
                return_value['firewall_policy'] = interface_attributes['firewall-policy']
            # NOTE(review): this compares the child NaElement (not its string content) to
            # ('none', None) — verify this matches NaElement's equality semantics as intended.
            if interface_attributes.get_child_by_name('dns-domain-name') not in ('none', None):
                return_value['dns_domain_name'] = interface_attributes['dns-domain-name']
            else:
                return_value['dns_domain_name'] = None
            if interface_attributes.get_child_by_name('listen-for-dns-query'):
                return_value['listen_for_dns_query'] = self.na_helper.get_value_for_bool(True, interface_attributes[
                    'listen-for-dns-query'])
            if interface_attributes.get_child_by_name('is-dns-update-enabled'):
                return_value['is_dns_update_enabled'] = self.na_helper.get_value_for_bool(True, interface_attributes[
                    'is-dns-update-enabled'])
            if interface_attributes.get_child_by_name('is-ipv4-link-local'):
                return_value['is_ipv4_link_local'] = self.na_helper.get_value_for_bool(True, interface_attributes[
                    'is-ipv4-link-local'])
            if interface_attributes.get_child_by_name('service-policy'):
                return_value['service_policy'] = interface_attributes['service-policy']
            if interface_attributes.get_child_by_name('current-node'):
                return_value['current_node'] = interface_attributes['current-node']
            if interface_attributes.get_child_by_name('current-port'):
                return_value['current_port'] = interface_attributes['current-port']
        return return_value
+
+ @staticmethod
+ def set_options(options, parameters):
+ """ set attributes for create or modify """
+ if parameters.get('role') is not None:
+ options['role'] = parameters['role']
+ if parameters.get('home_node') is not None:
+ options['home-node'] = parameters['home_node']
+ if parameters.get('home_port') is not None:
+ options['home-port'] = parameters['home_port']
+ if parameters.get('subnet_name') is not None:
+ options['subnet-name'] = parameters['subnet_name']
+ if parameters.get('address') is not None:
+ options['address'] = parameters['address']
+ if parameters.get('netmask') is not None:
+ options['netmask'] = parameters['netmask']
+ if parameters.get('failover_policy') is not None:
+ options['failover-policy'] = parameters['failover_policy']
+ if parameters.get('failover_group') is not None:
+ options['failover-group'] = parameters['failover_group']
+ if parameters.get('firewall_policy') is not None:
+ options['firewall-policy'] = parameters['firewall_policy']
+ if parameters.get('is_auto_revert') is not None:
+ options['is-auto-revert'] = 'true' if parameters['is_auto_revert'] else 'false'
+ if parameters.get('admin_status') is not None:
+ options['administrative-status'] = parameters['admin_status']
+ if parameters.get('force_subnet_association') is not None:
+ options['force-subnet-association'] = 'true' if parameters['force_subnet_association'] else 'false'
+ if parameters.get('dns_domain_name') is not None:
+ options['dns-domain-name'] = parameters['dns_domain_name']
+ if parameters.get('listen_for_dns_query') is not None:
+ options['listen-for-dns-query'] = 'true' if parameters['listen_for_dns_query'] else 'false'
+ if parameters.get('is_dns_update_enabled') is not None:
+ options['is-dns-update-enabled'] = 'true' if parameters['is_dns_update_enabled'] else 'false'
+ if parameters.get('is_ipv4_link_local') is not None:
+ options['is-ipv4-link-local'] = 'true' if parameters['is_ipv4_link_local'] else 'false'
+ if parameters.get('service_policy') is not None:
+ options['service-policy'] = parameters['service_policy']
+
    def fix_errors(self, options, errors):
        '''ignore role and firewall_policy if a service_policy can be safely derived

        ZAPI-only options (role, firewall_policy) land in *errors* for REST; when an
        equivalent REST service_policy can be inferred from protocols, it is written
        into *options* and the offending keys are removed from *errors* in place.
        '''
        # block_p/file_p/fcp: protocol classification from a sibling helper
        # (derive_block_file_type) based on self.parameters['protocols']
        block_p, file_p, fcp = self.derive_block_file_type(self.parameters.get('protocols'))
        if 'role' in errors:
            fixed = False
            if errors['role'] == 'data' and errors.get('firewall_policy', 'data') == 'data':
                if fcp:
                    # service_policy is not supported for FC interfaces
                    fixed = True
                elif file_p and self.parameters.get('service_policy', 'default-data-files') == 'default-data-files':
                    options['service_policy'] = 'default-data-files'
                    fixed = True
                elif block_p and self.parameters.get('service_policy', 'default-data-blocks') == 'default-data-blocks':
                    options['service_policy'] = 'default-data-blocks'
                    fixed = True
            if errors['role'] == 'data' and errors.get('firewall_policy') == 'mgmt':
                options['service_policy'] = 'default-management'
                fixed = True
            if errors['role'] == 'intercluster' and errors.get('firewall_policy') in [None, 'intercluster']:
                options['service_policy'] = 'default-intercluster'
                fixed = True
            if errors['role'] == 'cluster' and errors.get('firewall_policy') in [None, 'mgmt']:
                options['service_policy'] = 'default-cluster'
                fixed = True
            if errors['role'] == 'data' and fcp and errors.get('firewall_policy') is None:
                # ignore role for FC interface
                fixed = True
            if fixed:
                # both legacy options were translated (or safely ignored)
                errors.pop('role')
                errors.pop('firewall_policy', None)
+
    def set_options_rest(self, parameters):
        """Build REST payloads for create or modify.

        :param parameters: dict of desired attributes; None means use self.parameters.
        :return: tuple (options, migrate_options, errors) where
                 - options: body for POST/PATCH,
                 - migrate_options: location changes (node/port) applied via a
                   separate PATCH (migration),
                 - errors: options with no REST equivalent, to be resolved or
                   reported by the caller (see fix_errors).
        """
        def add_ip(options, key, value):
            # nest value under the 'ip' sub-object
            if 'ip' not in options:
                options['ip'] = {}
            options['ip'][key] = value

        def add_location(options, key, value, node=None):
            # nest value under the 'location' sub-object; named resources are
            # wrapped as {'name': value}, and ports also carry their node
            if 'location' not in options:
                options['location'] = {}
            # Note: broadcast_domain is CreateOnly
            if key in ['home_node', 'home_port', 'node', 'port', 'broadcast_domain']:
                options['location'][key] = {'name': value}
            else:
                options['location'][key] = value
            if key in ['home_port', 'port']:
                options['location'][key]['node'] = {'name': node}

        def get_node_for_port(parameters, pkey):
            # pick the node that qualifies a port, falling back to the first cluster node
            if pkey == 'current_port':
                return parameters.get('current_node') or self.parameters.get('home_node') or self.get_home_node_for_cluster()
            elif pkey == 'home_port':
                return self.parameters.get('home_node') or self.get_home_node_for_cluster()
            else:
                return None

        options, migrate_options, errors = {}, {}, {}

        # We normally create using home_port, and migrate to current.
        # But for FC, home_port is not supported on 9.7 or earlier!
        create_with_current = False
        if parameters is None:
            parameters = self.parameters
            if self.parameters['interface_type'] == 'fc' and 'home_port' not in self.parameters:
                create_with_current = True

        # module option name -> REST field name (common to ip and fc)
        mapping_params_to_rest = {
            'admin_status': 'enabled',
            'interface_name': 'name',
            'vserver': 'svm.name',
            # LOCATION
            'current_port': 'port',
            'home_port': 'home_port'
        }
        if self.parameters['interface_type'] == 'ip':
            mapping_params_to_rest.update({
                'ipspace': 'ipspace.name',
                'service_policy': 'service_policy',
                'dns_domain_name': 'dns_zone',
                'is_dns_update_enabled': 'ddns_enabled',
                'probe_port': 'probe_port',
                'subnet_name': 'subnet.name',
                'fail_if_subnet_conflicts': 'fail_if_subnet_conflicts',
                # IP
                'address': 'address',
                'netmask': 'netmask',
                # LOCATION
                'broadcast_domain': 'broadcast_domain',
                'failover_scope': 'failover',
                'is_auto_revert': 'auto_revert',
                # home_node/current_node supported only in ip interfaces.
                'home_node': 'home_node',
                'current_node': 'node'
            })
        if self.parameters['interface_type'] == 'fc':
            mapping_params_to_rest['data_protocol'] = 'data_protocol'
        ip_keys = ('address', 'netmask')
        location_keys = ('home_port', 'home_node', 'current_port', 'current_node', 'failover_scope', 'is_auto_revert', 'broadcast_domain')

        # don't add node location when port structure is already present
        has_home_port, has_current_port = False, False
        if 'home_port' in parameters:
            has_home_port = True
        if 'current_port' in parameters:
            has_current_port = True

        for pkey, rkey in mapping_params_to_rest.items():
            if pkey in parameters:
                if pkey == 'admin_status':
                    # REST uses a boolean 'enabled' instead of 'up'/'down'
                    options[rkey] = parameters[pkey] == 'up'
                elif pkey in ip_keys:
                    add_ip(options, rkey, parameters[pkey])
                elif pkey in location_keys:
                    if has_home_port and pkey == 'home_node':
                        continue
                    if has_current_port and pkey == 'current_node':
                        continue
                    # current node/port go to the migrate body, unless FC on 9.7
                    # forces creation with the current location
                    dest = migrate_options if rkey in ('node', 'port') and not create_with_current else options
                    add_location(dest, rkey, parameters[pkey], get_node_for_port(parameters, pkey))
                else:
                    options[rkey] = parameters[pkey]

        # options with no REST equivalent; the caller decides whether they can be fixed
        keys_in_error = ('role', 'failover_group', 'firewall_policy', 'force_subnet_association',
                         'listen_for_dns_query', 'is_ipv4_link_local')
        for pkey in keys_in_error:
            if pkey in parameters:
                errors[pkey] = parameters[pkey]

        return options, migrate_options, errors
+
+ def set_protocol_option(self, required_keys):
+ """ set protocols for create """
+ if self.parameters.get('protocols') is None:
+ return None
+ data_protocols_obj = netapp_utils.zapi.NaElement('data-protocols')
+ for protocol in self.parameters.get('protocols'):
+ if protocol.lower() in ['fc-nvme', 'fcp']:
+ if 'address' in required_keys:
+ required_keys.remove('address')
+ if 'home_port' in required_keys:
+ required_keys.remove('home_port')
+ if 'netmask' in required_keys:
+ required_keys.remove('netmask')
+ not_required_params = set(['address', 'netmask', 'firewall_policy'])
+ if not not_required_params.isdisjoint(set(self.parameters.keys())):
+ self.module.fail_json(msg='Error: Following parameters for creating interface are not supported'
+ ' for data-protocol fc-nvme: %s' % ', '.join(not_required_params))
+ data_protocols_obj.add_new_child('data-protocol', protocol)
+ return data_protocols_obj
+
    def get_cluster_node_names_rest(self):
        ''' get cluster node names, but the cluster may not exist yet
            return:
              empty list if the cluster cannot be reached
              a list of nodes

            The fetched records are cached in self.cluster_nodes, so the
            REST call is made at most once per module run.
        '''
        if self.cluster_nodes is None:
            records, error = rest_generic.get_0_or_more_records(self.rest_api, 'cluster/nodes', fields='name,uuid,cluster_interfaces')
            if error:
                self.module.fail_json(msg='Error fetching cluster node info: %s' % to_native(error),
                                      exception=traceback.format_exc())
            self.cluster_nodes = records or []
        return [record['name'] for record in self.cluster_nodes]
+
    def get_home_node_for_cluster(self):
        ''' get the first node name from this cluster

        Caches the answer in self.home_node with REST.  Returns None when the
        cluster cannot be queried (e.g. pre-cluster mode).
        '''
        if self.use_rest:
            if not self.home_node:
                nodes = self.get_cluster_node_names_rest()
                if nodes:
                    self.home_node = nodes[0]
            return self.home_node

        # ZAPI path: list cluster nodes and return the first one
        get_node = netapp_utils.zapi.NaElement('cluster-node-get-iter')
        attributes = {
            'query': {
                'cluster-node-info': {}
            }
        }
        get_node.translate_struct(attributes)
        try:
            result = self.server.invoke_successfully(get_node, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as exc:
            # 13003: ZAPI is not enabled in pre-cluster mode - not an error here
            if str(exc.code) == '13003' or exc.message == 'ZAPI is not enabled in pre-cluster mode.':
                return None
            self.module.fail_json(msg='Error fetching node for interface %s: %s' %
                                  (self.parameters['interface_name'], to_native(exc)),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            attributes = result.get_child_by_name('attributes-list')
            return attributes.get_child_by_name('cluster-node-info').get_child_content('node-name')
        return None
+
    def validate_rest_input_parameters(self, action=None):
        """Validate (and sanitize) module parameters for the REST code path.

        :param action: 'create' or 'modify'; create enables extra checks.
        Mutates self.parameters: drops vserver for non-data roles and removes
        keys listed in ignore_zapi_options.  Fails the module on hard errors.
        """
        if 'vserver' in self.parameters and self.parameters.get('role') in ['cluster', 'intercluster', 'node-mgmt', 'cluster-mgmt']:
            # REST only supports DATA SVMs
            del self.parameters['vserver']
            self.module.warn('Ignoring vserver with REST for non data SVM.')
        errors = []
        if action == 'create':
            # scope must be identifiable: ipspace (cluster scope) or vserver (svm scope)
            if 'vserver' not in self.parameters and 'ipspace' not in self.parameters:
                errors.append('ipspace name must be provided if scope is cluster, or vserver for svm scope.')
            if self.parameters['interface_type'] == 'fc':
                # these options only exist for IP interfaces
                unsupported_fc_options = ['broadcast_domain', 'dns_domain_name', 'is_dns_update_enabled', 'probe_port', 'subnet_name',
                                          'fail_if_subnet_conflicts']
                used_unsupported_fc_options = [option for option in unsupported_fc_options if option in self.parameters]
                if used_unsupported_fc_options:
                    plural = 's' if len(used_unsupported_fc_options) > 1 else ''
                    errors.append('%s option%s only supported for IP interfaces: %s, interface_type: %s'
                                  % (', '.join(used_unsupported_fc_options), plural, self.parameters.get('interface_name'), self.parameters['interface_type']))
            if self.parameters.get('home_port') and self.parameters.get('broadcast_domain'):
                errors.append('home_port and broadcast_domain are mutually exclusive for creating: %s'
                              % self.parameters.get('interface_name'))
        if self.parameters.get('role') == "intercluster" and self.parameters.get('protocols') is not None:
            errors.append('Protocol cannot be specified for intercluster role, failed to create interface.')
        if errors:
            self.module.fail_json(msg='Error: %s' % ' '.join(errors))

        # silently drop ZAPI-only options the user asked to ignore with REST
        ignored_keys = []
        for key in self.parameters.get('ignore_zapi_options', []):
            if key in self.parameters:
                del self.parameters[key]
                ignored_keys.append(key)
        if ignored_keys:
            self.module.warn("Ignoring %s" % ', '.join(ignored_keys))
        # if role is intercluster, protocol cannot be specified
+
    def validate_required_parameters(self, keys):
        '''
        Validate if required parameters for create or modify are present.
        Parameter requirement might vary based on given data-protocol.

        :param keys: set of required parameter names; the pseudo-key
            'broadcast_domain_home_port_or_home_node' is removed in place
            once its at-least-one-of condition is checked.
        :return: None (fails the module when validation errors are found)
        '''
        home_node = self.parameters.get('home_node') or self.get_home_node_for_cluster()
        # validate if mandatory parameters are present for create or modify
        errors = []
        if self.use_rest and home_node is None and self.parameters.get('home_port') is not None:
            errors.append('Cannot guess home_node, home_node is required when home_port is present with REST.')
        if 'broadcast_domain_home_port_or_home_node' in keys:
            # pseudo-key: any one of the three options satisfies the requirement
            if all(x not in self.parameters for x in ['broadcast_domain', 'home_port', 'home_node']):
                errors.append("At least one of 'broadcast_domain', 'home_port', 'home_node' is required to create an IP interface.")
            keys.remove('broadcast_domain_home_port_or_home_node')
        if not keys.issubset(set(self.parameters.keys())):
            errors.append('Missing one or more required parameters for creating interface: %s.' % ', '.join(keys))
        if 'interface_type' in keys and 'interface_type' in self.parameters:
            if self.parameters['interface_type'] not in ['fc', 'ip']:
                errors.append('unexpected value for interface_type: %s.' % self.parameters['interface_type'])
            elif self.parameters['interface_type'] == 'fc':
                # FC-specific restrictions; home_port handling also depends on ONTAP version
                if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0):
                    if 'home_port' in self.parameters:
                        errors.append("'home_port' is not supported for FC interfaces with 9.7, use 'current_port', avoid home_node.")
                    if 'home_node' in self.parameters:
                        self.module.warn("Avoid 'home_node' with FC interfaces with 9.7, use 'current_node'.")
                if 'vserver' not in self.parameters:
                    errors.append("A data 'vserver' is required for FC interfaces.")
                if 'service_policy' in self.parameters:
                    errors.append("'service_policy' is not supported for FC interfaces.")
                if 'role' in self.parameters and self.parameters.get('role') != 'data':
                    errors.append("'role' is deprecated, and 'data' is the only value supported for FC interfaces: found %s." % self.parameters.get('role'))
                if 'probe_port' in self.parameters:
                    errors.append("'probe_port' is not supported for FC interfaces.")
        if errors:
            self.module.fail_json(msg='Error: %s' % ' '.join(errors))
+
    def validate_modify_parameters(self, body):
        """ Only the following keys can be modified:
            enabled, ip, location, name, service_policy

        :param body: REST PATCH body; fails the module if it contains any
            key outside the modifiable set below.
        """
        bad_keys = [key for key in body if key not in ['enabled', 'ip', 'location', 'name', 'service_policy', 'dns_zone', 'ddns_enabled', 'subnet.name',
                                                       'fail_if_subnet_conflicts']]
        if bad_keys:
            plural = 's' if len(bad_keys) > 1 else ''
            self.module.fail_json(msg='The following option%s cannot be modified: %s' % (plural, ', '.join(bad_keys)))
+
    def build_rest_body(self, modify=None):
        """Validate parameters and build the REST body for create or modify.

        :param modify: dict of attributes to change (modify), or None (create).
        :return: tuple (body, migrate_body) - see set_options_rest.
        """
        required_keys = set(['interface_type'])    # python 2.6 syntax
        # running validation twice, as interface_type dictates the second set of requirements
        self.validate_required_parameters(required_keys)
        self.validate_rest_input_parameters(action='modify' if modify else 'create')
        if modify:
            # force the value of fail_if_subnet_conflicts as it is writeOnly
            if self.parameters.get('fail_if_subnet_conflicts') is not None:
                modify['fail_if_subnet_conflicts'] = self.parameters['fail_if_subnet_conflicts']
        else:
            # create: assemble the full set of required options per interface type
            required_keys = set()
            required_keys.add('interface_name')
            if self.parameters['interface_type'] == 'fc':
                self.derive_fc_data_protocol()
                required_keys.add('data_protocol')
                if 'home_port' not in self.parameters:
                    # home_port is not supported with 9.7
                    if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0):
                        required_keys.add('home_port')
                    else:
                        required_keys.add('current_port')
            if self.parameters['interface_type'] == 'ip':
                if 'subnet_name' not in self.parameters:
                    required_keys.add('address')
                    required_keys.add('netmask')
                required_keys.add('broadcast_domain_home_port_or_home_node')
            self.validate_required_parameters(required_keys)
        body, migrate_body, errors = self.set_options_rest(modify)
        # try to translate ZAPI-only options into REST equivalents
        self.fix_errors(body, errors)
        if errors:
            self.module.fail_json(msg='Error %s interface, unsupported options: %s'
                                  % ('modifying' if modify else 'creating', str(errors)))
        if modify:
            self.validate_modify_parameters(body)
        return body, migrate_body
+
    def create_interface_rest(self, body):
        ''' calling REST to create interface

        :param body: REST POST body.
        :return: the created records (return_records=true), used by the caller
            to retrieve the new interface's uuid for a follow-up migration.
        '''
        query = {'return_records': 'true'}
        records, error = rest_generic.post_async(self.rest_api, self.get_net_int_api(), body, query)
        if error:
            self.module.fail_json(msg='Error creating interface %s: %s' % (self.parameters['interface_name'], to_native(error)),
                                  exception=traceback.format_exc())
        return records
+
    def create_interface(self, body):
        ''' calling zapi to create interface

        With REST, delegates to create_interface_rest with the prebuilt body.
        With ZAPI, validates requirements (relaxed for FC protocols and when
        service_policy replaces role) and invokes net-interface-create.
        '''
        if self.use_rest:
            return self.create_interface_rest(body)

        required_keys = set(['role', 'home_port'])
        if self.parameters.get('subnet_name') is None and self.parameters.get('is_ipv4_link_local') is None:
            required_keys.add('address')
            required_keys.add('netmask')
        if self.parameters.get('service_policy') is not None:
            # service_policy supersedes role
            required_keys.remove('role')
        data_protocols_obj = self.set_protocol_option(required_keys)
        self.validate_required_parameters(required_keys)

        options = {'interface-name': self.parameters['interface_name'],
                   'vserver': self.parameters['vserver']}
        NetAppOntapInterface.set_options(options, self.parameters)
        interface_create = netapp_utils.zapi.NaElement.create_node_with_children('net-interface-create', **options)
        if data_protocols_obj is not None:
            interface_create.add_child_elem(data_protocols_obj)
        try:
            self.server.invoke_successfully(interface_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as exc:
            # msg: "Error Creating interface ansible_interface: NetApp API failed. Reason - 17:A LIF with the same name already exists"
            # error 17 means the LIF already exists: treat as idempotent no-op
            if to_native(exc.code) == "17":
                self.na_helper.changed = False
            else:
                self.module.fail_json(msg='Error Creating interface %s: %s' %
                                      (self.parameters['interface_name'], to_native(exc)),
                                      exception=traceback.format_exc())
+
    def delete_interface_rest(self, uuid):
        ''' calling REST to delete interface

        :param uuid: UUID of the interface to delete.
        '''
        dummy, error = rest_generic.delete_async(self.rest_api, self.get_net_int_api(), uuid)
        if error:
            self.module.fail_json(msg='Error deleting interface %s: %s' % (self.parameters['interface_name'], to_native(error)),
                                  exception=traceback.format_exc())
+
    def delete_interface(self, current_status, current_interface, uuid):
        ''' calling zapi to delete interface

        :param current_status: current admin status ('up'/'down'); an 'up'
            interface is brought down first when required.
        :param current_interface: interface type with REST ('fc'/'ip'), None with ZAPI.
        :param uuid: interface UUID (REST only).
        '''
        if current_status == 'up':
            self.parameters['admin_status'] = 'down'
            if self.use_rest:
                # only for fc interfaces disable is required before delete.
                if current_interface == 'fc':
                    self.modify_interface_rest(uuid, {'enabled': False})
            else:
                self.modify_interface({'admin_status': 'down'})

        if self.use_rest:
            return self.delete_interface_rest(uuid)

        interface_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'net-interface-delete', **{'interface-name': self.parameters['interface_name'],
                                       'vserver': self.parameters['vserver']})
        try:
            self.server.invoke_successfully(interface_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as exc:
            self.module.fail_json(msg='Error deleting interface %s: %s' %
                                  (self.parameters['interface_name'], to_native(exc)),
                                  exception=traceback.format_exc())
+
    def modify_interface_rest(self, uuid, body):
        ''' calling REST to modify interface

        No-op when *body* is empty.
        '''
        if not body:
            return
        dummy, error = rest_generic.patch_async(self.rest_api, self.get_net_int_api(), uuid, body)
        if error:
            self.module.fail_json(msg='Error modifying interface %s: %s' % (self.parameters['interface_name'], to_native(error)),
                                  exception=traceback.format_exc())
+
    def migrate_interface_rest(self, uuid, body):
        """Migrate the interface to the node/port in *body*, retrying until it lands.

        Issues the PATCH repeatedly (up to 12 times, 10s apart, i.e. up to 120s)
        and polls the current location until it matches the desired node/port.
        """
        # curiously, we sometimes need to send the request twice (well, always in my experience)
        errors = []
        desired_node = self.na_helper.safe_get(body, ['location', 'node', 'name'])
        desired_port = self.na_helper.safe_get(body, ['location', 'port', 'name'])
        for __ in range(12):
            self.modify_interface_rest(uuid, body)
            time.sleep(10)
            node, port, error = self.get_node_port(uuid)
            if error is None and desired_node in [None, node] and desired_port in [None, port]:
                # migration confirmed (unspecified node/port counts as a match)
                return
            if errors or error is not None:
                errors.append(str(error))
        if errors:
            self.module.fail_json(msg='Errors waiting for migration to complete: %s' % ' - '.join(errors))
        else:
            self.module.warn('Failed to confirm interface is migrated after 120 seconds')
+
    def modify_interface(self, modify, uuid=None, body=None):
        """
        Modify the interface.

        :param modify: dict of attributes to change (ZAPI path).
        :param uuid: interface UUID (REST only).
        :param body: prebuilt PATCH body (REST only).
        """
        if self.use_rest:
            return self.modify_interface_rest(uuid, body)

        # Current_node and current_port don't exist in modify only migrate, so we need to remove them from the list
        migrate = {}
        modify_options = dict(modify)
        if modify_options.get('current_node') is not None:
            migrate['current_node'] = modify_options.pop('current_node')
        if modify_options.get('current_port') is not None:
            migrate['current_port'] = modify_options.pop('current_port')
        if modify_options:
            options = {'interface-name': self.parameters['interface_name'],
                       'vserver': self.parameters['vserver']
                       }
            NetAppOntapInterface.set_options(options, modify_options)
            interface_modify = netapp_utils.zapi.NaElement.create_node_with_children('net-interface-modify', **options)
            try:
                self.server.invoke_successfully(interface_modify, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as err:
                self.module.fail_json(msg='Error modifying interface %s: %s' %
                                      (self.parameters['interface_name'], to_native(err)),
                                      exception=traceback.format_exc())
        # if home node has been changed we need to migrate the interface
        if migrate:
            self.migrate_interface()
+
    def migrate_interface(self):
        """Migrate the LIF to current_node/current_port over ZAPI (net-interface-migrate).

        Requires current_node in self.parameters; current_port is optional.
        The request is deliberately sent twice (see comment below).
        """
        # ZAPI
        interface_migrate = netapp_utils.zapi.NaElement('net-interface-migrate')
        if self.parameters.get('current_node') is None:
            self.module.fail_json(msg='current_node must be set to migrate')
        interface_migrate.add_new_child('destination-node', self.parameters['current_node'])
        if self.parameters.get('current_port') is not None:
            interface_migrate.add_new_child('destination-port', self.parameters['current_port'])
        interface_migrate.add_new_child('lif', self.parameters['interface_name'])
        interface_migrate.add_new_child('vserver', self.parameters['vserver'])
        try:
            self.server.invoke_successfully(interface_migrate, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error migrating %s: %s'
                                  % (self.parameters['current_node'], to_native(error)),
                                  exception=traceback.format_exc())
        # like with REST, the migration may not be completed on the first try!
        # just blindly do it twice.
        try:
            self.server.invoke_successfully(interface_migrate, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error migrating %s: %s'
                                  % (self.parameters['current_node'], to_native(error)),
                                  exception=traceback.format_exc())
+
    def rename_interface(self):
        """Rename the interface from from_name to interface_name over ZAPI
        (net-interface-rename).  With REST, rename happens via a regular modify."""
        options = {
            'interface-name': self.parameters['from_name'],
            'new-name': self.parameters['interface_name'],
            'vserver': self.parameters['vserver']
        }
        interface_rename = netapp_utils.zapi.NaElement.create_node_with_children('net-interface-rename', **options)
        try:
            self.server.invoke_successfully(interface_rename, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error renaming %s to %s: %s'
                                  % (self.parameters['from_name'], self.parameters['interface_name'], to_native(error)),
                                  exception=traceback.format_exc())
+
    def get_action(self):
        """Determine what needs to be done to reach the desired state.

        :return: tuple (cd_action, modify, rename, current) where cd_action is
            'create'/'delete'/None, modify is the dict of changed attributes,
            rename indicates a ZAPI rename is needed, and current is the
            existing interface record (or None).
        """
        modify, rename, new_name = None, None, None
        current = self.get_interface()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action == 'create' and self.parameters.get('from_name'):
            # create by renaming existing interface
            # self.parameters['interface_name'] may be overriden in self.get_interface so save a copy
            new_name = self.parameters['interface_name']
            old_interface = self.get_interface(self.parameters['from_name'])
            rename = self.na_helper.is_rename_action(old_interface, current)
            if rename is None:
                self.module.fail_json(msg='Error renaming interface %s: no interface with from_name %s.'
                                      % (self.parameters['interface_name'], self.parameters['from_name']))
            if rename:
                # treat as a modify of the old interface rather than a create
                current = old_interface
                cd_action = None
        if cd_action is None:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            if rename and self.use_rest:
                # REST renames through a regular PATCH on 'name'
                rename = False
                if 'interface_name' not in modify:
                    modify['interface_name'] = new_name
            if modify and modify.get('home_node') == 'localhost':
                # 'localhost' is a placeholder, not a real home_node change
                modify.pop('home_node')
                if not modify:
                    self.na_helper.changed = False

        return cd_action, modify, rename, current
+
    def build_rest_payloads(self, cd_action, modify, current):
        """Build the REST bodies for the pending action (no-op with ZAPI).

        :return: tuple (uuid, body, migrate_body); body is the POST/PATCH body
            and migrate_body the follow-up location PATCH, either may be None.
        """
        body, migrate_body = None, None
        uuid = current.get('uuid') if current else None
        if self.use_rest:
            if cd_action == 'create':
                body, migrate_body = self.build_rest_body()
            elif modify:
                # fc interface supports only home_port and port in POST/PATCH.
                # add home_port and current_port in modify for home_node and current_node respectively to form home_port/port.
                if modify.get('home_node') and not modify.get('home_port') and self.parameters['interface_type'] == 'fc':
                    modify['home_port'] = current['home_port']
                # above will modify home_node of fc interface, after modify if requires to update current_node, it will error out for fc interface.
                # migrate not supported for fc interface.
                if modify.get('current_node') and not modify.get('current_port') and self.parameters['interface_type'] == 'fc':
                    modify['current_port'] = current['current_port']
                body, migrate_body = self.build_rest_body(modify)
            if (modify or cd_action == 'delete') and uuid is None:
                self.module.fail_json(msg='Error, expecting uuid in existing record')
            desired_home_port = self.na_helper.safe_get(body, ['location', 'home_port'])
            desired_current_port = self.na_helper.safe_get(migrate_body, ['location', 'port'])
            # if try to modify both home_port and current_port in FC interface and if its equal, make migrate_body None
            if self.parameters.get('interface_type') == 'fc' and desired_home_port and desired_current_port and desired_home_port == desired_current_port:
                migrate_body = None
        return uuid, body, migrate_body
+
    def apply(self):
        ''' calling all interface features

        Orchestrates the full run: compute the action, build REST payloads
        (even in check_mode, to surface validation errors), then execute
        rename/create/delete/modify and any pending migration.
        '''
        cd_action, modify, rename, current = self.get_action()
        # build the payloads even in check_mode, to perform validations
        uuid, body, migrate_body = self.build_rest_payloads(cd_action, modify, current)
        if self.na_helper.changed and not self.module.check_mode:
            if rename and not self.use_rest:
                self.rename_interface()
                modify.pop('interface_name')
            if cd_action == 'create':
                records = self.create_interface(body)
                if records:
                    # needed for migrate after creation
                    uuid = records['records'][0]['uuid']
            elif cd_action == 'delete':
                # interface type returned in REST but not in ZAPI.
                interface_type = current['interface_type'] if self.use_rest else None
                self.delete_interface(current['admin_status'], interface_type, uuid)
            elif modify:
                self.modify_interface(modify, uuid, body)
            if migrate_body:
                # for 9.7 or earlier, allow modify current node/port for fc interface.
                if self.parameters.get('interface_type') == 'fc' and self.use_rest and self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0):
                    self.module.fail_json(msg="Error: cannot migrate FC interface")
                self.migrate_interface_rest(uuid, migrate_body)

        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Module entry point: instantiate the interface manager and apply the desired state."""
    interface = NetAppOntapInterface()
    interface.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py
new file mode 100644
index 000000000..d8cf8144e
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+"""
+this is ipspace module
+
+# (c) 2018, NTT Europe Ltd.
+# (c) 2020-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: na_ontap_ipspace
+
+short_description: NetApp ONTAP Manage an ipspace
+
+version_added: 2.9.0
+
+author:
+ - NTTE Storage Engineering (@vicmunoz) <cl.eng.sto@ntt.eu>
+
+description:
+ - Manage an ipspace for an Ontap Cluster
+
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+
+options:
+ state:
+ description:
+ - Whether the specified ipspace should exist or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - The name of the ipspace to manage
+ required: true
+ type: str
+ from_name:
+ description:
+ - Name of the existing ipspace to be renamed to name
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create ipspace
+ netapp.ontap.na_ontap_ipspace:
+ state: present
+ name: ansibleIpspace
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete ipspace
+ netapp.ontap.na_ontap_ipspace:
+ state: absent
+ name: ansibleIpspace
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Rename ipspace
+ netapp.ontap.na_ontap_ipspace:
+ state: present
+ name: ansibleIpspace_newname
+ from_name: ansibleIpspace
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapIpspace(object):
+ '''Class with ipspace operations'''
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.uuid = None
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def ipspace_get_iter(self, name):
+ """
+ Return net-ipspaces-get-iter query results
+ :param name: Name of the ipspace
+ :return: NaElement if ipspace found, None otherwise
+ """
+ ipspace_get_iter = netapp_utils.zapi.NaElement('net-ipspaces-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-ipspaces-info', **{'ipspace': name})
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ ipspace_get_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(
+ ipspace_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 14636 denotes an ipspace does not exist
+ # Error 13073 denotes an ipspace not found
+ if to_native(error.code) == "14636" or to_native(error.code) == "13073":
+ return None
+ self.module.fail_json(
+ msg="Error getting ipspace %s: %s" % (name, to_native(error)),
+ exception=traceback.format_exc())
+ return result
+
+ def get_ipspace(self, name=None):
+ """
+ Fetch details if ipspace exists
+ :param name: Name of the ipspace to be fetched
+ :return:
+ Dictionary of current details if ipspace found
+ None if ipspace is not found
+ """
+ if name is None:
+ name = self.parameters['name']
+ if self.use_rest:
+ api = 'network/ipspaces'
+ query = {'name': name, 'fields': 'uuid'}
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg="Error getting ipspace %s: %s" % (name, error))
+ if record:
+ self.uuid = record['uuid']
+ return record
+ return None
+ else:
+ ipspace_get = self.ipspace_get_iter(name)
+ if (ipspace_get and ipspace_get.get_child_by_name('num-records') and
+ int(ipspace_get.get_child_content('num-records')) >= 1):
+ current_ipspace = dict()
+ attr_list = ipspace_get.get_child_by_name('attributes-list')
+ attr = attr_list.get_child_by_name('net-ipspaces-info')
+ current_ipspace['name'] = attr.get_child_content('ipspace')
+ return current_ipspace
+ return None
+
+ def create_ipspace(self):
+ """
+ Create ipspace
+ :return: None
+ """
+ if self.use_rest:
+ api = 'network/ipspaces'
+ body = {'name': self.parameters['name']}
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg="Error provisioning ipspace %s: %s" % (self.parameters['name'], error))
+ else:
+ ipspace_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-ipspaces-create', **{'ipspace': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(ipspace_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg="Error provisioning ipspace %s: %s" % (
+ self.parameters['name'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_ipspace(self):
+ """
+ Destroy ipspace
+ :return: None
+ """
+ if self.use_rest:
+ api = 'network/ipspaces'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid)
+ if error:
+ self.module.fail_json(msg="Error removing ipspace %s: %s" % (self.parameters['name'], error))
+ else:
+ ipspace_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-ipspaces-destroy',
+ **{'ipspace': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(
+ ipspace_destroy, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg="Error removing ipspace %s: %s" % (
+ self.parameters['name'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_ipspace(self):
+ """
+ Rename an ipspace
+ :return: Nothing
+ """
+ if self.use_rest:
+ api = 'network/ipspaces'
+ body = {'name': self.parameters['name']}
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
+ if error:
+ self.module.fail_json(msg="Error renaming ipspace %s: %s" % (self.parameters['from_name'], error))
+ else:
+ ipspace_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-ipspaces-rename',
+ **{'ipspace': self.parameters['from_name'],
+ 'new-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(ipspace_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg="Error renaming ipspace %s: %s" % (
+ self.parameters['from_name'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Apply action to the ipspace
+ :return: Nothing
+ """
+ current = self.get_ipspace()
+ # rename and create are mutually exclusive
+ rename, modify = None, None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action == 'create' and self.parameters.get('from_name'):
+ rename = self.na_helper.is_rename_action(
+ self.get_ipspace(self.parameters['from_name']),
+ current)
+ if rename is None:
+ self.module.fail_json(
+ msg="Error renaming: ipspace %s does not exist" %
+ self.parameters['from_name'])
+ # reset cd_action to None and add name to modify to indicate rename.
+ cd_action = None
+ modify = {'name': self.parameters['name']}
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if rename:
+ self.rename_ipspace()
+ elif cd_action == 'create':
+ self.create_ipspace()
+ elif cd_action == 'delete':
+ self.delete_ipspace()
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Execute action
+ :return: nothing
+ """
+ obj = NetAppOntapIpspace()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py
new file mode 100644
index 000000000..e5a30f970
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py
@@ -0,0 +1,329 @@
+#!/usr/bin/python
+
+# (c) 2017-2022, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_iscsi
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_iscsi
+short_description: NetApp ONTAP manage iSCSI service
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - create, delete, start, stop iSCSI service on SVM.
+
+options:
+
+ state:
+ description:
+ - Whether the service should be present or deleted.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ service_state:
+ description:
+ - Whether the specified service should be running.
+ choices: ['started', 'stopped']
+ type: str
+
+ vserver:
+ required: true
+ type: str
+ description:
+ - The name of the vserver to use.
+
+ target_alias:
+ type: str
+ description:
+ - The iSCSI target alias of the iSCSI service.
+ - The target alias can contain one (1) to 128 characters and feature any printable character except space (" ").
+ - A PATCH request with an empty alias ("") clears the alias.
+ - This option is REST only.
+ version_added: 22.2.0
+
+'''
+
+EXAMPLES = """
+- name: Create iscsi service
+ netapp.ontap.na_ontap_iscsi:
+ state: present
+ service_state: started
+ vserver: ansibleVServer
+ target_alias: ansibleSVM
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Stop Iscsi service
+ netapp.ontap.na_ontap_iscsi:
+ state: present
+ service_state: stopped
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Delete Iscsi service
+ netapp.ontap.na_ontap_iscsi:
+ state: absent
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapISCSI:
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ service_state=dict(required=False, type='str', choices=['started', 'stopped'], default=None),
+ vserver=dict(required=True, type='str'),
+ target_alias=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.uuid = None
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # Set up Rest API
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+ self.unsupported_zapi_properties = ['target_alias']
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ for unsupported_zapi_property in self.unsupported_zapi_properties:
+ if self.parameters.get(unsupported_zapi_property) is not None:
+ msg = "Error: %s option is not supported with ZAPI. It can only be used with REST." % unsupported_zapi_property
+ self.module.fail_json(msg=msg)
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ self.safe_strip()
+
+ def safe_strip(self):
+ """ strip the left and right spaces of string """
+ if 'target_alias' in self.parameters:
+ self.parameters['target_alias'] = self.parameters['target_alias'].strip()
+ return
+
+ def get_iscsi(self):
+ """
+ Return details about the iscsi service
+
+ :return: Details about the iscsi service
+ :rtype: dict
+ """
+ if self.use_rest:
+ return self.get_iscsi_rest()
+ iscsi_info = netapp_utils.zapi.NaElement('iscsi-service-get-iter')
+ iscsi_attributes = netapp_utils.zapi.NaElement('iscsi-service-info')
+
+ iscsi_attributes.add_new_child('vserver', self.parameters['vserver'])
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(iscsi_attributes)
+
+ iscsi_info.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(iscsi_info, True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error finding iscsi service in %s: %s" % (self.parameters['vserver'], to_native(e)),
+ exception=traceback.format_exc())
+ return_value = None
+
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+
+ iscsi = result.get_child_by_name(
+ 'attributes-list').get_child_by_name('iscsi-service-info')
+ if iscsi:
+ is_started = 'started' if iscsi.get_child_content('is-available') == 'true' else 'stopped'
+ return_value = {
+ 'service_state': is_started
+ }
+ return return_value
+
+ def create_iscsi_service(self):
+ """
+ Create iscsi service and start if requested
+ """
+ if self.use_rest:
+ return self.create_iscsi_service_rest()
+
+ iscsi_service = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'iscsi-service-create',
+ **{'start': 'true' if self.parameters.get('service_state', 'started') == 'started' else 'false'
+ })
+
+ try:
+ self.server.invoke_successfully(iscsi_service, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error creating iscsi service: % s" % (to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_iscsi_service(self, current):
+ """
+ Delete the iscsi service
+ """
+ if self.use_rest:
+ return self.delete_iscsi_service_rest(current)
+
+ if current['service_state'] == 'started':
+ self.stop_iscsi_service()
+
+ iscsi_delete = netapp_utils.zapi.NaElement.create_node_with_children('iscsi-service-destroy')
+
+ try:
+ self.server.invoke_successfully(iscsi_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error deleting iscsi service on vserver %s: %s" % (self.parameters['vserver'], to_native(e)),
+ exception=traceback.format_exc())
+
+ def stop_iscsi_service(self):
+ """
+ Stop iscsi service
+ """
+
+ iscsi_stop = netapp_utils.zapi.NaElement.create_node_with_children('iscsi-service-stop')
+
+ try:
+ self.server.invoke_successfully(iscsi_stop, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error Stopping iscsi service on vserver %s: %s" % (self.parameters['vserver'], to_native(e)),
+ exception=traceback.format_exc())
+
+ def start_iscsi_service(self):
+ """
+ Start iscsi service
+ """
+ iscsi_start = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'iscsi-service-start')
+
+ try:
+ self.server.invoke_successfully(iscsi_start, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error starting iscsi service on vserver %s: %s" % (self.parameters['vserver'], to_native(e)),
+ exception=traceback.format_exc())
+
+ def get_iscsi_rest(self):
+ api = 'protocols/san/iscsi/services'
+ query = {'svm.name': self.parameters['vserver']}
+ fields = 'svm,enabled,target.alias'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+ if error:
+ self.module.fail_json(msg="Error finding iscsi service in %s: %s" % (self.parameters['vserver'], error))
+ if record:
+ self.uuid = record['svm']['uuid']
+ is_started = 'started' if record['enabled'] else 'stopped'
+ return {
+ 'service_state': is_started,
+ 'target_alias': "" if self.na_helper.safe_get(record, ['target', 'alias']) is None else record['target']['alias'],
+ }
+ return None
+
+ def create_iscsi_service_rest(self):
+ api = 'protocols/san/iscsi/services'
+ body = {
+ 'svm.name': self.parameters['vserver'],
+ 'enabled': True if self.parameters.get('service_state', 'started') == 'started' else False
+ }
+ if 'target_alias' in self.parameters:
+ body['target.alias'] = self.parameters['target_alias']
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg="Error creating iscsi service: % s" % error)
+
+ def delete_iscsi_service_rest(self, current):
+ # stop iscsi service before delete.
+ if current['service_state'] == 'started':
+ self.start_or_stop_iscsi_service_rest('stopped')
+ api = 'protocols/san/iscsi/services'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid)
+ if error:
+ self.module.fail_json(msg="Error deleting iscsi service on vserver %s: %s" % (self.parameters["vserver"], error))
+
+ def start_or_stop_iscsi_service_rest(self, service_state):
+ api = 'protocols/san/iscsi/services'
+ enabled = True if service_state == 'started' else False
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, {'enabled': enabled})
+ if error:
+ self.module.fail_json(msg="Error %s iscsi service on vserver %s: %s" % (service_state[0:5] + 'ing', self.parameters["vserver"], error))
+
+ def modify_iscsi_service_state_and_target(self, modify):
+ body = {}
+ api = 'protocols/san/iscsi/services'
+ if 'target_alias' in modify:
+ body['target.alias'] = self.parameters['target_alias']
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
+ if error:
+ self.module.fail_json(msg="Error modifying iscsi service target alias on vserver %s: %s" % (self.parameters["vserver"], error))
+
+ def modify_iscsi_service_rest(self, modify, current):
+ if self.use_rest:
+ if 'service_state' in modify:
+ self.start_or_stop_iscsi_service_rest(modify['service_state'])
+ if 'target_alias' in modify:
+ self.modify_iscsi_service_state_and_target(modify)
+ else:
+ if 'service_state' in modify:
+ if modify['service_state'] == 'started':
+ self.start_iscsi_service()
+ else:
+ self.stop_iscsi_service()
+
+ def apply(self):
+ current = self.get_iscsi()
+ modify = None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_iscsi_service()
+ elif cd_action == 'delete':
+ self.delete_iscsi_service(current)
+ elif modify:
+ self.modify_iscsi_service_rest(modify, current)
+ # TODO: include other details about the lun (size, etc.)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+ self.module.exit_json(**result)
+
+
+def main():
+ v = NetAppOntapISCSI()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py
new file mode 100644
index 000000000..1b0cda134
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+
+# (c) 2019-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_iscsi_security
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete/Modify iscsi security.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_iscsi_security
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified initiator should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ auth_type:
+ description:
+ - Specifies the authentication type.
+ choices: ['chap', 'none', 'deny']
+ type: str
+ initiator:
+ description:
+ - Specifies the name of the initiator.
+ required: true
+ type: str
+ address_ranges:
+ description:
+ - May be a single IPv4 or IPv6 address or a range containing a startaddress and an end address.
+ - The start and end addresses themselves are included in the range.
+ - If not present, the initiator is allowed to log in from any IP address.
+ type: list
+ elements: str
+ inbound_username:
+ description:
+ - Inbound CHAP username.
+ - Required for CHAP. A null username is not allowed.
+ type: str
+ inbound_password:
+ description:
+ - Inbound CHAP user password.
+ - Cannot be modified. To change the password, delete and re-create the initiator.
+ type: str
+ outbound_username:
+ description:
+ - Outbound CHAP user name.
+ type: str
+ outbound_password:
+ description:
+ - Outbound CHAP user password.
+ - Cannot be modified. To change the password, delete and re-create the initiator.
+ type: str
+short_description: "NetApp ONTAP Manage iscsi security."
+version_added: "19.11.0"
+'''
+
+EXAMPLES = """
+ - name: create
+ netapp.ontap.na_ontap_iscsi_security:
+ hostname: 0.0.0.0
+ username: user
+ password: pass
+ vserver: test_svm
+ state: present
+ initiator: eui.9999956789abcdef
+ inbound_username: user_1
+ inbound_password: password_1
+ outbound_username: user_2
+ outbound_password: password_2
+ auth_type: chap
+ address_ranges: 10.125.10.0-10.125.10.10,10.125.193.78
+
+ - name: modify outbound username
+ netapp.ontap.na_ontap_iscsi_security:
+ hostname: 0.0.0.0
+ username: user
+ password: pass
+ vserver: test_svm
+ state: present
+ initiator: eui.9999956789abcdef
+ inbound_username: user_1
+ inbound_password: password_1
+ outbound_username: user_out_3
+ outbound_password: password_3
+ auth_type: chap
+ address_ranges: 10.125.10.0-10.125.10.10,10.125.193.78
+
+ - name: modify address
+ netapp.ontap.na_ontap_iscsi_security:
+ hostname: 0.0.0.0
+ username: user
+ password: pass
+ vserver: test_svm
+ state: present
+ initiator: eui.9999956789abcdef
+ address_ranges: 10.125.193.90,10.125.10.20-10.125.10.30
+"""
+
+RETURN = """
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class NetAppONTAPIscsiSecurity:
+ """
+ Class with iscsi security methods
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ auth_type=dict(required=False, type='str', choices=['chap', 'none', 'deny']),
+ inbound_password=dict(required=False, type='str', no_log=True),
+ inbound_username=dict(required=False, type='str'),
+ initiator=dict(required=True, type='str'),
+ address_ranges=dict(required=False, type='list', elements='str'),
+ outbound_password=dict(required=False, type='str', no_log=True),
+ outbound_username=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ['auth_type', 'chap', ['inbound_username', 'inbound_password']]
+ ],
+ required_together=[
+ ['inbound_username', 'inbound_password'],
+ ['outbound_username', 'outbound_password'],
+ ],
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_iscsi_security:', 9, 6)
+ self.uuid = self.get_svm_uuid()
+
+ def get_initiator(self):
+ """
+ Get current initiator.
+ :return: dict of current initiator details.
+ """
+ params = {'fields': '*', 'initiator': self.parameters['initiator']}
+ api = 'protocols/san/iscsi/credentials'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching initiator: %s" % error)
+ if message['num_records'] > 0:
+ record = message['records'][0]
+ initiator_details = {'auth_type': record['authentication_type']}
+ if initiator_details['auth_type'] == 'chap':
+ if record['chap'].get('inbound'):
+ initiator_details['inbound_username'] = record['chap']['inbound']['user']
+ else:
+ initiator_details['inbound_username'] = None
+ if record['chap'].get('outbound'):
+ initiator_details['outbound_username'] = record['chap']['outbound']['user']
+ else:
+ initiator_details['outbound_username'] = None
+ if record.get('initiator_address'):
+ if record['initiator_address'].get('ranges'):
+ ranges = []
+ for address_range in record['initiator_address']['ranges']:
+ if address_range['start'] == address_range['end']:
+ ranges.append(address_range['start'])
+ else:
+ ranges.append(address_range['start'] + '-' + address_range['end'])
+ initiator_details['address_ranges'] = ranges
+ else:
+ initiator_details['address_ranges'] = []
+ else:
+ initiator_details['address_ranges'] = []
+ return initiator_details
+
+ def create_initiator(self):
+ """
+ Create initiator.
+ :return: None.
+ """
+ body = {
+ 'authentication_type': self.parameters['auth_type'],
+ 'initiator': self.parameters['initiator']
+ }
+
+ if self.parameters['auth_type'] == 'chap':
+ chap_info = {'inbound': {'user': self.parameters['inbound_username'], 'password': self.parameters['inbound_password']}}
+
+ if self.parameters.get('outbound_username'):
+ chap_info['outbound'] = {'user': self.parameters['outbound_username'], 'password': self.parameters['outbound_password']}
+ body['chap'] = chap_info
+ address_info = self.get_address_info(self.parameters.get('address_ranges'))
+ if address_info is not None:
+ body['initiator_address'] = {'ranges': address_info}
+ body['svm'] = {'uuid': self.uuid, 'name': self.parameters['vserver']}
+ api = 'protocols/san/iscsi/credentials'
+ dummy, error = self.rest_api.post(api, body)
+ if error is not None:
+ self.module.fail_json(msg="Error on creating initiator: %s" % error)
+
+ def delete_initiator(self):
+ """
+ Delete initiator.
+ :return: None.
+ """
+ api = 'protocols/san/iscsi/credentials/{0}/{1}'.format(self.uuid, self.parameters['initiator'])
+ dummy, error = self.rest_api.delete(api)
+ if error is not None:
+ self.module.fail_json(msg="Error on deleting initiator: %s" % error)
+
+ def modify_initiator(self, modify, current):
+ """
+ Modify initiator.
+ :param modify: dict of modify attributes.
+ :return: None.
+ """
+ body = {}
+ use_chap = False
+ chap_update = False
+ chap_update_inbound = False
+ chap_update_outbound = False
+
+ if modify.get('auth_type'):
+ body['authentication_type'] = modify.get('auth_type')
+ if modify['auth_type'] == 'chap':
+ # change in auth_type
+ chap_update = True
+ use_chap = True
+ elif current.get('auth_type') == 'chap':
+ # we're already using chap
+ use_chap = True
+
+ if use_chap and (modify.get('inbound_username') or modify.get('inbound_password')):
+ # change in chap inbound credentials
+ chap_update = True
+ chap_update_inbound = True
+
+ if use_chap and (modify.get('outbound_username') or modify.get('outbound_password')):
+ # change in chap outbound credentials
+ chap_update = True
+ chap_update_outbound = True
+
+ if chap_update and not chap_update_inbound and 'inbound_username' in self.parameters:
+ # use credentials from input
+ chap_update_inbound = True
+
+ if chap_update and not chap_update_outbound and 'outbound_username' in self.parameters:
+ # use credentials from input
+ chap_update_outbound = True
+
+ if chap_update:
+ chap_info = dict()
+ # set values from self.parameters as they may not show as modified
+ if chap_update_inbound:
+ chap_info['inbound'] = {'user': self.parameters['inbound_username'], 'password': self.parameters['inbound_password']}
+ else:
+ # use current values as inbound username/password are required
+ chap_info['inbound'] = {'user': current.get('inbound_username'), 'password': current.get('inbound_password')}
+ if chap_update_outbound:
+ chap_info['outbound'] = {'user': self.parameters['outbound_username'], 'password': self.parameters['outbound_password']}
+ body['chap'] = chap_info
+ # PATCH fails if this is not present, even though there is no change
+ body['authentication_type'] = 'chap'
+
+ address_info = self.get_address_info(modify.get('address_ranges'))
+ if address_info is not None:
+ body['initiator_address'] = {'ranges': address_info}
+ api = 'protocols/san/iscsi/credentials/{0}/{1}'.format(self.uuid, self.parameters['initiator'])
+ dummy, error = self.rest_api.patch(api, body)
+ if error is not None:
+ self.module.fail_json(msg="Error on modifying initiator: %s - params: %s" % (error, body))
+
+ def get_address_info(self, address_ranges):
+ if address_ranges is None:
+ return None
+ address_info = []
+ for address in address_ranges:
+ address_range = {}
+ if '-' in address:
+ address_range['end'] = address.split('-')[1]
+ address_range['start'] = address.split('-')[0]
+ else:
+ address_range['end'] = address
+ address_range['start'] = address
+ address_info.append(address_range)
+ return address_info
+
+ def apply(self):
+ """
+ check create/delete/modify operations if needed.
+ :return: None.
+ """
+ current = self.get_initiator()
+ action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed and not self.module.check_mode:
+ if action == 'create':
+ self.create_initiator()
+ elif action == 'delete':
+ self.delete_initiator()
+ elif modify:
+ self.modify_initiator(modify, current)
+ result = netapp_utils.generate_result(self.na_helper.changed, action, modify)
+ self.module.exit_json(**result)
+
+ def get_svm_uuid(self):
+ """
+ Get a svm's UUID
+ :return: uuid of the svm.
+ """
+ params = {'fields': 'uuid', 'name': self.parameters['vserver']}
+ api = "svm/svms"
+ message, error = self.rest_api.get(api, params)
+ record, error = rrh.check_for_0_or_1_records(api, message, error)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching svm uuid: %s" % error)
+ if record is None:
+ self.module.fail_json(msg="Error on fetching svm uuid, SVM not found: %s" % self.parameters['vserver'])
+ return message['records'][0]['uuid']
+
+
+def main():
+ """Execute action"""
+ iscsi_obj = NetAppONTAPIscsiSecurity()
+ iscsi_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py
new file mode 100644
index 000000000..a66e87b2d
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py
@@ -0,0 +1,477 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_job_schedule
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_job_schedule
+short_description: NetApp ONTAP Job Schedule
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Delete/Modify job-schedules on ONTAP
+options:
+ state:
+ description:
+ - Whether the specified job schedule should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - The name of the job-schedule to manage.
+ required: true
+ type: str
+ job_minutes:
+ description:
+ - The minute(s) of each hour when the job should be run.
+ Job Manager cron scheduling minute.
+ - -1 represents all minutes.
+ Range is [-1..59]
+ - Required for create.
+ type: list
+ elements: int
+ job_hours:
+ version_added: 2.8.0
+ description:
+ - The hour(s) of the day when the job should be run.
+ Job Manager cron scheduling hour.
+ - -1 represents all hours.
+ Range is [-1..23]
+ type: list
+ elements: int
+ job_months:
+ version_added: 2.8.0
+ description:
+ - The month(s) when the job should be run.
+ Job Manager cron scheduling month.
+ - -1 represents all months.
+ Range is [-1..12], 0 and 12 may or may not be supported, see C(month_offset)
+ type: list
+ elements: int
+ job_days_of_month:
+ version_added: 2.8.0
+ description:
+ - The day(s) of the month when the job should be run.
+ Job Manager cron scheduling day of month.
+ - -1 represents all days of a month from 1 to 31.
+ Range is [-1..31]
+ type: list
+ elements: int
+ job_days_of_week:
+ version_added: 2.8.0
+ description:
+ - The day(s) in the week when the job should be run.
+ Job Manager cron scheduling day of week.
+ - Zero represents Sunday. -1 represents all days of a week.
+ Range is [-1..6]
+ type: list
+ elements: int
+ month_offset:
+ description:
+ - whether January starts at 0 or 1. By default, ZAPI is using a 0..11 range, while REST is using 1..12.
+ - default to 0 when using ZAPI, and to 1 when using REST.
+ - when set to 0, a value of 12 or higher is rejected.
+ - when set to 1, a value of 0 or of 13 or higher is rejected.
+ type: int
+ choices: [0, 1]
+ version_added: 21.9.0
+ cluster:
+ description:
+ - Defaults to local cluster.
+ - In a MetroCluster configuration, user-created schedules owned by the local cluster are replicated to the partner cluster.
+ Likewise, user-created schedules owned by the partner cluster are replicated to the local cluster.
+ - Normally, only schedules owned by the local cluster can be created, modified, and deleted on the local cluster.
+ However, when a MetroCluster configuration is in switchover, the cluster in switchover state can
+ create, modify, and delete schedules owned by the partner cluster.
+ type: str
+ version_added: 21.22.0
+'''
+
+EXAMPLES = """
+ - name: Create Job for 11.30PM at 10th of every month
+ netapp.ontap.na_ontap_job_schedule:
+ state: present
+ name: jobName
+ job_minutes: 30
+ job_hours: 23
+ job_days_of_month: 10
+ job_months: -1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Create Job for 11.30PM at 10th of January, April, July, October for ZAPI and REST
+ netapp.ontap.na_ontap_job_schedule:
+ state: present
+ name: jobName
+ job_minutes: 30
+ job_hours: 23
+ job_days_of_month: 10
+ job_months: 1,4,7,10
+ month_offset: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Create Job for 11.30PM at 10th of January, April, July, October for ZAPI and REST
+ netapp.ontap.na_ontap_job_schedule:
+ state: present
+ name: jobName
+ job_minutes: 30
+ job_hours: 23
+ job_days_of_month: 10
+ job_months: 0,3,6,9
+ month_offset: 0
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Create Job for 11.30PM at 10th of January when using REST and February when using ZAPI !!!
+ netapp.ontap.na_ontap_job_schedule:
+ state: present
+ name: jobName
+ job_minutes: 30
+ job_hours: 23
+ job_days_of_month: 10
+ job_months: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete Job
+ netapp.ontap.na_ontap_job_schedule:
+ state: absent
+ name: jobName
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppONTAPJob:
    '''Class with job schedule cron methods'''

    def __init__(self):

        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            job_minutes=dict(required=False, type='list', elements='int'),
            job_months=dict(required=False, type='list', elements='int'),
            job_hours=dict(required=False, type='list', elements='int'),
            job_days_of_month=dict(required=False, type='list', elements='int'),
            job_days_of_week=dict(required=False, type='list', elements='int'),
            month_offset=dict(required=False, type='int', choices=[0, 1]),
            cluster=dict(required=False, type='str')
        ))

        self.uuid = None
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        self.month_offset = self.parameters.get('month_offset')
        if self.month_offset is None:
            # maintain backward compatibility: ZAPI historically used 0..11 for months, REST uses 1..12
            self.month_offset = 1 if self.use_rest else 0
        if self.month_offset == 1 and self.parameters.get('job_months') and 0 in self.parameters['job_months']:
            # we explicitly test for 0 as it would be converted to -1, which has a special meaning (all).
            # other value errors will be reported by the API.
            self.module.fail_json(msg='Error: 0 is not a valid value in months if month_offset is set to 1: %s' % self.parameters['job_months'])

        if self.use_rest:
            self.set_playbook_api_key_map()
        elif not netapp_utils.has_netapp_lib():
            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
            self.set_playbook_zapi_key_map()

    def set_playbook_zapi_key_map(self):
        """Build module-option to ZAPI element-name maps used by the ZAPI code paths."""
        # string valued options: option name -> ZAPI element name
        self.na_helper.zapi_string_keys = {
            'name': 'job-schedule-name',
            'cluster': 'job-schedule-cluster'
        }
        # list valued options: option name -> (ZAPI parent element, ZAPI child element)
        self.na_helper.zapi_list_keys = {
            'job_minutes': ('job-schedule-cron-minute', 'cron-minute'),
            'job_months': ('job-schedule-cron-month', 'cron-month'),
            'job_hours': ('job-schedule-cron-hour', 'cron-hour'),
            'job_days_of_month': ('job-schedule-cron-day', 'cron-day-of-month'),
            'job_days_of_week': ('job-schedule-cron-day-of-week', 'cron-day-of-week')
        }

    def set_playbook_api_key_map(self):
        """Build the module-option to REST cron attribute-name map used by the REST code paths."""
        self.na_helper.params_to_rest_api_keys = {
            'job_minutes': 'minutes',
            'job_months': 'months',
            'job_hours': 'hours',
            'job_days_of_month': 'days',
            'job_days_of_week': 'weekdays'
        }

    def get_job_schedule_rest(self):
        """
        Return details about the job
        :param:
            name : Job name
        :return: Details about the Job. None if not found.
        :rtype: dict
        """
        query = {'name': self.parameters['name']}
        if self.parameters.get('cluster'):
            query['cluster'] = self.parameters['cluster']
        record, error = rest_generic.get_one_record(self.rest_api, 'cluster/schedules', query, 'uuid,cron')
        if error is not None:
            self.module.fail_json(msg="Error fetching job schedule: %s" % error)
        if record:
            self.uuid = record['uuid']
            job_details = {'name': record['name']}
            for param_key, rest_key in self.na_helper.params_to_rest_api_keys.items():
                if rest_key in record['cron']:
                    job_details[param_key] = record['cron'][rest_key]
                else:
                    # if any of the job_hours, job_minutes, job_months, job_days are empty:
                    # it means the value is -1 using ZAPI convention
                    job_details[param_key] = [-1]
            # adjust offsets if necessary
            if 'job_months' in job_details and self.month_offset == 0:
                job_details['job_months'] = [x - 1 if x > 0 else x for x in job_details['job_months']]
            # adjust minutes if necessary, -1 means all in ZAPI and for our user facing parameters
            # while REST returns all values
            if 'job_minutes' in job_details and len(job_details['job_minutes']) == 60:
                job_details['job_minutes'] = [-1]
            return job_details
        return None

    def get_job_schedule(self):
        """
        Return details about the job
        :param:
            name : Job name
        :return: Details about the Job. None if not found.
        :rtype: dict
        """
        if self.use_rest:
            return self.get_job_schedule_rest()

        job_get_iter = netapp_utils.zapi.NaElement('job-schedule-cron-get-iter')
        query = {'job-schedule-cron-info': {'job-schedule-name': self.parameters['name']}}
        if self.parameters.get('cluster'):
            query['job-schedule-cron-info']['job-schedule-cluster'] = self.parameters['cluster']
        job_get_iter.translate_struct({'query': query})
        try:
            result = self.server.invoke_successfully(job_get_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching job schedule %s: %s'
                                  % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        job_details = None
        # check if job exists
        if result.get_child_by_name('num-records') and int(result['num-records']) >= 1:
            job_info = result['attributes-list']['job-schedule-cron-info']
            job_details = {}
            for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
                job_details[item_key] = job_info[zapi_key]
            for item_key, zapi_key in self.na_helper.zapi_list_keys.items():
                parent, dummy = zapi_key
                job_details[item_key] = self.na_helper.get_value_for_list(from_zapi=True,
                                                                          zapi_parent=job_info.get_child_by_name(parent)
                                                                          )
                if item_key == 'job_months' and self.month_offset == 1:
                    # ZAPI reports months as 0..11; shift to the 1..12 user convention, keeping -1 (all) as is
                    job_details[item_key] = [int(x) + 1 if int(x) >= 0 else int(x) for x in job_details[item_key]]
                elif item_key == 'job_minutes' and len(job_details[item_key]) == 60:
                    # all 60 minutes selected is equivalent to -1 (all)
                    job_details[item_key] = [-1]
                else:
                    job_details[item_key] = [int(x) for x in job_details[item_key]]
                # if any of the job_hours, job_minutes, job_months, job_days are empty:
                # it means the value is -1 for ZAPI
                if not job_details[item_key]:
                    job_details[item_key] = [-1]
        return job_details

    def add_job_details(self, na_element_object, values):
        """
        Add children node for create or modify NaElement object
        :param na_element_object: modify or create NaElement object
        :param values: dictionary of cron values to be added
        :return: None
        """
        for item_key, item_value in values.items():
            if item_key in self.na_helper.zapi_string_keys:
                zapi_key = self.na_helper.zapi_string_keys.get(item_key)
                na_element_object[zapi_key] = item_value
            elif item_key in self.na_helper.zapi_list_keys:
                parent_key, child_key = self.na_helper.zapi_list_keys.get(item_key)
                data = item_value
                if data:
                    if item_key == 'job_months' and self.month_offset == 1:
                        # -1 is a special value (all); shift user 1..12 months back to ZAPI 0..11
                        data = [str(x - 1) if x > 0 else str(x) for x in data]
                    else:
                        data = [str(x) for x in data]
                na_element_object.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
                                                                                  zapi_parent=parent_key,
                                                                                  zapi_child=child_key,
                                                                                  data=data))

    def create_job_schedule(self):
        """
        Creates a job schedule
        """
        if self.use_rest:
            cron = {}
            for param_key, rest_key in self.na_helper.params_to_rest_api_keys.items():
                # -1 means all in zapi, while empty means all in api.
                if self.parameters.get(param_key):
                    if len(self.parameters[param_key]) == 1 and self.parameters[param_key][0] == -1:
                        # need to set empty value for minutes as this is a required parameter
                        if rest_key == 'minutes':
                            cron[rest_key] = []
                    elif param_key == 'job_months' and self.month_offset == 0:
                        # shift 0-based user months to the REST 1..12 convention, keeping -1 (all) as is
                        cron[rest_key] = [x + 1 if x >= 0 else x for x in self.parameters[param_key]]
                    else:
                        cron[rest_key] = self.parameters[param_key]

            params = {
                'name': self.parameters['name'],
                'cron': cron
            }
            if self.parameters.get('cluster'):
                params['cluster'] = self.parameters['cluster']
            api = 'cluster/schedules'
            dummy, error = self.rest_api.post(api, params)
            if error is not None:
                self.module.fail_json(msg="Error creating job schedule: %s" % error)

        else:
            job_schedule_create = netapp_utils.zapi.NaElement('job-schedule-cron-create')
            self.add_job_details(job_schedule_create, self.parameters)
            try:
                self.server.invoke_successfully(job_schedule_create,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error creating job schedule %s: %s'
                                      % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def delete_job_schedule(self):
        """
        Delete a job schedule
        """
        if self.use_rest:
            api = 'cluster/schedules/' + self.uuid
            dummy, error = self.rest_api.delete(api)
            if error is not None:
                self.module.fail_json(msg="Error deleting job schedule: %s" % error)
        else:
            job_schedule_delete = netapp_utils.zapi.NaElement('job-schedule-cron-destroy')
            self.add_job_details(job_schedule_delete, self.parameters)
            try:
                self.server.invoke_successfully(job_schedule_delete,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error deleting job schedule %s: %s'
                                      % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def modify_job_schedule(self, modify, current):
        """
        modify a job schedule
        """

        def set_cron(param_key, rest_key, params, cron):
            # -1 means all in zapi, while empty means all in api.
            if params[param_key] == [-1]:
                cron[rest_key] = []
            elif param_key == 'job_months' and self.month_offset == 0:
                # shift 0-based user months to the REST 1..12 convention, keeping -1 (all) as is.
                # (was unconditionally x + 1, which would shift a -1 inside a mixed list to 0;
                # now consistent with create_job_schedule)
                cron[rest_key] = [x + 1 if x >= 0 else x for x in params[param_key]]
            else:
                cron[rest_key] = params[param_key]

        if self.use_rest:
            cron = {}
            for param_key, rest_key in self.na_helper.params_to_rest_api_keys.items():
                if modify.get(param_key):
                    set_cron(param_key, rest_key, modify, cron)
                elif current.get(param_key):
                    # Usually only include modify attributes, but omitting an attribute means all in api.
                    # Need to add the current attributes in params.
                    set_cron(param_key, rest_key, current, cron)
            params = {
                'cron': cron
            }
            api = 'cluster/schedules/' + self.uuid
            dummy, error = self.rest_api.patch(api, params)
            if error is not None:
                self.module.fail_json(msg="Error modifying job schedule: %s" % error)
        else:
            job_schedule_modify = netapp_utils.zapi.NaElement.create_node_with_children(
                'job-schedule-cron-modify', **{'job-schedule-name': self.parameters['name']})
            self.add_job_details(job_schedule_modify, modify)
            try:
                self.server.invoke_successfully(job_schedule_modify, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying job schedule %s: %s'
                                      % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def apply(self):
        """
        Apply action to job-schedule
        """
        modify = None
        current = self.get_job_schedule()
        action = self.na_helper.get_cd_action(current, self.parameters)
        if action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if action == 'create' and self.parameters.get('job_minutes') is None:
            # job_minutes is mandatory for create
            self.module.fail_json(msg='Error: missing required parameter job_minutes for create')

        if self.na_helper.changed and not self.module.check_mode:
            if action == 'create':
                self.create_job_schedule()
            elif action == 'delete':
                self.delete_job_schedule()
            elif modify:
                self.modify_job_schedule(modify, current)
        result = netapp_utils.generate_result(self.na_helper.changed, action, modify)
        self.module.exit_json(**result)
+
+
def main():
    '''Execute action'''
    NetAppONTAPJob().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_interface.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_interface.py
new file mode 100644
index 000000000..de8b48e05
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_interface.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_kerberos_interface
+short_description: NetApp ONTAP module to modify kerberos interface.
+description:
+ - Enable or disable kerberos interface.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '22.6.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+options:
+ state:
+ description:
+ - Modify kerberos interface, only present is supported.
+ choices: ['present']
+ type: str
+ default: present
+
+ interface_name:
+ description:
+ - Specifies the name of the logical interface associated with the NFS Kerberos configuration you want to modify.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Specifies the Vserver associated with the NFS Kerberos configuration you want to modify.
+ type: str
+ required: true
+
+ enabled:
+ description:
+ - Specifies whether to enable or disable Kerberos for NFS on the specified Vserver and logical interface.
+ - C(service_principal_name) is required when try to enable kerberos.
+ type: bool
+ required: true
+
+ keytab_uri:
+ description:
+ - Specifies loading a keytab file from the specified URI.
+ - This value must be in the form of "(ftp|http|https)://(hostname|IPv4 Address|'['IPv6 Address']')...".
+ type: str
+
+ machine_account:
+ description:
+ - Specifies the machine account to create in Active Directory.
+ - Requires ONTAP 9.12.1 or later.
+ type: str
+
+ organizational_unit:
+ description:
+ - Specifies the organizational unit (OU) under which the Microsoft Active Directory server account will be created
+ when you enable Kerberos using a realm for Microsoft KDC
+ type: str
+
+ admin_username:
+ description:
+ - Specifies the administrator username.
+ type: str
+
+ admin_password:
+ description:
+ - Specifies the administrator password.
+ type: str
+
+ service_principal_name:
+ description:
+ - Specifies the service principal name (SPN) of the Kerberos configuration you want to modify.
+ - This value must be in the form nfs/host_name@REALM.
+ - host_name is the fully qualified host name of the Kerberos server, nfs is the service, and REALM is the name of the Kerberos realm.
+ - Specify Kerberos realm names in uppercase.
+ aliases: ['spn']
+ type: str
+
+notes:
+ - Supports check_mode.
+ - Module supports only REST and requires ONTAP 9.7 or later.
+'''
+
+EXAMPLES = '''
+
+ - name: Enable kerberos interface.
+ netapp.ontap.na_ontap_kerberos_interface:
+ interface_name: lif_svm1_284
+ vserver: ansibleSVM
+ enabled: true
+ service_principal_name: nfs/lif_svm1_284@REALM2
+ admin_username: "{{ admin_user }}"
+ admin_password: "{{ admin_pass }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: "{{ https }}"
+ validate_certs: "{{ certs }}"
+
+
+ - name: Disable kerberos interface.
+ netapp.ontap.na_ontap_kerberos_interface:
+ interface_name: lif_svm1_284
+ vserver: ansibleSVM
+ enabled: false
+ service_principal_name: nfs/lif_svm1_284@REALM2
+ admin_username: "{{ admin_user }}"
+ admin_password: "{{ admin_pass }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: "{{ https }}"
+ validate_certs: "{{ certs }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapKerberosInterface:
    """Modify Kerberos interface"""

    def __init__(self):
        # module options specific to this module; merged with the shared ONTAP connection options
        options = dict(
            state=dict(required=False, type='str', choices=['present'], default='present'),
            interface_name=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
            enabled=dict(required=True, type='bool'),
            keytab_uri=dict(required=False, type='str', no_log=True),
            machine_account=dict(required=False, type='str'),
            organizational_unit=dict(required=False, type='str'),
            admin_username=dict(required=False, type='str'),
            admin_password=dict(required=False, type='str', no_log=True),
            service_principal_name=dict(required=False, type='str', aliases=['spn'])
        )
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(options)

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            required_if=[('enabled', True, ['service_principal_name'])],
            required_together=[('admin_username', 'admin_password')],
            mutually_exclusive=[('keytab_uri', 'machine_account')]
        )

        self.na_helper = NetAppModule(self.module)
        self.parameters = self.na_helper.check_and_set_parameters(self.module)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        # REST only module: ONTAP 9.7+ required, machine_account needs 9.12.1+
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_kerberos_interface', 9, 7)
        self.rest_api.is_rest_supported_properties(self.parameters, None, [['machine_account', (9, 12, 1)]])
        self.uuid = None

    def get_kerberos_interface(self):
        """
        Get kerberos interface.
        Fails the module when no matching Kerberos interface config exists.
        """
        api = 'protocols/nfs/kerberos/interfaces'
        fields = 'interface.uuid,enabled,spn'
        if 'machine_account' in self.parameters:
            fields += ',machine_account'
        query = {
            'interface.name': self.parameters['interface_name'],
            'svm.name': self.parameters['vserver'],
            'fields': fields
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            self.module.fail_json(msg='Error fetching kerberos interface info %s: %s' % (self.parameters['interface_name'], to_native(error)),
                                  exception=traceback.format_exc())
        if record is None:
            self.module.fail_json(msg='Error: Kerberos interface config does not exist for %s' % self.parameters['interface_name'])
        # remember the LIF uuid for the subsequent PATCH
        self.uuid = self.na_helper.safe_get(record, ['interface', 'uuid'])
        return {'enabled': record.get('enabled')}

    def modify_kerberos_interface(self):
        """
        Modify kerberos interface.
        """
        api = 'protocols/nfs/kerberos/interfaces'
        # module option -> REST body key; only options the user supplied are sent
        optional_fields = [
            ('keytab_uri', 'keytab_uri'),
            ('organizational_unit', 'organizational_unit'),
            ('service_principal_name', 'spn'),
            ('admin_username', 'user'),
            ('admin_password', 'password'),
            ('machine_account', 'machine_account'),
        ]
        body = {'enabled': self.parameters['enabled']}
        for option, rest_key in optional_fields:
            if option in self.parameters:
                body[rest_key] = self.parameters[option]
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
        if error:
            self.module.fail_json(msg='Error modifying kerberos interface %s: %s.' % (self.parameters['interface_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Compare current vs desired state and patch the interface when needed."""
        current = self.get_kerberos_interface()
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            self.modify_kerberos_interface()
        self.module.exit_json(**netapp_utils.generate_result(self.na_helper.changed, modify=modify))
+
+
def main():
    """Instantiate the module object and apply the requested change."""
    NetAppOntapKerberosInterface().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py
new file mode 100644
index 000000000..9cb4c346b
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py
@@ -0,0 +1,438 @@
+#!/usr/bin/python
+'''
+(c) 2019, Red Hat, Inc
+(c) 2019-2022, NetApp, Inc
+GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_kerberos_realm
+
+short_description: NetApp ONTAP vserver nfs kerberos realm
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: Milan Zink (@zeten30) <zeten30@gmail.com>,<mzink@redhat.com>
+
+description:
+ - Create, modify or delete vserver kerberos realm configuration
+
+options:
+
+ state:
+ description:
+ - Whether the Kerberos realm is present or absent.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ vserver:
+ description:
+ - vserver/svm with kerberos realm configured
+ required: true
+ type: str
+
+ realm:
+ description:
+ - Kerberos realm name
+ required: true
+ type: str
+
+ kdc_vendor:
+ description:
+ - The vendor of the Key Distribution Centre (KDC) server
+ - Required if I(state=present)
+ choices: ['other', 'microsoft']
+ type: str
+
+ kdc_ip:
+ description:
+ - IP address of the Key Distribution Centre (KDC) server
+ - Required if I(state=present)
+ type: str
+
+ kdc_port:
+ description:
+ - TCP port on the KDC to be used for Kerberos communication.
+ - The default for this parameter is 88.
+ type: int
+
+ clock_skew:
+ description:
+ - The clock skew in minutes is the tolerance for accepting tickets with time stamps that do not exactly match the host's system clock.
+ - The default for this parameter is '5' minutes.
+ - This option is not supported with REST.
+ type: str
+
+ comment:
+ description:
+ - Optional comment
+ type: str
+
+ admin_server_ip:
+ description:
+ - IP address of the host where the Kerberos administration daemon is running. This is usually the master KDC.
+ - If this parameter is omitted, the address specified in kdc_ip is used.
+ - This option is not supported with REST.
+ type: str
+
+ admin_server_port:
+ description:
+ - The TCP port on the Kerberos administration server where the Kerberos administration service is running.
+ - The default for this parameter is '749'.
+ - This option is not supported with REST.
+ type: str
+
+ pw_server_ip:
+ description:
+ - IP address of the host where the Kerberos password-changing server is running.
+ - Typically, this is the same as the host indicated in the adminserver-ip.
+ - If this parameter is omitted, the IP address in kdc-ip is used.
+ - This option is not supported with REST.
+ type: str
+
+ pw_server_port:
+ description:
+ - The TCP port on the Kerberos password-changing server where the Kerberos password-changing service is running.
+ - The default for this parameter is '464'.
+ - This option is not supported with REST.
+ type: str
+
+ ad_server_ip:
+ description:
+ - IP Address of the Active Directory Domain Controller (DC). This is a mandatory parameter if the kdc-vendor is 'microsoft'.
+ type: str
+ version_added: '20.4.0'
+
+ ad_server_name:
+ description:
+ - Host name of the Active Directory Domain Controller (DC). This is a mandatory parameter if the kdc-vendor is 'microsoft'.
+ type: str
+ version_added: '20.4.0'
+
+notes:
+ - supports ZAPI and REST. REST requires ONTAP 9.6 or later.
+ - supports check mode.
+'''
+
+EXAMPLES = '''
+
+ - name: Create kerberos realm other kdc vendor
+ netapp.ontap.na_ontap_kerberos_realm:
+ state: present
+ realm: 'EXAMPLE.COM'
+ vserver: 'vserver1'
+ kdc_ip: '1.2.3.4'
+ kdc_vendor: 'other'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create kerberos realm Microsoft kdc vendor
+ netapp.ontap.na_ontap_kerberos_realm:
+ state: present
+ realm: 'EXAMPLE.COM'
+ vserver: 'vserver1'
+ kdc_ip: '1.2.3.4'
+ kdc_vendor: 'microsoft'
+ ad_server_ip: '0.0.0.0'
+ ad_server_name: 'server'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapKerberosRealm:
+ '''
+ Kerberos Realm definition class
+ '''
+
def __init__(self):
    # shared ONTAP connection options (hostname, username, password, ...)
    self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
    self.argument_spec.update(dict(
        admin_server_ip=dict(required=False, type='str'),
        admin_server_port=dict(required=False, type='str'),
        clock_skew=dict(required=False, type='str'),
        comment=dict(required=False, type='str'),
        kdc_ip=dict(required=False, type='str'),
        kdc_port=dict(required=False, type='int'),
        kdc_vendor=dict(required=False, type='str',
                        choices=['microsoft', 'other']),
        pw_server_ip=dict(required=False, type='str'),
        pw_server_port=dict(required=False, type='str'),
        realm=dict(required=True, type='str'),
        state=dict(required=False, choices=['present', 'absent'], default='present'),
        vserver=dict(required=True, type='str'),
        ad_server_ip=dict(required=False, type='str'),
        ad_server_name=dict(required=False, type='str')
    ))

    self.module = AnsibleModule(
        argument_spec=self.argument_spec,
        supports_check_mode=True,
        # creation needs a KDC; a Microsoft KDC additionally needs the AD server details
        required_if=[
            ('state', 'present', ['kdc_vendor', 'kdc_ip']),
            ('kdc_vendor', 'microsoft', ['ad_server_ip', 'ad_server_name'])
        ]
    )
    self.na_helper = NetAppModule()
    self.parameters = self.na_helper.set_parameters(self.module.params)
    # Set up Rest API
    self.rest_api = netapp_utils.OntapRestAPI(self.module)
    # these options are ZAPI only; supplying any of them forces the ZAPI code path
    unsupported_rest_properties = ['admin_server_ip', 'admin_server_port', 'clock_skew', 'pw_server_ip', 'pw_server_port']
    self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
    self.svm_uuid = None

    if not self.use_rest:
        if not netapp_utils.has_netapp_lib():
            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    # options copied verbatim (with '_' -> '-') into the ZAPI create request
    self.simple_attributes = [
        'admin_server_ip',
        'admin_server_port',
        'clock_skew',
        'kdc_ip',
        'kdc_vendor',
    ]
+
def get_krbrealm(self):
    '''
    Checks if Kerberos Realm config exists.

    :return:
        kerberos realm object if found
        None if not found
    :rtype: object/None
    '''
    if self.use_rest:
        return self.get_krbrealm_rest()

    # build the ZAPI query, keyed on realm name and vserver
    query_details = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm', **{'realm': self.parameters['realm'],
                                                                                               'vserver-name': self.parameters['vserver']})
    query = netapp_utils.zapi.NaElement('query')
    query.add_child_elem(query_details)
    krbrealm_info = netapp_utils.zapi.NaElement('kerberos-realm-get-iter')
    krbrealm_info.add_child_elem(query)

    try:
        result = self.server.invoke_successfully(krbrealm_info, enable_tunneling=True)
    except netapp_utils.zapi.NaApiError as error:
        self.module.fail_json(msg='Error fetching kerberos realm %s: %s' % (self.parameters['realm'], to_native(error)))

    # no matching record -> realm is not configured
    if not result.get_child_by_name('num-records') or int(result.get_child_content('num-records')) < 1:
        return None

    config_info = result.get_child_by_name('attributes-list').get_child_by_name('kerberos-realm')
    # module option -> ZAPI child element name (all string valued except kdc-port)
    zapi_keys = {
        'admin_server_ip': 'admin-server-ip',
        'admin_server_port': 'admin-server-port',
        'clock_skew': 'clock-skew',
        'kdc_ip': 'kdc-ip',
        'kdc_vendor': 'kdc-vendor',
        'pw_server_ip': 'password-server-ip',
        'pw_server_port': 'password-server-port',
        'realm': 'realm',
        'vserver': 'vserver-name',
        'ad_server_ip': 'ad-server-ip',
        'ad_server_name': 'ad-server-name',
        'comment': 'comment'
    }
    krbrealm_details = {option: config_info.get_child_content(zapi_key) for option, zapi_key in zapi_keys.items()}
    krbrealm_details['kdc_port'] = int(config_info.get_child_content('kdc-port'))
    return krbrealm_details
+
+ def create_krbrealm(self):
+ '''supported
+ Create Kerberos Realm configuration
+ '''
+ if self.use_rest:
+ return self.create_krbrealm_rest()
+
+ options = {
+ 'realm': self.parameters['realm']
+ }
+
+ # Other options/attributes
+ for attribute in self.simple_attributes:
+ if self.parameters.get(attribute) is not None:
+ options[str(attribute).replace('_', '-')] = self.parameters[attribute]
+
+ if self.parameters.get('kdc_port'):
+ options['kdc-port'] = str(self.parameters['kdc_port'])
+ if self.parameters.get('pw_server_ip') is not None:
+ options['password-server-ip'] = self.parameters['pw_server_ip']
+ if self.parameters.get('pw_server_port') is not None:
+ options['password-server-port'] = self.parameters['pw_server_port']
+
+ if self.parameters.get('ad_server_ip') is not None:
+ options['ad-server-ip'] = self.parameters['ad_server_ip']
+ if self.parameters.get('ad_server_name') is not None:
+ options['ad-server-name'] = self.parameters['ad_server_name']
+
+ # Initialize NaElement
+ krbrealm_create = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm-create', **options)
+
+ # Try to create Kerberos Realm configuration
+ try:
+ self.server.invoke_successfully(krbrealm_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(msg='Error creating Kerberos Realm configuration %s: %s' % (self.parameters['realm'], to_native(errcatch)),
+ exception=traceback.format_exc())
+
+ def delete_krbrealm(self):
+ '''
+ Delete Kerberos Realm configuration
+ '''
+ if self.use_rest:
+ return self.delete_krbrealm_rest()
+ krbrealm_delete = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm-delete', **{'realm': self.parameters['realm']})
+ try:
+ self.server.invoke_successfully(krbrealm_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(msg='Error deleting Kerberos Realm configuration %s: %s' % (
+ self.parameters['realm'], to_native(errcatch)), exception=traceback.format_exc())
+
+ def modify_krbrealm(self, modify):
+ '''
+ Modify Kerberos Realm
+ :param modify: list of modify attributes
+ '''
+ if self.use_rest:
+ return self.modify_krbrealm_rest(modify)
+ krbrealm_modify = netapp_utils.zapi.NaElement('kerberos-realm-modify')
+ krbrealm_modify.add_new_child('realm', self.parameters['realm'])
+
+ for attribute in modify:
+ if attribute in self.simple_attributes:
+ krbrealm_modify.add_new_child(str(attribute).replace('_', '-'), self.parameters[attribute])
+ if attribute == 'kdc_port':
+ krbrealm_modify.add_new_child('kdc-port', str(self.parameters['kdc_port']))
+ if attribute == 'pw_server_ip':
+ krbrealm_modify.add_new_child('password-server-ip', self.parameters['pw_server_ip'])
+ if attribute == 'pw_server_port':
+ krbrealm_modify.add_new_child('password-server-port', self.parameters['pw_server_port'])
+ if attribute == 'ad_server_ip':
+ krbrealm_modify.add_new_child('ad-server-ip', self.parameters['ad_server_ip'])
+ if attribute == 'ad_server_name':
+ krbrealm_modify.add_new_child('ad-server-name', self.parameters['ad_server_name'])
+ if attribute == 'comment':
+ krbrealm_modify.add_new_child('comment', self.parameters['comment'])
+
+ # Try to modify Kerberos Realm
+ try:
+ self.server.invoke_successfully(krbrealm_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(msg='Error modifying Kerberos Realm %s: %s' % (self.parameters['realm'], to_native(errcatch)),
+ exception=traceback.format_exc())
+
+ def get_krbrealm_rest(self):
+ api = 'protocols/nfs/kerberos/realms'
+ params = {
+ 'name': self.parameters['realm'],
+ 'svm.name': self.parameters['vserver'],
+ 'fields': 'kdc,ad_server,svm,comment'
+ }
+ record, error = rest_generic.get_one_record(self.rest_api, api, params)
+ if error:
+ self.module.fail_json(msg='Error fetching kerberos realm %s: %s' % (self.parameters['realm'], to_native(error)))
+ if record:
+ self.svm_uuid = record['svm']['uuid']
+ return {
+ 'kdc_ip': self.na_helper.safe_get(record, ['kdc', 'ip']),
+ 'kdc_port': self.na_helper.safe_get(record, ['kdc', 'port']),
+ 'kdc_vendor': self.na_helper.safe_get(record, ['kdc', 'vendor']),
+ 'ad_server_ip': self.na_helper.safe_get(record, ['ad_server', 'address']),
+ 'ad_server_name': self.na_helper.safe_get(record, ['ad_server', 'name']),
+ 'comment': self.na_helper.safe_get(record, ['comment'])
+ }
+ return None
+
+ def create_krbrealm_rest(self):
+ api = 'protocols/nfs/kerberos/realms'
+ body = {
+ 'name': self.parameters['realm'],
+ 'svm.name': self.parameters['vserver'],
+ 'kdc.ip': self.parameters['kdc_ip'],
+ 'kdc.vendor': self.parameters['kdc_vendor']
+ }
+ if self.parameters.get('kdc_port'):
+ body['kdc.port'] = self.parameters['kdc_port']
+ if self.parameters.get('comment'):
+ body['comment'] = self.parameters['comment']
+ if self.parameters['kdc_vendor'] == 'microsoft':
+ body['ad_server.address'] = self.parameters['ad_server_ip']
+ body['ad_server.name'] = self.parameters['ad_server_name']
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg='Error creating Kerberos Realm configuration %s: %s' % (self.parameters['realm'], to_native(error)))
+
+ def modify_krbrealm_rest(self, modify):
+ api = 'protocols/nfs/kerberos/realms/%s' % self.svm_uuid
+ body = {}
+ if modify.get('kdc_ip'):
+ body['kdc.ip'] = modify['kdc_ip']
+ if modify.get('kdc_vendor'):
+ body['kdc.vendor'] = modify['kdc_vendor']
+ if modify.get('kdc_port'):
+ body['kdc.port'] = modify['kdc_port']
+ if modify.get('comment'):
+ body['comment'] = modify['comment']
+ if modify.get('ad_server_ip'):
+ body['ad_server.address'] = modify['ad_server_ip']
+ if modify.get('ad_server_name'):
+ body['ad_server.name'] = modify['ad_server_name']
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['realm'], body)
+ if error:
+ self.module.fail_json(msg='Error modifying Kerberos Realm %s: %s' % (self.parameters['realm'], to_native(error)))
+
+ def delete_krbrealm_rest(self):
+ api = 'protocols/nfs/kerberos/realms/%s' % self.svm_uuid
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['realm'])
+ if error:
+ self.module.fail_json(msg='Error deleting Kerberos Realm configuration %s: %s' % (self.parameters['realm'], to_native(error)))
+
+ def apply(self):
+ '''Call create/modify/delete operations.'''
+ current = self.get_krbrealm()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_krbrealm()
+ elif cd_action == 'delete':
+ self.delete_krbrealm()
+ elif modify:
+ self.modify_krbrealm(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
def main():
    '''ONTAP Kerberos Realm'''
    NetAppOntapKerberosRealm().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py
new file mode 100644
index 000000000..d75d17ee9
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+'''
+(c) 2018-2022, NetApp, Inc
+GNU General Public License v3.0+
+(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_ldap
+
+short_description: NetApp ONTAP LDAP
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+version_added: 2.9.0
+author: Milan Zink (@zeten30) <zeten30@gmail.com>/<mzink@redhat.com>
+
+description:
+- Create, modify or delete LDAP on NetApp ONTAP SVM/vserver
+
+options:
+
+ state:
+ description:
+ - Whether the LDAP is present or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ vserver:
+ description:
+ - vserver/svm configured to use LDAP
+ required: true
+ type: str
+
+ name:
+ description:
+ - The name of LDAP client configuration
+ required: true
+ type: str
+
+ skip_config_validation:
+ description:
+ - Skip LDAP validation
+ choices: ['true', 'false']
+ type: str
+'''
+
+EXAMPLES = '''
+
+ - name: Enable LDAP on SVM
+ netapp.ontap.na_ontap_ldap:
+ state: present
+ name: 'example_ldap'
+ vserver: 'vserver1'
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
class NetAppOntapLDAP:
    '''
    Enable, modify or disable an LDAP client configuration on an SVM.

    This module is ZAPI only; REST users should use
    netapp.ontap.na_ontap_ldap_client instead.
    '''

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            skip_config_validation=dict(required=False, default=None, choices=['true', 'false']),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # This module only supports ZAPI - redirect REST users to na_ontap_ldap_client.
        # (message fixed: the two sentences were previously run together without a space)
        msg = 'Error: na_ontap_ldap only supports ZAPI. netapp.ontap.na_ontap_ldap_client should be used instead.'
        self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)

        if not netapp_utils.has_netapp_lib():
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_ldap(self, client_config_name=None):
        '''
        Checks if LDAP config exists.

        :param client_config_name: client config to look up, defaults to self.parameters['name']
        :return:
            ldap config dict if found
            None if not found
        :rtype: dict/None
        '''
        # Make query
        config_info = netapp_utils.zapi.NaElement('ldap-config-get-iter')

        if client_config_name is None:
            client_config_name = self.parameters['name']

        query_details = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config', **{'client-config': client_config_name})

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        config_info.add_child_elem(query)

        # Fail with a readable message instead of letting NaApiError escape,
        # consistent with the other ZAPI calls in this module.
        try:
            result = self.server.invoke_successfully(config_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error getting LDAP configuration %s: %s' % (self.parameters['name'], to_native(errcatch)),
                                  exception=traceback.format_exc())

        # Get LDAP configuration details
        config_details = None
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            attributes_list = result.get_child_by_name('attributes-list')
            config_info = attributes_list.get_child_by_name('ldap-config')

            # Define config details structure
            config_details = {'client_config': config_info.get_child_content('client-config'),
                              'skip_config_validation': config_info.get_child_content('skip-config-validation'),
                              'vserver': config_info.get_child_content('vserver')}

        return config_details

    def create_ldap(self):
        '''
        Create (enable) the LDAP configuration on the SVM.
        '''
        options = {
            'client-config': self.parameters['name'],
            'client-enabled': 'true'
        }

        if self.parameters.get('skip_config_validation') is not None:
            options['skip-config-validation'] = self.parameters['skip_config_validation']

        # Initialize NaElement
        ldap_create = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config-create', **options)

        # Try to create LDAP configuration
        try:
            self.server.invoke_successfully(ldap_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error creating LDAP configuration %s: %s' % (self.parameters['name'], to_native(errcatch)),
                                  exception=traceback.format_exc())

    def delete_ldap(self):
        '''
        Delete the LDAP configuration on the SVM.
        '''
        ldap_client_delete = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config-delete', **{})

        try:
            self.server.invoke_successfully(ldap_client_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error deleting LDAP configuration %s: %s' % (
                self.parameters['name'], to_native(errcatch)), exception=traceback.format_exc())

    def modify_ldap(self, modify):
        '''
        Modify the LDAP configuration.
        :param modify: dict of attributes to change
        '''
        ldap_modify = netapp_utils.zapi.NaElement('ldap-config-modify')
        ldap_modify.add_new_child('client-config', self.parameters['name'])

        # skip_config_validation is the only modifiable attribute
        for attribute in modify:
            if attribute == 'skip_config_validation':
                ldap_modify.add_new_child('skip-config-validation', self.parameters[attribute])

        # Try to modify LDAP
        try:
            self.server.invoke_successfully(ldap_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error modifying LDAP %s: %s' % (self.parameters['name'], to_native(errcatch)),
                                  exception=traceback.format_exc())

    def apply(self):
        '''Call create/modify/delete operations.'''
        current = self.get_ldap()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_ldap()
            elif cd_action == 'delete':
                self.delete_ldap()
            elif modify:
                self.modify_ldap(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    '''ONTAP LDAP client configuration'''
    NetAppOntapLDAP().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py
new file mode 100644
index 000000000..8a3103b7d
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py
@@ -0,0 +1,550 @@
+#!/usr/bin/python
+'''
+(c) 2018-2023, NetApp, Inc
+GNU General Public License v3.0+
+(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_ldap_client
+
+short_description: NetApp ONTAP LDAP client
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: Milan Zink (@zeten30) <zeten30@gmail.com>/<mzink@redhat.com>
+
+description:
+ - Create, modify or delete LDAP client on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified LDAP client configuration exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ vserver:
+ description:
+ - vserver/svm that holds LDAP client configuration.
+ required: true
+ type: str
+
+ name:
+ description:
+ - The name of LDAP client configuration.
+ - Supported only in ZAPI.
+ - Required with ZAPI.
+ type: str
+
+ servers:
+ description:
+    - Comma separated list of LDAP servers. FQDNs or IP addresses.
+ - servers or ad_domain is required if I(state=present).
+ - Mutually exclusive with preferred_ad_servers and ad_domain.
+ type: list
+ elements: str
+ aliases: ['ldap_servers']
+
+ schema:
+ description:
+ - LDAP schema.
+ - Required if I(state=present).
+ - default schemas - 'AD-IDMU', 'AD-SFU', 'MS-AD-BIS', 'RFC-2307'.
+ - custom schemas are allowed as well.
+ type: str
+
+ ad_domain:
+ description:
+ - Active Directory Domain Name.
+ - servers or ad_domain is required if I(state=present).
+ - Mutually exclusive with servers.
+ type: str
+
+ base_dn:
+ description:
+ - LDAP base DN.
+ type: str
+
+ base_scope:
+ description:
+ - LDAP search scope.
+ choices: ['subtree', 'onelevel', 'base']
+ type: str
+
+ bind_as_cifs_server:
+ description:
+ - The cluster uses the CIFS server's credentials to bind to the LDAP server.
+ type: bool
+
+ preferred_ad_servers:
+ description:
+ - Preferred Active Directory (AD) Domain Controllers.
+ - Mutually exclusive with servers.
+ type: list
+ elements: str
+
+ port:
+ description:
+ - LDAP server TCP port.
+ type: int
+ aliases: ['tcp_port']
+ version_added: 21.3.0
+
+ query_timeout:
+ description:
+ - LDAP server query timeout.
+ type: int
+
+ min_bind_level:
+ description:
+ - Minimal LDAP server bind level.
+ choices: ['anonymous', 'simple', 'sasl']
+ type: str
+
+ bind_dn:
+ description:
+ - LDAP bind user DN.
+ type: str
+
+ bind_password:
+ description:
+ - LDAP bind user password.
+ type: str
+
+ use_start_tls:
+ description:
+ - Start TLS on LDAP connection.
+ type: bool
+
+ referral_enabled:
+ description:
+ - LDAP Referral Chasing.
+ type: bool
+
+ session_security:
+ description:
+ - Client Session Security.
+ choices: ['none', 'sign', 'seal']
+ type: str
+
+ ldaps_enabled:
+ description:
+ - Specifies whether or not LDAPS is enabled.
+ type: bool
+ version_added: 21.22.0
+
+ skip_config_validation:
+ description:
+ - Indicates whether or not the validation for the specified LDAP configuration is disabled.
+ - By default, errors are reported with REST when server names cannot be resolved for instance.
+ - Requires ONTAP 9.9 or later.
+ - This is ignored with ZAPI.
+ type: bool
+ version_added: 22.0.0
+
+notes:
+ - LDAP client created using ZAPI should be deleted using ZAPI.
+ - LDAP client created using REST should be deleted using REST.
+ - REST only supports create, modify and delete data svm ldap client configuration.
+
+'''
+
+EXAMPLES = '''
+
+ - name: Create LDAP client
+ # assuming credentials are set using module_defaults
+ netapp.ontap.na_ontap_ldap_client:
+ state: present
+ vserver: 'vserver1'
+ servers: 'ldap1.example.company.com,ldap2.example.company.com'
+ base_dn: 'dc=example,dc=company,dc=com'
+
+ - name: modify LDAP client
+ # assuming credentials are set using module_defaults
+ netapp.ontap.na_ontap_ldap_client:
+ state: present
+ vserver: 'vserver1'
+ servers: 'ldap1.example.company.com'
+ base_dn: 'dc=example,dc=company,dc=com'
+ skip_config_validation: true
+
+ - name: Delete LDAP client
+ # assuming credentials are set using module_defaults
+ netapp.ontap.na_ontap_ldap_client:
+ state: absent
+ vserver: 'vserver1'
+'''
+
+RETURN = '''
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver
+
+
+class NetAppOntapLDAPClient:
+ '''
+ LDAP Client definition class
+ '''
+
    def __init__(self):
        """Build the argument spec, pick REST vs ZAPI, and set up the connection."""
        # Start from the common ONTAP connection options and add the
        # LDAP-client-specific ones.
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            ad_domain=dict(required=False, default=None, type='str'),
            base_dn=dict(required=False, type='str'),
            base_scope=dict(required=False, default=None, choices=['subtree', 'onelevel', 'base']),
            bind_as_cifs_server=dict(required=False, type='bool'),
            bind_dn=dict(required=False, default=None, type='str'),
            bind_password=dict(type='str', required=False, default=None, no_log=True),
            name=dict(required=False, type='str'),
            servers=dict(required=False, type='list', elements='str', aliases=['ldap_servers']),
            min_bind_level=dict(required=False, default=None, choices=['anonymous', 'simple', 'sasl']),
            preferred_ad_servers=dict(required=False, type='list', elements='str'),
            port=dict(required=False, type='int', aliases=['tcp_port']),
            query_timeout=dict(required=False, default=None, type='int'),
            referral_enabled=dict(required=False, type='bool'),
            schema=dict(required=False, type='str'),
            session_security=dict(required=False, default=None, choices=['none', 'sign', 'seal']),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            use_start_tls=dict(required=False, type='bool'),
            vserver=dict(required=True, type='str'),
            ldaps_enabled=dict(required=False, type='bool'),
            skip_config_validation=dict(required=False, type='bool'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            required_if=[
                ('state', 'present', ['schema']),
            ],
            mutually_exclusive=[
                ['servers', 'ad_domain'],
                ['servers', 'preferred_ad_servers'],
                ['use_start_tls', 'ldaps_enabled']
            ],
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Set up Rest API
        self.rest_api = OntapRestAPI(self.module)
        # 'name' only exists in ZAPI; the listed options need ONTAP 9.9+ with REST.
        unsupported_rest_properties = ['name']
        partially_supported_rest_properties = [['bind_as_cifs_server', (9, 9, 0)], ['query_timeout', (9, 9, 0)], ['referral_enabled', (9, 9, 0)],
                                               ['ldaps_enabled', (9, 9, 0)], ['skip_config_validation', (9, 9, 0)]]
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
        if not self.use_rest:
            # Fall back to ZAPI: netapp-lib and 'name' are then both required.
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
            if not self.parameters.get('name'):
                self.module.fail_json(msg="Error: name is a required field with ZAPI.")

        # Scalar options that map 1:1 (snake_case -> dashed) onto ZAPI fields.
        self.simple_attributes = [
            'ad_domain',
            'base_dn',
            'base_scope',
            'bind_as_cifs_server',
            'bind_dn',
            'bind_password',
            'min_bind_level',
            'tcp_port',
            'query_timeout',
            'referral_enabled',
            'session_security',
            'use_start_tls',
            'ldaps_enabled'
        ]
+
+ def get_ldap_client(self, client_config_name=None, vserver_name=None):
+ '''
+ Checks if LDAP client config exists.
+
+ :return:
+ ldap client config object if found
+ None if not found
+ :rtype: object/None
+ '''
+ # Make query
+ client_config_info = netapp_utils.zapi.NaElement('ldap-client-get-iter')
+
+ if client_config_name is None:
+ client_config_name = self.parameters['name']
+
+ if vserver_name is None:
+ vserver_name = '*'
+
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children('ldap-client',
+ **{
+ 'ldap-client-config': client_config_name,
+ 'vserver': vserver_name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ client_config_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(client_config_info, enable_tunneling=False)
+
+ # Get LDAP client configuration details
+ client_config_details = None
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ client_config_info = attributes_list.get_child_by_name('ldap-client')
+ ldap_server_list = self.get_list_from_children(client_config_info, 'ldap-servers')
+ preferred_ad_servers_list = self.get_list_from_children(client_config_info, 'preferred-ad-servers')
+
+ # Define config details structure
+ client_config_details = {
+ 'name': client_config_info.get_child_content('ldap-client-config'),
+ 'servers': ldap_server_list,
+ 'ad_domain': client_config_info.get_child_content('ad-domain'),
+ 'base_dn': client_config_info.get_child_content('base-dn'),
+ 'base_scope': client_config_info.get_child_content('base-scope'),
+ 'bind_as_cifs_server': self.na_helper.get_value_for_bool(from_zapi=True,
+ value=client_config_info.get_child_content('bind-as-cifs-server')),
+ 'bind_dn': client_config_info.get_child_content('bind-dn'),
+ 'bind_password': client_config_info.get_child_content('bind-password'),
+ 'min_bind_level': client_config_info.get_child_content('min-bind-level'),
+ 'tcp_port': self.na_helper.get_value_for_int(from_zapi=True, value=client_config_info.get_child_content('tcp-port')),
+ 'preferred_ad_servers': preferred_ad_servers_list,
+ 'query_timeout': self.na_helper.get_value_for_int(from_zapi=True,
+ value=client_config_info.get_child_content('query-timeout')),
+ 'referral_enabled': self.na_helper.get_value_for_bool(from_zapi=True,
+ value=client_config_info.get_child_content('referral-enabled')),
+ 'schema': client_config_info.get_child_content('schema'),
+ 'session_security': client_config_info.get_child_content('session-security'),
+ 'use_start_tls': self.na_helper.get_value_for_bool(from_zapi=True,
+ value=client_config_info.get_child_content('use-start-tls')),
+ 'ldaps_enabled': self.na_helper.get_value_for_bool(from_zapi=True,
+ value=client_config_info.get_child_content('ldaps-enabled')),
+ }
+ return client_config_details
+
+ def get_list_from_children(self, client_config_info, element_name):
+ # Get list for element chidren
+ # returns empty list if element does not exist
+ get_list = client_config_info.get_child_by_name(element_name)
+ return [x.get_content() for x in get_list.get_children()] if get_list is not None else []
+
+ def create_ldap_client(self):
+ '''
+ Create LDAP client configuration
+ '''
+ options = {
+ 'ldap-client-config': self.parameters['name'],
+ 'schema': self.parameters['schema'],
+ }
+
+ # Other options/attributes
+ for attribute in self.simple_attributes:
+ if self.parameters.get(attribute) is not None:
+ options[str(attribute).replace('_', '-')] = str(self.parameters[attribute])
+
+ # Initialize NaElement
+ ldap_client_create = netapp_utils.zapi.NaElement.create_node_with_children('ldap-client-create', **options)
+
+ # LDAP servers NaElement
+ if self.parameters.get('servers') is not None:
+ self.add_element_with_children('ldap-servers', 'servers', 'string', ldap_client_create)
+
+ # preferred_ad_servers
+ if self.parameters.get('preferred_ad_servers') is not None:
+ self.add_element_with_children('preferred-ad-servers', 'preferred_ad_servers', 'ip-address', ldap_client_create)
+
+ # Try to create LDAP configuration
+ try:
+ self.server.invoke_successfully(ldap_client_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(
+ msg='Error creating LDAP client %s: %s' % (self.parameters['name'], to_native(errcatch)),
+ exception=traceback.format_exc())
+
+ def add_element_with_children(self, element_name, param_name, child_name, ldap_client_create):
+ ldap_servers_element = netapp_utils.zapi.NaElement(element_name)
+ for ldap_server_name in self.parameters[param_name]:
+ ldap_servers_element.add_new_child(child_name, ldap_server_name)
+ ldap_client_create.add_child_elem(ldap_servers_element)
+
+ def delete_ldap_client(self):
+ '''
+ Delete LDAP client configuration
+ '''
+ ldap_client_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'ldap-client-delete', **{'ldap-client-config': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(ldap_client_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(msg='Error deleting LDAP client configuration %s: %s' % (
+ self.parameters['name'], to_native(errcatch)), exception=traceback.format_exc())
+
+ def modify_ldap_client(self, modify):
+ '''
+ Modify LDAP client
+ :param modify: list of modify attributes
+ '''
+ ldap_client_modify = netapp_utils.zapi.NaElement('ldap-client-modify')
+ ldap_client_modify.add_new_child('ldap-client-config', self.parameters['name'])
+
+ for attribute in modify:
+ # LDAP_servers
+ if attribute == 'servers':
+ self.add_element_with_children('ldap-servers', attribute, 'string', ldap_client_modify)
+ # preferred_ad_servers
+ if attribute == 'preferred_ad_servers':
+ self.add_element_with_children('preferred-ad-servers', attribute, 'ip-address', ldap_client_modify)
+ # Simple attributes
+ if attribute in self.simple_attributes:
+ ldap_client_modify.add_new_child(str(attribute).replace('_', '-'), str(self.parameters[attribute]))
+
+ # Try to modify LDAP client
+ try:
+ self.server.invoke_successfully(ldap_client_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as errcatch:
+ self.module.fail_json(
+ msg='Error modifying LDAP client %s: %s' % (self.parameters['name'], to_native(errcatch)),
+ exception=traceback.format_exc())
+
+ def get_ldap_client_rest(self):
+ """
+ Retrives ldap client config with rest API.
+ """
+ if not self.use_rest:
+ return self.get_ldap_client()
+ query = {'svm.name': self.parameters.get('vserver'),
+ 'fields': 'svm.uuid,'
+ 'ad_domain,'
+ 'servers,'
+ 'preferred_ad_servers,'
+ 'bind_dn,'
+ 'schema,'
+ 'port,'
+ 'base_dn,'
+ 'base_scope,'
+ 'min_bind_level,'
+ 'session_security,'
+ 'use_start_tls,'}
+ if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 0):
+ query['fields'] += 'bind_as_cifs_server,query_timeout,referral_enabled,ldaps_enabled'
+ record, error = rest_generic.get_one_record(self.rest_api, 'name-services/ldap', query)
+ if error:
+ self.module.fail_json(msg="Error on getting idap client info: %s" % error)
+ if record:
+ return {
+ 'svm': {'uuid': self.na_helper.safe_get(record, ['svm', 'uuid'])},
+ 'ad_domain': self.na_helper.safe_get(record, ['ad_domain']),
+ 'preferred_ad_servers': self.na_helper.safe_get(record, ['preferred_ad_servers']),
+ 'servers': self.na_helper.safe_get(record, ['servers']),
+ 'schema': self.na_helper.safe_get(record, ['schema']),
+ 'port': self.na_helper.safe_get(record, ['port']),
+ 'ldaps_enabled': self.na_helper.safe_get(record, ['ldaps_enabled']),
+ 'min_bind_level': self.na_helper.safe_get(record, ['min_bind_level']),
+ 'bind_dn': self.na_helper.safe_get(record, ['bind_dn']),
+ 'base_dn': self.na_helper.safe_get(record, ['base_dn']),
+ 'base_scope': self.na_helper.safe_get(record, ['base_scope']),
+ 'use_start_tls': self.na_helper.safe_get(record, ['use_start_tls']),
+ 'session_security': self.na_helper.safe_get(record, ['session_security']),
+ 'referral_enabled': self.na_helper.safe_get(record, ['referral_enabled']),
+ 'bind_as_cifs_server': self.na_helper.safe_get(record, ['bind_as_cifs_server']),
+ 'query_timeout': self.na_helper.safe_get(record, ['query_timeout'])
+ }
+ return None
+
+ def create_ldap_client_body_rest(self, modify=None):
+ """
+ ldap client config body for create and modify with rest API.
+ """
+ config_options = ['ad_domain', 'servers', 'preferred_ad_servers', 'bind_dn', 'schema', 'port', 'base_dn', 'referral_enabled', 'ldaps_enabled',
+ 'base_scope', 'bind_as_cifs_server', 'bind_password', 'min_bind_level', 'query_timeout', 'session_security', 'use_start_tls']
+ processing_options = ['skip_config_validation']
+ body = {}
+ for key in config_options:
+ if not modify and key in self.parameters:
+ body[key] = self.parameters[key]
+ elif modify and key in modify:
+ body[key] = modify[key]
+ for key in processing_options:
+ if body and key in self.parameters:
+ body[key] = self.parameters[key]
+ return body
+
+ def create_ldap_client_rest(self):
+ """
+ create ldap client config with rest API.
+ """
+ if not self.use_rest:
+ return self.create_ldap_client()
+ body = self.create_ldap_client_body_rest()
+ body['svm.name'] = self.parameters['vserver']
+ api = 'name-services/ldap'
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error is not None:
+ self.module.fail_json(msg="Error on creating ldap client: %s" % error)
+
+ def delete_ldap_client_rest(self, current):
+ """
+ delete ldap client config with rest API.
+ """
+ if not self.use_rest:
+ return self.delete_ldap_client()
+ api = 'name-services/ldap'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, current['svm']['uuid'], body=None)
+ if error is not None:
+ self.module.fail_json(msg="Error on deleting ldap client rest: %s" % error)
+
+ def modify_ldap_client_rest(self, current, modify):
+ """
+ modif ldap client config with rest API.
+ """
+ if not self.use_rest:
+ return self.modify_ldap_client(modify)
+ body = self.create_ldap_client_body_rest(modify)
+ if body:
+ api = 'name-services/ldap'
+ dummy, error = rest_generic.patch_async(self.rest_api, api, current['svm']['uuid'], body)
+ if error is not None:
+ self.module.fail_json(msg="Error on modifying ldap client config: %s" % error)
+
    def apply(self):
        '''Call create/modify/delete operations.'''
        current = self.get_ldap_client_rest()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # when state is present, either servers or ad_domain is required
        if self.parameters['state'] == 'present' and not self.parameters.get('servers') \
                and self.parameters.get('ad_domain') is None:
            self.module.fail_json(msg='Required one of servers or ad_domain')
        # REST retrieves only the data SVM LDAP configuration; fail early when the vserver is not a data SVM.
        if cd_action == "create" and self.use_rest:
            rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True)
        # modify only applies when no create/delete is pending
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_ldap_client_rest()
            elif cd_action == 'delete':
                self.delete_ldap_client_rest(current)
            elif modify:
                self.modify_ldap_client_rest(current, modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    '''ONTAP LDAP client configuration'''
    NetAppOntapLDAPClient().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py
new file mode 100644
index 000000000..1ed628dbb
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py
@@ -0,0 +1,708 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_license
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_license
+
+short_description: NetApp ONTAP protocol and feature license packages
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Add or remove license packages on NetApp ONTAP.
+ - Note that the module is asymmetrical.
+ - It requires license codes to add packages and the package name is not visible.
+  - It requires package names and a serial number to remove packages.
+
+options:
+ state:
+ description:
+ - Whether the specified license packages should be installed or removed.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ remove_unused:
+ description:
+ - Remove license packages that have no controller affiliation in the cluster.
+ - Not supported with REST.
+ type: bool
+
+ remove_expired:
+ description:
+ - Remove license packages that have expired in the cluster.
+ - Not supported with REST.
+ type: bool
+
+ serial_number:
+ description:
+ - Serial number of the node or cluster associated with the license package.
+ - This parameter is required when removing a license package.
+ - With REST, '*' is accepted and matches any serial number.
+ type: str
+
+ license_names:
+ type: list
+ elements: str
+ description:
+ - List of license package names to remove.
+ suboptions:
+ base:
+ description:
+ - Cluster Base License
+ nfs:
+ description:
+ - NFS License
+ cifs:
+ description:
+ - CIFS License
+ iscsi:
+ description:
+ - iSCSI License
+ fcp:
+ description:
+ - FCP License
+ cdmi:
+ description:
+ - CDMI License
+ snaprestore:
+ description:
+ - SnapRestore License
+ snapmirror:
+ description:
+ - SnapMirror License
+ flexclone:
+ description:
+ - FlexClone License
+ snapvault:
+ description:
+ - SnapVault License
+ snaplock:
+ description:
+ - SnapLock License
+ snapmanagersuite:
+ description:
+ - SnapManagerSuite License
+ snapprotectapps:
+ description:
+ - SnapProtectApp License
+ v_storageattach:
+ description:
+ - Virtual Attached Storage License
+
+ license_codes:
+ description:
+ - List of license codes to be installed.
+ type: list
+ elements: str
+
+notes:
+ - Partially supports check_mode - some changes cannot be detected until an add or remove action is performed.
+ - Supports 28 character key licenses with ZAPI and REST.
+ - Supports NetApp License File Version 2 (NLFv2) with REST.
+ - NetApp License File Version 1 (NLFv1) with REST is not supported at present but may work.
+ - Ansible attempts to reformat license files as the contents are python-like.
+ Use the string filter in case of problem to disable this behavior.
+ - This module requires the python ast and json packages when the string filter is not used.
+ - This module requires the json package to check for idempotency, and to remove licenses using a NLFv2 file.
+ - This module requires the deepdiff package to check for idempotency.
+ - None of these packages are required when the string filter is used, but the module will not be idempotent.
+'''
+
+
+EXAMPLES = """
+- name: Add licenses - 28 character keys
+ netapp.ontap.na_ontap_license:
+ state: present
+ serial_number: #################
+ license_codes: CODE1,CODE2
+
+- name: Remove licenses
+ netapp.ontap.na_ontap_license:
+ state: absent
+ remove_unused: false
+ remove_expired: true
+ serial_number: #################
+ license_names: nfs,cifs
+
+- name: Add NLF licenses
+ netapp.ontap.na_ontap_license:
+ state: present
+ license_codes:
+ - "{{ lookup('file', nlf_filepath) | string }}"
+
+- name: Remove NLF license bundle - using license file
+ netapp.ontap.na_ontap_license:
+ state: absent
+ license_codes:
+ - "{{ lookup('file', nlf_filepath) | string }}"
+
+- name: Remove NLF license bundle - using bundle name
+ netapp.ontap.na_ontap_license:
+ state: absent
+ remove_unused: false
+ remove_expired: true
+ serial_number: #################
+ license_names: "Enterprise Edition"
+"""
+
+RETURN = """
+updated_licenses:
+ description: return list of updated package names
+ returned: always
+  type: list
+ sample: "['nfs']"
+"""
+
+HAS_AST = True
+HAS_DEEPDIFF = True
+HAS_JSON = True
+IMPORT_ERRORS = []
+
+try:
+ import ast
+except ImportError as exc:
+ HAS_AST = False
+ IMPORT_ERRORS.append(exc)
+
+try:
+ from deepdiff import DeepDiff
+except (ImportError, SyntaxError) as exc:
+ # With Ansible 2.9, python 2.6 reports a SyntaxError
+ HAS_DEEPDIFF = False
+ IMPORT_ERRORS.append(exc)
+
+try:
+ import json
+except ImportError as exc:
+ HAS_JSON = False
+ IMPORT_ERRORS.append(exc)
+
+import re
+import sys
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+if sys.version_info < (3, 5):
+ # not defined in earlier versions
+ RecursionError = RuntimeError
+
+
def local_cmp(a, b):
    """
    Compare two dicts by value only; keys are assumed to be the same in both.
    :param a: first dict
    :param b: second dict
    :return: list of keys whose values differ between the two dicts
    """
    diffs = []
    for key, value in a.items():
        if value != b[key]:
            diffs.append(key)
    return diffs
+
+
+class NetAppOntapLicense:
+ '''ONTAP license class'''
+
    def __init__(self):
        """Initialize argument spec, select REST vs ZAPI, and pre-validate license codes."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            serial_number=dict(required=False, type='str'),
            remove_unused=dict(default=None, type='bool'),
            remove_expired=dict(default=None, type='bool'),
            license_codes=dict(default=None, type='list', elements='str'),
            license_names=dict(default=None, type='list', elements='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=False,
            required_if=[
                # removing licenses requires at least one of license_codes/license_names
                ('state', 'absent', ['license_codes', 'license_names'], True)],
            required_together=[
                ('serial_number', 'license_names')],
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.license_status = {}
        # list of tuples - original licenses (license_code or NLF contents), and dict of NLF contents (empty dict for legacy codes)
        self.nlfs = []
        # when using REST, just keep a list as returned by GET to use with deepdiff
        self.previous_records = []

        # Set up REST API
        self.rest_api = OntapRestAPI(self.module)
        unsupported_rest_properties = ['remove_unused', 'remove_expired']
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
        # split and scan license codes once, so apply() can rely on self.nlfs
        self.validate_nlfs()
+
    def get_licensing_status(self):
        """
        Check licensing status

        :return: tuple of (dict mapping package name to licensing status,
                 raw REST records or None when using ZAPI)
        :rtype: tuple
        """
        if self.use_rest:
            return self.get_licensing_status_rest()
        license_status = netapp_utils.zapi.NaElement(
            'license-v2-status-list-info')
        result = None
        try:
            result = self.server.invoke_successfully(license_status,
                                                     enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error checking license status: %s" %
                                  to_native(error), exception=traceback.format_exc())

        return_dictionary = {}
        license_v2_status = result.get_child_by_name('license-v2-status')
        if license_v2_status:
            for license_v2_status_info in license_v2_status.get_children():
                package = license_v2_status_info.get_child_content('package')
                status = license_v2_status_info.get_child_content('method')
                return_dictionary[package] = status
        # ZAPI has no equivalent of the REST records list, return None for symmetry
        return return_dictionary, None
+
+ def get_licensing_status_rest(self):
+ api = 'cluster/licensing/licenses'
+ # By default, the GET method only returns licensed packages.
+ # To retrieve all the available package state details, below query is used.
+ query = {'state': 'compliant, noncompliant, unlicensed, unknown'}
+ fields = 'name,state,licenses'
+ records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query, fields)
+ if error:
+ self.module.fail_json(msg=error)
+ current = {'installed_licenses': {}}
+ if records:
+ for package in records:
+ current[package['name']] = package['state']
+ if 'licenses' in package:
+ for license in package['licenses']:
+ installed_license = license.get('installed_license')
+ serial_number = license.get('serial_number')
+ if serial_number and installed_license:
+ if serial_number not in current:
+ current['installed_licenses'][serial_number] = set()
+ current['installed_licenses'][serial_number].add(installed_license)
+ return current, records
+
    def remove_licenses(self, package_name, nlf_dict=None):
        """
        Remove requested licenses
        :param:
          package_name: Name of the license to be deleted
          nlf_dict: optional NLF contents (REST only), identifying the bundle to delete
        :return: True if a license was removed, False if it was not present
        """
        if self.use_rest:
            return self.remove_licenses_rest(package_name, nlf_dict or {})
        license_delete = netapp_utils.zapi.NaElement('license-v2-delete')
        license_delete.add_new_child('serial-number', self.parameters['serial_number'])
        license_delete.add_new_child('package', package_name)
        try:
            self.server.invoke_successfully(license_delete,
                                            enable_tunneling=False)
            return True
        except netapp_utils.zapi.NaApiError as error:
            # Error 15661 - Object not found: treat as "nothing to remove"
            if to_native(error.code) == "15661":
                return False
            else:
                self.module.fail_json(msg="Error removing license %s" %
                                      to_native(error), exception=traceback.format_exc())
+
    def remove_licenses_rest(self, package_name, nlf_dict):
        """
        This is called either with a package name or a NLF dict
        We already validated product and serialNumber are present in nlf_dict
        :return: True if a license was removed, False if it did not exist
        """
        p_serial_number = self.parameters.get('serial_number')
        n_serial_number = nlf_dict.get('serialNumber')
        n_product = nlf_dict.get('product')
        # the serial number from the NLF takes precedence over the module parameter
        serial_number = n_serial_number or p_serial_number
        if not serial_number:
            self.module.fail_json(msg='Error: serial_number is required to delete a license.')
        if n_product:
            error = self.remove_one_license_rest(None, n_product, serial_number)
        elif package_name.endswith(('Bundle', 'Edition')):
            # bundle names are matched as products rather than package names
            error = self.remove_one_license_rest(None, package_name, serial_number)
        else:
            error = self.remove_one_license_rest(package_name, None, serial_number)
        if error and "entry doesn't exist" in error:
            # already absent - not an error, just report no change
            return False
        if error:
            self.module.fail_json(msg="Error removing license for serial number %s and %s: %s"
                                      % (serial_number, n_product or package_name, error))
        return True
+
+ def remove_one_license_rest(self, package_name, product, serial_number):
+ api = 'cluster/licensing/licenses'
+ query = {'serial_number': serial_number}
+ if product:
+ query['licenses.installed_license'] = product.replace(' ', '*')
+ # since this is a query, we need to specify state, or only active licenses are removed
+ query['state'] = '*'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, package_name, query)
+ return error
+
    def remove_unused_licenses(self):
        """
        Remove unused licenses

        ZAPI only (option is rejected with REST); fails the module on API error.
        """
        remove_unused = netapp_utils.zapi.NaElement('license-v2-delete-unused')
        try:
            self.server.invoke_successfully(remove_unused,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error removing unused licenses: %s" %
                                  to_native(error), exception=traceback.format_exc())
+
    def remove_expired_licenses(self):
        """
        Remove expired licenses

        ZAPI only (option is rejected with REST); fails the module on API error.
        """
        remove_expired = netapp_utils.zapi.NaElement(
            'license-v2-delete-expired')
        try:
            self.server.invoke_successfully(remove_expired,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error removing expired licenses: %s" %
                                  to_native(error), exception=traceback.format_exc())
+
    def add_licenses(self):
        """
        Add licenses

        :return: with REST, an error string on a conflict (None otherwise);
                 the ZAPI path always returns None and fails the module on error.
        """
        if self.use_rest:
            return self.add_licenses_rest()
        license_add = netapp_utils.zapi.NaElement('license-v2-add')
        codes = netapp_utils.zapi.NaElement('codes')
        for code in self.parameters['license_codes']:
            # legacy 28 character keys are case-insensitive, normalize to lowercase
            codes.add_new_child('license-code-v2', str(code.strip().lower()))
        license_add.add_child_elem(codes)
        try:
            self.server.invoke_successfully(license_add,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error adding licenses: %s" %
                                  to_native(error), exception=traceback.format_exc())
+
    def add_licenses_rest(self):
        """Install all license codes in a single REST POST.

        Returns the formatted error string when a conflict is reported (the
        caller surfaces it after other changes are applied); fails the module
        on any other error; returns None on success.
        """
        api = 'cluster/licensing/licenses'
        body = {'keys': [x[0] for x in self.nlfs]}
        headers = None
        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
            # request nested errors
            headers = {'X-Dot-Error-Arguments': 'true'}
        dummy, error = rest_generic.post_async(self.rest_api, api, body, headers=headers)
        if error:
            error = self.format_post_error(error, body)
            if 'conflicts' in error:
                return error
            self.module.fail_json(msg="Error adding license: %s - previous license status: %s" % (error, self.license_status))
        return None
+
    def compare_license_status(self, previous_license_status):
        """Return the list of license package names whose status changed.

        Retries up to 5 times because right after an install REST may not yet
        report all packages (seen as a KeyError in local_cmp); also deep
        compares the raw records to catch detail-only changes.
        """
        changed_keys = []
        for __ in range(5):
            error = None
            new_license_status, records = self.get_licensing_status()
            try:
                changed_keys = local_cmp(previous_license_status, new_license_status)
                break
            except KeyError as exc:
                # when a new license is added, it seems REST may not report all licenses
                # wait for things to stabilize
                error = exc
                time.sleep(5)
        if error:
            # new_license_status is always bound here - the loop body runs at least once
            self.module.fail_json(msg='Error: mismatch in license package names: %s. Expected: %s, found: %s.'
                                      % (error, previous_license_status.keys(), new_license_status.keys()))
        if 'installed_licenses' in changed_keys:
            # pseudo entry maintained by get_licensing_status_rest, not a package name
            changed_keys.remove('installed_licenses')
        if records and self.previous_records:
            deep_changed_keys = self.deep_compare(records)
            for key in deep_changed_keys:
                if key not in changed_keys:
                    changed_keys.append(key)
        return changed_keys
+
    def deep_compare(self, records):
        """ look for any change in license details, capacity, expiration, ...
            this is run after apply, so we don't know for sure in check_mode

        :return: list of package names whose records differ; empty list (with a
                 warning) when deepdiff is not installed.
        """
        if not HAS_DEEPDIFF:
            self.module.warn('deepdiff is required to identify detailed changes')
            return []
        diffs = DeepDiff(self.previous_records, records)
        self.rest_api.log_debug('diffs', diffs)
        # deepdiff reports paths as root[<index>]...; collect the affected record indices
        roots = set(re.findall(r'root\[(\d+)\]', str(diffs)))
        result = [records[int(index)]['name'] for index in roots]
        self.rest_api.log_debug('deep_changed_keys', result)
        return result
+
    def reformat_nlf(self, license_code):
        """Re-encode a python-like (single-quoted) NLF payload into valid JSON.

        :return: (json_string, None) on success, (None, error_message) on failure.
        """
        # Ansible converts double quotes into single quotes if the input is python-like
        # and we can't use json loads with single quotes!
        if not HAS_AST or not HAS_JSON:
            return None, "ast and json packages are required to install NLF license files. Import error(s): %s." % IMPORT_ERRORS
        try:
            nlf_dict = ast.literal_eval(license_code)
        except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError) as exc:
            return None, "malformed input: %s, exception: %s" % (license_code, exc)
        try:
            # compact separators: the license is sent as-is to the API
            license_code = json.dumps(nlf_dict, separators=(',', ':'))
        except Exception as exc:
            return None, "unable to encode input: %s - evaluated as %s, exception: %s" % (license_code, nlf_dict, exc)
        return license_code, None
+
    def get_nlf_dict(self, license_code):
        """Detect and parse an NLF license code.

        :return: (nlf_dict, is_nlf, error) - nlf_dict is empty for legacy keys,
                 is_nlf is True when the payload contains "statusResp", and
                 error is a message when the JSON cannot be decoded.
        """
        nlf_dict = {}
        is_nlf = False
        if '"statusResp"' in license_code:
            # a single NLF must contain exactly one statusResp and one serialNumber
            if license_code.count('"statusResp"') > 1:
                self.module.fail_json(msg="Error: NLF license files with multiple licenses are not supported, found %d in %s."
                                          % (license_code.count('"statusResp"'), license_code))
            if license_code.count('"serialNumber"') > 1:
                self.module.fail_json(msg="Error: NLF license files with multiple serial numbers are not supported, found %d in %s."
                                          % (license_code.count('"serialNumber"'), license_code))
            is_nlf = True
            if not HAS_JSON:
                return nlf_dict, is_nlf, "the json package is required to process NLF license files. Import error(s): %s." % IMPORT_ERRORS
            try:
                nlf_dict = json.loads(license_code)
            except Exception as exc:
                return nlf_dict, is_nlf, "the license contents cannot be read. Unable to decode input: %s - exception: %s." % (license_code, exc)
        return nlf_dict, is_nlf, None
+
    def scan_license_codes_for_nlf(self, license_code):
        """Normalize one license code and detect whether it is an NLF.

        Reformats python-like (single-quoted) NLF contents back to JSON, then
        extracts the NLF fields for later serial number / product checks.
        :return: (license_code, nlf_dict, is_nlf)
        """
        more_info = "You %s seeing this error because the original NLF contents were modified by Ansible. You can use the string filter to keep the original."
        transformed = False
        original_license_code = license_code

        if "'statusResp'" in license_code:
            # single quotes indicate Ansible rewrote the NLF - try to restore valid JSON
            license_code, error = self.reformat_nlf(license_code)
            if error:
                error = 'Error: %s %s' % (error, more_info % 'are')
                self.module.fail_json(msg=error)
            transformed = True

        # For an NLF license, extract fields, to later collect serial number and bundle name (product)
        nlf_dict, is_nlf, error = self.get_nlf_dict(license_code)
        if error and transformed:
            error = 'Error: %s. Ansible input: %s %s' % (error, original_license_code, more_info % 'may be')
            self.module.fail_json(msg=error)

        if error:
            # non-fatal: installation proceeds without idempotency, removal is blocked
            msg = "The license " + (
                "will be installed without checking for idempotency." if self.parameters['state'] == 'present' else "cannot be removed.")
            msg += " You are seeing this warning because " + error
            self.module.warn(msg)

        return license_code, nlf_dict, is_nlf
+
+ def split_nlf(self, license_code):
+ """ A NLF file may contain several licenses
+ One license per line
+ Return a list of 1 or more licenses
+ """
+ licenses = license_code.count('"statusResp"')
+ if licenses <= 1:
+ return [license_code]
+ nlfs = license_code.splitlines()
+ if len(nlfs) != licenses:
+ self.module.fail_json(msg="Error: unexpected format found %d entries and %d lines in %s"
+ % (licenses, len(nlfs), license_code))
+ return nlfs
+
+ def split_nlfs(self):
+ """ A NLF file may contain several licenses
+ Return a flattened list of license codes
+ """
+ license_codes = []
+ for license in self.parameters.get('license_codes', []):
+ license_codes.extend(self.split_nlf(license))
+ return license_codes
+
    def validate_nlfs(self):
        """Split license_codes, detect NLFs, and reject unsupported combinations.

        Populates self.nlfs with (code, nlf_dict) tuples; fails when NLFs are
        used with ZAPI or mixed with legacy 28 character keys.
        """
        self.parameters['license_codes'] = self.split_nlfs()
        nlf_count = 0
        # NOTE(review): loop variable shadows the 'license' builtin
        for license in self.parameters['license_codes']:
            nlf, nlf_dict, is_nlf = self.scan_license_codes_for_nlf(license)
            if is_nlf and not self.use_rest:
                self.module.fail_json(msg="Error: NLF license format is not supported with ZAPI.")
            self.nlfs.append((nlf, nlf_dict))
            if is_nlf:
                nlf_count += 1
        # either all codes are NLFs or none are
        if nlf_count and nlf_count != len(self.parameters['license_codes']):
            self.module.fail_json(msg="Error: cannot mix legacy licenses and NLF licenses; found %d NLF licenses out of %d license_codes."
                                      % (nlf_count, len(self.parameters['license_codes'])))
+
+ def get_key(self, error, body):
+ needle = r'Failed to install the license at index (\d+)'
+ matched = re.search(needle, error)
+ if matched:
+ index = int(matched.group(1))
+ return body['keys'][index]
+ return None
+
    def format_post_error(self, error, body):
        """Augment a POST error message when the failure looks like an NLF
        that Ansible reformatted (single quotes indicate the string filter
        was not used)."""
        if 'The system received a licensing request with an invalid digital signature.' in error:
            key = self.get_key(error, body)
            if key and "'statusResp'" in key:
                error = 'Original NLF contents were modified by Ansible. Make sure to use the string filter. REST error: %s' % error
        return error
+
    def nlf_is_installed(self, nlf_dict):
        """ return True if NLF with same SN, product (bundle) name and package list is present
            return False otherwise
            Even when present, the NLF may not be active, so this is only useful for delete
        """
        n_serial_number, n_product = self.get_sn_and_product(nlf_dict)
        if not n_product or not n_serial_number:
            # legacy key or unreadable NLF - cannot match
            return False
        if 'installed_licenses' not in self.license_status:
            # nothing is installed
            return False
        if n_serial_number == '*' and self.parameters['state'] == 'absent':
            # force a delete
            return True
        if n_serial_number not in self.license_status['installed_licenses']:
            return False
        return n_product in self.license_status['installed_licenses'][n_serial_number]
+
+ def get_sn_and_product(self, nlf_dict):
+ # V2 and V1 formats
+ n_serial_number = self.na_helper.safe_get(nlf_dict, ['statusResp', 'serialNumber'])\
+ or self.na_helper.safe_get(nlf_dict, ['statusResp', 'licenses', 'serialNumber'])
+ n_product = self.na_helper.safe_get(nlf_dict, ['statusResp', 'product'])\
+ or self.na_helper.safe_get(nlf_dict, ['statusResp', 'licenses', 'product'])
+ return n_serial_number, n_product
+
+ def validate_delete_action(self, nlf_dict):
+ """ make sure product and serialNumber are set at the top level (V2 format) """
+ # product is required for delete
+ n_serial_number, n_product = self.get_sn_and_product(nlf_dict)
+ if nlf_dict and not n_product:
+ self.module.fail_json(msg='Error: product not found in NLF file %s.' % nlf_dict)
+ # if serial number is not present in the NLF, we could use a module parameter
+ p_serial_number = self.parameters.get('serial_number')
+ if p_serial_number and n_serial_number and p_serial_number != n_serial_number:
+ self.module.fail_json(msg='Error: mismatch is serial numbers %s vs %s' % (p_serial_number, n_serial_number))
+ if nlf_dict and not n_serial_number and not p_serial_number:
+ self.module.fail_json(msg='Error: serialNumber not found in NLF file. It can be set in the module parameter.')
+ nlf_dict['serialNumber'] = n_serial_number or p_serial_number
+ nlf_dict['product'] = n_product
+
    def get_delete_actions(self):
        """Compute which packages and NLFs should be removed.

        :return: (changed, packages_to_delete, nlfs_to_delete)
        """
        packages_to_delete = []
        if self.parameters.get('license_names') is not None:
            for package in list(self.parameters['license_names']):
                # NOTE(review): a package matching both conditions is appended twice;
                # remove_licenses tolerates a missing entry, so this looks harmless - confirm
                if 'installed_licenses' in self.license_status and self.parameters['serial_number'] != '*'\
                   and self.parameters['serial_number'] in self.license_status['installed_licenses']\
                   and package in self.license_status['installed_licenses'][self.parameters['serial_number']]:
                    packages_to_delete.append(package)
                if package in self.license_status:
                    packages_to_delete.append(package)

        # ensure every NLF has product and serialNumber set before matching
        for dummy, nlf_dict in self.nlfs:
            if nlf_dict:
                self.validate_delete_action(nlf_dict)
        nlfs_to_delete = [
            nlf_dict
            for dummy, nlf_dict in self.nlfs
            if self.nlf_is_installed(nlf_dict)
        ]
        # changed whenever any NLF matches, or license_names was provided at all
        return bool(nlfs_to_delete) or bool(self.parameters.get('license_names')), packages_to_delete, nlfs_to_delete
+
+ def get_add_actions(self):
+ """ add licenses unconditionally
+ for legacy licenses we don't know if they are already installed
+ for NLF licenses we don't know if some details have changed (eg capacity, expiration date)
+ """
+ return bool(self.nlfs), [license_code for license_code, dummy in self.nlfs]
+
+ def get_actions(self):
+ changed = False
+ licenses_to_add = []
+ nlfs_to_delete = []
+ remove_license = False
+ packages_to_delete = []
+ nlfs_to_delete = []
+ # Add / Update licenses.
+ self.license_status, self.previous_records = self.get_licensing_status()
+ if self.parameters['state'] == 'absent': # delete
+ changed, packages_to_delete, nlfs_to_delete = self.get_delete_actions()
+ else: # add or update
+ changed, licenses_to_add = self.get_add_actions()
+ if self.parameters.get('remove_unused') is not None:
+ remove_license = True
+ changed = True
+ if self.parameters.get('remove_expired') is not None:
+ remove_license = True
+ changed = True
+ return changed, licenses_to_add, remove_license, packages_to_delete, nlfs_to_delete
+
    def apply(self):
        '''Call add, delete or modify methods'''
        changed, licenses_to_add, remove_license, packages_to_delete, nlfs_to_delete = self.get_actions()
        error, changed_keys = None, []
        if changed and not self.module.check_mode:
            if self.parameters['state'] == 'present':  # execute create
                if licenses_to_add:
                    # add_licenses returns an error string on a REST conflict, None otherwise
                    error = self.add_licenses()
                if self.parameters.get('remove_unused') is not None:
                    self.remove_unused_licenses()
                if self.parameters.get('remove_expired') is not None:
                    self.remove_expired_licenses()
                # not able to detect that a new license is required until we try to install it.
                if licenses_to_add or remove_license:
                    changed_keys = self.compare_license_status(self.license_status)
            # delete actions
            else:
                if nlfs_to_delete:
                    changed_keys.extend([nlf_dict.get("product") for nlf_dict in nlfs_to_delete if self.remove_licenses(None, nlf_dict)])
                if packages_to_delete:
                    changed_keys.extend([package for package in self.parameters['license_names'] if self.remove_licenses(package)])
                # nothing was actually removed
                if not changed_keys:
                    changed = False

        if error:
            # report a partial success as an error, listing what did change
            error = 'Error: ' + (
                'some licenses were updated, but others were in conflict: '
                if changed_keys
                else 'adding licenses: '
            ) + error
            self.module.fail_json(msg=error, changed=changed, updated_licenses=changed_keys)
        self.module.exit_json(changed=changed, updated_licenses=changed_keys)
+
+
def main():
    '''Apply license operations'''
    NetAppOntapLicense().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_local_hosts.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_local_hosts.py
new file mode 100644
index 000000000..5b895bc7b
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_local_hosts.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: na_ontap_local_hosts
+short_description: NetApp ONTAP local hosts
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 22.0.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create or delete or modify local hosts in ONTAP.
+options:
+ state:
+ description:
+ - Whether the specified local hosts should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+ owner:
+ description:
+ - Name of the data SVM or cluster.
+ required: True
+ type: str
+ aliases:
+ description:
+ - The list of aliases.
+ type: list
+ elements: str
+ host:
+ description:
+ - Canonical hostname.
+ - minimum length is 1 and maximum length is 255.
+ type: str
+ address:
+ description:
+ - IPv4/IPv6 address in dotted form.
+ required: True
+ type: str
+"""
+
+EXAMPLES = """
+ - name: Create IP to host mapping
+ netapp.ontap.na_ontap_local_hosts:
+ state: present
+ address: 10.10.10.10
+ host: example.com
+ aliases: ['ex1.com', 'ex2.com']
+ owner: svm1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Modify IP to host mapping
+ netapp.ontap.na_ontap_local_hosts:
+ state: present
+ address: 10.10.10.10
+ owner: svm1
+ host: example1.com
+ aliases: ['ex1.com', 'ex2.com', 'ex3.com']
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete host object
+ netapp.ontap.na_ontap_local_hosts:
+ state: absent
+ address: 10.10.10.10
+ owner: svm1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, netapp_ipaddress
+
+
class NetAppOntapLocalHosts:
    """ object initialize and class methods

    Manages IP-to-hostname mappings (local hosts) for an SVM or the cluster,
    using the REST API only (requires ONTAP 9.10.1 or later).
    """
    def __init__(self):
        """Set up argument spec, normalize the IP address, and check the ONTAP version."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            owner=dict(required=True, type='str'),
            address=dict(required=True, type='str'),
            aliases=dict(required=False, type='list', elements='str'),
            host=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # normalize the address (e.g. compress IPv6) so comparisons are reliable
        self.parameters['address'] = netapp_ipaddress.validate_and_compress_ip_address(self.parameters['address'], self.module)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        # uuid of the owning SVM/cluster, filled in by get_local_host_rest
        self.owner_uuid = None
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_local_hosts', 9, 10, 1)

    def get_local_host_rest(self):
        '''
        Retrieves IP to hostname mapping for SVM of the cluster.

        Returns a normalized dict (address/host/aliases) or None when the
        mapping does not exist; also records the owner uuid for later calls.
        '''
        api = 'name-services/local-hosts'
        query = {'owner.name': self.parameters['owner'],
                 'address': self.parameters['address'],
                 'fields': 'address,hostname,owner.name,owner.uuid,aliases'}
        record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            self.module.fail_json(msg='Error fetching IP to hostname mappings for %s: %s' % (self.parameters['owner'], to_native(error)),
                                  exception=traceback.format_exc())
        if record:
            self.owner_uuid = record['owner']['uuid']
            return {
                'address': self.na_helper.safe_get(record, ['address']),
                'host': self.na_helper.safe_get(record, ['hostname']),
                'aliases': self.na_helper.safe_get(record, ['aliases'])
            }
        return record

    def create_local_host_rest(self):
        '''
        Creates a new IP to hostname mapping.
        '''
        api = 'name-services/local-hosts'
        body = {'owner.name': self.parameters.get('owner'),
                'address': self.parameters.get('address'),
                'hostname': self.parameters.get('host')}
        if 'aliases' in self.parameters:
            body['aliases'] = self.parameters.get('aliases')
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg='Error creating IP to hostname mappings for %s: %s' % (self.parameters['owner'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_local_host_rest(self, modify):
        '''
        For a specified SVM and IP address, modifies the corresponding IP to hostname mapping.

        :param modify: attributes to change, as computed by get_modified_attributes.
        '''
        body = {}
        if 'aliases' in modify:
            body['aliases'] = self.parameters['aliases']
        if 'host' in modify:
            body['hostname'] = self.parameters['host']
        # owner_uuid was recorded by get_local_host_rest when the record was found
        api = 'name-services/local-hosts/%s/%s' % (self.owner_uuid, self.parameters['address'])
        if body:
            dummy, error = rest_generic.patch_async(self.rest_api, api, None, body)
            if error:
                self.module.fail_json(msg='Error updating IP to hostname mappings for %s: %s' % (self.parameters['owner'], to_native(error)),
                                      exception=traceback.format_exc())

    def delete_local_host_rest(self):
        '''
        vserver services name-service dns hosts delete.
        '''
        api = 'name-services/local-hosts/%s/%s' % (self.owner_uuid, self.parameters['address'])
        dummy, error = rest_generic.delete_async(self.rest_api, api, None)
        if error:
            self.module.fail_json(msg='Error deleting IP to hostname mappings for %s: %s' % (self.parameters['owner'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        '''Determine and run the required create/modify/delete action.'''
        cd_action = None
        current = self.get_local_host_rest()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # modify is only computed when no create/delete action is pending
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_local_host_rest()
            elif cd_action == 'delete':
                self.delete_local_host_rest()
            elif modify:
                self.modify_local_host_rest(modify)
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """ Create object and call apply """
    NetAppOntapLocalHosts().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_log_forward.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_log_forward.py
new file mode 100644
index 000000000..2ad66eb0b
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_log_forward.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_log_forward
+short_description: NetApp ONTAP Log Forward Configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.2.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete or modify the log forward configuration
+options:
+ state:
+ description:
+ - Whether the log forward configuration should exist or not
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ destination:
+ description:
+ - Destination address that the log messages will be forwarded to. Can be a hostname or IP address.
+ required: true
+ type: str
+
+ port:
+ description:
+ - The destination port used to forward the message.
+ required: true
+ type: int
+
+ facility:
+ description:
+ - Facility code used to indicate the type of software that generated the message.
+ type: str
+ choices: ['kern', 'user', 'local0', 'local1', 'local2', 'local3', 'local4', 'local5', 'local6', 'local7']
+
+ force:
+ description:
+ - Skip the Connectivity Test
+ type: bool
+
+ protocol:
+ description:
+ - Log Forwarding Protocol
+ choices: ['udp_unencrypted', 'tcp_unencrypted', 'tcp_encrypted']
+ type: str
+
+ verify_server:
+ description:
+ - Verify Destination Server Identity
+ type: bool
+'''
+
+EXAMPLES = """
+- name: Create log forward configuration
+ na_ontap_log_forward:
+ state: present
+ destination: 10.11.12.13
+ port: 514
+ protocol: udp_unencrypted
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+- name: Modify log forward configuration
+ na_ontap_log_forward:
+ state: present
+ destination: 10.11.12.13
+ port: 514
+ protocol: tcp_unencrypted
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+- name: Delete log forward configuration
+ na_ontap_log_forward:
+ state: absent
+ destination: 10.11.12.13
+ port: 514
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapLogForward(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ destination=dict(required=True, type='str'),
+ port=dict(required=True, type='int'),
+ facility=dict(required=False, type='str', choices=['kern', 'user', 'local0', 'local1', 'local2', 'local3', 'local4', 'local5', 'local6', 'local7']),
+ force=dict(required=False, type='bool'),
+ protocol=dict(required=False, type='str', choices=['udp_unencrypted', 'tcp_unencrypted', 'tcp_encrypted']),
+ verify_server=dict(required=False, type='bool')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_log_forward_config(self):
+ """
+ gets log forward configuration
+ :return: dict of log forward properties if exist, None if not
+ """
+
+ if self.use_rest:
+ log_forward_config = None
+ api = "security/audit/destinations"
+ query = {'fields': 'port,protocol,facility,address,verify_server',
+ 'address': self.parameters['destination'],
+ 'port': self.parameters['port']}
+
+ message, error = self.rest_api.get(api, query)
+ if error:
+ self.module.fail_json(msg=error)
+ if len(message.keys()) == 0:
+ return None
+ elif 'records' in message and len(message['records']) == 0:
+ return None
+ elif 'records' not in message:
+ error = "Unexpected response in get_security_key_manager from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ log_forward_config = {
+ 'destination': message['records'][0]['address'],
+ 'facility': message['records'][0]['facility'],
+ 'port': message['records'][0]['port'],
+ 'protocol': message['records'][0]['protocol'],
+ 'verify_server': message['records'][0]['verify_server']
+ }
+
+ return log_forward_config
+
+ else:
+ log_forward_config = None
+
+ log_forward_get = netapp_utils.zapi.NaElement('cluster-log-forward-get')
+ log_forward_get.add_new_child('destination', self.parameters['destination'])
+ log_forward_get.add_new_child('port', self.na_helper.get_value_for_int(False, self.parameters['port']))
+
+ try:
+ result = self.server.invoke_successfully(log_forward_get, True)
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == "15661":
+ # config doesnt exist
+ return None
+ else:
+ self.module.fail_json(
+ msg='Error getting log forward configuration for destination %s on port %s: %s' %
+ (self.parameters['destination'], self.na_helper.get_value_for_int(False, self.parameters['port']), to_native(error)),
+ exception=traceback.format_exc()
+ )
+
+ if result.get_child_by_name('attributes'):
+ log_forward_attributes = result.get_child_by_name('attributes')
+ cluster_log_forward_info = log_forward_attributes.get_child_by_name('cluster-log-forward-info')
+ log_forward_config = {
+ 'destination': cluster_log_forward_info.get_child_content('destination'),
+ 'facility': cluster_log_forward_info.get_child_content('facility'),
+ 'port': self.na_helper.get_value_for_int(True, cluster_log_forward_info.get_child_content('port')),
+ 'protocol': cluster_log_forward_info.get_child_content('protocol'),
+ 'verify_server': self.na_helper.get_value_for_bool(True, cluster_log_forward_info.get_child_content('verify-server'))
+ }
+
+ return log_forward_config
+
+ def create_log_forward_config(self):
+ """
+ Creates a log forward config
+ :return: nothing
+ """
+
+ if self.use_rest:
+ api = "security/audit/destinations"
+ body = dict()
+ body['address'] = self.parameters['destination']
+ body['port'] = self.parameters['port']
+
+ for attr in ('protocol', 'facility', 'verify_server', 'force'):
+ if attr in self.parameters:
+ body[attr] = self.parameters[attr]
+
+ dummy, error = self.rest_api.post(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ else:
+ log_forward_config_obj = netapp_utils.zapi.NaElement('cluster-log-forward-create')
+ log_forward_config_obj.add_new_child('destination', self.parameters['destination'])
+ log_forward_config_obj.add_new_child('port', self.na_helper.get_value_for_int(False, self.parameters['port']))
+
+ if 'facility' in self.parameters:
+ log_forward_config_obj.add_new_child('facility', self.parameters['facility'])
+
+ if 'force' in self.parameters:
+ log_forward_config_obj.add_new_child('force', self.na_helper.get_value_for_bool(False, self.parameters['force']))
+
+ if 'protocol' in self.parameters:
+ log_forward_config_obj.add_new_child('protocol', self.parameters['protocol'])
+
+ if 'verify_server' in self.parameters:
+ log_forward_config_obj.add_new_child('verify-server', self.na_helper.get_value_for_bool(False, self.parameters['verify_server']))
+
+ try:
+ self.server.invoke_successfully(log_forward_config_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating log forward config with destination %s on port %s: %s' %
+ (self.parameters['destination'], self.na_helper.get_value_for_int(False, self.parameters['port']), to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_log_forward_config(self):
+ # need to recreate as protocol can't be changed
+ self.destroy_log_forward_config()
+ self.create_log_forward_config()
+
+ def destroy_log_forward_config(self):
+ """
+ Delete a log forward configuration
+ :return: nothing
+ """
+ if self.use_rest:
+
+ api = "security/audit/destinations/%s/%s" % (self.parameters['destination'], self.parameters['port'])
+ body = None
+ query = {'return_timeout': 3}
+ dummy, error = self.rest_api.delete(api, body, query)
+ if error:
+ self.module.fail_json(msg=error)
+
+ else:
+ log_forward_config_obj = netapp_utils.zapi.NaElement('cluster-log-forward-destroy')
+ log_forward_config_obj.add_new_child('destination', self.parameters['destination'])
+ log_forward_config_obj.add_new_child('port', self.na_helper.get_value_for_int(False, self.parameters['port']))
+
+ try:
+ self.server.invoke_successfully(log_forward_config_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error destroying log forward destination %s on port %s: %s' %
+ (self.parameters['destination'], self.na_helper.get_value_for_int(False, self.parameters['port']), to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ current = self.get_log_forward_config()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = None
+
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_log_forward_config()
+ elif cd_action == 'delete':
+ self.destroy_log_forward_config()
+ elif modify:
+ self.modify_log_forward_config()
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+    """
+    Entry point: create the module object and apply the action requested by the playbook.
+    """
+    command = NetAppOntapLogForward()
+    command.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py
new file mode 100644
index 000000000..099cea8b9
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+
+# (c) 2020-2023, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_login_messages
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_login_messages
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.1.0'
+short_description: Setup login banner and message of the day
+description:
+ - This module allows you to manipulate login banner and motd for a vserver
+options:
+ banner:
+ description:
+ - Login banner Text message.
+ type: str
+ vserver:
+ description:
+ - The name of the SVM login messages should be set for.
+      - With ZAPI, this option is required. This is a cluster or data SVM.
+ - With REST, this is a data SVM.
+ - With REST, cluster scope is assumed when this option is absent.
+ type: str
+ motd_message:
+ description:
+ - MOTD Text message.
+ - message is deprecated and will be removed to avoid a conflict with an Ansible internal variable.
+ type: str
+ aliases:
+ - message
+ show_cluster_motd:
+ description:
+ - Set to I(false) if Cluster-level Message of the Day should not be shown
+ type: bool
+ default: True
+'''
+
+EXAMPLES = """
+
+ - name: modify banner vserver
+ netapp.ontap.na_ontap_login_messages:
+ vserver: trident_svm
+ banner: this is trident vserver
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+ - name: modify motd vserver
+ netapp.ontap.na_ontap_login_messages:
+ vserver: trident_svm
+ motd_message: this is trident vserver
+ show_cluster_motd: True
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+ - name: modify motd cluster - REST
+ netapp.ontap.na_ontap_login_messages:
+ motd_message: this is a cluster motd with REST
+ show_cluster_motd: True
+ username: "{{ username }}"
+ password: "{{ password }}"
+ hostname: "{{ hostname }}"
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver
+
+
+class NetAppOntapLoginMessages:
+    """
+    modify and delete login banner and motd
+    """
+
+    def __init__(self):
+        self.use_rest = False
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            vserver=dict(type='str'),
+            banner=dict(type='str'),
+            motd_message=dict(type='str', aliases=['message']),
+            show_cluster_motd=dict(default=True, type='bool')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True,
+            required_one_of=[['show_cluster_motd', 'banner', 'motd_message']]
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = OntapRestAPI(self.module)
+        if self.rest_api.is_rest():
+            self.use_rest = True
+        else:
+            # ZAPI requires netapp-lib and an explicit vserver (cluster or data SVM)
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            if not self.parameters.get('vserver'):
+                self.module.fail_json(msg="Error: vserver is a required parameter when using ZAPI.")
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+        if 'message' in self.parameters:
+            self.module.warn('Error: "message" option conflicts with Ansible internal variable - please use "motd_message".')
+
+    def get_banner_motd(self):
+        """
+        Fetch the current banner/motd settings, via REST (cluster or SVM scope)
+        or ZAPI (SVM scope only).
+        :return: dict with banner, motd_message, show_cluster_motd (and uuid with REST)
+        """
+        if self.use_rest:
+            api = 'security/login/messages'
+            query = {
+                'fields': 'banner,message,show_cluster_message,uuid',
+                'scope': 'cluster'
+            }
+            vserver = self.parameters.get('vserver')
+            if vserver:
+                # cluster scope is assumed when vserver is absent
+                query['scope'] = 'svm'
+                query['svm.name'] = vserver
+
+            record, error = rest_generic.get_one_record(self.rest_api, api, query)
+            if error:
+                self.module.fail_json(msg='Error fetching login_banner info: %s' % error)
+            if record is None and vserver is None:
+                self.module.fail_json(msg='Error fetching login_banner info for cluster - no data.')
+            return self.form_current(record)
+
+        # ZAPI
+        motd, show_cluster_motd = self.get_motd_zapi()
+        return {
+            'banner': self.get_login_banner_zapi(),
+            'motd_message': motd,
+            'show_cluster_motd': show_cluster_motd
+        }
+
+    def form_current(self, record):
+        """
+        Normalize a REST record into the module's current-state dict,
+        handling trailing newlines so idempotency is preserved.
+        """
+        return_result = {
+            'banner': '',
+            'motd_message': '',
+            # we need the SVM UUID to add banner or motd if they are not present
+            'uuid': record['uuid'] if record else self.get_svm_uuid(self.parameters.get('vserver')),
+            'show_cluster_motd': record.get('show_cluster_message') if record else None
+        }
+        # by default REST adds a trailing \n if no trailing \n set in desired message/banner.
+        # rstrip \n only when desired message/banner does not have trailing \n to preserve idempotency.
+        if record and record.get('banner'):
+            if self.parameters.get('banner', '').endswith('\n'):
+                return_result['banner'] = record['banner']
+            else:
+                return_result['banner'] = record['banner'].rstrip('\n')
+        if record and record.get('message'):
+            if self.parameters.get('motd_message', '').endswith('\n'):
+                return_result['motd_message'] = record['message']
+            else:
+                return_result['motd_message'] = record['message'].rstrip('\n')
+        return return_result
+
+    def get_login_banner_zapi(self):
+        """Return the current login banner for the vserver via ZAPI, '' if unset, None if no record."""
+        login_banner_get_iter = netapp_utils.zapi.NaElement('vserver-login-banner-get-iter')
+        query = netapp_utils.zapi.NaElement('query')
+        login_banner_info = netapp_utils.zapi.NaElement('vserver-login-banner-info')
+        login_banner_info.add_new_child('vserver', self.parameters['vserver'])
+        query.add_child_elem(login_banner_info)
+        login_banner_get_iter.add_child_elem(query)
+        try:
+            result = self.server.invoke_successfully(login_banner_get_iter, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching login_banner info: %s' % to_native(error),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+            login_banner_info = result.get_child_by_name('attributes-list').get_child_by_name(
+                'vserver-login-banner-info')
+            banner = login_banner_info.get_child_content('message')
+            banner = str(banner).rstrip()
+            # if the message is '-' that means the banner doesn't exist.
+            if banner in ('-', 'None'):
+                banner = ''
+            return banner
+        return None
+
+    def get_motd_zapi(self):
+        """Return (motd_message, show_cluster_motd) for the vserver via ZAPI; ('', False) if no record."""
+        motd_get_iter = netapp_utils.zapi.NaElement('vserver-motd-get-iter')
+        query = netapp_utils.zapi.NaElement('query')
+        motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
+        motd_info.add_new_child('vserver', self.parameters['vserver'])
+        query.add_child_elem(motd_info)
+        motd_get_iter.add_child_elem(query)
+        try:
+            result = self.server.invoke_successfully(motd_get_iter, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching motd info: %s' % to_native(error),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and \
+                int(result.get_child_content('num-records')) > 0:
+            motd_info = result.get_child_by_name('attributes-list').get_child_by_name(
+                'vserver-motd-info')
+            motd_message = motd_info.get_child_content('message')
+            motd_message = str(motd_message).rstrip()
+            if motd_message == 'None':
+                motd_message = ''
+            show_cluster_motd = motd_info.get_child_content('is-cluster-message-enabled') == 'true'
+            return motd_message, show_cluster_motd
+        return '', False
+
+    def modify_rest(self, modify, uuid):
+        """
+        PATCH banner/motd settings via REST for the record identified by uuid.
+        Only attributes present in the modify dict are sent.
+        """
+        body = {
+        }
+        if 'banner' in modify:
+            body['banner'] = modify['banner']
+        if 'motd_message' in modify:
+            body['message'] = modify['motd_message']
+        if modify.get('show_cluster_motd') is not None:
+            body['show_cluster_message'] = modify['show_cluster_motd']
+        if body:
+            api = 'security/login/messages'
+            dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
+            if error:
+                keys = list(body.keys())
+                self.module.fail_json(msg='Error modifying %s: %s' % (', '.join(keys), error))
+
+    def modify_banner(self, modify):
+        """Modify the login banner for the vserver via ZAPI."""
+        login_banner_modify = netapp_utils.zapi.NaElement('vserver-login-banner-modify-iter')
+        login_banner_modify.add_new_child('message', modify['banner'])
+        query = netapp_utils.zapi.NaElement('query')
+        login_banner_info = netapp_utils.zapi.NaElement('vserver-login-banner-info')
+        login_banner_info.add_new_child('vserver', self.parameters['vserver'])
+        query.add_child_elem(login_banner_info)
+        login_banner_modify.add_child_elem(query)
+        try:
+            self.server.invoke_successfully(login_banner_modify, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as err:
+            self.module.fail_json(msg="Error modifying login_banner: %s" % (to_native(err)),
+                                  exception=traceback.format_exc())
+
+    def modify_motd(self, modify):
+        """Modify the motd message and/or show_cluster_motd flag for the vserver via ZAPI."""
+        motd_create = netapp_utils.zapi.NaElement('vserver-motd-modify-iter')
+        if modify.get('motd_message') is not None:
+            motd_create.add_new_child('message', modify['motd_message'])
+        if modify.get('show_cluster_motd') is not None:
+            motd_create.add_new_child('is-cluster-message-enabled', 'true' if modify['show_cluster_motd'] is True else 'false')
+        query = netapp_utils.zapi.NaElement('query')
+        motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
+        motd_info.add_new_child('vserver', self.parameters['vserver'])
+        query.add_child_elem(motd_info)
+        motd_create.add_child_elem(query)
+        try:
+            self.server.invoke_successfully(motd_create, enable_tunneling=False)
+        except netapp_utils.zapi.NaApiError as err:
+            self.module.fail_json(msg="Error modifying motd: %s" % (to_native(err)),
+                                  exception=traceback.format_exc())
+
+    def get_svm_uuid(self, vserver):
+        """
+        Get a svm's uuid
+        :return: uuid of the svm
+        """
+        uuid, error = rest_vserver.get_vserver_uuid(self.rest_api, vserver)
+        if error is not None:
+            self.module.fail_json(msg="Error fetching vserver %s: %s" % (vserver, error))
+        if uuid is None:
+            self.module.fail_json(msg="Error fetching vserver %s. Please make sure vserver name is correct. For cluster vserver, don't set vserver."
+                                  % vserver)
+        return uuid
+
+    def apply(self):
+        """Compare current and desired settings and apply any modification, honoring check mode."""
+        current = self.get_banner_motd()
+        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if self.na_helper.changed and not self.module.check_mode:
+            if self.use_rest:
+                self.modify_rest(modify, current['uuid'])
+            else:
+                # ZAPI: banner and motd are separate APIs
+                if modify.get('banner') is not None:
+                    self.modify_banner(modify)
+                if modify.get('show_cluster_motd') is not None or modify.get('motd_message') is not None:
+                    self.modify_motd(modify)
+        result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    '''Entry point: create the module object and apply the action requested by the playbook.'''
+    messages_obj = NetAppOntapLoginMessages()
+    messages_obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py
new file mode 100644
index 000000000..c0fb796f7
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py
@@ -0,0 +1,1270 @@
+#!/usr/bin/python
+
+# (c) 2017-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_lun
+
+short_description: NetApp ONTAP manage LUNs
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create, destroy, resize LUNs on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified LUN should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the LUN to manage.
+ - Or LUN group name (volume name) when san_application_template is used.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - The name of the LUN to be renamed.
+ type: str
+ version_added: 20.12.0
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the LUN should exist on.
+ - Required if san_application_template is not present.
+ - Not allowed if san_application_template is present.
+ type: str
+
+ size:
+ description:
+ - The size of the LUN in C(size_unit).
+ - Required when creating a single LUN if application template is not used.
+ type: int
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+ type: str
+
+ comment:
+ description:
+ - Optional descriptive comment for the LUN.
+ type: str
+ version_added: 21.2.0
+
+ force_resize:
+ description:
+ - Forcibly reduce the size. This is required for reducing the size of the LUN to avoid accidentally
+ reducing the LUN size.
+ type: bool
+
+ force_remove:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed if it is online and mapped.
+ - If "false", destroying an online and mapped LUN will fail.
+ type: bool
+ default: false
+
+ force_remove_fenced:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed while it is fenced.
+ - If "false", attempting to destroy a fenced LUN will fail.
+ - The default if not specified is "false". This field is available in Data ONTAP 8.2 and later.
+ type: bool
+
+ vserver:
+ required: true
+ description:
+ - The name of the vserver to use.
+ type: str
+
+ os_type:
+ description:
+ - The os type for the LUN.
+ type: str
+ aliases: ['ostype']
+
+ qos_policy_group:
+ description:
+ - The QoS policy group to be set on the LUN.
+ - With REST, qos_policy_group and qos_adaptive_policy_group are handled as QOS policy.
+ type: str
+ version_added: 20.12.0
+
+ qos_adaptive_policy_group:
+ description:
+ - The adaptive QoS policy group to be set on the LUN.
+ - Defines measurable service level objectives (SLOs) and service level agreements (SLAs) that adjust based on the LUN's allocated space or used space.
+ - Requires ONTAP 9.4 or later.
+ - With REST, qos_policy_group and qos_adaptive_policy_group are handled as QOS policy.
+ type: str
+ version_added: 21.2.0
+
+ space_reserve:
+ description:
+ - This can be set to "false" which will create a LUN without any space being reserved.
+ type: bool
+ default: true
+
+ space_allocation:
+ description:
+ - This enables support for the SCSI Thin Provisioning features. If the Host and file system do
+ not support this do not enable it.
+ type: bool
+ version_added: 2.7.0
+
+ use_exact_size:
+ description:
+ - This can be set to "false" which will round the LUN >= 450g.
+ type: bool
+ default: true
+ version_added: 20.11.0
+
+ san_application_template:
+ description:
+ - additional options when using the application/applications REST API to create LUNs.
+ - the module is using ZAPI by default, and switches to REST if san_application_template is present.
+ - create one or more LUNs (and the associated volume as needed).
+ - operations at the LUN level are supported, they require to know the LUN short name.
+ - this requires ONTAP 9.8 or higher.
+ - The module partially supports ONTAP 9.7 for create and delete operations, but not for modify (API limitations).
+ type: dict
+ version_added: 20.12.0
+ suboptions:
+ name:
+ description: name of the SAN application.
+ type: str
+ required: true
+ igroup_name:
+ description: name of the initiator group through which the contents of this application will be accessed.
+ type: str
+ lun_count:
+ description: number of LUNs in the application component (1 to 32).
+ type: int
+ protection_type:
+ description:
+        - The snapshot policy for the volume supporting the LUNs.
+ type: dict
+ suboptions:
+ local_policy:
+ description:
+ - The snapshot copy policy for the volume.
+ type: str
+ storage_service:
+ description:
+ - The performance service level (PSL) for this volume
+ type: str
+ choices: ['value', 'performance', 'extreme']
+ tiering:
+ description:
+ - Cloud tiering policy.
+ type: dict
+ suboptions:
+ control:
+ description: Storage tiering placement rules for the container.
+ choices: ['required', 'best_effort', 'disallowed']
+ type: str
+ policy:
+ description:
+ - Cloud tiering policy.
+ choices: ['all', 'auto', 'none', 'snapshot-only']
+ type: str
+ object_stores:
+ description: list of object store names for tiering.
+ type: list
+ elements: str
+ total_size:
+ description:
+ - The total size of the application component, split across the member LUNs in C(total_size_unit).
+ - Recommended when C(lun_count) is present.
+ - Required when C(lun_count) is present and greater than 1.
+ - Note - if lun_count is equal to 1, and total_size is not present, size is used to maintain backward compatibility.
+ type: int
+ version_added: 21.1.0
+ total_size_unit:
+ description:
+ - The unit used to interpret the total_size parameter.
+ - Defaults to size_unit if not present.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ version_added: 21.1.0
+ use_san_application:
+ description:
+ - Whether to use the application/applications REST/API to create LUNs.
+ - This will default to true if any other suboption is present.
+ type: bool
+ default: true
+ scope:
+ description:
+ - whether the top level name identifies a single LUN or a LUN group (application).
+ - By default, the module will try to make the right choice, but can report extra warnings.
+ - Setting scope to 'application' is required to convert an existing volume to a smart container.
+ - The module reports an error when 'lun' or 'application' is used and the desired action cannot be completed.
+ - The module issues warnings when the default 'auto' is used, and there is ambiguity regarding the desired actions.
+ type: str
+ choices: ['application', 'auto', 'lun']
+ default: auto
+ version_added: 21.2.0
+ exclude_aggregates:
+ description:
+ - The list of aggregate names to exclude when creating a volume.
+ - Requires ONTAP 9.9.1 GA or better.
+ type: list
+ elements: str
+ version_added: 21.7.0
+'''
+
+EXAMPLES = """
+- name: Create LUN
+ netapp.ontap.na_ontap_lun:
+ state: present
+ name: ansibleLUN
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: mb
+ os_type: linux
+ space_reserve: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Resize LUN
+ netapp.ontap.na_ontap_lun:
+ state: present
+ name: ansibleLUN
+ force_resize: true
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: gb
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Create LUNs using SAN application
+ tags: create
+ netapp.ontap.na_ontap_lun:
+ state: present
+ name: ansibleLUN
+ size: 15
+ size_unit: mb
+ os_type: linux
+ space_reserve: false
+ san_application_template:
+ name: san-ansibleLUN
+ igroup_name: testme_igroup
+ lun_count: 3
+ protection_type:
+ local_policy: default
+ exclude_aggregates: aggr0
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Convert existing volume to SAN application
+ tags: create
+ netapp.ontap.na_ontap_lun:
+ state: present
+ name: someVolume
+ size: 22
+ size_unit: mb
+ os_type: linux
+ space_reserve: false
+ san_application_template:
+ name: san-ansibleLUN
+ igroup_name: testme_igroup
+ lun_count: 3
+ protection_type:
+ local_policy: default
+ scope: application
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+import copy
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.rest_application import RestApplication
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_volume
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapLUN:
+ ''' create, modify, delete LUN '''
+
    def __init__(self):
        """Define the argument spec, normalize parameters, and select the REST or ZAPI backend."""

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            from_name=dict(required=False, type='str'),
            size=dict(type='int'),
            size_unit=dict(default='gb',
                           choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
                                    'pb', 'eb', 'zb', 'yb'], type='str'),
            comment=dict(required=False, type='str'),
            force_resize=dict(type='bool'),
            force_remove=dict(required=False, type='bool', default=False),
            force_remove_fenced=dict(type='bool'),
            flexvol_name=dict(type='str'),
            vserver=dict(required=True, type='str'),
            os_type=dict(required=False, type='str', aliases=['ostype']),
            qos_policy_group=dict(required=False, type='str'),
            qos_adaptive_policy_group=dict(required=False, type='str'),
            space_reserve=dict(required=False, type='bool', default=True),
            space_allocation=dict(required=False, type='bool'),
            use_exact_size=dict(required=False, type='bool', default=True),
            san_application_template=dict(type='dict', options=dict(
                use_san_application=dict(type='bool', default=True),
                exclude_aggregates=dict(type='list', elements='str'),
                name=dict(required=True, type='str'),
                igroup_name=dict(type='str'),
                lun_count=dict(type='int'),
                protection_type=dict(type='dict', options=dict(
                    local_policy=dict(type='str'),
                )),
                storage_service=dict(type='str', choices=['value', 'performance', 'extreme']),
                tiering=dict(type='dict', options=dict(
                    control=dict(type='str', choices=['required', 'best_effort', 'disallowed']),
                    policy=dict(type='str', choices=['all', 'auto', 'none', 'snapshot-only']),
                    object_stores=dict(type='list', elements='str')  # create only
                )),
                total_size=dict(type='int'),
                total_size_unit=dict(choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
                                              'pb', 'eb', 'zb', 'yb'], type='str'),
                scope=dict(type='str', choices=['application', 'auto', 'lun'], default='auto'),
            ))
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            mutually_exclusive=[('qos_policy_group', 'qos_adaptive_policy_group')]
        )

        # set up state variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # normalize sizes to bytes; total_size falls back on size_unit when no total_size_unit was given
        if self.parameters.get('size') is not None:
            self.parameters['size'] *= netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']]
        if self.na_helper.safe_get(self.parameters, ['san_application_template', 'total_size']) is not None:
            unit = self.na_helper.safe_get(self.parameters, ['san_application_template', 'total_size_unit'])
            if unit is None:
                unit = self.parameters['size_unit']
            self.parameters['san_application_template']['total_size'] *= netapp_utils.POW2_BYTE_MAP[unit]

        self.debug = {}
        # uuid of the LUN, populated from a REST record by set_uuid(); required for REST PATCH/DELETE
        self.uuid = None
        # self.debug['got'] = 'empty' # uncomment to enable collecting data

        self.rest_api = OntapRestAPI(self.module)
        # use_exact_size is defaulted to true, but not supported with REST. To get around this we will ignore the variable in rest.
        unsupported_rest_properties = ['force_resize', 'force_remove_fenced']
        partially_supported_rest_properties = [['san_application_template', (9, 7)],
                                               ['space_allocation', (9, 10)]]
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties,
                                                                   partially_supported_rest_properties)
        if self.use_rest:
            self.parameters.pop('use_exact_size')
            # REST exposes a single QOS field, so fold the adaptive variant into qos_policy_group
            if self.parameters.get('qos_adaptive_policy_group') is not None:
                self.parameters['qos_policy_group'] = self.parameters.pop('qos_adaptive_policy_group')
        else:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
            # set default value for ZAPI only supported options.
            if self.parameters.get('force_resize') is None:
                self.parameters['force_resize'] = False
            if self.parameters.get('force_remove_fenced') is None:
                self.parameters['force_remove_fenced'] = False

        # REST API for application/applications if needed
        self.rest_app = self.setup_rest_application()
+
+ def setup_rest_application(self):
+ use_application_template = self.na_helper.safe_get(self.parameters, ['san_application_template', 'use_san_application'])
+ rest_app = None
+ if self.use_rest:
+ if use_application_template:
+ if self.parameters.get('flexvol_name') is not None:
+ self.module.fail_json(msg="'flexvol_name' option is not supported when san_application_template is present")
+ name = self.na_helper.safe_get(self.parameters, ['san_application_template', 'name'], allow_sparse_dict=False)
+ rest_app = RestApplication(self.rest_api, self.parameters['vserver'], name)
+ elif self.parameters.get('flexvol_name') is None:
+ self.module.fail_json(msg="flexvol_name option is required when san_application_template is not present")
+ else:
+ if use_application_template:
+ self.module.fail_json(msg="Error: using san_application_template requires ONTAP 9.7 or later and REST must be enabled.")
+ if self.parameters.get('flexvol_name') is None:
+ self.module.fail_json(msg="Error: 'flexvol_name' option is required when using ZAPI.")
+ return rest_app
+
    def get_luns(self, lun_path=None):
        """
        Return list of LUNs matching vserver and volume names.

        REST is delegated to get_luns_rest.  With ZAPI, records are collected
        by paging through lun-get-iter using the 'next-tag' cursor.

        :param lun_path: when set, query by exact LUN path instead of by flexvol.
        :return: list of LUNs in XML format.
        :rtype: list
        """
        if self.use_rest:
            return self.get_luns_rest(lun_path)
        luns = []
        tag = None

        # query by LUN path when given, otherwise by containing flexvol
        query_details = netapp_utils.zapi.NaElement('lun-info')
        query_details.add_new_child('vserver', self.parameters['vserver'])
        if lun_path is not None:
            query_details.add_new_child('lun_path', lun_path)
        else:
            query_details.add_new_child('volume', self.parameters['flexvol_name'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)

        # loop until the server stops returning a next-tag
        while True:
            lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
            lun_info.add_child_elem(query)
            if tag:
                lun_info.add_new_child('tag', tag, True)

            try:
                result = self.server.invoke_successfully(lun_info, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as exc:
                self.module.fail_json(msg="Error fetching luns for %s: %s" %
                                      (self.parameters['flexvol_name'] if lun_path is None else lun_path, to_native(exc)),
                                      exception=traceback.format_exc())

            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
                attr_list = result.get_child_by_name('attributes-list')
                luns.extend(attr_list.get_children())
            tag = result.get_child_content('next-tag')
            if tag is None:
                break
        return luns
+
+ def get_lun_details(self, lun):
+ """
+ Extract LUN details, from XML to python dict
+
+ :return: Details about the lun
+ :rtype: dict
+ """
+ if self.use_rest:
+ return lun
+ return_value = {'size': int(lun.get_child_content('size'))}
+ bool_attr_map = {
+ 'is-space-alloc-enabled': 'space_allocation',
+ 'is-space-reservation-enabled': 'space_reserve'
+ }
+ for attr in bool_attr_map:
+ value = lun.get_child_content(attr)
+ if value is not None:
+ return_value[bool_attr_map[attr]] = self.na_helper.get_value_for_bool(True, value)
+ str_attr_map = {
+ 'comment': 'comment',
+ 'multiprotocol-type': 'os_type',
+ 'name': 'name',
+ 'path': 'path',
+ 'qos-policy-group': 'qos_policy_group',
+ 'qos-adaptive-policy-group': 'qos_adaptive_policy_group',
+ }
+ for attr in str_attr_map:
+ value = lun.get_child_content(attr)
+ if value is None and attr in ('comment', 'qos-policy-group', 'qos-adaptive-policy-group'):
+ value = ''
+ if value is not None:
+ return_value[str_attr_map[attr]] = value
+
+ return return_value
+
+ def find_lun(self, luns, name, lun_path=None):
+ """
+ Return lun record matching name or path
+
+ :return: lun record
+ :rtype: XML for ZAPI, dict for REST, or None if not found
+ """
+ if luns:
+ for lun in luns:
+ path = lun['path']
+ if lun_path is None:
+ if name == path:
+ return lun
+ _rest, _splitter, found_name = path.rpartition('/')
+ if found_name == name:
+ return lun
+ elif lun_path == path:
+ return lun
+ return None
+
+ def get_lun(self, name, lun_path=None):
+ """
+ Return details about the LUN
+
+ :return: Details about the lun
+ :rtype: dict
+ """
+ luns = self.get_luns(lun_path)
+ lun = self.find_lun(luns, name, lun_path)
+ if lun is not None:
+ return self.get_lun_details(lun)
+ return None
+
+ def get_luns_from_app(self):
+ app_details, error = self.rest_app.get_application_details()
+ self.fail_on_error(error)
+ if app_details is not None:
+ app_details['paths'] = self.get_lun_paths_from_app()
+ return app_details
+
+ def get_lun_paths_from_app(self):
+ """Get luns path for SAN application"""
+ backing_storage, error = self.rest_app.get_application_component_backing_storage()
+ self.fail_on_error(error)
+ # {'luns': [{'path': '/vol/ansibleLUN/ansibleLUN_1', ...
+ if backing_storage is not None:
+ return [lun['path'] for lun in backing_storage.get('luns', [])]
+ return None
+
+ def get_lun_path_from_backend(self, name):
+ """returns lun path matching name if found in backing_storage
+ retruns None if not found
+ """
+ lun_paths = self.get_lun_paths_from_app()
+ match = "/%s" % name
+ return next((path for path in lun_paths if path.endswith(match)), None)
+
    def create_san_app_component(self, modify):
        '''Build the application_components entry for the SAN template body.

        With modify=None (create), lun_count defaults to 1 and name/total_size
        are required.  With a modify dict, only changed attributes are copied,
        and total_size becomes required when lun_count changes.
        '''
        if modify:
            required_options = ['name']
            action = 'modify'
            if 'lun_count' in modify:
                required_options.append('total_size')
        else:
            required_options = ('name', 'total_size')
            action = 'create'
        for option in required_options:
            if self.parameters.get(option) is None:
                self.module.fail_json(msg="Error: '%s' is required to %s a san application." % (option, action))

        application_component = dict(name=self.parameters['name'])
        if not modify:
            application_component['lun_count'] = 1  # default value for create, may be overridden below

        # template-level options, copied as-is when present (or when changed, for modify)
        for attr in ('igroup_name', 'lun_count', 'storage_service'):
            if not modify or attr in modify:
                value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr])
                if value is not None:
                    application_component[attr] = value
        # module-level options; the two QOS options are rewrapped into a single 'qos' entry
        for attr in ('os_type', 'qos_policy_group', 'qos_adaptive_policy_group', 'total_size'):
            if not self.rest_api.meets_rest_minimum_version(True, 9, 8, 0) and attr in (
                'os_type',
                'qos_policy_group',
                'qos_adaptive_policy_group',
            ):
                # os_type and qos are not supported in 9.7 for the SAN application_component
                continue
            if not modify or attr in modify:
                value = self.na_helper.safe_get(self.parameters, [attr])
                if value is not None:
                    # only one of them can be present at most
                    if attr in ('qos_policy_group', 'qos_adaptive_policy_group'):
                        attr = 'qos'
                        value = dict(policy=dict(name=value))
                    application_component[attr] = value
        # tiering is only honored at create time
        tiering = self.na_helper.safe_get(self.parameters, ['san_application_template', 'tiering'])
        if tiering is not None and not modify:
            application_component['tiering'] = {}
            for attr in ('control', 'policy', 'object_stores'):
                value = tiering.get(attr)
                if attr == 'object_stores' and value is not None:
                    # REST expects a list of {'name': ...} dicts
                    value = [dict(name=x) for x in value]
                if value is not None:
                    application_component['tiering'][attr] = value
        return application_component
+
+ def create_san_app_body(self, modify=None):
+ '''Create body for san template'''
+ # TODO:
+ # Should we support new_igroups?
+ # It may raise idempotency issues if the REST call fails if the igroup already exists.
+ # And we already have na_ontap_igroups.
+ san = {
+ 'application_components': [self.create_san_app_component(modify)],
+ }
+ for attr in ('protection_type',):
+ if not modify or attr in modify:
+ value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr])
+ if value is not None:
+ # we expect value to be a dict, but maybe an empty dict
+ value = self.na_helper.filter_out_none_entries(value)
+ if value:
+ san[attr] = value
+ for attr in ('exclude_aggregates',):
+ if modify is None: # only used for create
+ values = self.na_helper.safe_get(self.parameters, ['san_application_template', attr])
+ if values:
+ san[attr] = [dict(name=name) for name in values]
+ for attr in ('os_type',):
+ if not modify: # not supported for modify operation, but required at application component level for create
+ value = self.na_helper.safe_get(self.parameters, [attr])
+ if value is not None:
+ san[attr] = value
+ body, error = self.rest_app.create_application_body('san', san)
+ return body, error
+
+ def create_san_application(self):
+ '''Use REST application/applications san template to create one or more LUNs'''
+ body, error = self.create_san_app_body()
+ self.fail_on_error(error)
+ dummy, error = self.rest_app.create_application(body)
+ self.fail_on_error(error)
+
+ def modify_san_application(self, modify):
+ '''Use REST application/applications san template to add one or more LUNs'''
+ body, error = self.create_san_app_body(modify)
+ self.fail_on_error(error)
+ # these cannot be present when using PATCH
+ body.pop('name')
+ body.pop('svm')
+ body.pop('smart_container')
+ dummy, error = self.rest_app.patch_application(body)
+ self.fail_on_error(error)
+
    def convert_to_san_application(self, scope):
        '''First convert volume to smart container using POST
        Second modify app to add new luns using PATCH
        '''
        # dummy modify, so that we don't fill in the body
        modify = dict(dummy='dummy')
        body, error = self.create_san_app_body(modify)
        self.fail_on_error(error)
        dummy, error = self.rest_app.create_application(body)
        self.fail_on_error(error)
        # verify the smart container now exists before attempting the PATCH
        app_current, error = self.rest_app.get_application_uuid()
        self.fail_on_error(error)
        if app_current is None:
            self.module.fail_json(msg='Error: failed to create smart container for %s' % self.parameters['name'])
        app_modify, app_modify_warning = self.app_changes(scope)
        if app_modify_warning is not None:
            self.module.warn(app_modify_warning)
        if app_modify:
            self.modify_san_application(app_modify)
+
+ def delete_san_application(self):
+ '''Use REST application/applications san template to delete one or more LUNs'''
+ dummy, error = self.rest_app.delete_application()
+ self.fail_on_error(error)
+
+ def create_lun(self):
+ """
+ Create LUN with requested name and size
+ """
+ if self.use_rest:
+ return self.create_lun_rest()
+ path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name'])
+ options = {'path': path,
+ 'size': str(self.parameters['size']),
+ 'space-reservation-enabled': self.na_helper.get_value_for_bool(False, self.parameters['space_reserve']),
+ 'use-exact-size': str(self.parameters['use_exact_size'])}
+ if self.parameters.get('space_allocation') is not None:
+ options['space-allocation-enabled'] = self.na_helper.get_value_for_bool(False, self.parameters['space_allocation'])
+ if self.parameters.get('comment') is not None:
+ options['comment'] = self.parameters['comment']
+ if self.parameters.get('os_type') is not None:
+ options['ostype'] = self.parameters['os_type']
+ if self.parameters.get('qos_policy_group') is not None:
+ options['qos-policy-group'] = self.parameters['qos_policy_group']
+ if self.parameters.get('qos_adaptive_policy_group') is not None:
+ options['qos-adaptive-policy-group'] = self.parameters['qos_adaptive_policy_group']
+ lun_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-create-by-size', **options)
+
+ try:
+ self.server.invoke_successfully(lun_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error provisioning lun %s of size %s: %s"
+ % (self.parameters['name'], self.parameters['size'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def delete_lun(self, path):
+ """
+ Delete requested LUN
+ """
+ if self.use_rest:
+ return self.delete_lun_rest()
+ lun_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-destroy', **{'path': path,
+ 'force': str(self.parameters['force_remove']),
+ 'destroy-fenced-lun':
+ str(self.parameters['force_remove_fenced'])})
+
+ try:
+ self.server.invoke_successfully(lun_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(exc)),
+ exception=traceback.format_exc())
+
+ def resize_lun(self, path):
+ """
+ Resize requested LUN
+
+ :return: True if LUN was actually re-sized, false otherwise.
+ :rtype: bool
+ """
+ if self.use_rest:
+ return self.resize_lun_rest()
+ lun_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-resize', **{'path': path,
+ 'size': str(self.parameters['size']),
+ 'force': str(self.parameters['force_resize'])})
+ try:
+ self.server.invoke_successfully(lun_resize, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ if to_native(exc.code) == "9042":
+ # Error 9042 denotes the new LUN size being the same as the
+ # old LUN size. This happens when there's barely any difference
+ # in the two sizes. For example, from 8388608 bytes to
+ # 8194304 bytes. This should go away if/when the default size
+ # requested/reported to/from the controller is changed to a
+ # larger unit (MB/GB/TB).
+ return False
+ else:
+ self.module.fail_json(msg="Error resizing lun %s: %s" % (path, to_native(exc)),
+ exception=traceback.format_exc())
+
+ return True
+
+ def set_lun_value(self, path, key, value):
+ key_to_zapi = dict(
+ comment=('lun-set-comment', 'comment'),
+ # The same ZAPI is used for both QOS attributes
+ qos_policy_group=('lun-set-qos-policy-group', 'qos-policy-group'),
+ qos_adaptive_policy_group=('lun-set-qos-policy-group', 'qos-adaptive-policy-group'),
+ space_allocation=('lun-set-space-alloc', 'enable'),
+ space_reserve=('lun-set-space-reservation-info', 'enable')
+ )
+ if key in key_to_zapi:
+ zapi, option = key_to_zapi[key]
+ else:
+ self.module.fail_json(msg="option %s cannot be modified to %s" % (key, value))
+ options = dict(path=path)
+ if option == 'enable':
+ options[option] = self.na_helper.get_value_for_bool(False, value)
+ else:
+ options[option] = value
+
+ lun_set = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+ try:
+ self.server.invoke_successfully(lun_set, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error setting lun option %s: %s" % (key, to_native(exc)),
+ exception=traceback.format_exc())
+ return
+
+ def modify_lun(self, path, modify):
+ """
+ update LUN properties (except size or name)
+ """
+ if self.use_rest:
+ return self.modify_lun_rest(modify)
+ for key in sorted(modify):
+ self.set_lun_value(path, key, modify[key])
+
+ def rename_lun(self, path, new_path):
+ """
+ rename LUN
+ """
+ if self.use_rest:
+ return self.rename_lun_rest(new_path)
+ lun_move = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-move', **{'path': path,
+ 'new-path': new_path})
+ try:
+ self.server.invoke_successfully(lun_move, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg="Error moving lun %s: %s" % (path, to_native(exc)),
+ exception=traceback.format_exc())
+
+ def fail_on_error(self, error, stack=False):
+ if error is None:
+ return
+ elements = dict(msg="Error: %s" % error)
+ if stack:
+ elements['stack'] = traceback.format_stack()
+ self.module.fail_json(**elements)
+
+ def set_total_size(self, validate):
+ # fix total_size attribute, report error if total_size is missing (or size is missing)
+ attr = 'total_size'
+ value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr])
+ if value is not None or not validate:
+ self.parameters[attr] = value
+ return
+ lun_count = self.na_helper.safe_get(self.parameters, ['san_application_template', 'lun_count'])
+ value = self.parameters.get('size')
+ if value is not None and (lun_count is None or lun_count == 1):
+ self.parameters[attr] = value
+ return
+ self.module.fail_json(msg="Error: 'total_size' is a required SAN application template attribute when creating a LUN application")
+
+ def validate_app_create(self):
+ # fix total_size attribute
+ self.set_total_size(validate=True)
+
    def validate_app_changes(self, modify, warning):
        '''Validate and prune (in place) the application-level modify dict.

        Fails on attributes that cannot be modified, requires extra attributes
        when lun_count increases, and may downgrade an ignorable total_size
        change to a warning.  Note: modify is mutated in place.
        '''
        saved_modify = dict(modify)
        errors = [
            "Error: the following application parameter cannot be modified: %s. Received: %s."
            % (key, str(modify))
            for key in modify
            if key not in ('igroup_name', 'os_type', 'lun_count', 'total_size')
        ]

        extra_attrs = tuple()
        if 'lun_count' in modify:
            # growing the application needs sizing and mapping information too
            extra_attrs = ('total_size', 'os_type', 'igroup_name')
        else:
            # without a lun_count change, only total_size is actionable; warn about the rest
            ignored_keys = [key for key in modify if key not in ('total_size',)]
            for key in ignored_keys:
                self.module.warn(
                    "Ignoring: %s. This application parameter is only relevant when increasing the LUN count. Received: %s."
                    % (key, str(saved_modify)))
                modify.pop(key)
        for attr in extra_attrs:
            # module-level option wins, template attribute is the fallback
            value = self.parameters.get(attr)
            if value is None:
                value = self.na_helper.safe_get(self.parameters['san_application_template'], [attr])
            if value is None:
                errors.append('Error: %s is a required parameter when increasing lun_count.' % attr)
            else:
                modify[attr] = value
        if errors:
            self.module.fail_json(msg='\n'.join(errors))
        if 'total_size' in modify:
            self.set_total_size(validate=False)
            if warning and 'lun_count' not in modify:
                # can't change total_size, let's ignore it
                self.module.warn(warning)
                modify.pop('total_size')
                saved_modify.pop('total_size')
        if modify and not self.rest_api.meets_rest_minimum_version(True, 9, 8):
            self.module.fail_json(
                msg='Error: modifying %s is not supported on ONTAP 9.7' % ', '.join(saved_modify.keys()))
+
+ def fail_on_large_size_reduction(self, app_current, desired, provisioned_size):
+ """ Error if a reduction of size > 10% is requested.
+ Warn for smaller reduction and ignore it, to protect against 'rounding' errors.
+ """
+ total_size = app_current['total_size']
+ desired_size = desired.get('total_size')
+ warning = None
+ if desired_size is not None:
+ details = "total_size=%d, provisioned=%d, requested=%d" % (total_size, provisioned_size, desired_size)
+ if desired_size < total_size:
+ # * 100 to get a percentage, and .0 to force float conversion
+ reduction = round((total_size - desired_size) * 100.0 / total_size, 1)
+ if reduction > 10:
+ self.module.fail_json(msg="Error: can't reduce size: %s" % details)
+ else:
+ warning = "Ignoring small reduction (%.1f %%) in total size: %s" % (reduction, details)
+ elif desired_size > total_size and desired_size < provisioned_size:
+ # we can't increase, but we can't say it is a problem, as the size is already bigger!
+ warning = "Ignoring increase: requested size is too small: %s" % details
+ return warning
+
+ def get_luns_rest(self, lun_path=None):
+ if lun_path is None and self.parameters.get('flexvol_name') is None:
+ return []
+ api = 'storage/luns'
+ query = {
+ 'svm.name': self.parameters['vserver'],
+ 'fields': "comment,lun_maps,name,os_type,qos_policy.name,space"}
+ if lun_path is not None:
+ query['name'] = lun_path
+ else:
+ query['location.volume.name'] = self.parameters['flexvol_name']
+ record, error = rest_generic.get_0_or_more_records(self.rest_api, api, query)
+ if error:
+ if lun_path is not None:
+ self.module.fail_json(msg="Error getting lun_path %s: %s" % (lun_path, to_native(error)),
+ exception=traceback.format_exc())
+ else:
+ self.module.fail_json(
+ msg="Error getting LUN's for flexvol %s: %s" % (self.parameters['flexvol_name'], to_native(error)),
+ exception=traceback.format_exc())
+ return self.format_get_luns(record)
+
+ def format_get_luns(self, records):
+ luns = []
+ if not records:
+ return None
+ for record in records:
+ # TODO: Check that path and name are the same in Rest
+ lun = {
+ 'uuid': self.na_helper.safe_get(record, ['uuid']),
+ 'name': self.na_helper.safe_get(record, ['name']),
+ 'path': self.na_helper.safe_get(record, ['name']),
+ 'size': self.na_helper.safe_get(record, ['space', 'size']),
+ 'comment': self.na_helper.safe_get(record, ['comment']),
+ 'flexvol_name': self.na_helper.safe_get(record, ['location', 'volume', 'name']),
+ 'os_type': self.na_helper.safe_get(record, ['os_type']),
+ 'qos_policy_group': self.na_helper.safe_get(record, ['qos_policy', 'name']),
+ 'space_reserve': self.na_helper.safe_get(record, ['space', 'guarantee', 'requested']),
+ 'space_allocation': self.na_helper.safe_get(record,
+ ['space', 'scsi_thin_provisioning_support_enabled']),
+ }
+ luns.append(lun)
+ return luns
+
+ def create_lun_rest(self):
+ name = self.create_lun_path_rest()
+ api = 'storage/luns'
+ body = {
+ 'svm.name': self.parameters['vserver'],
+ 'name': name,
+ }
+ if self.parameters.get('flexvol_name') is not None:
+ body['location.volume.name'] = self.parameters['flexvol_name']
+ if self.parameters.get('os_type') is not None:
+ body['os_type'] = self.parameters['os_type']
+ if self.parameters.get('size') is not None:
+ body['space.size'] = self.parameters['size']
+ if self.parameters.get('space_reserve') is not None:
+ body['space.guarantee.requested'] = self.parameters['space_reserve']
+ if self.parameters.get('space_allocation') is not None:
+ body['space.scsi_thin_provisioning_support_enabled'] = self.parameters['space_allocation']
+ if self.parameters.get('comment') is not None:
+ body['comment'] = self.parameters['comment']
+ if self.parameters.get('qos_policy_group') is not None:
+ body['qos_policy.name'] = self.parameters['qos_policy_group']
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg="Error creating LUN %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_lun_path_rest(self):
+ """ ZAPI accepts just a name, while REST expects a path. We need to convert a name in to a path for backward compatibility
+ If the name start with a slash we will assume it a path and use it as the name
+ """
+ if not self.parameters['name'].startswith('/') and self.parameters.get('flexvol_name') is not None:
+ # if it dosn't start with a slash and we have a flexvol name we will use it to build the path
+ return '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name'])
+ return self.parameters['name']
+
+ def delete_lun_rest(self):
+ if self.uuid is None:
+ self.module.fail_json(msg="Error deleting LUN %s: UUID not found" % self.parameters['name'])
+ api = 'storage/luns'
+ query = {'allow_delete_while_mapped': self.parameters['force_remove']}
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid, query)
+ if error:
+ self.module.fail_json(msg="Error deleting LUN %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_lun_rest(self, new_path):
+ if self.uuid is None:
+ self.module.fail_json(msg="Error renaming LUN %s: UUID not found" % self.parameters['name'])
+ api = 'storage/luns'
+ body = {'name': new_path}
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
+ if error:
+ self.module.fail_json(msg="Error renaming LUN %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def resize_lun_rest(self):
+ if self.uuid is None:
+ self.module.fail_json(msg="Error resizing LUN %s: UUID not found" % self.parameters['name'])
+ api = 'storage/luns'
+ body = {'space.size': self.parameters['size']}
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
+ if error:
+ if 'New LUN size is the same as the old LUN size' in error:
+ return False
+ self.module.fail_json(msg="Error resizing LUN %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ return True
+
+ def modify_lun_rest(self, modify):
+ local_modify = modify.copy()
+ if self.uuid is None:
+ self.module.fail_json(msg="Error modifying LUN %s: UUID not found" % self.parameters['name'])
+ api = 'storage/luns'
+ body = {}
+ if local_modify.get('space_reserve') is not None:
+ body['space.guarantee.requested'] = local_modify.pop('space_reserve')
+ if local_modify.get('space_allocation') is not None:
+ body['space.scsi_thin_provisioning_support_enabled'] = local_modify.pop('space_allocation')
+ if local_modify.get('comment') is not None:
+ body['comment'] = local_modify.pop('comment')
+ if local_modify.get('qos_policy_group') is not None:
+ body['qos_policy.name'] = local_modify.pop('qos_policy_group')
+ if local_modify != {}:
+ self.module.fail_json(
+ msg="Error modifying LUN %s: Unknown parameters: %s" % (self.parameters['name'], local_modify))
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
+ if error:
+ self.module.fail_json(msg="Error modifying LUN %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def check_for_errors(self, lun_cd_action, current, modify):
+ errors = []
+ if lun_cd_action == 'create':
+ if self.parameters.get('flexvol_name') is None:
+ errors.append("The flexvol_name parameter is required for creating a LUN.")
+ if self.use_rest and self.parameters.get('os_type') is None:
+ errors.append("The os_type parameter is required for creating a LUN with REST.")
+ if self.parameters.get('size') is None:
+ self.module.fail_json(msg="size is a required parameter for create.")
+ elif modify and 'os_type' in modify:
+ self.module.fail_json(msg="os_type cannot be modified: current: %s, desired: %s" % (current['os_type'], modify['os_type']))
+ if errors:
+ self.module.fail_json(msg=' '.join(errors))
+
+ def set_uuid(self, current):
+ if self.use_rest and current is not None and current.get('uuid') is not None:
+ self.uuid = current['uuid']
+
    def app_changes(self, scope):
        '''Compute application-level changes between the current app and the template.

        :return: tuple (modify dict or None, warning string or None).
        '''
        # find and validate app changes
        app_current, error = self.rest_app.get_application_details('san')
        self.fail_on_error(error)
        # save application name, as it is overridden in the flattening operation
        app_name = app_current['name']
        # there is an issue with total_size not reflecting the real total_size, and some additional overhead
        provisioned_size = self.na_helper.safe_get(app_current, ['statistics', 'space', 'provisioned'])
        if provisioned_size is None:
            provisioned_size = 0
        if self.debug:
            self.debug['app_current'] = app_current  # will be updated below as it is mutable
            self.debug['got'] = copy.deepcopy(app_current)  # fixed copy
        # flatten
        app_current = app_current['san']  # app template
        app_current.update(app_current['application_components'][0])  # app component
        del app_current['application_components']
        # if component name does not match, assume a change at LUN level
        comp_name = app_current['name']
        if comp_name != self.parameters['name']:
            msg = "desired component/volume name: %s does not match existing component name: %s" % (self.parameters['name'], comp_name)
            if scope == 'application':
                self.module.fail_json(msg='Error: ' + msg + ". scope=%s" % scope)
            return None, msg + ". scope=%s, assuming 'lun' scope." % scope
        # restore app name
        app_current['name'] = app_name

        # ready to compare, except for a quirk in size handling
        desired = dict(self.parameters['san_application_template'])
        warning = self.fail_on_large_size_reduction(app_current, desired, provisioned_size)

        # preserve change state before calling modify in case an ignorable total_size change is the only change
        changed = self.na_helper.changed
        app_modify = self.na_helper.get_modified_attributes(app_current, desired)
        self.validate_app_changes(app_modify, warning)
        if not app_modify:
            self.na_helper.changed = changed
            app_modify = None
        return app_modify, None
+
+ def get_app_apply(self):
+ scope = self.na_helper.safe_get(self.parameters, ['san_application_template', 'scope'])
+ app_current, error = self.rest_app.get_application_uuid()
+ self.fail_on_error(error)
+ if scope == 'lun' and app_current is None:
+ self.module.fail_json(msg='Application not found: %s. scope=%s.' %
+ (self.na_helper.safe_get(self.parameters, ['san_application_template', 'name']),
+ scope))
+ return scope, app_current
+
    def app_actions(self, app_current, scope, actions, results):
        '''Determine the application-level action: create, convert, or modify.

        actions and results are mutated in place for change reporting.

        :return: tuple (app_cd_action, app_modify, app_modify_warning).
        '''
        app_modify, app_modify_warning = None, None
        app_cd_action = self.na_helper.get_cd_action(app_current, self.parameters)
        if app_cd_action == 'create':
            # check if target volume already exists
            cp_volume_name = self.parameters['name']
            volume, error = rest_volume.get_volume(self.rest_api, self.parameters['vserver'], cp_volume_name)
            self.fail_on_error(error)
            if volume is not None:
                if scope == 'application':
                    # volume already exists, but not as part of this application
                    app_cd_action = 'convert'
                    if not self.rest_api.meets_rest_minimum_version(True, 9, 8, 0):
                        msg = 'Error: converting a LUN volume to a SAN application container requires ONTAP 9.8 or better.'
                        self.module.fail_json(msg=msg)
                else:
                    # default name already in use, ask user to clarify intent
                    msg = "Error: volume '%s' already exists. Please use a different group name, or use 'application' scope. scope=%s"
                    self.module.fail_json(msg=msg % (cp_volume_name, scope))
        if app_cd_action is not None:
            actions.append('app_%s' % app_cd_action)
            if app_cd_action == 'create':
                self.validate_app_create()
        if app_cd_action is None and app_current is not None:
            # existing application: look for attribute-level changes
            app_modify, app_modify_warning = self.app_changes(scope)
            if app_modify:
                actions.append('app_modify')
                results['app_modify'] = dict(app_modify)
        return app_cd_action, app_modify, app_modify_warning
+
+ def lun_actions(self, app_current, actions, results, scope, app_modify, app_modify_warning):
+ # actions at LUN level
+ lun_cd_action, lun_modify, lun_rename = None, None, None
+ lun_path, from_lun_path = None, None
+ from_name = self.parameters.get('from_name')
+ if self.rest_app and app_current:
+ # For LUNs created using a SAN application, we're getting lun paths from the backing storage
+ lun_path = self.get_lun_path_from_backend(self.parameters['name'])
+ if from_name is not None:
+ from_lun_path = self.get_lun_path_from_backend(from_name)
+ current = self.get_lun(self.parameters['name'], lun_path)
+ self.set_uuid(current)
+ if current is not None and lun_path is None:
+ lun_path = current['path']
+ lun_cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if lun_cd_action == 'create' and from_name is not None:
+ # create by renaming existing LUN, if it exists
+ old_lun = self.get_lun(from_name, from_lun_path)
+ lun_rename = self.na_helper.is_rename_action(old_lun, current)
+ if lun_rename is None:
+ self.module.fail_json(msg="Error renaming lun: %s does not exist" % from_name)
+ if lun_rename:
+ current = old_lun
+ if from_lun_path is None:
+ from_lun_path = current['path']
+ head, _sep, tail = from_lun_path.rpartition(from_name)
+ if tail:
+ self.module.fail_json(
+ msg="Error renaming lun: %s does not match lun_path %s" % (from_name, from_lun_path))
+ self.set_uuid(current)
+ lun_path = head + self.parameters['name']
+ lun_cd_action = None
+ actions.append('lun_rename')
+ app_modify_warning = None # reset warning as we found a match
+ if lun_cd_action is not None:
+ actions.append('lun_%s' % lun_cd_action)
+ if lun_cd_action is None and self.parameters['state'] == 'present':
+ # we already handled rename if required
+ current.pop('name', None)
+ lun_modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if lun_modify:
+ actions.append('lun_modify')
+ results['lun_modify'] = dict(lun_modify)
+ app_modify_warning = None # reset warning as we found a match
+ if lun_cd_action and self.rest_app and app_current:
+ msg = 'This module does not support %s a LUN by name %s a SAN application.' % \
+ ('adding', 'to') if lun_cd_action == 'create' else ('removing', 'from')
+ if scope == 'auto':
+ # ignore LUN not found, as name can be a group name
+ self.module.warn(msg + ". scope=%s, assuming 'application'" % scope)
+ if not app_modify:
+ self.na_helper.changed = False
+ elif scope == 'lun':
+ self.module.fail_json(msg=msg + ". scope=%s." % scope)
+ lun_cd_action = None
+ self.check_for_errors(lun_cd_action, current, lun_modify)
+ return lun_path, from_lun_path, lun_cd_action, lun_rename, lun_modify, app_modify_warning
+
+ def lun_modify_after_app_update(self, lun_path, results):
+ # modify at LUN level, as app modify does not set some LUN level options (eg space_reserve)
+ if lun_path is None:
+ lun_path = self.get_lun_path_from_backend(self.parameters['name'])
+ current = self.get_lun(self.parameters['name'], lun_path)
+ self.set_uuid(current)
+ # we already handled rename if required
+ current.pop('name', None)
+ lun_modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if lun_modify:
+ results['lun_modify_after_app_update'] = dict(lun_modify)
+ self.check_for_errors(None, current, lun_modify)
+ return lun_modify
+
    def apply(self):
        """Entry point: compute application- and LUN-level actions, then execute them.

        Scope handling: 'application' operates only on the SAN application,
        'lun' only on the LUN, 'auto' tries both.  Actions are collected first
        (for reporting and check mode), then executed unless check mode is on.
        """
        results = {}
        app_cd_action, app_modify, lun_cd_action, lun_modify, lun_rename = None, None, None, None, None
        app_modify_warning, app_current, lun_path, from_lun_path = None, None, None, None
        actions = []
        if self.rest_app:
            scope, app_current = self.get_app_apply()
        else:
            # no application template, fall back to LUN only
            scope = 'lun'
        if self.rest_app and scope != 'lun':
            app_cd_action, app_modify, app_modify_warning = self.app_actions(app_current, scope, actions, results)
        if app_cd_action is None and scope != 'application':
            lun_path, from_lun_path, lun_cd_action, lun_rename, lun_modify, app_modify_warning = \
                self.lun_actions(app_current, actions, results, scope, app_modify, app_modify_warning)
        if self.na_helper.changed and not self.module.check_mode:
            # create/convert/delete actions are mutually exclusive; modify and
            # rename may be combined and are handled together in the else branch.
            if app_cd_action == 'create':
                self.create_san_application()
            elif app_cd_action == 'convert':
                self.convert_to_san_application(scope)
            elif app_cd_action == 'delete':
                self.rest_app.delete_application()
            elif lun_cd_action == 'create':
                self.create_lun()
            elif lun_cd_action == 'delete':
                self.delete_lun(lun_path)
            else:
                if app_modify:
                    self.modify_san_application(app_modify)
                if lun_rename:
                    self.rename_lun(from_lun_path, lun_path)
                if app_modify:
                    # space_reserve will be set to True
                    # To match input parameters, lun_modify is recomputed.
                    lun_modify = self.lun_modify_after_app_update(lun_path, results)
                size_changed = False
                if lun_modify and 'size' in lun_modify:
                    # Ensure that size was actually changed. Please
                    # read notes in 'resize_lun' function for details.
                    size_changed = self.resize_lun(lun_path)
                    lun_modify.pop('size')
                if lun_modify:
                    self.modify_lun(lun_path, lun_modify)
                if not lun_modify and not lun_rename and not app_modify:
                    # size may not have changed
                    self.na_helper.changed = size_changed

        if app_modify_warning:
            self.module.warn(app_modify_warning)
        result = netapp_utils.generate_result(self.na_helper.changed, actions,
                                              extra_responses={'debug': self.debug} if self.debug else None)
        self.module.exit_json(**result)
+
+
def main():
    """Instantiate the LUN module object and run it."""
    module = NetAppOntapLUN()
    module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py
new file mode 100644
index 000000000..94a443b6e
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+
+# (c) 2019-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_lun_copy
+
+short_description: NetApp ONTAP copy LUNs
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Copy LUNs on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified LUN should exist or not.
+ choices: ['present']
+ type: str
+ default: present
+
+ destination_vserver:
+ description:
+ - the name of the Vserver that will host the new LUN.
+ required: true
+ type: str
+ aliases: ['vserver']
+
+ destination_path:
+ description:
+ - Specifies the full path to the new LUN.
+ required: true
+ type: str
+
+ source_path:
+ description:
+ - Specifies the full path to the source LUN.
+ required: true
+ type: str
+
+ source_vserver:
+ description:
+ - Specifies the name of the vserver hosting the LUN to be copied.
+ - If not provided, C(destination_vserver) value is set as default.
+ - with REST, this option value must match C(destination_vserver) when present.
+ type: str
+
+notes:
+ - supports ZAPI and REST. REST requires ONTAP 9.10.1 or later.
+ - supports check mode.
+ - REST supports intra-Vserver lun copy only.
+ '''
+EXAMPLES = """
+- name: Copy LUN
+ netapp.ontap.na_ontap_lun_copy:
+ destination_vserver: ansible
+ destination_path: /vol/test/test_copy_dest_dest_new
+ source_path: /vol/test/test_copy_1
+ source_vserver: ansible
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapLUNCopy:
    """Copy a LUN to a new path.

    ZAPI supports intra- and inter-vserver copies; REST (9.10.1+) supports
    intra-vserver copies only.
    """

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present'], default='present'),
            destination_vserver=dict(required=True, type='str', aliases=['vserver']),
            destination_path=dict(required=True, type='str'),
            source_path=dict(required=True, type='str'),
            source_vserver=dict(required=False, type='str'),

        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # if source_vserver not present, set destination_vserver value for intra-vserver copy operation.
        if not self.parameters.get('source_vserver'):
            self.parameters['source_vserver'] = self.parameters['destination_vserver']
        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
            msg = 'REST requires ONTAP 9.10.1 or later for na_ontap_lun_copy'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            # ZAPI calls are tunneled to the destination vserver
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module,
                                                           vserver=self.parameters['destination_vserver'])

    def get_lun(self):
        """
        Check if the LUN exists at the destination path.

        :return: True if it exists, False otherwise
        :rtype: bool
        """

        if self.use_rest:
            return self.get_lun_rest()
        return_value = False
        lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
        query_details = netapp_utils.zapi.NaElement('lun-info')

        query_details.add_new_child('path', self.parameters['destination_path'])
        query_details.add_new_child('vserver', self.parameters['destination_vserver'])

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)

        lun_info.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(lun_info, True)

        except netapp_utils.zapi.NaApiError as e:
            # fixed typo in user-facing message: 'verver' -> 'vserver'
            self.module.fail_json(msg="Error getting lun info %s for vserver %s: %s" %
                                  (self.parameters['destination_path'], self.parameters['destination_vserver'], to_native(e)),
                                  exception=traceback.format_exc())

        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            return_value = True
        return return_value

    def copy_lun(self):
        """
        Copy LUN with requested path and vserver
        """
        if self.use_rest:
            return self.copy_lun_rest()
        lun_copy = netapp_utils.zapi.NaElement.create_node_with_children(
            'lun-copy-start', **{'source-vserver': self.parameters['source_vserver']})
        path_obj = netapp_utils.zapi.NaElement('paths')
        pair = netapp_utils.zapi.NaElement('lun-path-pair')
        pair.add_new_child('destination-path', self.parameters['destination_path'])
        pair.add_new_child('source-path', self.parameters['source_path'])
        path_obj.add_child_elem(pair)
        lun_copy.add_child_elem(path_obj)

        try:
            self.server.invoke_successfully(lun_copy, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error copying lun from %s to vserver %s: %s" %
                                  (self.parameters['source_vserver'], self.parameters['destination_vserver'], to_native(e)),
                                  exception=traceback.format_exc())

    def get_lun_rest(self):
        """REST variant of get_lun: check for the destination LUN by name and svm."""
        api = 'storage/luns'
        params = {
            'svm.name': self.parameters['destination_vserver'],
            'name': self.parameters['destination_path']
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            # fixed typo in user-facing message: 'verver' -> 'vserver'
            self.module.fail_json(msg="Error getting lun info %s for vserver %s: %s" %
                                  (self.parameters['destination_path'], self.parameters['destination_vserver'], to_native(error)))
        return record is not None

    def copy_lun_rest(self):
        """REST variant of copy_lun: POST a new LUN with a copy.source reference."""
        api = 'storage/luns'
        body = {
            'copy': {'source': {'name': self.parameters['source_path']}},
            'name': self.parameters['destination_path'],
            'svm.name': self.parameters['destination_vserver']
        }
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg="Error copying lun from %s to vserver %s: %s" %
                                  (self.parameters['source_vserver'], self.parameters['destination_vserver'], to_native(error)))

    def apply(self):
        """Copy the LUN if it does not already exist at the destination."""
        if self.get_lun():  # lun already exists at destination
            changed = False
        else:
            if self.use_rest and self.parameters['source_vserver'] != self.parameters['destination_vserver']:
                # fixed grammar in user-facing message: 'does not supports' -> 'does not support'
                self.module.fail_json(msg="Error: REST does not support inter-Vserver lun copy.")
            changed = True
            if not self.module.check_mode:
                # need to copy lun
                if self.parameters['state'] == 'present':
                    self.copy_lun()

        self.module.exit_json(changed=changed)
+
+
def main():
    """Instantiate the LUN copy module object and run it."""
    copy_module = NetAppOntapLUNCopy()
    copy_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py
new file mode 100644
index 000000000..5bdbc17c8
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py
@@ -0,0 +1,356 @@
+#!/usr/bin/python
+
+""" NetApp ONTAP LUN mapping module.
+
+ (c) 2018-2022, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_lun_map
+short_description: NetApp ONTAP LUN maps
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Map and unmap LUNs on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified LUN should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ initiator_group_name:
+ description:
+ - Initiator group to map to the given LUN.
+ required: true
+ type: str
+
+ path:
+ description:
+    - Path of the LUN.
+ required: true
+ type: str
+
+ vserver:
+ required: true
+ description:
+ - The name of the vserver to use.
+ type: str
+
+ lun_id:
+ description:
+ - LUN ID assigned for the map.
+ type: str
+"""
+
+EXAMPLES = """
+- name: Create LUN mapping
+ na_ontap_lun_map:
+ state: present
+ initiator_group_name: ansibleIgroup3234
+ path: /vol/iscsi_path/iscsi_lun
+ vserver: ci_dev
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Unmap LUN
+ na_ontap_lun_map:
+ state: absent
+ initiator_group_name: ansibleIgroup3234
+ path: /vol/iscsi_path/iscsi_lun
+ vserver: ci_dev
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+lun_node:
+ description: NetApp controller that is hosting the LUN. (Note Not returned with REST)
+ returned: success
+ type: str
+ sample: node01
+lun_ostype:
+ description: Specifies the OS of the host accessing the LUN.
+ returned: success
+ type: str
+ sample: vmware
+lun_serial:
+ description: A unique, 12-byte, ASCII string used to identify the LUN.
+ returned: success
+ type: str
+ sample: 80E7/]LZp1Tt
+lun_naa_id:
+ description: The Network Address Authority (NAA) identifier for the LUN.
+ returned: success
+ type: str
+ sample: 600a0980383045372f5d4c5a70315474
+lun_state:
+ description: Online or offline status of the LUN.
+ returned: success
+ type: str
+ sample: online
+lun_size:
+ description: Size of the LUN in bytes.
+ returned: success
+ type: int
+ sample: 2199023255552
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import codecs
+from ansible.module_utils._text import to_text, to_bytes
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapLUNMap:
    """
    Class with LUN map methods: map/unmap a LUN to/from an initiator group,
    using REST when available and falling back to ZAPI otherwise.
    """

    def __init__(self):
        # lun_uuid/igroup_uuid are filled in from the current map record (REST only)
        self.lun_uuid, self.igroup_uuid = None, None
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            initiator_group_name=dict(required=True, type='str'),
            path=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
            lun_id=dict(required=False, type='str', default=None),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['path'])
            ],
            supports_check_mode=True
        )
        self.result = dict(
            changed=False,
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        if not self.use_rest:
            if HAS_NETAPP_LIB is False:
                self.module.fail_json(msg="the python NetApp-Lib module is required")
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_lun_map(self):
        """
        Return details about the LUN map

        :return: Details about the lun map (None when no matching map exists)
        :rtype: dict
        """
        if self.use_rest:
            return self.get_lun_map_rest()
        lun_info = netapp_utils.zapi.NaElement('lun-map-list-info')
        lun_info.add_new_child('path', self.parameters['path'])
        result = self.server.invoke_successfully(lun_info, True)
        return_value = None
        igroups = result.get_child_by_name('initiator-groups')
        if igroups:
            # scan all igroups mapped to this path for the requested igroup name
            for igroup_info in igroups.get_children():
                initiator_group_name = igroup_info.get_child_content('initiator-group-name')
                lun_id = igroup_info.get_child_content('lun-id')
                if initiator_group_name == self.parameters['initiator_group_name']:
                    return_value = {
                        'lun_id': lun_id
                    }
                    break

        return return_value

    def get_lun(self):
        """
        Return details about the LUN

        :return: Details about the lun (None when the LUN does not exist)
        :rtype: dict
        """
        if self.use_rest:
            return self.get_lun_rest()
        # build the lun query
        query_details = netapp_utils.zapi.NaElement('lun-info')
        query_details.add_new_child('path', self.parameters['path'])

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)

        lun_query = netapp_utils.zapi.NaElement('lun-get-iter')
        lun_query.add_child_elem(query)

        # find lun using query
        result = self.server.invoke_successfully(lun_query, True)
        return_value = None
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            lun = result.get_child_by_name('attributes-list').get_child_by_name('lun-info')

            return_value = {
                'lun_node': lun.get_child_content('node'),
                'lun_ostype': lun.get_child_content('multiprotocol-type'),
                'lun_serial': lun.get_child_content('serial-number'),
                'lun_naa_id': self.return_naa_id(lun.get_child_content('serial-number')),
                'lun_state': lun.get_child_content('state'),
                'lun_size': lun.get_child_content('size'),
            }

        return return_value

    def return_naa_id(self, serial_number):
        """Build the NAA identifier: fixed NetApp prefix + hex-encoded serial number."""
        hexlify = codecs.getencoder('hex')
        return '600a0980' + to_text(hexlify(to_bytes(serial_number))[0])

    def create_lun_map(self):
        """
        Create LUN map
        """
        if self.use_rest:
            return self.create_lun_map_rest()
        options = {'path': self.parameters['path'], 'initiator-group': self.parameters['initiator_group_name']}
        if self.parameters['lun_id'] is not None:
            # lun-id is optional; ONTAP assigns one if omitted
            options['lun-id'] = self.parameters['lun_id']
        lun_map_create = netapp_utils.zapi.NaElement.create_node_with_children('lun-map', **options)

        try:
            self.server.invoke_successfully(lun_map_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error mapping lun %s of initiator_group_name %s: %s" %
                                  (self.parameters['path'], self.parameters['initiator_group_name'], to_native(e)),
                                  exception=traceback.format_exc())

    def delete_lun_map(self):
        """
        Unmap LUN map
        """
        if self.use_rest:
            return self.delete_lun_map_rest()
        lun_map_delete = netapp_utils.zapi.NaElement.create_node_with_children('lun-unmap', **{
            'path': self.parameters['path'], 'initiator-group': self.parameters['initiator_group_name']})

        try:
            self.server.invoke_successfully(lun_map_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error unmapping lun %s of initiator_group_name %s: %s" %
                                  (self.parameters['path'], self.parameters['initiator_group_name'], to_native(e)),
                                  exception=traceback.format_exc())

    def get_lun_rest(self):
        """REST variant of get_lun: fetch LUN attributes by name and svm."""
        api = 'storage/luns'
        params = {'name': self.parameters['path'],
                  'svm.name': self.parameters['vserver'],
                  'fields': 'name,'
                            'os_type,'
                            'serial_number,'
                            'status.state,'
                            'space.size,'
                            'uuid,'
                            'lun_maps'
                  }
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg='Error getting lun %s: %s' % (self.parameters['path'], error))
        if record:
            # note: lun_node is not available through REST
            return {'lun_ostype': self.na_helper.safe_get(record, ['os_type']),
                    'lun_serial': self.na_helper.safe_get(record, ['serial_number']),
                    'lun_naa_id': self.return_naa_id(self.na_helper.safe_get(record, ['serial_number'])),
                    'lun_state': self.na_helper.safe_get(record, ['status', 'state']),
                    'lun_size': self.na_helper.safe_get(record, ['space', 'size']),
                    }
        return None

    def get_lun_map_rest(self):
        """REST variant of get_lun_map: fetch the map record and its uuids."""
        api = 'protocols/san/lun-maps'
        params = {'lun.name': self.parameters['path'],
                  'svm.name': self.parameters['vserver'],
                  'igroup.name': self.parameters['initiator_group_name'],
                  'fields': 'logical_unit_number,igroup.uuid,lun.uuid,lun.name,igroup.name'
                  }
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg='Error getting lun_map %s: %s' % (self.parameters['path'], error))
        if record:
            # lun_id is kept as a string to match the ZAPI representation
            return {'lun_id': str(self.na_helper.safe_get(record, ['logical_unit_number'])),
                    'igroup_uuid': self.na_helper.safe_get(record, ['igroup', 'uuid']),
                    'initiator_group_name': self.na_helper.safe_get(record, ['igroup', 'name']),
                    'lun_uuid': self.na_helper.safe_get(record, ['lun', 'uuid']),
                    'path': self.na_helper.safe_get(record, ['lun', 'name']),
                    }
        return None

    def create_lun_map_rest(self):
        """REST variant of create_lun_map: POST a new lun-map record."""
        api = 'protocols/san/lun-maps'
        body = {'svm.name': self.parameters['vserver'],
                'igroup.name': self.parameters['initiator_group_name'],
                'lun.name': self.parameters['path']}
        if self.parameters.get('lun_id') is not None:
            body['logical_unit_number'] = self.parameters['lun_id']
        dummy, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=120)
        if error:
            self.module.fail_json(msg='Error creating lun_map %s: %s' % (self.parameters['path'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_lun_map_rest(self):
        """REST variant of delete_lun_map: DELETE by the lun-uuid/igroup-uuid pair."""
        api = 'protocols/san/lun-maps'
        both_uuids = '%s/%s' % (self.lun_uuid, self.igroup_uuid)
        dummy, error = rest_generic.delete_async(self.rest_api, api, both_uuids, job_timeout=120)
        if error:
            self.module.fail_json(msg='Error deleting lun_map %s: %s' % (self.parameters['path'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Entry point: create or delete the LUN map as required."""
        # lun_details only enriches the module output below; it is not used to
        # decide any action and arguably belongs in the lun module instead.
        lun_details = self.get_lun()
        current = self.get_lun_map()
        if self.use_rest and current:
            # keep the uuids needed by delete_lun_map_rest
            self.lun_uuid = current.get('lun_uuid', None)
            self.igroup_uuid = current.get('igroup_uuid', None)
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            # an existing map cannot be modified (eg changing lun_id): fail early
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            if modify:
                self.module.fail_json(msg="Modification of lun_map not allowed")
        if self.parameters['state'] == 'present' and lun_details:
            self.result.update(lun_details)
        self.result['changed'] = self.na_helper.changed
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_lun_map()
            if cd_action == 'delete':
                self.delete_lun_map()
        self.module.exit_json(**self.result)
+
+
def main():
    """Instantiate the LUN map module object and run it."""
    map_module = NetAppOntapLUNMap()
    map_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map_reporting_nodes.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map_reporting_nodes.py
new file mode 100644
index 000000000..607c8c430
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map_reporting_nodes.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+
+"""
+ (c) 2018-2022, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+
+module: na_ontap_lun_map_reporting_nodes
+
+short_description: NetApp ONTAP LUN maps reporting nodes
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.2.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Add and Remove LUN map reporting nodes.
+
+options:
+ state:
+ description:
+ - Whether to add or remove reporting nodes
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ initiator_group_name:
+ description:
+ - Initiator group to map to the given LUN.
+ required: true
+ type: str
+
+ path:
+ description:
+ - Path of the LUN.
+ required: true
+ type: str
+
+ vserver:
+ required: true
+ description:
+ - The name of the vserver owning the LUN.
+ type: str
+
+ nodes:
+ required: true
+ description:
+ - List of reporting nodes to add or remove
+ type: list
+ elements: str
+
+notes:
+ - supports ZAPI and REST. REST requires ONTAP 9.10.1 or later.
+ - supports check mode.
+"""
+
+EXAMPLES = """
+ - name: Create Lun Map reporting nodes
+ netapp.ontap.na_ontap_lun_map_reporting_nodes:
+ hostname: 172.21.121.82
+ username: admin
+ password: netapp1!
+ https: true
+ validate_certs: false
+ vserver: vs1
+ state: present
+ initiator_group_name: carchigroup
+ path: /vol/carchiVolTest/carchiLunTest
+ nodes: [node2]
+
+ - name: Delete Lun Map reporting nodes
+ netapp.ontap.na_ontap_lun_map_reporting_nodes:
+ hostname: 172.21.121.82
+ username: admin
+ password: netapp1!
+ https: true
+ validate_certs: false
+ vserver: vs1
+ state: absent
+ initiator_group_name: carchigroup
+ path: /vol/carchiVolTest/carchiLunTest
+ nodes: [node2]
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapLUNMapReportingNodes:
    ''' add or remove reporting nodes from a lun map '''
    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            initiator_group_name=dict(required=True, type='str'),
            path=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
            nodes=dict(required=True, type='list', elements='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # uuids are filled in by get_lun_map_reporting_nodes_rest (REST only)
        self.lun_uuid, self.igroup_uuid, self.nodes_uuids = None, None, {}
        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
            msg = 'REST requires ONTAP 9.10.1 or later for na_ontap_lun_map_reporting_nodes'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_lun_map_reporting_nodes(self):
        """
        Return list of reporting nodes from the LUN map

        :return: list of reporting nodes (empty if none), or None when the LUN map itself does not exist
        :rtype: list
        """
        if self.use_rest:
            return self.get_lun_map_reporting_nodes_rest()
        query_details = netapp_utils.zapi.NaElement('lun-map-info')
        query_details.add_new_child('path', self.parameters['path'])
        query_details.add_new_child('initiator-group', self.parameters['initiator_group_name'])
        query_details.add_new_child('vserver', self.parameters['vserver'])

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)

        lun_query = netapp_utils.zapi.NaElement('lun-map-get-iter')
        lun_query.add_child_elem(query)

        try:
            result = self.server.invoke_successfully(lun_query, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting LUN map for %s: %s' %
                                  (self.parameters['initiator_group_name'], to_native(error)),
                                  exception=traceback.format_exc())
        try:
            # num-records is always expected in a well-formed ZAPI response
            num_records = int(result.get_child_content('num-records'))
        except TypeError:
            self.module.fail_json(msg="Error: unexpected ZAPI response for lun-map-get-iter: %s" % result.to_string())
        if num_records == 0:
            return None
        alist = result.get_child_by_name('attributes-list')
        info = alist.get_child_by_name('lun-map-info')
        reporting_nodes = info.get_child_by_name('reporting-nodes')
        node_list = []
        if reporting_nodes:
            for node in reporting_nodes.get_children():
                node_list.append(node.get_content())
        return node_list

    def add_lun_map_reporting_nodes(self, nodes):
        """ZAPI: add the given nodes to the LUN map reporting nodes in one call."""
        reporting_nodes_obj = netapp_utils.zapi.NaElement('lun-map-add-reporting-nodes')
        reporting_nodes_obj.add_new_child('igroup', self.parameters['initiator_group_name'])
        reporting_nodes_obj.add_new_child('path', self.parameters['path'])
        nodes_obj = netapp_utils.zapi.NaElement('nodes')
        for node in nodes:
            nodes_obj.add_new_child('filer-id', node)
        reporting_nodes_obj.add_child_elem(nodes_obj)
        try:
            self.server.invoke_successfully(reporting_nodes_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating LUN map reporting nodes for %s: %s' %
                                  (self.parameters['initiator_group_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def remove_lun_map_reporting_nodes(self, nodes):
        """ZAPI: remove the given nodes from the LUN map reporting nodes in one call."""
        reporting_nodes_obj = netapp_utils.zapi.NaElement('lun-map-remove-reporting-nodes')
        reporting_nodes_obj.add_new_child('igroup', self.parameters['initiator_group_name'])
        reporting_nodes_obj.add_new_child('path', self.parameters['path'])
        nodes_obj = netapp_utils.zapi.NaElement('nodes')
        for node in nodes:
            nodes_obj.add_new_child('filer-id', node)
        reporting_nodes_obj.add_child_elem(nodes_obj)
        try:
            self.server.invoke_successfully(reporting_nodes_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting LUN map reporting nodes for %s: %s' %
                                  (self.parameters['initiator_group_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_lun_map_reporting_nodes_rest(self):
        """REST variant: fetch reporting nodes and cache lun/igroup/node uuids for later calls."""
        api = 'protocols/san/lun-maps'
        query = {
            'lun.name': self.parameters['path'],
            'igroup.name': self.parameters['initiator_group_name'],
            'svm.name': self.parameters['vserver'],
            'fields': 'reporting_nodes,lun.uuid,igroup.uuid'
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            self.module.fail_json(msg='Error getting LUN map for %s: %s' %
                                  (self.parameters['initiator_group_name'], to_native(error)))
        if record:
            self.lun_uuid = record['lun']['uuid']
            self.igroup_uuid = record['igroup']['uuid']
            node_list = []
            for node in record.get('reporting_nodes', []):
                # cache the node uuid, required by remove_lun_map_reporting_nodes_rest
                self.nodes_uuids[node['name']] = node['uuid']
                node_list.append(node['name'])
            return node_list
        return None

    def add_lun_map_reporting_nodes_rest(self, node):
        """REST variant: add a single reporting node to the LUN map."""
        api = 'protocols/san/lun-maps/%s/%s/reporting-nodes' % (self.lun_uuid, self.igroup_uuid)
        dummy, error = rest_generic.post_async(self.rest_api, api, {'name': node})
        if error:
            self.module.fail_json(msg='Error creating LUN map reporting node for %s: %s' %
                                  (self.parameters['initiator_group_name'], to_native(error)))

    def remove_lun_map_reporting_nodes_rest(self, node):
        """REST variant: remove a single reporting node from the LUN map."""
        api = 'protocols/san/lun-maps/%s/%s/reporting-nodes' % (self.lun_uuid, self.igroup_uuid)
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.nodes_uuids[node])
        if error:
            self.module.fail_json(msg='Error deleting LUN map reporting nodes for %s: %s' %
                                  (self.parameters['initiator_group_name'], to_native(error)))

    def apply(self):
        """Entry point: compute node additions/removals and apply them."""
        reporting_nodes = self.get_lun_map_reporting_nodes()
        if reporting_nodes is None:
            self.module.fail_json(msg='Error: LUN map not found for vserver %s, LUN path: %s, igroup: %s' %
                                  (self.parameters['vserver'], self.parameters['path'], self.parameters['initiator_group_name']))
        if self.parameters['state'] == 'present':
            nodes_to_add = [node for node in self.parameters['nodes'] if node not in reporting_nodes]
            nodes_to_delete = list()
        else:
            nodes_to_add = list()
            nodes_to_delete = [node for node in self.parameters['nodes'] if node in reporting_nodes]
        changed = len(nodes_to_add) > 0 or len(nodes_to_delete) > 0
        if changed and not self.module.check_mode:
            if nodes_to_add:
                if self.use_rest:
                    # the REST endpoint takes one node per call
                    for node in nodes_to_add:
                        self.add_lun_map_reporting_nodes_rest(node)
                else:
                    self.add_lun_map_reporting_nodes(nodes_to_add)
            if nodes_to_delete:
                if self.use_rest:
                    # the REST endpoint takes one node per call
                    for node in nodes_to_delete:
                        self.remove_lun_map_reporting_nodes_rest(node)
                else:
                    self.remove_lun_map_reporting_nodes(nodes_to_delete)
        self.module.exit_json(changed=changed, reporting_nodes=reporting_nodes, nodes_to_add=nodes_to_add, nodes_to_delete=nodes_to_delete)
+
+
def main():
    """Instantiate the reporting-nodes module object and run it."""
    nodes_module = NetAppOntapLUNMapReportingNodes()
    nodes_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py
new file mode 100644
index 000000000..bbacc8ce2
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This module implements the operations for ONTAP MCC Mediator.
+# The Mediator is supported for MCC IP configs from ONTAP 9.7 or later.
+# This module requires REST APIs for Mediator which is supported from
+# ONTAP 9.8 (DW) or later
+
+'''
+na_ontap_mcc_mediator
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_mcc_mediator
+short_description: NetApp ONTAP Add and Remove MetroCluster Mediator
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 20.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Add and remove ONTAP MCC Mediator
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether MCCIP Mediator is present or not."
+ default: present
+ type: str
+
+ mediator_address:
+ description:
+ - ip address of the mediator
+ type: str
+ required: true
+
+ mediator_user:
+ description:
+ - username of the mediator
+ type: str
+ required: true
+
+ mediator_password:
+ description:
+ - password of the mediator
+ type: str
+ required: true
+
+'''
+
+EXAMPLES = """
+ - name: Add ONTAP MCCIP Mediator
+ na_ontap_mcc_mediator:
+ state: present
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ mediator_address: mediator_ip
+ mediator_user: metrocluster_admin
+ mediator_password: netapp1!
+
+ - name: Delete ONTAP MCCIP Mediator
+ na_ontap_mcc_mediator:
+ state: absent
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ mediator_user: metrocluster_admin
+ mediator_password: netapp1!
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+class NetAppOntapMccipMediator(object):
+ """
+ Mediator object for Add/Remove/Display
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ mediator_address=dict(required=True, type='str'),
+ mediator_user=dict(required=True, type='str'),
+ mediator_password=dict(required=True, type='str', no_log=True),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_mcc_mediator'))
+
+ def add_mediator(self):
+ """
+ Adds an ONTAP Mediator to MCC configuration
+ """
+ api = 'cluster/mediators'
+ params = {
+ 'ip_address': self.parameters['mediator_address'],
+ 'password': self.parameters['mediator_password'],
+ 'user': self.parameters['mediator_user']
+ }
+ dummy, error = self.rest_api.post(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def remove_mediator(self, current_uuid):
+ """
+ Removes the ONTAP Mediator from MCC configuration
+ """
+ api = 'cluster/mediators/%s' % current_uuid
+ params = {
+ 'ip_address': self.parameters['mediator_address'],
+ 'password': self.parameters['mediator_password'],
+ 'user': self.parameters['mediator_user']
+ }
+ dummy, error = self.rest_api.delete(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def get_mediator(self):
+ """
+ Determine if the MCC configuration has added an ONTAP Mediator
+ """
+ api = "cluster/mediators"
+ message, error = self.rest_api.get(api, None)
+ if error:
+ self.module.fail_json(msg=error)
+ if message['num_records'] > 0:
+ return message['records'][0]['uuid']
+ return None
+
+ def apply(self):
+ """
+ Apply action to MCC Mediator
+ """
+ current = self.get_mediator()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.add_mediator()
+ elif cd_action == 'delete':
+ self.remove_mediator(current)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Add, Remove and display ONTAP MCC Mediator
+ """
+ mediator_obj = NetAppOntapMccipMediator()
+ mediator_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py
new file mode 100644
index 000000000..bc149267c
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+"""
+(c) 2020, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+module: na_ontap_metrocluster
+short_description: NetApp ONTAP set up a MetroCluster
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.9.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+requirements:
+ - ONTAP >= 9.8
+
+description:
+ - Configure MetroCluster.
+options:
+ state:
+ choices: ['present']
+ description:
+ - Present to set up a MetroCluster
+ default: present
+ type: str
+ dr_pairs:
+ description: disaster recovery pair
+ type: list
+ required: true
+ elements: dict
+ suboptions:
+ node_name:
+ description:
+ - the name of the main node
+ required: true
+ type: str
+ partner_node_name:
+ description:
+ - the name of the main partner node
+ required: true
+ type: str
+ partner_cluster_name:
+ description:
+ - The name of the partner Cluster
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+-
+ name: Manage MetroCluster
+ hosts: localhost
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: True
+ validate_certs: False
+ tasks:
+ - name: Create MetroCluster
+ na_ontap_metrocluster:
+ <<: *login
+ dr_pairs:
+ - partner_node_name: rha17-a2
+ node_name: rha17-b2
+ partner_cluster_name: rha2-b2b1_siteB
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPMetroCluster(object):
+ ''' ONTAP metrocluster operations '''
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(choices=['present'], default='present'),
+ dr_pairs=dict(required=True, type='list', elements='dict', options=dict(
+ node_name=dict(required=True, type='str'),
+ partner_node_name=dict(required=True, type='str')
+ )),
+ partner_cluster_name=dict(required=True, type='str')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_metrocluster'))
+
+ def get_metrocluster(self):
+ attrs = None
+ api = 'cluster/metrocluster'
+ options = {'fields': '*'}
+ message, error = self.rest_api.get(api, options)
+ if error:
+ self.module.fail_json(msg=error)
+ if message is not None:
+ local = message['local']
+ if local['configuration_state'] != "not_configured":
+ attrs = {
+ 'configuration_state': local['configuration_state'],
+ 'partner_cluster_reachable': local['partner_cluster_reachable'],
+ 'partner_cluster_name': local['cluster']['name']
+ }
+ return attrs
+
+ def create_metrocluster(self):
+ api = 'cluster/metrocluster'
+ options = {}
+ dr_pairs = []
+ for pair in self.parameters['dr_pairs']:
+ dr_pairs.append({'node': {'name': pair['node_name']},
+ 'partner': {'name': pair['partner_node_name']}})
+ partner_cluster = {'name': self.parameters['partner_cluster_name']}
+ data = {'dr_pairs': dr_pairs, 'partner_cluster': partner_cluster}
+ message, error = self.rest_api.post(api, data, options)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ message, error = self.rest_api.wait_on_job(message['job'])
+ if error:
+ self.module.fail_json(msg="%s" % error)
+
+ def apply(self):
+ current = self.get_metrocluster()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_metrocluster()
+ # Since there is no modify or delete, we will return no change
+ else:
+ self.module.fail_json(msg="Modify and Delete currently not support in API")
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+ self.module.exit_json(**result)
+
+
+def main():
+ obj = NetAppONTAPMetroCluster()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py
new file mode 100644
index 000000000..3794c9753
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+"""
+(c) 2020, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+module: na_ontap_metrocluster_dr_group
+short_description: NetApp ONTAP manage MetroCluster DR Group
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 20.11.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+requirements:
+ - ONTAP >= 9.8
+description:
+ - Create/Delete MetroCluster DR Group
+ - Create only supports MCC IP
+ - Delete supports both MCC IP and MCC FC
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+      - add or remove DR groups
+ default: present
+ type: str
+ dr_pairs:
+ description: disaster recovery pairs
+ type: list
+ required: true
+ elements: dict
+ suboptions:
+ node_name:
+ description:
+ - the name of the main node
+ required: true
+ type: str
+ partner_node_name:
+ description:
+ - the name of the main partner node
+ required: true
+ type: str
+ partner_cluster_name:
+ description:
+ - The name of the partner cluster
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+-
+ name: Manage MetroCluster DR group
+ hosts: localhost
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: True
+ validate_certs: False
+ tasks:
+ - name: Create MetroCluster DR group
+ na_ontap_metrocluster_dr_group:
+ <<: *login
+ dr_pairs:
+        - partner_node_name: carchi_cluster3_01
+ node_name: carchi_cluster1_01
+ partner_cluster_name: carchi_cluster3
+ - name: Delete MetroCluster DR group
+ na_ontap_metrocluster_dr_group:
+ <<: *login
+ dr_pairs:
+        - partner_node_name: carchi_cluster3_01
+ node_name: carchi_cluster1_01
+ state: absent
+ partner_cluster_name: carchi_cluster3
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
+class NetAppONTAPMetroClusterDRGroup(object):
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ dr_pairs=dict(required=True, type='list', elements='dict', options=dict(
+ node_name=dict(required=True, type='str'),
+ partner_node_name=dict(required=True, type='str')
+ )),
+ partner_cluster_name=dict(required=True, type='str')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_metrocluster_dr_group',
+ version='9.8'))
+
+ def get_dr_group(self):
+ return_attrs = None
+ for pair in self.parameters['dr_pairs']:
+ api = 'cluster/metrocluster/dr-groups'
+ options = {'fields': '*',
+ 'dr_pairs.node.name': pair['node_name'],
+ 'dr_pairs.partner.name': pair['partner_node_name'],
+ 'partner_cluster.name': self.parameters['partner_cluster_name']}
+ message, error = self.rest_api.get(api, options)
+ if error:
+ self.module.fail_json(msg=error)
+ if 'records' in message and message['num_records'] == 0:
+ continue
+ elif 'records' not in message or message['num_records'] != 1:
+ error = "Unexpected response from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ record = message['records'][0]
+ return_attrs = {
+ 'partner_cluster_name': record['partner_cluster']['name'],
+ 'dr_pairs': [],
+ 'id': record['id']
+ }
+ for dr_pair in record['dr_pairs']:
+ return_attrs['dr_pairs'].append({'node_name': dr_pair['node']['name'], 'partner_node_name': dr_pair['partner']['name']})
+            # once a matching DR group record has been found we don't need to loop anymore
+ break
+ return return_attrs
+
+ def get_dr_group_ids_from_nodes(self):
+ delete_ids = []
+ for pair in self.parameters['dr_pairs']:
+ api = 'cluster/metrocluster/nodes'
+ options = {'fields': '*',
+ 'node.name': pair['node_name']}
+ message, error = self.rest_api.get(api, options)
+ if error:
+ self.module.fail_json(msg=error)
+ if 'records' in message and message['num_records'] == 0:
+ continue
+ elif 'records' not in message or message['num_records'] != 1:
+ error = "Unexpected response from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ record = message['records'][0]
+ if int(record['dr_group_id']) not in delete_ids:
+ delete_ids.append(int(record['dr_group_id']))
+ return delete_ids
+
+ def create_dr_group(self):
+ api = 'cluster/metrocluster/dr-groups'
+ dr_pairs = []
+ for pair in self.parameters['dr_pairs']:
+ dr_pairs.append({'node': {'name': pair['node_name']},
+ 'partner': {'name': pair['partner_node_name']}})
+ partner_cluster = {'name': self.parameters['partner_cluster_name']}
+ data = {'dr_pairs': dr_pairs, 'partner_cluster': partner_cluster}
+ message, error = self.rest_api.post(api, data)
+ if error is not None:
+ self.module.fail_json(msg="%s" % error)
+ message, error = self.rest_api.wait_on_job(message['job'])
+ if error:
+ self.module.fail_json(msg="%s" % error)
+
+ def delete_dr_groups(self, dr_ids):
+ for dr_id in dr_ids:
+ api = 'cluster/metrocluster/dr-groups/' + str(dr_id)
+ message, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg=error)
+ message, error = self.rest_api.wait_on_job(message['job'])
+ if error:
+ self.module.fail_json(msg="%s" % error)
+
+ def apply(self):
+ current = self.get_dr_group()
+ delete_ids = None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and current is None and self.parameters['state'] == 'absent':
+ # check if there is some FC group to delete
+ delete_ids = self.get_dr_group_ids_from_nodes()
+ if delete_ids:
+ cd_action = 'delete'
+ self.na_helper.changed = True
+ elif cd_action == 'delete':
+ delete_ids = [current['id']]
+ if cd_action and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_dr_group()
+ if cd_action == 'delete':
+ self.delete_dr_groups(delete_ids)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+ self.module.exit_json(**result)
+
+
+def main():
+ obj = NetAppONTAPMetroClusterDRGroup()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py
new file mode 100644
index 000000000..ed363692e
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_motd
+author:
+ - Piotr Olczak (@dprts) <polczak@redhat.com>
+ - NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+short_description: Setup motd
+description:
+ - This module allows you to manipulate motd for a vserver
+ - It also allows to manipulate motd at the cluster level by using the cluster vserver (cserver)
+version_added: 2.7.0
+options:
+ state:
+ description:
+ - If C(state=present) sets MOTD given in I(message) C(state=absent) removes it.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ motd_message:
+ description:
+ - MOTD Text message.
+ - message is deprecated and will be removed to avoid a conflict with an Ansible internal variable.
+ type: str
+ default: ''
+ aliases:
+ - message
+ vserver:
+ description:
+ - The name of the SVM motd should be set for.
+ required: true
+ type: str
+ show_cluster_motd:
+ description:
+ - Set to I(false) if Cluster-level Message of the Day should not be shown
+ type: bool
+ default: True
+
+notes:
+ - This module is deprecated and only supports ZAPI.
+ - Please use netapp.ontap.na_ontap_login_messages both for ZAPI and REST.
+
+'''
+
+EXAMPLES = '''
+
+- name: Set Cluster-Level MOTD
+ netapp.ontap.na_ontap_motd:
+ vserver: my_ontap_cluster
+ motd_message: "Cluster wide MOTD"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ state: present
+ https: true
+
+- name: Set MOTD for I(rhev_nfs_krb) SVM, do not show Cluster-Level MOTD
+ netapp.ontap.na_ontap_motd:
+ vserver: rhev_nfs_krb
+ motd_message: "Access to rhev_nfs_krb is also restricted"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ state: present
+ show_cluster_motd: False
+ https: true
+
+- name: Remove Cluster-Level MOTD
+ netapp.ontap.na_ontap_motd:
+ vserver: my_ontap_cluster
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ state: absent
+ https: true
+'''
+
+RETURN = '''
+
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+class NetAppONTAPMotd:
+
+ def __init__(self):
+ argument_spec = netapp_utils.na_ontap_zapi_only_spec()
+ argument_spec.update(dict(
+ state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
+ vserver=dict(required=True, type='str'),
+ motd_message=dict(default='', type='str', aliases=['message']),
+ show_cluster_motd=dict(default=True, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.na_helper.module_replaces('na_ontap_login_messages', self.module)
+
+ msg = 'netapp.ontap.na_ontap_motd is deprecated and only supports ZAPI. Please use netapp.ontap.na_ontap_login_messages.'
+ if self.parameters['use_rest'].lower() == 'never':
+ self.module.warn(msg)
+ else:
+ self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
+ if 'message' in self.parameters:
+ self.module.warn('Error: "message" option conflicts with Ansible internal variable - please use "motd_message".')
+
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def motd_get_iter(self):
+ """
+ Compose NaElement object to query current motd
+ :return: NaElement object for vserver-motd-get-iter
+ """
+ motd_get_iter = netapp_utils.zapi.NaElement('vserver-motd-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
+ motd_info.add_new_child('is-cluster-message-enabled', str(self.parameters['show_cluster_motd']))
+ motd_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(motd_info)
+ motd_get_iter.add_child_elem(query)
+ return motd_get_iter
+
+ def motd_get(self):
+ """
+ Get current motd
+ :return: Dictionary of current motd details if query successful, else None
+ """
+ motd_get_iter = self.motd_get_iter()
+ motd_result = {}
+ try:
+ result = self.server.invoke_successfully(motd_get_iter, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching motd info: %s' % to_native(error),
+ exception=traceback.format_exc())
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) > 0:
+ motd_info = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'vserver-motd-info')
+ motd_result['motd_message'] = motd_info.get_child_content('message')
+ motd_result['motd_message'] = str(motd_result['motd_message']).rstrip()
+ motd_result['show_cluster_motd'] = motd_info.get_child_content('is-cluster-message-enabled') == 'true'
+ motd_result['vserver'] = motd_info.get_child_content('vserver')
+ return motd_result
+ return None
+
+ def modify_motd(self):
+ motd_create = netapp_utils.zapi.NaElement('vserver-motd-modify-iter')
+ motd_create.add_new_child('message', self.parameters['motd_message'])
+ motd_create.add_new_child(
+ 'is-cluster-message-enabled', 'true' if self.parameters['show_cluster_motd'] is True else 'false')
+ query = netapp_utils.zapi.NaElement('query')
+ motd_info = netapp_utils.zapi.NaElement('vserver-motd-info')
+ motd_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(motd_info)
+ motd_create.add_child_elem(query)
+ try:
+ self.server.invoke_successfully(motd_create, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as err:
+ self.module.fail_json(msg="Error creating motd: %s" % (to_native(err)), exception=traceback.format_exc())
+ return motd_create
+
+ def apply(self):
+ """
+ Applies action from playbook
+ """
+ current = self.motd_get()
+ if self.parameters['state'] == 'absent':
+ # Just make sure it is empty
+ self.parameters['motd_message'] = ''
+ if current and current['motd_message'] == 'None':
+ current = None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed and not self.module.check_mode:
+ self.modify_motd()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ motd_obj = NetAppONTAPMotd()
+ motd_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_mappings.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_mappings.py
new file mode 100644
index 000000000..3aa4f2df5
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_mappings.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+
+# (c) 2022-2023, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+'''
+na_ontap_name_mappings
+'''
+
+
+DOCUMENTATION = '''
+module: na_ontap_name_mappings
+short_description: NetApp ONTAP name mappings
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 22.0.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete/Modify name mappings for an SVM on ONTAP.
+options:
+ state:
+ description:
+ - Whether the specified name mappings should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ client_match:
+ description:
+ - Client workstation IP Address which is matched when searching for the pattern.
+ - Example '10.254.101.111/28'
+ - Client match value can be in any of the following formats,
+ - As an IPv4 address with a subnet mask expressed as a number of bits; for instance, 10.1.12.0/24
+ - As an IPv6 address with a subnet mask expressed as a number of bits; for instance, fd20:8b1e:b255:4071::/64
+ - As an IPv4 address with a network mask; for instance, 10.1.16.0/255.255.255.0
+ - As a hostname
+ type: str
+ direction:
+ description:
+ - Direction in which the name mapping is applied.
+ - The possible values are,
+ krb_unix - Kerberos principal name to UNIX user name
+ win_unix - Windows user name to UNIX user name
+ unix_win - UNIX user name to Windows user name mapping
+ s3_unix - S3 user name to UNIX user name mapping
+ s3_win - S3 user name to Windows user name mapping
+ - s3_unix and s3_win requires ONTAP 9.12.1 or later.
+ choices: ['krb_unix', 'win_unix', 'unix_win', 's3_unix', 's3_win']
+ required: true
+ type: str
+ index:
+ description:
+ - Position in the list of name mappings.
+ - Minimum value is 1 and maximum is 2147483647.
+ required: true
+ type: int
+ pattern:
+ description:
+ - Pattern used to match the name while searching for a name that can be used as a replacement.
+ - The pattern is a UNIX-style regular expression.
+ - Regular expressions are case-insensitive when mapping from Windows to UNIX,
+ and they are case-sensitive for mappings from Kerberos to UNIX and UNIX to Windows.
+ - Minimum length is 1 and maximum length is 256.
+ - Pattern should be unique for each index of vserver.
+ - Example ENGCIFS_AD_USER.
+ type: str
+ replacement:
+ description:
+ - The name that is used as a replacement, if the pattern associated with this entry matches.
+ - Minimum length is 1 and maximum length is 256.
+ - Example unix_user1.
+ type: str
+ from_index:
+ description:
+ - If no entry with index is found, it is created by reindexing the entry for from_index.
+ - If no entry is found for index and from_index, an error is reported.
+ - Minimum value is 1 and maximum is 2147483647.
+ - Requires ONTAP version 9.7 or later.
+ type: int
+
+'''
+
+EXAMPLES = '''
+ - name: create name mappings configuration
+ netapp.ontap.na_ontap_name_mappings:
+ vserver: vserverName
+ direction: win_unix
+ index: 1
+ pattern: ENGCIFS_AD_USER
+ replacement: unix_user
+ client_match: 10.254.101.111/28
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: modify name mappings configuration
+ netapp.ontap.na_ontap_name_mappings:
+ vserver: vserverName
+ direction: win_unix
+ index: 1
+ pattern: ENGCIFS_AD_USERS
+ replacement: unix_user1
+ client_match: 10.254.101.112/28
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Swap name mappings position
+ netapp.ontap.na_ontap_name_mappings:
+ vserver: vserverName
+ direction: win_unix
+ index: 1
+ pattern: ENGCIFS_AD_USERS
+ replacement: unix_user1
+ from_index: 2
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete name mappings configuration
+ netapp.ontap.na_ontap_name_mappings:
+ vserver: vserverName
+ direction: win_unix
+ index: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+'''
+
+RETURN = """
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapNameMappings:
+ """ object initialize and class methods """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ client_match=dict(required=False, type='str'),
+ direction=dict(required=True, type='str', choices=['krb_unix', 'win_unix', 'unix_win', 's3_unix', 's3_win']),
+ index=dict(required=True, type='int'),
+ from_index=dict(required=False, type='int'),
+ pattern=dict(required=False, type='str'),
+ replacement=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule(self)
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.svm_uuid = None
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_name_mappings', 9, 6)
+ self.rest_api.is_rest_supported_properties(self.parameters, None, [['from_index', (9, 7)]])
+ if self.parameters['direction'] in ['s3_unix', 's3_win'] and not self.rest_api.meets_rest_minimum_version(True, 9, 12, 1):
+ self.module.fail_json(msg="Error: direction %s requires ONTAP 9.12.1 or later version." % self.parameters['direction'])
+
+ def get_name_mappings_rest(self, index=None):
+ '''
+ Retrieves the name mapping configuration for SVM with rest API.
+ '''
+ if index is None:
+ index = self.parameters['index']
+ query = {'svm.name': self.parameters.get('vserver'),
+ 'index': index, # the existing current index or from_index to be swapped
+ 'direction': self.parameters.get('direction'), # different directions can have same index
+ 'fields': 'svm.uuid,'
+ 'client_match,'
+ 'direction,'
+ 'index,'
+ 'pattern,'
+ 'replacement,'}
+ api = 'name-services/name-mappings'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg=error)
+ if record:
+ self.svm_uuid = record['svm']['uuid']
+ return {
+ 'pattern': self.na_helper.safe_get(record, ['pattern']),
+ 'direction': self.na_helper.safe_get(record, ['direction']),
+ 'replacement': self.na_helper.safe_get(record, ['replacement']),
+ 'client_match': record.get('client_match', None),
+ }
+ return None
+
+ def create_name_mappings_rest(self):
+ """
+ Creates name mappings for an SVM with REST API.
+ """
+ body = {'svm.name': self.parameters.get('vserver'),
+ 'index': self.parameters.get('index'),
+ 'direction': self.parameters.get('direction'),
+ 'pattern': self.parameters.get('pattern'),
+ 'replacement': self.parameters.get('replacement')}
+ if 'client_match' in self.parameters:
+ body['client_match'] = self.parameters['client_match']
+ api = 'name-services/name-mappings'
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error is not None:
+ self.module.fail_json(msg="Error on creating name mappings rest: %s" % error)
+
+ def modify_name_mappings_rest(self, modify=None, reindex=False):
+ """
+ Updates the name mapping configuration of an SVM with rest API.
+ Swap the position with new position(new_index).
+ """
+ body = {}
+ query = None
+ if modify:
+ for option in ['pattern', 'replacement', 'client_match']:
+ if option in modify:
+ body[option] = self.parameters[option]
+ # Cannot swap entries which have hostname or address configured.
+ # Delete and recreate the new entry at the specified position.
+ index = self.parameters['index']
+ if reindex:
+ query = {'new_index': self.parameters.get('index')}
+ index = self.parameters['from_index']
+
+ api = 'name-services/name-mappings/%s/%s/%s' % (self.svm_uuid, self.parameters['direction'], index)
+ dummy, error = rest_generic.patch_async(self.rest_api, api, None, body, query)
+ if error is not None:
+ self.module.fail_json(msg="Error on modifying name mappings rest: %s" % error)
+
+ def delete_name_mappings_rest(self):
+ """
+ Delete the name mapping configuration of an SVM with rest API.
+ """
+ api = 'name-services/name-mappings/%s/%s/%s' % (self.svm_uuid, self.parameters['direction'], self.parameters['index'])
+ dummy, error = rest_generic.delete_async(self.rest_api, api, None)
+ if error is not None:
+ self.module.fail_json(msg="Error on deleting name mappings rest: %s" % error)
+
+ def apply(self):
+ reindex = False
+ current = self.get_name_mappings_rest()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        # Throws an error when trying to swap with a non-existing index
+ if cd_action == 'create':
+ if self.parameters.get('from_index') is not None:
+ current = self.get_name_mappings_rest(self.parameters['from_index'])
+ if not current:
+ self.module.fail_json(msg="Error from_index entry does not exist")
+ reindex = True
+ cd_action = None
+ else:
+ # pattern and replacement are required when creating name mappings.
+ if not self.parameters.get('pattern') or not self.parameters.get('replacement'):
+ self.module.fail_json(msg="Error creating name mappings for an SVM, pattern and replacement are required in create.")
+ modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_name_mappings_rest()
+ elif cd_action == 'delete':
+ self.delete_name_mappings_rest()
+ elif modify or reindex:
+ self.modify_name_mappings_rest(modify, reindex)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """ Create object and call apply """
+ mapping_obj = NetAppOntapNameMappings()
+ mapping_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py
new file mode 100644
index 000000000..edd8accb1
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+
+# (c) 2019-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete/Modify Name Service Switch.
+ - Deleting name service switch not supported in REST.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_name_service_switch
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified ns-switch should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ database_type:
+ description:
+ - Name services switch database.
+ choices: ['hosts','group', 'passwd', 'netgroup', 'namemap']
+ required: true
+ type: str
+ sources:
+ description:
+ - Type of sources.
+ - Possible values include files,dns,ldap,nis.
+ type: list
+ elements: str
+
+short_description: "NetApp ONTAP Manage name service switch"
+'''
+
+EXAMPLES = """
+ - name: create name service database
+ netapp.ontap.na_ontap_name_service_switch:
+ state: present
+ database_type: namemap
+ sources: files,ldap
+ vserver: "{{ Vserver name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+
+ - name: modify name service database sources
+ netapp.ontap.na_ontap_name_service_switch:
+ state: present
+ database_type: namemap
+ sources: files
+ vserver: "{{ Vserver name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver
+
+
+class NetAppONTAPNsswitch:
+ """
+    Class with name service switch methods
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ database_type=dict(required=True, type='str', choices=['hosts', 'group', 'passwd', 'netgroup', 'namemap']),
+ sources=dict(required=False, type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if self.parameters.get('sources') is not None:
+ self.parameters['sources'] = [source.strip() for source in self.parameters['sources']]
+ if '' in self.parameters['sources']:
+ self.module.fail_json(msg="Error: Invalid value '' specified for sources")
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+ self.svm_uuid = None
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_name_service_switch(self):
+ """
+ get current name service switch config
+ :return: dict of current name service switch
+ """
+ if self.use_rest:
+ return self.get_name_service_switch_rest()
+ nss_iter = netapp_utils.zapi.NaElement('nameservice-nsswitch-get-iter')
+ nss_info = netapp_utils.zapi.NaElement('namservice-nsswitch-config-info')
+ db_type = netapp_utils.zapi.NaElement('nameservice-database')
+ db_type.set_content(self.parameters['database_type'])
+ query = netapp_utils.zapi.NaElement('query')
+ nss_info.add_child_elem(db_type)
+ query.add_child_elem(nss_info)
+ nss_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(nss_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching name service switch info for %s: %s' %
+ (self.parameters['vserver'], to_native(error)), exception=traceback.format_exc())
+ return_value = None
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
+ nss_sources = result.get_child_by_name('attributes-list').get_child_by_name(
+ 'namservice-nsswitch-config-info').get_child_by_name('nameservice-sources')
+            # nameservice-sources will not be present in result if the value is '-'
+ if nss_sources:
+ sources = [sources.get_content() for sources in nss_sources.get_children()]
+ return_value = {'sources': sources}
+ else:
+ return_value = {'sources': []}
+ return return_value
+
+ def create_name_service_switch(self):
+ """
+ create name service switch config
+ :return: None
+ """
+ nss_create = netapp_utils.zapi.NaElement('nameservice-nsswitch-create')
+ nss_create.add_new_child('nameservice-database', self.parameters['database_type'])
+ nss_sources = netapp_utils.zapi.NaElement('nameservice-sources')
+ nss_create.add_child_elem(nss_sources)
+ for source in self.parameters['sources']:
+ nss_sources.add_new_child('nss-source-type', source)
+ try:
+ self.server.invoke_successfully(nss_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on creating name service switch config on vserver %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_name_service_switch(self):
+ """
+ delete name service switch
+ :return: None
+ """
+ nss_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'nameservice-nsswitch-destroy', **{'nameservice-database': self.parameters['database_type']})
+ try:
+ self.server.invoke_successfully(nss_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on deleting name service switch config on vserver %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_name_service_switch(self, modify):
+ """
+ modify name service switch
+ :param modify: dict of modify attributes
+ :return: None
+ """
+ if self.use_rest:
+ return self.modify_name_service_switch_rest()
+ nss_modify = netapp_utils.zapi.NaElement('nameservice-nsswitch-modify')
+ nss_modify.add_new_child('nameservice-database', self.parameters['database_type'])
+ nss_sources = netapp_utils.zapi.NaElement('nameservice-sources')
+ nss_modify.add_child_elem(nss_sources)
+ if 'sources' in modify:
+ for source in self.parameters['sources']:
+ nss_sources.add_new_child('nss-source-type', source)
+ try:
+ self.server.invoke_successfully(nss_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error on modifying name service switch config on vserver %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_name_service_switch_rest(self):
+ record, error = rest_vserver.get_vserver(self.rest_api, self.parameters['vserver'], 'nsswitch,uuid')
+ if error:
+ self.module.fail_json(msg='Error fetching name service switch info for %s: %s' %
+ (self.parameters['vserver'], to_native(error)))
+ if not record:
+ self.module.fail_json(msg="Error: Specified vserver %s not found" % self.parameters['vserver'])
+ self.svm_uuid = record['uuid']
+ # if database type is already deleted by ZAPI call, REST will not have the database key.
+        # setting it to [] helps to set the value in REST patch call.
+ database_type = self.na_helper.safe_get(record, ['nsswitch', self.parameters['database_type']])
+ return {'sources': database_type if database_type else []}
+
+ def modify_name_service_switch_rest(self):
+ api = 'svm/svms'
+ body = {
+ 'nsswitch': {
+ self.parameters['database_type']: self.parameters['sources']
+ }
+ }
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body)
+ if error:
+ self.module.fail_json(msg='Error on modifying name service switch config on vserver %s: %s'
+ % (self.parameters['vserver'], to_native(error)))
+
+ def apply(self):
+ current = self.get_name_service_switch()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action == 'delete' and self.use_rest:
+ self.module.fail_json(msg="Error: deleting name service switch not supported in REST.")
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_name_service_switch()
+ elif cd_action == 'delete':
+ self.delete_name_service_switch()
+ elif modify:
+ self.modify_name_service_switch(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+    '''Apply operations from playbook'''
+ nss = NetAppONTAPNsswitch()
+ nss.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py
new file mode 100644
index 000000000..3df785861
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+""" this is ndmp module
+
+ (c) 2019, NetApp, Inc
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+
+DOCUMENTATION = '''
+---
+module: na_ontap_ndmp
+short_description: NetApp ONTAP NDMP services configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Modify NDMP Services.
+
+options:
+
+ vserver:
+ description:
+ - Name of the vserver.
+ required: true
+ type: str
+
+ abort_on_disk_error:
+ description:
+ - Enable abort on disk error.
+ type: bool
+
+ authtype:
+ description:
+ - Authentication type.
+ type: list
+ elements: str
+
+ backup_log_enable:
+ description:
+ - Enable backup log.
+ type: bool
+
+ data_port_range:
+ description:
+ - Data port range. Modification not supported for data Vservers.
+ type: str
+
+ debug_enable:
+ description:
+ - Enable debug.
+ type: bool
+
+ debug_filter:
+ description:
+ - Debug filter.
+ type: str
+
+ dump_detailed_stats:
+ description:
+ - Enable logging of VM stats for dump.
+ type: bool
+
+ dump_logical_find:
+ description:
+ - Enable logical find for dump.
+ type: str
+
+ enable:
+ description:
+ - Enable NDMP on vserver.
+ type: bool
+
+ fh_dir_retry_interval:
+ description:
+ - FH throttle value for dir.
+ type: int
+
+ fh_node_retry_interval:
+ description:
+ - FH throttle value for node.
+ type: int
+
+ ignore_ctime_enabled:
+ description:
+ - Ignore ctime.
+ type: bool
+
+ is_secure_control_connection_enabled:
+ description:
+ - Is secure control connection enabled.
+ type: bool
+
+ offset_map_enable:
+ description:
+ - Enable offset map.
+ type: bool
+
+ per_qtree_exclude_enable:
+ description:
+ - Enable per qtree exclusion.
+ type: bool
+
+ preferred_interface_role:
+ description:
+ - Preferred interface role.
+ type: list
+ elements: str
+
+ restore_vm_cache_size:
+ description:
+ - Restore VM file cache size. Value range [4-1024]
+ type: int
+
+ secondary_debug_filter:
+ description:
+ - Secondary debug filter.
+ type: str
+
+ tcpnodelay:
+ description:
+ - Enable TCP nodelay.
+ type: bool
+
+ tcpwinsize:
+ description:
+ - TCP window size.
+ type: int
+'''
+
+EXAMPLES = '''
+ - name: modify ndmp
+ na_ontap_ndmp:
+ vserver: ansible
+ hostname: "{{ hostname }}"
+ abort_on_disk_error: true
+ authtype: plaintext,challenge
+ backup_log_enable: true
+ data_port_range: 8000-9000
+ debug_enable: true
+ debug_filter: filter
+ dump_detailed_stats: true
+ dump_logical_find: default
+ enable: true
+ fh_dir_retry_interval: 100
+ fh_node_retry_interval: 100
+ ignore_ctime_enabled: true
+ is_secure_control_connection_enabled: true
+ offset_map_enable: true
+ per_qtree_exclude_enable: true
+ preferred_interface_role: node_mgmt,intercluster
+ restore_vm_cache_size: 1000
+ secondary_debug_filter: filter
+ tcpnodelay: true
+ tcpwinsize: 10000
+ username: user
+ password: pass
+ https: False
+'''
+
+RETURN = '''
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPNdmp(object):
+ '''
+    modify NDMP services configuration
+ '''
+ def __init__(self):
+ self.use_rest = False
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.modifiable_options = dict(
+ abort_on_disk_error=dict(required=False, type='bool'),
+ authtype=dict(required=False, type='list', elements='str'),
+ backup_log_enable=dict(required=False, type='bool'),
+ data_port_range=dict(required=False, type='str'),
+ debug_enable=dict(required=False, type='bool'),
+ debug_filter=dict(required=False, type='str'),
+ dump_detailed_stats=dict(required=False, type='bool'),
+ dump_logical_find=dict(required=False, type='str'),
+ enable=dict(required=False, type='bool'),
+ fh_dir_retry_interval=dict(required=False, type='int'),
+ fh_node_retry_interval=dict(required=False, type='int'),
+ ignore_ctime_enabled=dict(required=False, type='bool'),
+ is_secure_control_connection_enabled=dict(required=False, type='bool'),
+ offset_map_enable=dict(required=False, type='bool'),
+ per_qtree_exclude_enable=dict(required=False, type='bool'),
+ preferred_interface_role=dict(required=False, type='list', elements='str'),
+ restore_vm_cache_size=dict(required=False, type='int'),
+ secondary_debug_filter=dict(required=False, type='str'),
+ tcpnodelay=dict(required=False, type='bool'),
+ tcpwinsize=dict(required=False, type='int')
+ )
+ self.argument_spec.update(dict(
+ vserver=dict(required=True, type='str')
+ ))
+
+ self.argument_spec.update(self.modifiable_options)
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # API should be used for ONTAP 9.6 or higher, ZAPI for lower version
+ self.rest_api = OntapRestAPI(self.module)
+ unsupported_rest_properties = ['abort_on_disk_error', 'backup_log_enable', 'data_port_range',
+ 'debug_enable', 'debug_filter', 'dump_detailed_stats',
+ 'dump_logical_find', 'fh_dir_retry_interval', 'fh_node_retry_interval',
+ 'ignore_ctime_enabled', 'is_secure_control_connection_enabled',
+ 'offset_map_enable', 'per_qtree_exclude_enable', 'preferred_interface_role',
+ 'restore_vm_cache_size', 'secondary_debug_filter', 'tcpnodelay', 'tcpwinsize']
+ self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
+ if not self.use_rest:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_ndmp_svm_uuid(self):
+
+ """
+ Get a svm's UUID
+        :return: uuid of the svm
+ """
+ params = {'svm.name': self.parameters['vserver']}
+ api = "protocols/ndmp/svms"
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg=error)
+ if 'records' in message and len(message['records']) == 0:
+ self.module.fail_json(msg='Error fetching uuid for vserver %s: ' % (self.parameters['vserver']))
+ if len(message.keys()) == 0:
+ error = "No information collected from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ elif 'records' not in message:
+ error = "Unexpected response from %s: %s" % (api, repr(message))
+ self.module.fail_json(msg=error)
+ return message['records'][0]['svm']['uuid']
+
+ def ndmp_get_iter(self, uuid=None):
+ """
+ get current vserver ndmp attributes.
+ :return: a dict of ndmp attributes.
+ """
+ if self.use_rest:
+ data = dict()
+ params = {'fields': 'authentication_types,enabled'}
+ api = '/protocols/ndmp/svms/' + uuid
+ message, error = self.rest_api.get(api, params)
+ data['enable'] = message['enabled']
+ data['authtype'] = message['authentication_types']
+
+ if error:
+ self.module.fail_json(msg=error)
+ return data
+ else:
+ ndmp_get = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ ndmp_info = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-info')
+ ndmp_info.add_new_child('vserver', self.parameters['vserver'])
+ query.add_child_elem(ndmp_info)
+ ndmp_get.add_child_elem(query)
+ ndmp_details = dict()
+ try:
+ result = self.server.invoke_successfully(ndmp_get, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching ndmp from %s: %s'
+ % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ ndmp_attributes = result.get_child_by_name('attributes-list').get_child_by_name('ndmp-vserver-attributes-info')
+ self.get_ndmp_details(ndmp_details, ndmp_attributes)
+ return ndmp_details
+
+ def get_ndmp_details(self, ndmp_details, ndmp_attributes):
+ """
+ :param ndmp_details: a dict of current ndmp.
+ :param ndmp_attributes: ndmp returned from api call in xml format.
+ :return: None
+ """
+ for option in self.modifiable_options:
+ option_type = self.modifiable_options[option]['type']
+ if option_type == 'bool':
+ ndmp_details[option] = self.str_to_bool(ndmp_attributes.get_child_content(self.attribute_to_name(option)))
+ elif option_type == 'int':
+ ndmp_details[option] = int(ndmp_attributes.get_child_content(self.attribute_to_name(option)))
+ elif option_type == 'list':
+ child_list = ndmp_attributes.get_child_by_name(self.attribute_to_name(option))
+ values = [child.get_content() for child in child_list.get_children()]
+ ndmp_details[option] = values
+ else:
+ ndmp_details[option] = ndmp_attributes.get_child_content(self.attribute_to_name(option))
+
+ def modify_ndmp(self, modify):
+ """
+ :param modify: A list of attributes to modify
+ :return: None
+ """
+ if self.use_rest:
+ ndmp = dict()
+ uuid = self.get_ndmp_svm_uuid()
+ if self.parameters.get('enable'):
+ ndmp['enabled'] = self.parameters['enable']
+ if self.parameters.get('authtype'):
+ ndmp['authentication_types'] = self.parameters['authtype']
+ api = "protocols/ndmp/svms/" + uuid
+ dummy, error = self.rest_api.patch(api, ndmp)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+
+ ndmp_modify = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-modify')
+ for attribute in modify:
+ if attribute == 'authtype':
+ authtypes = netapp_utils.zapi.NaElement('authtype')
+ types = self.parameters['authtype']
+ for authtype in types:
+ authtypes.add_new_child('ndmpd-authtypes', authtype)
+ ndmp_modify.add_child_elem(authtypes)
+ elif attribute == 'preferred_interface_role':
+ preferred_interface_roles = netapp_utils.zapi.NaElement('preferred-interface-role')
+ roles = self.parameters['preferred_interface_role']
+ for role in roles:
+ preferred_interface_roles.add_new_child('netport-role', role)
+ ndmp_modify.add_child_elem(preferred_interface_roles)
+ else:
+ ndmp_modify.add_new_child(self.attribute_to_name(attribute), str(self.parameters[attribute]))
+ try:
+ self.server.invoke_successfully(ndmp_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error modifying ndmp on %s: %s'
+ % (self.parameters['vserver'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def attribute_to_name(attribute):
+ return str.replace(attribute, '_', '-')
+
+ @staticmethod
+ def str_to_bool(value):
+ return value == 'true'
+
+ def apply(self):
+ """Call modify operations."""
+ uuid = None
+ if self.use_rest:
+            # we only have the svm name, we need to get the uuid for the svm
+ uuid = self.get_ndmp_svm_uuid()
+ current = self.ndmp_get_iter(uuid=uuid)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if modify:
+ self.modify_ndmp(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ obj = NetAppONTAPNdmp()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py
new file mode 100644
index 000000000..6ba4083e5
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py
@@ -0,0 +1,546 @@
+#!/usr/bin/python
+
+# (c) 2018-2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_net_ifgrp
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_net_ifgrp
+short_description: NetApp Ontap modify network interface group
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create, modify ports, destroy the network interface group
+options:
+ state:
+ description:
+ - Whether the specified network interface group should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ distribution_function:
+ description:
+ - Specifies the traffic distribution function for the ifgrp.
+ choices: ['mac', 'ip', 'sequential', 'port']
+ type: str
+
+ name:
+ description:
+ - Specifies the interface group name.
+ - Not supported with REST, use C(ports) or C(from_lag_ports).
+ - Required with ZAPI.
+ type: str
+
+ mode:
+ description:
+ - Specifies the link policy for the ifgrp.
+ type: str
+
+ node:
+ description:
+ - Specifies the name of node.
+ required: true
+ type: str
+
+ ports:
+ aliases:
+ - port
+ description:
+ - List of expected ports to be present in the interface group.
+ - If a port is present in this list, but not on the target, it will be added.
+ - If a port is not in the list, but present on the target, it will be removed.
+ - Make sure the list contains all ports you want to see on the target.
+ - With REST, ports in this list are used to find the current LAG port.
+ - If LAG is not found or only partial port matches, then C(from_lag_port) are used to get the current LAG.
+ - With REST, when C(state=absent) is set, all of the ports in ifgrp should be provided to delete it.
+ - Example C(ports=['e0c','e0a']) will delete ifgrp that has ports C(['e0c','e0a']).
+ version_added: 2.8.0
+ type: list
+ elements: str
+
+ from_lag_ports:
+ description:
+ - Only supported with REST and is ignored with ZAPI.
+ - Specify all the ports to find current LAG port.
+ - Ignored if LAG found with exact match of C(ports).
+ - Example if current LAG has ports C(['e0c','e0d']) and C(ports=['e0c','e0d']), then from_lag_ports will be ignored.
+ - If LAG not found with C(ports), then ports in this list are used to find the current LAG.
+ - Ports in this list are used only for finding current LAG, provide exact match of all the ports in the current LAG.
+ - Ignored when C(state=absent).
+ version_added: 2.14.0
+ type: list
+ elements: str
+
+ broadcast_domain:
+ description:
+ - Specify the broadcast_domain name.
+ - Only supported with REST and is ignored with ZAPI.
+ - Required with ONTAP 9.6 and 9.7, but optional with 9.8 or later.
+ type: str
+ version_added: 21.14.0
+
+ ipspace:
+ description:
+ - Specify the ipspace for the broadcast domain.
+ - Only supported with REST and is ignored with ZAPI.
+ - Required with ONTAP 9.6 and 9.7, but optional with 9.8 or later.
+ type: str
+ version_added: 21.14.0
+"""
+
+EXAMPLES = """
+ - name: create ifgrp
+ netapp.ontap.na_ontap_net_ifgrp:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ distribution_function: ip
+ name: a0c
+ ports: [e0a]
+ mode: multimode
+ node: "{{ Vsim node name }}"
+ - name: modify ports in an ifgrp
+ netapp.ontap.na_ontap_net_ifgrp:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ distribution_function: ip
+ name: a0c
+ port: [e0a, e0c]
+ mode: multimode
+ node: "{{ Vsim node name }}"
+ - name: delete ifgrp
+ netapp.ontap.na_ontap_net_ifgrp:
+ state: absent
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: a0c
+ node: "{{ Vsim node name }}"
+ - name: create ifgrp - REST
+ netapp.ontap.na_ontap_net_ifgrp:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ distribution_function: ip
+ ports: [e0a,e0b]
+ mode: multimode
+ node: "{{ Vsim node name }}"
+ broadcast_domain: Default
+ ipspace: Default
+ - name: Remove e0a and add port e0d to above created lag REST
+ netapp.ontap.na_ontap_net_ifgrp:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ from_lag_ports: [a0a,e0b]
+ ports: [e0b,e0d]
+ node: "{{ Vsim node name }}"
+ - name: Add e0a to lag that has port e0b e0d REST
+ netapp.ontap.na_ontap_net_ifgrp:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ distribution_function: ip
+ ports: [e0b,e0d,e0a]
+ mode: multimode
+ node: "{{ Vsim node name }}"
+ - name: Modify broadcast_domain and ipspace REST
+ netapp.ontap.na_ontap_net_ifgrp:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ broadcast_domain: test
+ ipspace: test
+ ports: [e0b,e0d,e0a]
+ node: "{{ Vsim node name }}"
+ - name: Delete LAG with exact match of ports
+ netapp.ontap.na_ontap_net_ifgrp:
+ state: absent
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ ports: [e0b,e0d,e0a]
+ node: "{{ Vsim node name }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapIfGrp:
+ """
+ Create, Modifies and Destroys a IfGrp
+ """
+ def __init__(self):
+ """
+ Initialize the Ontap IfGrp class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ distribution_function=dict(required=False, type='str', choices=['mac', 'ip', 'sequential', 'port']),
+ name=dict(required=False, type='str'),
+ mode=dict(required=False, type='str'),
+ node=dict(required=True, type='str'),
+ ports=dict(required=False, type='list', elements='str', aliases=["port"]),
+ from_lag_ports=dict(required=False, type='list', elements='str'),
+ broadcast_domain=dict(required=False, type='str'),
+ ipspace=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['distribution_function', 'mode'])
+ ],
+ required_together=[['broadcast_domain', 'ipspace']],
+ supports_check_mode=True
+ )
+
+ self.current_records = []
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # Set up Rest API
+ self.rest_api = OntapRestAPI(self.module)
+
+ # if rest and use_rest: auto and name is present, revert to zapi
+ # if rest and use_rest: always and name is present, throw error.
+ unsupported_rest_properties = ['name']
+ self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
+ if self.use_rest:
+ # if rest and ports is not present, throw error as ports is a required field with REST
+ if 'ports' not in self.parameters:
+ error_msg = "Error: ports is a required field with REST"
+ self.module.fail_json(msg=error_msg)
+
+ required_options = ['broadcast_domain', 'ipspace']
+ min_ontap_98 = self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0)
+ if not min_ontap_98 and not any(x in self.parameters for x in required_options):
+ error_msg = "'%s' are mandatory fields with ONTAP 9.6 and 9.7" % ', '.join(required_options)
+ self.module.fail_json(msg=error_msg)
+ else:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ if 'name' not in self.parameters:
+ self.module.fail_json("Error: name is a required field with ZAPI.")
+ if 'broadcast_domain' in self.parameters or 'ipspace' in self.parameters or 'from_lag_ports' in self.parameters:
+ msg = 'Using ZAPI and ignoring options - broadcast_domain, ipspace and from_lag_ports'
+ self.module.warn(msg)
+ self.parameters.pop('broadcast_domain', None)
+ self.parameters.pop('ipspace', None)
+ self.parameters.pop('from_lag_ports', None)
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_if_grp(self):
+ """
+ Return details about the if_group
+ :param:
+ name : Name of the if_group
+
+ :return: Details about the if_group. None if not found.
+ :rtype: dict
+ """
+ if_group_iter = netapp_utils.zapi.NaElement('net-port-get-iter')
+ if_group_info = netapp_utils.zapi.NaElement('net-port-info')
+ if_group_info.add_new_child('port', self.parameters['name'])
+ if_group_info.add_new_child('port-type', 'if_group')
+ if_group_info.add_new_child('node', self.parameters['node'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(if_group_info)
+ if_group_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(if_group_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting if_group %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ return_value = None
+
+ if result.get_child_by_name('num-records') and int(result['num-records']) >= 1:
+ if_group_attributes = result['attributes-list']['net-port-info']
+ return_value = {
+ 'name': if_group_attributes['port'],
+ 'distribution_function': if_group_attributes['ifgrp-distribution-function'],
+ 'mode': if_group_attributes['ifgrp-mode'],
+ 'node': if_group_attributes['node'],
+ }
+ return return_value
+
    def get_if_grp_rest(self, ports, allow_partial_match):
        """
        Look up a LAG through REST, matching on member ports (a LAG has no
        name in REST, so membership is the identifying attribute).

        Results are cached in self.current_records so a second call
        (e.g. for from_lag_ports) does not re-query the cluster.

        :param ports: list of desired member port names.
        :param allow_partial_match: when True, a LAG containing only some of
            the desired ports is still returned.
        :return: tuple (record, exact_match) - record is a dict as built by
            get_if_grp_detail; (None, None) when no acceptable LAG is found.
        """
        api = 'network/ethernet/ports'
        query = {
            'type': 'lag',
            'node.name': self.parameters['node'],
        }
        fields = 'name,node,uuid,broadcast_domain,lag'
        error = None
        if not self.current_records:
            self.current_records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query, fields)
        if error:
            self.module.fail_json(msg=error)
        if self.current_records:
            current_ifgrp = self.get_if_grp_current(self.current_records, ports)
            if current_ifgrp:
                exact_match = self.check_exact_match(ports, current_ifgrp['ports'])
                if exact_match or allow_partial_match:
                    return current_ifgrp, exact_match
        return None, None
+
+ def check_exact_match(self, desired_ports, current_ifgrp):
+ matched = set(desired_ports) == set(current_ifgrp)
+ if not matched:
+ self.rest_api.log_debug(0, "found LAG with partial match of ports: %s but current is %s" % (desired_ports, current_ifgrp))
+ return matched
+
    def get_if_grp_current(self, records, ports):
        """
        Scan REST LAG records for those whose member ports overlap the desired ports.

        :param records: list of REST port records of type 'lag'.
        :param ports: desired member port names.
        :return: the single overlapping LAG as a dict (see get_if_grp_detail),
            or None when there is no overlap.
        """
        desired_ifgrp_in_current = []
        for record in records:
            if 'member_ports' in record['lag']:
                current_port_list = [port['name'] for port in record['lag']['member_ports']]
                for current_port in current_port_list:
                    if current_port in ports:
                        desired_ifgrp_in_current.append(self.get_if_grp_detail(record, current_port_list))
                        # one overlapping port is enough to record this LAG
                        break
        # desired ports spread over more than one LAG is an error when state is
        # present; when state is absent we fall through and return None.
        if len(desired_ifgrp_in_current) > 1 and self.parameters['state'] == 'present':
            error_msg = "'%s' are in different LAGs" % ', '.join(ports)
            self.module.fail_json(msg=error_msg)
        elif len(desired_ifgrp_in_current) == 1:
            return desired_ifgrp_in_current[0]
        return None
+
+ def get_if_grp_detail(self, record, current_port_list):
+ current = {
+ 'node': record['node']['name'],
+ 'uuid': record['uuid'],
+ 'ports': current_port_list
+ }
+ if record.get('broadcast_domain'):
+ current['broadcast_domain'] = record['broadcast_domain']['name']
+ current['ipspace'] = record['broadcast_domain']['ipspace']['name']
+ return current
+
+ def get_if_grp_ports(self):
+ """
+ Return ports of the if_group
+ :param:
+ name : Name of the if_group
+ :return: Ports of the if_group. None if not found.
+ :rtype: dict
+ """
+ if_group_iter = netapp_utils.zapi.NaElement('net-port-ifgrp-get')
+ if_group_iter.add_new_child('ifgrp-name', self.parameters['name'])
+ if_group_iter.add_new_child('node', self.parameters['node'])
+ try:
+ result = self.server.invoke_successfully(if_group_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting if_group ports %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ port_list = []
+ if result.get_child_by_name('attributes'):
+ if_group_attributes = result['attributes']['net-ifgrp-info']
+ if if_group_attributes.get_child_by_name('ports'):
+ ports = if_group_attributes.get_child_by_name('ports').get_children()
+ for each in ports:
+ port_list.append(each.get_content())
+ return {'ports': port_list}
+
    def create_if_grp(self):
        """
        Create a new ifgrp (LAG).

        Delegates to REST when enabled.  With ZAPI the LAG is created first,
        then member ports are added one at a time (ZAPI cannot set them at
        create time).
        """
        if self.use_rest:
            return self.create_if_grp_rest()
        route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-create")
        route_obj.add_new_child("distribution-function", self.parameters['distribution_function'])
        route_obj.add_new_child("ifgrp-name", self.parameters['name'])
        route_obj.add_new_child("mode", self.parameters['mode'])
        route_obj.add_new_child("node", self.parameters['node'])
        try:
            self.server.invoke_successfully(route_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating if_group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        if self.parameters.get('ports') is not None:
            # add member ports individually after the LAG exists
            for port in self.parameters.get('ports'):
                self.add_port_to_if_grp(port)
+
+ def create_if_grp_rest(self):
+ api = 'network/ethernet/ports'
+ body = {
+ 'type': 'lag',
+ 'node': {'name': self.parameters['node']},
+ 'lag': {
+ "mode": self.parameters['mode'],
+ "distribution_policy": self.parameters['distribution_function']
+ }
+ }
+ if self.parameters.get('ports') is not None:
+ body['lag']['member_ports'] = self.build_member_ports()
+ if 'broadcast_domain' in self.parameters:
+ body['broadcast_domain'] = {'name': self.parameters['broadcast_domain']}
+ body['broadcast_domain']['ipspace'] = {'name': self.parameters['ipspace']}
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
    def delete_if_grp(self, uuid=None):
        """
        Delete an ifgrp (LAG).

        :param uuid: REST port UUID; required (and only used) with REST.
        """
        if self.use_rest:
            api = 'network/ethernet/ports'
            dummy, error = rest_generic.delete_async(self.rest_api, api, uuid)
            if error:
                self.module.fail_json(msg=error)
        else:
            route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-destroy")
            route_obj.add_new_child("ifgrp-name", self.parameters['name'])
            route_obj.add_new_child("node", self.parameters['node'])
            try:
                self.server.invoke_successfully(route_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error deleting if_group %s: %s' % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())
+
+ def add_port_to_if_grp(self, port):
+ """
+ adds port to a ifgrp
+ """
+ route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-add-port")
+ route_obj.add_new_child("ifgrp-name", self.parameters['name'])
+ route_obj.add_new_child("port", port)
+ route_obj.add_new_child("node", self.parameters['node'])
+ try:
+ self.server.invoke_successfully(route_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding port %s to if_group %s: %s' %
+ (port, self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_ports(self, current_ports):
+ add_ports = set(self.parameters['ports']) - set(current_ports)
+ remove_ports = set(current_ports) - set(self.parameters['ports'])
+ for port in add_ports:
+ self.add_port_to_if_grp(port)
+ for port in remove_ports:
+ self.remove_port_to_if_grp(port)
+
+ def modify_ports_rest(self, modify, uuid):
+ api = 'network/ethernet/ports'
+ body = {}
+ if 'ports' in modify:
+ member_ports = self.build_member_ports()
+ body['lag'] = {'member_ports': member_ports}
+ if 'broadcast_domain' in modify or 'ipspace' in modify:
+ broadcast_domain = modify['broadcast_domain'] if 'broadcast_domain' in modify else self.parameters['broadcast_domain']
+ ipspace = modify['ipspace'] if 'ipspace' in modify else self.parameters['ipspace']
+ body['broadcast_domain'] = {'name': broadcast_domain}
+ body['broadcast_domain']['ipspace'] = {'name': ipspace}
+ dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def build_member_ports(self):
+ member_ports = []
+ for port in self.parameters['ports']:
+ port_detail = {'name': port, 'node': {'name': self.parameters['node']}}
+ member_ports.append(port_detail)
+ return member_ports
+
    def remove_port_to_if_grp(self, port):
        """
        Remove a member port from the ifgrp (ZAPI only).

        NOTE(review): the name says 'to' but the method removes a port;
        kept as-is for backward compatibility with existing callers.
        """
        route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-remove-port")
        route_obj.add_new_child("ifgrp-name", self.parameters['name'])
        route_obj.add_new_child("port", port)
        route_obj.add_new_child("node", self.parameters['node'])
        try:
            self.server.invoke_successfully(route_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error removing port %s to if_group %s: %s' %
                                  (port, self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
+
    def apply(self):
        """
        Drive the create/delete/modify decision for the LAG and exit the module.

        With REST a LAG has no name, so 'rename' means re-pointing at an
        existing LAG (found via from_lag_ports or a partial port match) and
        updating its member ports.
        """
        # for a LAG, rename is equivalent to adding/removing ports from an existing LAG.
        current, exact_match, modify, rename = None, True, None, None
        if not self.use_rest:
            current = self.get_if_grp()
        elif self.use_rest:
            current, exact_match = self.get_if_grp_rest(self.parameters.get('ports'), allow_partial_match=True)
        # a partial match is not "present" for create/delete purposes
        cd_action = self.na_helper.get_cd_action(current if exact_match else None, self.parameters)
        if cd_action == 'create' and self.use_rest:
            # if we could not find a lag, or only a lag with a partial match, do a new query using from_lag_ports.
            if self.parameters.get('from_lag_ports') is not None:
                from_ifgrp, dummy = self.get_if_grp_rest(self.parameters['from_lag_ports'], allow_partial_match=False)
                if not from_ifgrp:
                    error_msg = "Error: cannot find LAG matching from_lag_ports: '%s'." % self.parameters['from_lag_ports']
                    self.module.fail_json(msg=error_msg)
                rename = True
                current = from_ifgrp
            # if we have a partial match with an existing LAG, we will update the ports.
            elif not exact_match and current:
                rename = True
            if rename:
                # treat as a modify of the matched LAG rather than a create
                cd_action = None
        if cd_action is None and self.parameters['state'] == 'present':
            # with rest, current will have the port details
            current_ports = self.get_if_grp_ports() if not self.use_rest else current
            modify = self.na_helper.get_modified_attributes(current_ports, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            uuid = current['uuid'] if current and self.use_rest else None
            if cd_action == 'create':
                self.create_if_grp()
            elif cd_action == 'delete':
                self.delete_if_grp(uuid)
            elif modify:
                if self.use_rest:
                    self.modify_ports_rest(modify, uuid)
                else:
                    self.modify_ports(current_ports['ports'])
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Entry point: instantiate the LAG module object and run the requested state change."""
    NetAppOntapIfGrp().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py
new file mode 100644
index 000000000..e8f045103
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+
+# (c) 2018-2021, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+'''
+na_ontap_net_port
+'''
+
+DOCUMENTATION = """
+module: na_ontap_net_port
+short_description: NetApp ONTAP network ports.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify a ONTAP network port.
+options:
+ state:
+ description:
+ - Whether the specified net port should exist or not.
+ choices: ['present']
+ type: str
+ default: present
+ node:
+ description:
+ - Specifies the name of node.
+ required: true
+ type: str
+ ports:
+ aliases:
+ - port
+ description:
+ - Specifies the name of port(s).
+ required: true
+ type: list
+ elements: str
+ mtu:
+ description:
+ - Specifies the maximum transmission unit (MTU) reported by the port.
+ - Not supported with REST.
+ type: int
+ autonegotiate_admin:
+ description:
+ - Enables or disables Ethernet auto-negotiation of speed,
+ duplex and flow control.
+ - Not supported with REST.
+ type: bool
+ duplex_admin:
+ description:
+ - Specifies the user preferred duplex setting of the port.
+ - Valid values auto, half, full
+ - Not supported with REST.
+ type: str
+ speed_admin:
+ description:
+ - Specifies the user preferred speed setting of the port.
+ - Not supported with REST.
+ type: str
+ flowcontrol_admin:
+ description:
+ - Specifies the user preferred flow control setting of the port.
+ - Not supported with REST.
+ type: str
+ ipspace:
+ description:
+ - Specifies the port's associated IPspace name.
+ - The 'Cluster' ipspace is reserved for cluster ports.
+ - Not supported with REST.
+ - use netapp.ontap.na_ontap_ports to modify ipspace with REST.
+ type: str
+ up_admin:
+ description:
+ - Enables or disables the port.
+ type: bool
+ version_added: 21.8.0
+"""
+
+EXAMPLES = """
+ - name: Modify Net Port
+ netapp.ontap.na_ontap_net_port:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ node: "{{ node_name }}"
+ ports: e0d,e0c
+ autonegotiate_admin: true
+ up_admin: true
+ mtu: 1500
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapNetPort:
    """
    Modify ONTAP network ports.

    REST is used only when none of the ZAPI-only options (mtu,
    autonegotiate_admin, duplex_admin, speed_admin, flowcontrol_admin,
    ipspace) are requested; with REST only up_admin can be changed.
    """

    def __init__(self):
        """
        Initialize the Ontap Net Port Class: argument spec, parameters,
        and the REST or ZAPI connection.
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present'], default='present'),
            node=dict(required=True, type="str"),
            ports=dict(required=True, type='list', elements='str', aliases=['port']),
            mtu=dict(required=False, type="int", default=None),
            autonegotiate_admin=dict(required=False, type="bool", default=None),
            up_admin=dict(required=False, type="bool", default=None),
            duplex_admin=dict(required=False, type="str", default=None),
            speed_admin=dict(required=False, type="str", default=None),
            flowcontrol_admin=dict(required=False, type="str", default=None),
            ipspace=dict(required=False, type="str", default=None)
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # Set up Rest API
        self.rest_api = OntapRestAPI(self.module)
        # requesting any of these options forces the ZAPI code path
        unsupported_rest_properties = ['mtu', 'autonegotiate_admin', 'duplex_admin', 'speed_admin', 'flowcontrol_admin', 'ipspace']
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                # NOTE(review): sibling modules use netapp_utils.netapp_lib_is_required() here - consider aligning.
                self.module.fail_json(msg="the python NetApp-Lib module is required")
            else:
                self.set_playbook_zapi_key_map()
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def set_playbook_zapi_key_map(self):
        """Build maps from module option names to ZAPI field names, grouped by value type."""
        self.na_helper.zapi_string_keys = {
            'duplex_admin': 'administrative-duplex',
            'speed_admin': 'administrative-speed',
            'flowcontrol_admin': 'administrative-flowcontrol',
            'ipspace': 'ipspace'
        }
        self.na_helper.zapi_bool_keys = {
            'up_admin': 'is-administrative-up',
            'autonegotiate_admin': 'is-administrative-auto-negotiate',
        }
        self.na_helper.zapi_int_keys = {
            'mtu': 'mtu',
        }

    def get_net_port(self, port):
        """
        Return details about the net port.

        :param port: Name of the port
        :return: Dictionary with current state of the port. None if not found.
        :rtype: dict or None
        """
        if self.use_rest:
            return self.get_net_port_rest(port)
        net_port_get = netapp_utils.zapi.NaElement('net-port-get-iter')
        attributes = {
            'query': {
                'net-port-info': {
                    'node': self.parameters['node'],
                    'port': port
                }
            }
        }
        net_port_get.translate_struct(attributes)

        try:
            result = self.server.invoke_successfully(net_port_get, True)
            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
                port_info = result['attributes-list']['net-port-info']
                port_details = dict()
            else:
                # port does not exist on this node
                return None
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting net ports for %s: %s' % (self.parameters['node'], to_native(error)),
                                  exception=traceback.format_exc())

        # convert each ZAPI field back to its module option name and python type
        for item_key, zapi_key in self.na_helper.zapi_bool_keys.items():
            port_details[item_key] = self.na_helper.get_value_for_bool(from_zapi=True, value=port_info.get_child_content(zapi_key))
        for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
            port_details[item_key] = self.na_helper.get_value_for_int(from_zapi=True, value=port_info.get_child_content(zapi_key))
        for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
            port_details[item_key] = port_info.get_child_content(zapi_key)
        return port_details

    def get_net_port_rest(self, port):
        """
        Return the current REST state of a port (name, node, uuid, up_admin),
        or None when the port is not found on the node.
        """
        api = 'network/ethernet/ports'
        query = {
            'name': port,
            'node.name': self.parameters['node'],
        }
        fields = 'name,node,uuid,enabled'
        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
        if error:
            self.module.fail_json(msg=error)
        if record:
            current = {
                'name': record['name'],
                'node': record['node']['name'],
                'uuid': record['uuid'],
                'up_admin': record['enabled']
            }
            return current
        return None

    def modify_net_port(self, port, modify):
        """
        Modify a port

        :param port: Name of the port (ZAPI) or its UUID (REST)
        :param modify: dict with attributes to be modified
        :return: None
        """
        if self.use_rest:
            return self.modify_net_port_rest(port, modify)

        def get_zapi_key_and_value(key, value):
            # translate an option name/value into its ZAPI field name and string form
            zapi_key = self.na_helper.zapi_string_keys.get(key)
            if zapi_key is not None:
                return zapi_key, value
            zapi_key = self.na_helper.zapi_bool_keys.get(key)
            if zapi_key is not None:
                return zapi_key, self.na_helper.get_value_for_bool(from_zapi=False, value=value)
            zapi_key = self.na_helper.zapi_int_keys.get(key)
            if zapi_key is not None:
                return zapi_key, self.na_helper.get_value_for_int(from_zapi=False, value=value)
            # only happens if modify contains an option missing from the key maps
            raise KeyError(key)

        port_modify = netapp_utils.zapi.NaElement('net-port-modify')
        port_attributes = {'node': self.parameters['node'], 'port': port}
        for key, value in modify.items():
            zapi_key, value = get_zapi_key_and_value(key, value)
            port_attributes[zapi_key] = value
        port_modify.translate_struct(port_attributes)
        try:
            self.server.invoke_successfully(port_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying net ports for %s: %s' % (self.parameters['node'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_net_port_rest(self, uuid, modify):
        """
        Modify broadcast domain, ipspace and enable/disable port
        (only 'enabled'/up_admin is actually patched here).
        """
        api = 'network/ethernet/ports'
        body = {'enabled': modify['up_admin']}
        dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
        if error:
            self.module.fail_json(msg=error)

    def apply(self):
        """
        Run Module based on play book
        """
        # Run the task for all ports in the list of 'ports'
        missing_ports = list()
        modified = dict()
        for port in self.parameters['ports']:
            current = self.get_net_port(port)
            if current is None:
                missing_ports.append(port)
            # get_modified_attributes returns {} when current is None, so a
            # missing port never triggers a modify call below.
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            modified[port] = modify
            if modify and not self.module.check_mode:
                # REST identifies the port by uuid rather than name
                port = current['uuid'] if self.use_rest else port
                self.modify_net_port(port, modify)
        if missing_ports:
            plural, suffix = '', '.'
            if len(missing_ports) == len(self.parameters['ports']):
                # no port matched at all - most likely a bad node name
                suffix = ' - check node name.'
            if len(missing_ports) > 1:
                plural = 's'
            self.module.fail_json(changed=self.na_helper.changed, modify=modified,
                                  msg='Error: port%s: %s not found on node: %s%s'
                                  % (plural, ', '.join(missing_ports), self.parameters['node'], suffix))
        result = netapp_utils.generate_result(self.na_helper.changed, modify=modified)
        self.module.exit_json(**result)
+
+
def main():
    """Entry point: build the net port module object and apply the play task."""
    NetAppOntapNetPort().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py
new file mode 100644
index 000000000..9881755b3
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_net_routes
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_net_routes
+short_description: NetApp ONTAP network routes
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Modify ONTAP network routes.
+options:
+ state:
+ description:
+ - Whether you want to create or delete a network route.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ vserver:
+ description:
+ - The name of the vserver.
+ - Required when using ZAPI.
+ - When using REST, omit this parameter for cluster scoped routes, or set it to NULL.
+ type: str
+ destination:
+ description:
+ - Specify the route destination.
+ - Example 10.7.125.5/20, fd20:13::/64.
+ required: true
+ type: str
+ gateway:
+ description:
+ - Specify the route gateway.
+ - Example 10.7.125.1, fd20:13::1.
+ required: true
+ type: str
+ metric:
+ description:
+ - Specify the route metric. If this field is not provided, ONTAP will default to 20.
+ - Supported from ONTAP 9.11.0 in REST.
+ - With REST, trying to modify destination or gateway will also reset metric to 20 in ONTAP 9.10.1 or earlier.
+ type: int
+ from_destination:
+ description:
+ - Specify the route destination that should be changed.
+ version_added: 2.8.0
+ type: str
+ from_gateway:
+ description:
+ - Specify the route gateway that should be changed.
+ version_added: 2.8.0
+ type: str
+ from_metric:
+ description:
+ - Specify the route metric that should be changed.
+ - This parameter is ignored, as the value is read from ONTAP.
+ - Not supported with REST, ignored with ZAPI.
+ version_added: 2.8.0
+ type: int
+'''
+
+EXAMPLES = """
+ - name: create route
+ netapp.ontap.na_ontap_net_routes:
+ state: present
+ vserver: "{{ Vserver name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ destination: 10.7.125.5/20
+ gateway: 10.7.125.1
+ metric: 30
+
+ - name: create route - cluster scope, using REST
+ netapp.ontap.na_ontap_net_routes:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ destination: 10.7.125.5/20
+ gateway: 10.7.125.1
+
+ - name: create route - vserver scope, using REST
+ netapp.ontap.na_ontap_net_routes:
+ state: present
+ vserver: "{{ Vserver name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ destination: 10.7.125.5/20
+ gateway: 10.7.125.1
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapNetRoutes:
    """
    Create, Modifies and Destroys a Net Route.

    A route cannot be modified in place: destination/gateway form its
    identity, so 'modify' and 'rename' are implemented as delete + create.
    """

    def __init__(self):
        """
        Initialize the Ontap Net Route class: argument spec, parameters,
        REST/ZAPI selection and option validation.
        """
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=False, type='str'),
            destination=dict(required=True, type='str'),
            gateway=dict(required=True, type='str'),
            metric=dict(required=False, type='int'),
            from_destination=dict(required=False, type='str', default=None),
            from_gateway=dict(required=False, type='str', default=None),
            from_metric=dict(required=False, type='int', default=None),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)

        # metric supported from ONTAP 9.11.0 version.
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, ['from_metric'], [['metric', (9, 11, 0)]])
        self.validate_options()
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def validate_options(self):
        """
        Validate option combinations: vserver is mandatory with ZAPI, and
        (from_)destination must contain a '/' netmask separator.
        Collects all problems and fails once with a combined message.
        """
        errors = []
        example = ''
        if not self.use_rest and 'vserver' not in self.parameters:
            # self.module.fail_json(msg="Error: vserver is a required parameter when using ZAPI")
            errors.append("vserver is a required parameter when using ZAPI")
        for attr in ('destination', 'from_destination'):
            value = self.parameters.get(attr)
            if value is not None and '/' not in value:
                errors.append("Expecting '/' in '%s'" % value)
                example = 'Examples: 10.7.125.5/20, fd20:13::/64'
        if errors:
            if example:
                errors.append(example)
            self.module.fail_json(msg="Error: %s." % '. '.join(errors))

    @staticmethod
    def sanitize_exception(action, exc):
        """
        Translate benign ZAPI errors to None, anything else to its message.

        Create on an existing route (13001) and get on a missing route
        (15661) are not treated as errors.
        """
        if action == 'create' and to_native(exc.code) == '13001' and 'already exists' in to_native(exc.message):
            return None
        if action == 'get' and to_native(exc.code) == "15661":
            # Error 15661 denotes a route doesn't exist.
            return None
        return to_native(exc)

    def create_net_route(self, current=None, fail=True):
        """
        Creates a new Route

        :param current: route attributes to create; defaults to the desired
            parameters (also used to restore a route after a failed recreate).
        :param fail: when False, return the error string instead of failing.
        :return: None on success, else the error string (only when fail=False).
        """
        if current is None:
            current = self.parameters
        if self.use_rest:
            api = 'network/ip/routes'
            body = {'gateway': current['gateway']}
            dest = current['destination']
            # a REST record being restored already has a dict destination
            if isinstance(dest, dict):
                body['destination'] = dest
            else:
                dest = current['destination'].split('/')
                body['destination'] = {'address': dest[0], 'netmask': dest[1]}
            if current.get('vserver') is not None:
                body['svm.name'] = current['vserver']
            if current.get('metric') is not None:
                body['metric'] = current['metric']
            __, error = rest_generic.post_async(self.rest_api, api, body)
        else:
            route_obj = netapp_utils.zapi.NaElement('net-routes-create')
            route_obj.add_new_child("destination", current['destination'])
            route_obj.add_new_child("gateway", current['gateway'])
            metric = current.get('metric')
            if metric is not None:
                route_obj.add_new_child("metric", str(metric))
            try:
                self.server.invoke_successfully(route_obj, True)
                error = None
            except netapp_utils.zapi.NaApiError as exc:
                # return if desired route already exists
                error = self.sanitize_exception('create', exc)
        if error:
            error = 'Error creating net route: %s' % error
            if fail:
                self.module.fail_json(msg=error)
        return error

    def delete_net_route(self, current):
        """
        Deletes a given Route

        :param current: existing route record; must carry 'uuid' with REST.
        """
        if self.use_rest:
            uuid = current['uuid']
            api = 'network/ip/routes'
            dummy, error = rest_generic.delete_async(self.rest_api, api, uuid)
            if error:
                self.module.fail_json(msg='Error deleting net route - %s' % error)
        else:
            route_obj = netapp_utils.zapi.NaElement('net-routes-destroy')
            route_obj.add_new_child("destination", current['destination'])
            route_obj.add_new_child("gateway", current['gateway'])
            try:
                self.server.invoke_successfully(route_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error deleting net route: %s'
                                      % (to_native(error)),
                                      exception=traceback.format_exc())

    def recreate_net_route(self, current):
        """
        Modify a net route
        Since we cannot modify a route, we are deleting the existing route, and creating a new one.
        On create failure the original route is restored before failing.
        """
        self.delete_net_route(current)
        # use existing metric if not specified
        if current.get('metric') is not None and self.parameters.get('metric') is None:
            self.parameters['metric'] = current['metric']
        error = self.create_net_route(fail=False)
        if error:
            # restore the old route, create the route with the existing values
            self.create_net_route(current)
            # Invalid value specified for any of the attributes
            self.module.fail_json(msg='Error modifying net route: %s' % error,
                                  exception=traceback.format_exc())

    def get_net_route(self, params=None):
        """
        Checks to see if a route exist or not

        :param params: attributes to look up (destination, gateway, and
            optionally vserver); defaults to the desired parameters.
        :return: route record dict if the route exists, None otherwise
        """
        if params is None:
            params = self.parameters
        if self.use_rest:
            api = "network/ip/routes"
            fields = 'destination,gateway,svm,scope'
            if self.parameters.get('metric') is not None:
                fields += ',metric'
            query = {'destination.address': params['destination'].split('/')[0],
                     'gateway': params['gateway']}
            # no vserver means a cluster scoped route
            if params.get('vserver') is None:
                query['scope'] = 'cluster'
            else:
                query['scope'] = 'svm'
                query['svm.name'] = params['vserver']
            record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
            if error:
                self.module.fail_json(msg='Error fetching net route: %s' % error)
            # even if metric not set, 20 is set by default.
            if record and 'metric' not in record:
                record['metric'] = None
            return record
        else:
            route_obj = netapp_utils.zapi.NaElement('net-routes-get')
            for attr in ('destination', 'gateway'):
                route_obj.add_new_child(attr, params[attr])
            try:
                result = self.server.invoke_successfully(route_obj, True)
            except netapp_utils.zapi.NaApiError as exc:
                # Error 15661 denotes a route doesn't exist.
                error = self.sanitize_exception('get', exc)
                if error is None:
                    return None
                self.module.fail_json(msg='Error fetching net route: %s' % error,
                                      exception=traceback.format_exc())
            if result.get_child_by_name('attributes') is not None:
                route_info = result.get_child_by_name('attributes').get_child_by_name('net-vs-routes-info')
                return {
                    'destination': route_info.get_child_content('destination'),
                    'gateway': route_info.get_child_content('gateway'),
                    # metric is expected to always be reported by ZAPI (ONTAP defaults it to 20) - TODO confirm
                    'metric': int(route_info.get_child_content('metric'))
                }
            return None

    def apply(self):
        """
        Run Module based on play book
        """
        modify, rename = False, False
        current = self.get_net_route()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action == 'create' and any(self.parameters.get(attr) is not None for attr in ('from_gateway', 'from_destination')):
            # create by renaming existing route if it exists
            # destination and gateway combination is unique, and is considered like an id.
            # So modify destination or gateway is considered a rename action.
            # If one of 'destination', 'gateway' is not in the from field, use the desired value.
            # (relies on set_parameters dropping options whose value is None, so .get's default applies)
            from_params = {'gateway': self.parameters.get('from_gateway', self.parameters['gateway']),
                           'destination': self.parameters.get('from_destination', self.parameters['destination'])}
            if self.parameters.get('vserver'):
                from_params['vserver'] = self.parameters['vserver']
            current = self.get_net_route(from_params)
            if current is None:
                self.module.fail_json(msg="Error modifying: route %s does not exist" % self.parameters['from_destination'])
            rename = True
            cd_action = None

        if cd_action is None and self.parameters.get('metric') is not None and current:
            modify = self.parameters['metric'] != current['metric']
            if modify:
                self.na_helper.changed = True

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_net_route()
            elif cd_action == 'delete':
                self.delete_net_route(current)
            elif rename or modify:
                # both are implemented as delete + create
                self.recreate_net_route(current)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify, extra_responses={'rename': rename})
        self.module.exit_json(**result)
+
+
def main():
    """Entry point: build the net routes module object and run the requested play task."""
    NetAppOntapNetRoutes().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py
new file mode 100644
index 000000000..8a4a26a28
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_net_subnet
+short_description: NetApp ONTAP Create, delete, modify network subnets.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: Storage Engineering (@Albinpopote) <ansible@black-perl.fr>
+description:
+ - Create, modify, destroy the network subnet
+options:
+ state:
+ description:
+ - Whether the specified network interface group should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ broadcast_domain:
+ description:
+ - Specify the required broadcast_domain name for the subnet.
+ - A broadcast domain can not be modified after the subnet has been created
+ type: str
+
+ name:
+ description:
+ - Specify the subnet name.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the subnet to be renamed
+ type: str
+
+ gateway:
+ description:
+ - Specify the gateway for the default route of the subnet.
+ type: str
+
+ ipspace:
+ description:
+ - Specify the ipspace for the subnet.
+ - The default value for this parameter is the default IPspace, named 'Default'.
+ type: str
+
+ ip_ranges:
+ description:
+ - Specify the list of IP address ranges associated with the subnet.
+ type: list
+ elements: str
+
+ subnet:
+ description:
+ - Specify the subnet (ip and mask).
+ type: str
+
+notes:
+ - supports ZAPI and REST. REST requires ONTAP 9.11.1 or later.
+ - supports check mode.
+"""
+
+EXAMPLES = """
+ - name: create subnet
+ netapp.ontap.na_ontap_net_subnet:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ subnet: 10.10.10.0/24
+ name: subnet-adm
+ ip_ranges: [ '10.10.10.30-10.10.10.40', '10.10.10.51' ]
+ gateway: 10.10.10.254
+ ipspace: Default
+ broadcast_domain: Default
+ - name: delete subnet
+ netapp.ontap.na_ontap_net_subnet:
+ state: absent
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: subnet-adm
+ ipspace: Default
+ - name: rename subnet
+ netapp.ontap.na_ontap_net_subnet:
+ state: present
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ name: subnet-adm-new
+ from_name: subnet-adm
+ ipspace: Default
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapSubnet:
+ """
+ Create, Modifies and Destroys a subnet
+ """
+ def __init__(self):
+ """
+ Initialize the ONTAP Subnet class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ broadcast_domain=dict(required=False, type='str'),
+ gateway=dict(required=False, type='str'),
+ ip_ranges=dict(required=False, type='list', elements='str'),
+ ipspace=dict(required=False, type='str'),
+ subnet=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # Set up Rest API
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+ self.uuid = None
+
+ if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 1):
+ msg = 'REST requires ONTAP 9.11.1 or later for network/ip/subnets APIs.'
+ self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
+
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_subnet(self, name=None):
+ """
+ Return details about the subnet
+ :param:
+ name : Name of the subnet
+ :return: Details about the subnet. None if not found.
+ :rtype: dict
+ """
+ if name is None:
+ name = self.parameters.get('name')
+ if self.use_rest:
+ return self.get_subnet_rest(name)
+ subnet_iter = netapp_utils.zapi.NaElement('net-subnet-get-iter')
+ subnet_info = netapp_utils.zapi.NaElement('net-subnet-info')
+ subnet_info.add_new_child('subnet-name', name)
+ if self.parameters.get('ipspace'):
+ subnet_info.add_new_child('ipspace', self.parameters['ipspace'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(subnet_info)
+
+ subnet_iter.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(subnet_iter, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching subnet %s: %s' % (name, to_native(error)))
+ return_value = None
+ # check if query returns the expected subnet
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) == 1:
+
+ subnet_attributes = result.get_child_by_name('attributes-list').get_child_by_name('net-subnet-info')
+ broadcast_domain = subnet_attributes.get_child_content('broadcast-domain')
+ gateway = subnet_attributes.get_child_content('gateway')
+ ipspace = subnet_attributes.get_child_content('ipspace')
+ subnet = subnet_attributes.get_child_content('subnet')
+ name = subnet_attributes.get_child_content('subnet-name')
+
+ ip_ranges = []
+ if subnet_attributes.get_child_by_name('ip-ranges'):
+ range_obj = subnet_attributes.get_child_by_name('ip-ranges').get_children()
+ ip_ranges = [elem.get_content() for elem in range_obj]
+
+ return_value = {
+ 'name': name,
+ 'broadcast_domain': broadcast_domain,
+ 'gateway': gateway,
+ 'ip_ranges': ip_ranges,
+ 'ipspace': ipspace,
+ 'subnet': subnet
+ }
+
+ return return_value
+
+ def create_subnet(self):
+ """
+ Creates a new subnet
+ """
+ if self.use_rest:
+ return self.create_subnet_rest()
+ subnet_create = self.build_zapi_request_for_create_or_modify('net-subnet-create')
+ try:
+ self.server.invoke_successfully(subnet_create, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_subnet(self):
+ """
+ Deletes a subnet
+ """
+ if self.use_rest:
+ return self.delete_subnet_rest()
+ subnet_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-subnet-destroy', **{'subnet-name': self.parameters.get('name')})
+ if self.parameters.get('ipspace'):
+ subnet_delete.add_new_child('ipspace', self.parameters.get('ipspace'))
+
+ try:
+ self.server.invoke_successfully(subnet_delete, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_subnet(self, modify):
+ """
+ Modifies a subnet
+ """
+ if self.use_rest:
+ return self.modify_subnet_rest(modify)
+ subnet_modify = self.build_zapi_request_for_create_or_modify('net-subnet-modify')
+ try:
+ self.server.invoke_successfully(subnet_modify, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def build_zapi_request_for_create_or_modify(self, zapi):
+ simple_keys = ['gateway', 'ipspace', 'subnet']
+
+ # required parameters
+ options = {'subnet-name': self.parameters.get('name')}
+ if zapi == 'net-subnet-create':
+ options['broadcast-domain'] = self.parameters.get('broadcast_domain')
+ options['subnet'] = self.parameters.get('subnet')
+ simple_keys.remove('subnet')
+
+ # optional parameters
+ for key in simple_keys:
+ value = self.parameters.get(key)
+ if value is not None:
+ options[key] = value
+
+ result = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+ if self.parameters.get('ip_ranges'):
+ subnet_ips = netapp_utils.zapi.NaElement('ip-ranges')
+ for ip_range in self.parameters.get('ip_ranges'):
+ subnet_ips.add_new_child('ip-range', ip_range)
+ result.add_child_elem(subnet_ips)
+
+ return result
+
+ def rename_subnet(self):
+        """
+        Renames the subnet via net-subnet-rename (ZAPI path only; REST renames through PATCH)
+        """
+ options = {'subnet-name': self.parameters.get('from_name'),
+ 'new-name': self.parameters.get('name')}
+
+ subnet_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'net-subnet-rename', **options)
+
+ if self.parameters.get('ipspace'):
+ subnet_rename.add_new_child('ipspace', self.parameters.get('ipspace'))
+
+ try:
+ self.server.invoke_successfully(subnet_rename, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_subnet_rest(self, name):
+ api = 'network/ip/subnets'
+ params = {
+ 'name': name,
+ 'fields': 'available_ip_ranges,name,broadcast_domain,ipspace,gateway,subnet,uuid'
+ }
+ if self.parameters.get('ipspace'):
+ params['ipspace.name'] = self.parameters['ipspace']
+ record, error = rest_generic.get_one_record(self.rest_api, api, params)
+ if error:
+ self.module.fail_json(msg="Error fetching subnet %s: %s" % (name, error))
+ current = None
+ if record:
+ self.uuid = record['uuid']
+ current = {
+ 'name': record['name'],
+ 'broadcast_domain': self.na_helper.safe_get(record, ['broadcast_domain', 'name']),
+ 'gateway': self.na_helper.safe_get(record, ['gateway']),
+ 'ipspace': self.na_helper.safe_get(record, ['ipspace', 'name']),
+ 'subnet': record['subnet']['address'] + '/' + record['subnet']['netmask'],
+ 'ip_ranges': []
+ }
+ for each_range in record.get('available_ip_ranges', []):
+ if each_range['start'] == each_range['end']:
+ current['ip_ranges'].append(each_range['start'])
+ else:
+ current['ip_ranges'].append(each_range['start'] + '-' + each_range['end'])
+ return current
+
+ def create_subnet_rest(self):
+ api = 'network/ip/subnets'
+ dummy, error = rest_generic.post_async(self.rest_api, api, self.form_create_modify_body_rest())
+ if error:
+ self.module.fail_json(msg='Error creating subnet %s: %s' % (self.parameters['name'], to_native(error)))
+
+ def modify_subnet_rest(self, modify):
+ api = 'network/ip/subnets'
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, self.form_create_modify_body_rest(modify))
+ if error:
+ self.module.fail_json(msg='Error modifying subnet %s: %s' % (self.parameters.get('name'), to_native(error)))
+
+ def delete_subnet_rest(self):
+ api = 'network/ip/subnets'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid)
+ if error:
+ self.module.fail_json(msg='Error deleting subnet %s: %s' % (self.parameters.get('name'), to_native(error)))
+
+ def form_create_modify_body_rest(self, params=None):
+ if params is None:
+ params = self.parameters
+ body = {'name': self.parameters['name']}
+ if params.get('broadcast_domain'):
+ body['broadcast_domain.name'] = params['broadcast_domain']
+ if params.get('subnet'):
+ if '/' not in params['subnet']:
+ self.module.fail_json(msg="Error: Invalid value specified for subnet %s" % params['subnet'])
+ body['subnet.address'] = params['subnet'].split('/')[0]
+ body['subnet.netmask'] = params['subnet'].split('/')[1]
+ if params.get('gateway'):
+ body['gateway'] = params['gateway']
+ if params.get('ipspace'):
+ body['ipspace.name'] = params['ipspace']
+ ip_ranges = []
+ for each_range in params.get('ip_ranges', []):
+ if '-' in each_range:
+ ip_ranges.append({
+ 'start': each_range.split('-')[0],
+ 'end': each_range.split('-')[1]
+ })
+ else:
+ ip_ranges.append({
+ 'start': each_range,
+ 'end': each_range
+ })
+ if ip_ranges or params.get('ip_ranges') == []:
+ body['ip_ranges'] = ip_ranges
+ return body
+
+ def apply(self):
+ '''Apply action to subnet'''
+ current = self.get_subnet()
+ rename, modify = None, None
+
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action == 'create' and self.parameters.get('from_name'):
+ # creating new subnet by renaming
+ current = self.get_subnet(self.parameters.get('from_name'))
+ if current is None:
+ self.module.fail_json(msg="Error renaming: subnet %s does not exist" %
+ self.parameters.get('from_name'))
+ rename = True
+ cd_action = None
+ if self.use_rest:
+ # patch takes care of renaming subnet too.
+ rename = False
+
+ if self.parameters['state'] == 'present' and current:
+ if not self.use_rest:
+ current.pop('name', None) # handled in rename
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if 'broadcast_domain' in modify:
+                self.module.fail_json(msg='Error modifying subnet %s: cannot modify broadcast_domain parameter, desired "%s", current "%s"'
+ % (self.parameters.get('name'), self.parameters.get('broadcast_domain'), current.get('broadcast_domain')))
+
+ if cd_action == 'create':
+ for attribute in ['subnet', 'broadcast_domain']:
+ if not self.parameters.get(attribute):
+ self.module.fail_json(msg='Error - missing required arguments: %s.' % attribute)
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if rename:
+ self.rename_subnet()
+ # If rename is True, cd_action is None but modify could be true
+ if cd_action == 'create':
+ self.create_subnet()
+ elif cd_action == 'delete':
+ self.delete_subnet()
+ elif modify:
+ self.modify_subnet(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+    Creates the NetApp ONTAP Subnet object and runs the correct play task
+ """
+ subnet_obj = NetAppOntapSubnet()
+ subnet_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py
new file mode 100644
index 000000000..ed94b4728
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+
+# (c) 2018-2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_net_vlan
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_net_vlan
+short_description: NetApp ONTAP network VLAN
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Modify/Delete network VLAN
+- Modifying a VLAN is supported only with REST
+- broadcast_domain, ipspace and enabled keys are supported with REST and are ignored with ZAPI
+options:
+ state:
+ description:
+ - Whether the specified network VLAN should exist or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ parent_interface:
+ description:
+ - The interface that hosts the VLAN interface.
+ required: true
+ type: str
+ vlanid:
+ description:
+ - The VLAN id. Ranges from 1 to 4094.
+ required: true
+ type: int
+ node:
+ description:
+ - Node name of VLAN interface.
+ required: true
+ type: str
+ broadcast_domain:
+ description:
+ - Specify the broadcast_domain name.
+ - Only supported with REST and is ignored with ZAPI.
+ - Required with 9.6 and 9.7, but optional with 9.8 or later.
+ type: str
+ version_added: 21.13.0
+ ipspace:
+ description:
+ - Specify the ipspace for the broadcast domain.
+ - Only supported with REST and is ignored with ZAPI.
+ - Required with 9.6 and 9.7, but optional with 9.8 or later.
+ type: str
+ version_added: 21.13.0
+ enabled:
+ description:
+ - Enable/Disable Net vlan.
+ - Only supported with REST and is ignored with ZAPI.
+ type: bool
+ version_added: 21.13.0
+notes:
+ - The C(interface_name) option has been removed and should be deleted from playbooks
+'''
+
+EXAMPLES = """
+ - name: create VLAN
+ netapp.ontap.na_ontap_net_vlan:
+ state: present
+ vlanid: 13
+ node: "{{ vlan_node }}"
+ ipspace: "{{ ipspace_name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+
+ - name: Create and add vlan to broadcast domain - REST
+ netapp.ontap.na_ontap_net_vlan:
+ state: present
+ vlanid: 14
+ node: "{{ vlan_node }}"
+ parent_interface: "{{ vlan_parent_interface_name }}"
+ broadcast_domain: "{{ broadcast_domain_name }}"
+ ipspace: "{{ ipspace_name }}"
+ enabled: true
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+
+ - name: Disable VLAN - REST
+ netapp.ontap.na_ontap_net_vlan:
+ state: present
+ vlanid: 14
+ node: "{{ vlan_node }}"
+ parent_interface: "{{ vlan_parent_interface_name }}"
+ enabled: false
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+
+ - name: Delete VLAN
+ netapp.ontap.na_ontap_net_vlan:
+ state: absent
+ vlanid: 14
+ node: "{{ vlan_node }}"
+ parent_interface: "{{ vlan_parent_interface_name }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVlan:
+ """
+    Creates and destroys network VLANs
+ """
+ def __init__(self):
+ """
+ Initializes the NetAppOntapVlan function
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ parent_interface=dict(required=True, type='str'),
+ vlanid=dict(required=True, type='int'),
+ node=dict(required=True, type='str'),
+ broadcast_domain=dict(required=False, type='str'),
+ ipspace=dict(required=False, type='str'),
+ enabled=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_together=[['broadcast_domain', 'ipspace']],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+
+ # set up state variables
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.parameters['interface_name'] = "%s-%s" % (self.parameters['parent_interface'], self.parameters['vlanid'])
+
+ # Set up Rest API
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0):
+ if 'broadcast_domain' not in self.parameters and 'ipspace' not in self.parameters and self.parameters['state'] == 'present':
+ error_msg = 'broadcast_domain and ipspace are required fields with ONTAP 9.6 and 9.7'
+ self.module.fail_json(msg=error_msg)
+
+ if not self.use_rest and ('broadcast_domain' in self.parameters or 'enabled' in self.parameters):
+ msg = 'Using ZAPI and ignoring keys - enabled, broadcast_domain and ipspace'
+ self.module.warn(msg)
+ self.parameters.pop('broadcast_domain', None)
+ self.parameters.pop('ipspace', None)
+ self.parameters.pop('enabled', None)
+
+ if not self.use_rest:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def invoke_vlan(self, zapi):
+ """
+ Invoke zapi - add/delete take the same NaElement structure
+ """
+ vlan_obj = netapp_utils.zapi.NaElement(zapi)
+ vlan_info = self.create_vlan_info()
+ vlan_obj.add_child_elem(vlan_info)
+ try:
+ self.server.invoke_successfully(vlan_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if zapi == 'net-vlan-create':
+ action = 'adding'
+ elif zapi == 'net-vlan-delete':
+ action = 'deleting'
+ else:
+ action = 'unexpected'
+ self.module.fail_json(msg='Error %s Net Vlan %s: %s' % (action, self.parameters['interface_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_vlan(self):
+ """
+ Creates a new vlan
+ """
+ if self.use_rest:
+ api = 'network/ethernet/ports'
+ body = {
+ 'type': 'vlan',
+ 'node': {'name': self.parameters['node']},
+ 'vlan': {
+ 'base_port': {
+ 'name': self.parameters['parent_interface'],
+ 'node': {'name': self.parameters['node']}
+ },
+ 'tag': self.parameters['vlanid']
+ }
+ }
+ if 'broadcast_domain' in self.parameters:
+ body['broadcast_domain'] = {'name': self.parameters['broadcast_domain']}
+ body['broadcast_domain']['ipspace'] = {'name': self.parameters['ipspace']}
+ if 'enabled' in self.parameters:
+ body['enabled'] = self.parameters['enabled']
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ self.invoke_vlan('net-vlan-create')
+
+ def delete_vlan(self, current=None):
+ """
+        Deletes a vlan
+ """
+ if self.use_rest:
+ uuid = current['uuid']
+ api = 'network/ethernet/ports'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, uuid)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ self.invoke_vlan('net-vlan-delete')
+
+ def get_vlan(self):
+ """
+ Checks to see if a vlan already exists or not
+        :return: Returns dictionary of attributes if the vlan exists, None if it doesn't
+ """
+ if self.use_rest:
+ return self.get_vlan_rest()
+ vlan_obj = netapp_utils.zapi.NaElement("net-vlan-get-iter")
+ query = {
+ 'query': {
+ 'vlan-info': {
+ 'interface-name': self.parameters['interface_name'],
+ 'node': self.parameters['node']
+ }
+ }
+ }
+ vlan_obj.translate_struct(query)
+ try:
+ result = self.server.invoke_successfully(vlan_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+ # This checks desired vlan already exists and returns interface_name and node
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
+ vlan_info = result.get_child_by_name('attributes-list').get_child_by_name('vlan-info')
+ current = {
+ 'interface_name': vlan_info.get_child_content('interface-name'),
+ 'node': vlan_info.get_child_content('node')
+ }
+ return current
+ return None
+
+ def get_vlan_rest(self):
+ api = 'network/ethernet/ports'
+ query = {
+ 'name': self.parameters['interface_name'],
+ 'node.name': self.parameters['node'],
+ }
+ fields = 'name,node,uuid,broadcast_domain,enabled'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+ if error:
+ self.module.fail_json(msg=error)
+ if record:
+ current = {
+ 'interface_name': record['name'],
+ 'node': record['node']['name'],
+ 'uuid': record['uuid'],
+ 'enabled': record['enabled']
+ }
+ if 'broadcast_domain' in record:
+ current['broadcast_domain'] = record['broadcast_domain']['name']
+ current['ipspace'] = record['broadcast_domain']['ipspace']['name']
+ return current
+ return None
+
+ def modify_vlan(self, current, modify):
+ """
+ Modify broadcast domain, ipspace and enable/disable vlan
+ """
+ uuid = current['uuid']
+ api = 'network/ethernet/ports'
+ body = {}
+ # Requires both broadcast_domain and ipspace in body
+ # of PATCH call if any one of it present in modify
+ if 'broadcast_domain' in modify or 'ipspace' in modify:
+ broadcast_domain = modify['broadcast_domain'] if 'broadcast_domain' in modify else current['broadcast_domain']
+ ipspace = modify['ipspace'] if 'ipspace' in modify else current['ipspace']
+ body['broadcast_domain'] = {'name': broadcast_domain}
+ body['broadcast_domain']['ipspace'] = {'name': ipspace}
+ if 'enabled' in modify:
+ body['enabled'] = modify['enabled']
+ dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def create_vlan_info(self):
+ """
+ Create a vlan_info object to be used in a create/delete
+ :return:
+ """
+ vlan_info = netapp_utils.zapi.NaElement("vlan-info")
+
+ # set up the vlan_info object:
+ vlan_info.add_new_child("parent-interface", self.parameters['parent_interface'])
+ vlan_info.add_new_child("vlanid", str(self.parameters['vlanid']))
+ vlan_info.add_new_child("node", self.parameters['node'])
+ return vlan_info
+
+ def apply(self):
+ """
+ check the option in the playbook to see what needs to be done
+ :return:
+ """
+ modify = None
+ current = self.get_vlan()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.use_rest and cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_vlan()
+ # enabled key in POST call has no effect
+ # applying PATCH if there is change in default value
+ if self.use_rest:
+ current = self.get_vlan_rest()
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if cd_action == 'delete':
+ self.delete_vlan(current)
+ if modify:
+ self.modify_vlan(current, modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Creates the NetApp Ontap vlan object, and runs the correct play task.
+ """
+ vlan_obj = NetAppOntapVlan()
+ vlan_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py
new file mode 100644
index 000000000..a1315df1b
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py
@@ -0,0 +1,700 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: na_ontap_nfs
+short_description: NetApp ONTAP NFS status
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Enable or disable NFS on ONTAP
+options:
+ state:
+ description:
+ - Whether NFS should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ service_state:
+ description:
+      - Whether the specified NFS should be enabled or disabled. Creates NFS service if it does not exist.
+ choices: ['started', 'stopped']
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ nfsv3:
+ description:
+ - status of NFSv3.
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv3_fsid_change:
+ description:
+ - status of if NFSv3 clients see change in FSID as they traverse filesystems.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv4_fsid_change:
+ description:
+ - status of if NFSv4 clients see change in FSID as they traverse filesystems.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ nfsv4:
+ description:
+ - status of NFSv4.
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv41:
+ description:
+ - status of NFSv41.
+ - usage of C(nfsv4.1) is deprecated as it does not match Ansible naming convention. The alias will be removed.
+ - please use C(nfsv41) exclusively for this option.
+ aliases: ['nfsv4.1']
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv41_pnfs:
+ description:
+ - status of NFSv41 pNFS.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ nfsv4_numeric_ids:
+ description:
+ - status of NFSv4 numeric ID's.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ vstorage_state:
+ description:
+ - status of vstorage_state.
+ choices: ['enabled', 'disabled']
+ type: str
+ nfsv4_id_domain:
+ description:
+ - Name of the nfsv4_id_domain to use.
+ type: str
+ nfsv40_acl:
+ description:
+ - status of NFS v4.0 ACL feature
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv40_read_delegation:
+ description:
+ - status for NFS v4.0 read delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv40_write_delegation:
+ description:
+ - status for NFS v4.0 write delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv41_acl:
+ description:
+ - status of NFS v4.1 ACL feature
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv41_read_delegation:
+ description:
+ - status for NFS v4.1 read delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv41_write_delegation:
+ description:
+ - status for NFS v4.1 write delegation feature.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ nfsv40_referrals:
+ description:
+ - status for NFS v4.0 referrals.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ nfsv41_referrals:
+ description:
+ - status for NFS v4.1 referrals.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+ tcp:
+ description:
+ - Enable TCP (support from ONTAP 9.3 onward).
+ choices: ['enabled', 'disabled']
+ type: str
+ udp:
+ description:
+ - Enable UDP (support from ONTAP 9.3 onward).
+ choices: ['enabled', 'disabled']
+ type: str
+ showmount:
+ description:
+ - Whether SVM allows showmount.
+ - With REST, supported from ONTAP 9.8 version.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.7.0
+ tcp_max_xfer_size:
+ description:
+ - TCP Maximum Transfer Size (bytes). The default value is 65536.
+ - This option requires ONTAP 9.11.0 or later in REST.
+ version_added: 2.8.0
+ type: int
+ windows:
+ description:
+ - This option can be set or modified when using REST.
+ - It requires ONTAP 9.11.0 or later.
+ version_added: 22.3.0
+ type: dict
+ suboptions:
+ default_user:
+ description:
+ - Specifies the default Windows user for the NFS server.
+ type: str
+ map_unknown_uid_to_default_user:
+ description:
+ - Specifies whether or not the mapping of an unknown UID to the default Windows user is enabled.
+ type: bool
+ v3_ms_dos_client_enabled:
+ description:
+ - Specifies whether NFSv3 MS-DOS client support is enabled.
+ type: bool
+ root:
+ description:
+ - This option can be set or modified when using REST.
+ - It requires ONTAP 9.11.0 or later.
+ type: dict
+ version_added: 22.3.0
+ suboptions:
+ ignore_nt_acl:
+ description:
+ - Specifies whether Windows ACLs affect root access from NFS.
+ - If this option is enabled, root access from NFS ignores the NT ACL set on the file or directory.
+ type: bool
+ skip_write_permission_check:
+ description:
+ - Specifies if permission checks are to be skipped for NFS WRITE calls from root/owner.
+ - For copying read-only files to a destination folder which has inheritable ACLs, this option must be enabled.
+ type: bool
+ security:
+ description:
+ - This option can be set or modified when using REST.
+ - It requires ONTAP 9.11.0 or later.
+ type: dict
+ version_added: 22.3.0
+ suboptions:
+ chown_mode:
+ description:
+ - Specifies whether file ownership can be changed only by the superuser, or if a non-root user can also change file ownership.
+ - If this option is set to restricted, file ownership can be changed only by the superuser,
+ even though the on-disk permissions allow a non-root user to change file ownership.
+ - If this option is set to unrestricted, file ownership can be changed by the superuser and by the non-root user,
+ depending upon the access granted by on-disk permissions.
+      - If this option is set to use_export_policy, file ownership can be changed in accordance with the relevant export rules.
+ choices: ['restricted', 'unrestricted', 'use_export_policy']
+ type: str
+ nt_acl_display_permission:
+ description:
+ - Controls the permissions that are displayed to NFSv3 and NFSv4 clients on a file or directory that has an NT ACL set.
+ - When true, the displayed permissions are based on the maximum access granted by the NT ACL to any user.
+ - When false, the displayed permissions are based on the minimum access granted by the NT ACL to any user.
+ type: bool
+ ntfs_unix_security:
+ description:
+ - Specifies how NFSv3 security changes affect NTFS volumes.
+ - If this option is set to ignore, ONTAP ignores NFSv3 security changes.
+ - If this option is set to fail, this overrides the UNIX security options set in the relevant export rules.
+ - If this option is set to use_export_policy, ONTAP processes NFSv3 security changes in accordance with the relevant export rules.
+ choices: ['ignore', 'fail', 'use_export_policy']
+ type: str
+ permitted_encryption_types:
+ description:
+ - Specifies the permitted encryption types for Kerberos over NFS.
+ type: list
+ elements: str
+ rpcsec_context_idle:
+ description:
+ - Specifies, in seconds, the amount of time a RPCSEC_GSS context is permitted to remain unused before it is deleted.
+ type: int
+"""
+
+EXAMPLES = """
+ - name: change nfs status
+ netapp.ontap.na_ontap_nfs:
+ state: present
+ service_state: stopped
+ vserver: vs_hack
+ nfsv3: disabled
+ nfsv4: disabled
+ nfsv41: enabled
+ tcp: disabled
+ udp: disabled
+ vstorage_state: disabled
+ nfsv4_id_domain: example.com
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: create nfs configuration - REST
+ netapp.ontap.na_ontap_nfs:
+ state: present
+ service_state: stopped
+ vserver: vs_hack
+ nfsv3: disabled
+ nfsv4: disabled
+ nfsv41: enabled
+ tcp: disabled
+ udp: disabled
+ vstorage_state: disabled
+ nfsv4_id_domain: example.com
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify nfs configuration - REST
+ netapp.ontap.na_ontap_nfs:
+ state: present
+ vserver: vs_hack
+ root:
+ ignore_nt_acl: true
+ skip_write_permission_check: true
+ security:
+ chown_mode: restricted
+ nt_acl_display_permission: true
+ ntfs_unix_security: fail
+ rpcsec_context_idle: 5
+ windows:
+ v3_ms_dos_client_enabled: true
+ map_unknown_uid_to_default_user: false
+ default_user: test_user
+ tcp_max_xfer_size: 16384
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete nfs configuration
+ netapp.ontap.na_ontap_nfs:
+ state: absent
+ vserver: vs_hack
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppONTAPNFS:
+ """ object initialize and class methods """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ service_state=dict(required=False, type='str', choices=['started', 'stopped']),
+ vserver=dict(required=True, type='str'),
+ nfsv3=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv3_fsid_change=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv4_fsid_change=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv4=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv41=dict(required=False, type='str', default=None, choices=['enabled', 'disabled'], aliases=['nfsv4.1']),
+ nfsv41_pnfs=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv4_numeric_ids=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ vstorage_state=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ tcp=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']),
+ udp=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']),
+ nfsv4_id_domain=dict(required=False, type='str', default=None),
+ nfsv40_acl=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv40_read_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv40_referrals=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv40_write_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv41_acl=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv41_read_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv41_referrals=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ nfsv41_write_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']),
+ showmount=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']),
+ tcp_max_xfer_size=dict(required=False, default=None, type='int'),
+
+ # security
+ security=dict(type='dict', options=dict(
+ rpcsec_context_idle=dict(required=False, type='int'),
+ ntfs_unix_security=dict(required=False, type='str', choices=['ignore', 'fail', 'use_export_policy']),
+ chown_mode=dict(required=False, type='str', choices=['restricted', 'unrestricted', 'use_export_policy']),
+ nt_acl_display_permission=dict(required=False, type='bool'),
+ permitted_encryption_types=dict(type='list', elements='str', required=False),
+ )),
+ # root
+ root=dict(type='dict', options=dict(
+ ignore_nt_acl=dict(required=False, type='bool'),
+ skip_write_permission_check=dict(required=False, type='bool'),
+ )),
+ # windows
+ windows=dict(type='dict', options=dict(
+ map_unknown_uid_to_default_user=dict(required=False, type='bool'),
+ v3_ms_dos_client_enabled=dict(required=False, type='bool'),
+ default_user=dict(required=False, type='str'),
+ )),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.zapi_names = {
+ 'nfsv3': 'is-nfsv3-enabled', # REST: protocol.v3_enabled
+ 'nfsv3_fsid_change': 'is-nfsv3-fsid-change-enabled',
+ 'nfsv4_fsid_change': 'is-nfsv4-fsid-change-enabled',
+ 'nfsv4': 'is-nfsv40-enabled', # REST: protocol.v40_enabled
+ 'nfsv41': 'is-nfsv41-enabled', # REST: protocol.v41_enabled
+ 'nfsv41_pnfs': 'is-nfsv41-pnfs-enabled', # protocol.v41_features.pnfs_enabled
+ 'nfsv4_numeric_ids': 'is-nfsv4-numeric-ids-enabled',
+ 'vstorage_state': 'is-vstorage-enabled', # REST: vstorage_enabled
+ 'nfsv4_id_domain': 'nfsv4-id-domain', # REST: protocol.v4_id_domain
+ 'tcp': 'is-tcp-enabled', # REST: transport.tcp_enabled
+ 'udp': 'is-udp-enabled', # REST: transport.udp_enabled
+ 'nfsv40_acl': 'is-nfsv40-acl-enabled', # REST: protocol.v40_features.acl_enabled
+ 'nfsv40_read_delegation': 'is-nfsv40-read-delegation-enabled', # REST: protocol.v40_features.read_delegation_enabled
+ 'nfsv40_referrals': 'is-nfsv40-referrals-enabled',
+ 'nfsv40_write_delegation': 'is-nfsv40-write-delegation-enabled', # REST: protocol.v40_features.write_delegation_enabled
+ 'nfsv41_acl': 'is-nfsv41-acl-enabled', # REST: protocol.v41_features.acl_enabled
+ 'nfsv41_read_delegation': 'is-nfsv41-read-delegation-enabled', # REST: protocol.v41_features.read_delegation_enabled
+ 'nfsv41_referrals': 'is-nfsv41-referrals-enabled',
+ 'nfsv41_write_delegation': 'is-nfsv41-write-delegation-enabled', # REST: protocol.v41_features.write_delegation_enabled
+ 'showmount': 'showmount', # REST: showmount_enabled
+ 'tcp_max_xfer_size': 'tcp-max-xfer-size'
+ }
+
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ unsupported_rest_properties = ['nfsv3_fsid_change',
+ 'nfsv4_fsid_change',
+ 'nfsv4_numeric_ids',
+ 'nfsv40_referrals',
+ 'nfsv41_referrals']
+ partially_supported_rest_properties = [['showmount', (9, 8)], ['root', (9, 11, 0)], ['windows', (9, 11, 0)], ['security', (9, 11, 0)],
+ ['tcp_max_xfer_size', (9, 11, 0)]]
+ self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
+ if 'nfsv4.1' in self.parameters:
+ self.module.warn('Error: "nfsv4.1" option conflicts with Ansible naming conventions - please use "nfsv41".')
+ self.svm_uuid = None
+ self.unsupported_zapi_properties = ['root', 'windows', 'security']
+ self.parameters = self.na_helper.filter_out_none_entries(self.parameters)
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ for unsupported_zapi_property in self.unsupported_zapi_properties:
+ if self.parameters.get(unsupported_zapi_property) is not None:
+ msg = "Error: %s option is not supported with ZAPI. It can only be used with REST." % unsupported_zapi_property
+ self.module.fail_json(msg=msg)
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_nfs_service(self):
+ if self.use_rest:
+ return self.get_nfs_service_rest()
+ nfs_get_iter = netapp_utils.zapi.NaElement('nfs-service-get-iter')
+ nfs_info = netapp_utils.zapi.NaElement('nfs-info')
+ nfs_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(nfs_info)
+ nfs_get_iter.add_child_elem(query)
+ result = self.server.invoke_successfully(nfs_get_iter, True)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return self.format_return(result)
+ return None
+
+ def format_return(self, result):
+ attributes_list = result.get_child_by_name('attributes-list').get_child_by_name('nfs-info')
+ return {
+ 'nfsv3': self.convert_from_bool(attributes_list.get_child_content('is-nfsv3-enabled')),
+ 'nfsv3_fsid_change': self.convert_from_bool(attributes_list.get_child_content('is-nfsv3-fsid-change-enabled')),
+ 'nfsv4_fsid_change': self.convert_from_bool(attributes_list.get_child_content('is-nfsv4-fsid-change-enabled')),
+ 'nfsv4': self.convert_from_bool(attributes_list.get_child_content('is-nfsv40-enabled')),
+ 'nfsv41': self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-enabled')),
+ 'nfsv41_pnfs': self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-pnfs-enabled')),
+ 'nfsv4_numeric_ids': self.convert_from_bool(attributes_list.get_child_content('is-nfsv4-numeric-ids-enabled')),
+ 'vstorage_state': self.convert_from_bool(attributes_list.get_child_content('is-vstorage-enabled')),
+ 'nfsv4_id_domain': attributes_list.get_child_content('nfsv4-id-domain'),
+ 'tcp': self.convert_from_bool(attributes_list.get_child_content('is-tcp-enabled')),
+ 'udp': self.convert_from_bool(attributes_list.get_child_content('is-udp-enabled')),
+ 'nfsv40_acl': self.convert_from_bool(attributes_list.get_child_content('is-nfsv40-acl-enabled')),
+ 'nfsv40_read_delegation': self.convert_from_bool(attributes_list.get_child_content('is-nfsv40-read-delegation-enabled')),
+ 'nfsv40_referrals': self.convert_from_bool(attributes_list.get_child_content('is-nfsv40-referrals-enabled')),
+ 'nfsv40_write_delegation': self.convert_from_bool(attributes_list.get_child_content('is-nfsv40-write-delegation-enabled')),
+ 'nfsv41_acl': self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-acl-enabled')),
+ 'nfsv41_read_delegation': self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-read-delegation-enabled')),
+ 'nfsv41_referrals': self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-referrals-enabled')),
+ 'nfsv41_write_delegation': self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-write-delegation-enabled')),
+ 'showmount': self.convert_from_bool(attributes_list.get_child_content('showmount')),
+ 'tcp_max_xfer_size': self.na_helper.get_value_for_int(True, attributes_list.get_child_content('tcp-max-xfer-size'))
+ }
+
+ def get_nfs_status(self):
+ nfs_status = netapp_utils.zapi.NaElement('nfs-status')
+ result = self.server.invoke_successfully(nfs_status, True)
+ return result.get_child_content('is-enabled')
+
+ def create_nfs_service(self):
+ if self.use_rest:
+ return self.create_nfs_service_rest()
+        # This is what the old module did, not sure what happens if nfs doesn't exist.
+ self.enable_nfs()
+
+ def enable_nfs(self):
+ """
+ enable nfs (online). If the NFS service was not explicitly created,
+ this API will create one with default options.
+ """
+ nfs_enable = netapp_utils.zapi.NaElement.create_node_with_children('nfs-enable')
+ try:
+ self.server.invoke_successfully(nfs_enable, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error changing the service_state of nfs %s to %s: %s' %
+ (self.parameters['vserver'], self.parameters['service_state'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def disable_nfs(self):
+ """
+ disable nfs (offline).
+ """
+ nfs_disable = netapp_utils.zapi.NaElement.create_node_with_children('nfs-disable')
+ try:
+ self.server.invoke_successfully(nfs_disable, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error changing the service_state of nfs %s to %s: %s' %
+ (self.parameters['vserver'], self.parameters['service_state'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_nfs_service(self, modify):
+ if self.use_rest:
+ return self.modify_nfs_service_rest(modify)
+        # This is what the old module did, not sure what happens if nfs doesn't exist.
+ nfs_modify = netapp_utils.zapi.NaElement('nfs-service-modify')
+ service_state = modify.pop('service_state', None)
+ self.modify_service_state(service_state)
+ for each in modify:
+ if each in ['nfsv4_id_domain', 'tcp_max_xfer_size']:
+ nfs_modify.add_new_child(self.zapi_names[each], str(modify[each]))
+ else:
+ nfs_modify.add_new_child(self.zapi_names[each], self.convert_to_bool(modify[each]))
+ try:
+ self.server.invoke_successfully(nfs_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying nfs: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_service_state(self, service_state):
+ nfs_enabled = self.get_nfs_status()
+ if service_state == 'started' and nfs_enabled == 'false':
+ self.enable_nfs()
+ elif service_state == 'stopped' and nfs_enabled == 'true':
+ self.disable_nfs()
+
+ def delete_nfs_service(self):
+ """
+ delete nfs service.
+ """
+ if self.use_rest:
+ return self.delete_nfs_service_rest()
+ nfs_delete = netapp_utils.zapi.NaElement.create_node_with_children('nfs-service-destroy')
+ try:
+ self.server.invoke_successfully(nfs_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting nfs: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_nfs_service_rest(self):
+ api = 'protocols/nfs/services'
+ params = {'svm.name': self.parameters['vserver'],
+ 'fields': 'protocol.v3_enabled,'
+ 'protocol.v40_enabled,'
+ 'protocol.v41_enabled,'
+ 'protocol.v41_features.pnfs_enabled,'
+ 'vstorage_enabled,'
+ 'protocol.v4_id_domain,'
+ 'transport.tcp_enabled,'
+ 'transport.udp_enabled,'
+ 'protocol.v40_features.acl_enabled,'
+ 'protocol.v40_features.read_delegation_enabled,'
+ 'protocol.v40_features.write_delegation_enabled,'
+ 'protocol.v41_features.acl_enabled,'
+ 'protocol.v41_features.read_delegation_enabled,'
+ 'protocol.v41_features.write_delegation_enabled,'
+ 'enabled,'
+ 'svm.uuid,'}
+ if self.parameters.get('showmount'):
+ params['fields'] += 'showmount_enabled,'
+ if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 0):
+ params['fields'] += 'root.*,security.*,windows.*,transport.tcp_max_transfer_size'
+ # TODO: might return more than 1 record, find out
+ record, error = rest_generic.get_one_record(self.rest_api, api, params)
+ if error:
+ self.module.fail_json(msg='Error getting nfs services for SVM %s: %s' % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+ return self.format_get_nfs_service_rest(record) if record else record
+
+ def format_get_nfs_service_rest(self, record):
+ return {
+ 'nfsv3': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v3_enabled'])),
+ 'nfsv4': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v40_enabled'])),
+ 'nfsv41': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v41_enabled'])),
+ 'nfsv41_pnfs': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v41_features', 'pnfs_enabled'])),
+ 'vstorage_state': self.convert_from_bool(self.na_helper.safe_get(record, ['vstorage_enabled'])),
+ 'nfsv4_id_domain': self.na_helper.safe_get(record, ['protocol', 'v4_id_domain']),
+ 'tcp': self.convert_from_bool(self.na_helper.safe_get(record, ['transport', 'tcp_enabled'])),
+ 'udp': self.convert_from_bool(self.na_helper.safe_get(record, ['transport', 'udp_enabled'])),
+ 'tcp_max_xfer_size': self.na_helper.safe_get(record, ['transport', 'tcp_max_transfer_size']),
+ 'nfsv40_acl': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v40_features', 'acl_enabled'])),
+ 'nfsv40_read_delegation': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v40_features', 'read_delegation_enabled'])),
+ 'nfsv40_write_delegation': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v40_features', 'write_delegation_enabled'])),
+ 'nfsv41_acl': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v41_features', 'acl_enabled'])),
+ 'nfsv41_read_delegation': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v41_features', 'read_delegation_enabled'])),
+ 'nfsv41_write_delegation': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v41_features', 'write_delegation_enabled'])),
+ 'showmount': self.convert_from_bool(self.na_helper.safe_get(record, ['showmount_enabled'])),
+ 'svm_uuid': self.na_helper.safe_get(record, ['svm', 'uuid']),
+ 'service_state': self.convert_from_bool_to_started(self.na_helper.safe_get(record, ['enabled'])),
+ 'root': self.na_helper.safe_get(record, ['root']),
+ 'windows': self.na_helper.safe_get(record, ['windows']),
+ 'security': self.na_helper.safe_get(record, ['security']),
+ }
+
+ def create_nfs_service_rest(self):
+ api = 'protocols/nfs/services'
+ body = {'svm.name': self.parameters['vserver']}
+ body.update(self.create_modify_body(body))
+ dummy, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error creating nfs service for SVM %s: %s' % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_nfs_service_rest(self):
+ if self.svm_uuid is None:
+ self.module.fail_json(msg='Error deleting nfs service for SVM %s: svm.uuid is None' % self.parameters['vserver'])
+ dummy, error = rest_generic.delete_async(self.rest_api, 'protocols/nfs/services', self.svm_uuid, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error deleting nfs service for SVM %s' % self.parameters['vserver'])
+
+ def modify_nfs_service_rest(self, modify):
+ if self.svm_uuid is None:
+ self.module.fail_json(msg='Error modifying nfs service for SVM %s: svm.uuid is None' % self.parameters['vserver'])
+ api = 'protocols/nfs/services'
+ body = {}
+ body.update(self.create_modify_body(body, modify))
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error modifying nfs service for SVM %s: %s' % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_modify_body(self, body, modify=None):
+ params = modify or self.parameters
+ if params.get('nfsv3') is not None:
+ body['protocol.v3_enabled'] = self.convert_to_bool(params['nfsv3'])
+ if params.get('nfsv4') is not None:
+ body['protocol.v40_enabled'] = self.convert_to_bool(params['nfsv4'])
+ if params.get('nfsv41') is not None:
+ body['protocol.v41_enabled'] = self.convert_to_bool(params['nfsv41'])
+ if params.get('nfsv41_pnfs') is not None:
+ body['protocol.v41_features.pnfs_enabled'] = self.convert_to_bool(params['nfsv41_pnfs'])
+ if params.get('vstorage_state') is not None:
+ body['vstorage_enabled'] = self.convert_to_bool(params['vstorage_state'])
+ if params.get('nfsv4_id_domain') is not None:
+ body['protocol.v4_id_domain'] = params['nfsv4_id_domain']
+ if params.get('tcp') is not None:
+ body['transport.tcp_enabled'] = self.convert_to_bool(params['tcp'])
+ if params.get('udp') is not None:
+ body['transport.udp_enabled'] = self.convert_to_bool(params['udp'])
+ if params.get('nfsv40_acl') is not None:
+ body['protocol.v40_features.acl_enabled'] = self.convert_to_bool(params['nfsv40_acl'])
+ if params.get('nfsv40_read_delegation') is not None:
+ body['protocol.v40_features.read_delegation_enabled'] = self.convert_to_bool(params['nfsv40_read_delegation'])
+ if params.get('nfsv40_write_delegation') is not None:
+ body['protocol.v40_features.write_delegation_enabled'] = self.convert_to_bool(params['nfsv40_write_delegation'])
+ if params.get('nfsv41_acl') is not None:
+ body['protocol.v41_features.acl_enabled'] = self.convert_to_bool(params['nfsv41_acl'])
+ if params.get('nfsv41_read_delegation') is not None:
+ body['protocol.v41_features.read_delegation_enabled'] = self.convert_to_bool(params['nfsv41_read_delegation'])
+ if params.get('nfsv41_write_delegation') is not None:
+ body['protocol.v41_features.write_delegation_enabled'] = self.convert_to_bool(params['nfsv41_write_delegation'])
+ if params.get('showmount') is not None:
+ body['showmount_enabled'] = self.convert_to_bool(params['showmount'])
+ # Tested this out, in both create and modify, changing the service_state will enable and disabled the service
+ # during both a create and modify.
+ if params.get('service_state') is not None:
+ body['enabled'] = self.convert_to_bool(params['service_state'])
+ if params.get('root') is not None:
+ body['root'] = params['root']
+ if params.get('windows') is not None:
+ body['windows'] = params['windows']
+ if params.get('security') is not None:
+ body['security'] = params['security']
+ if params.get('tcp_max_xfer_size') is not None:
+ body['transport.tcp_max_transfer_size'] = params['tcp_max_xfer_size']
+ return body
+
+ def convert_to_bool(self, value):
+ return 'true' if value in ['enabled', 'started'] else 'false'
+
+ def convert_from_bool(self, value):
+ return 'enabled' if value in ['true', True] else 'disabled'
+
+ def convert_from_bool_to_started(self, value):
+ return 'started' if value in ['true', True] else 'stopped'
+
+ def validate_modify(self, current, modify):
+ '''Earlier ONTAP versions do not support tcp_max_xfer_size'''
+ if 'tcp_max_xfer_size' in modify and current['tcp_max_xfer_size'] is None:
+ self.module.fail_json(msg='Error: tcp_max_xfer_size is not supported on ONTAP 9.3 or earlier.')
+
+ def apply(self):
+ current = self.get_nfs_service()
+ if self.use_rest and current is not None:
+ self.svm_uuid = current.get('svm_uuid')
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = None
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if not self.use_rest:
+ self.validate_modify(current, modify)
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_nfs_service()
+ elif cd_action == 'delete':
+ self.delete_nfs_service()
+ elif modify:
+ self.modify_nfs_service(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ """ Create object and call apply """
+ obj = NetAppONTAPNFS()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py
new file mode 100644
index 000000000..ced6f44be
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_node
+short_description: NetApp ONTAP Modify or Rename a node
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify or Rename an ONTAP node.
+options:
+ name:
+ description:
+ - The name for the node
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - The name of the node to be renamed. If I(name) already exists, no action will be performed.
+ type: str
+
+ location:
+ description:
+ - The location for the node
+ type: str
+
+ asset_tag:
+ description:
+ - The asset tag for the node, not supported by REST
+ type: str
+
+'''
+
+EXAMPLES = """
+- name: modify node
+ na_ontap_node:
+ name: laurentncluster-2
+ location: SF1
+ asset_tag: mytag
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: rename node
+ na_ontap_node:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ from_name: laurentn-vsim1
+ name: laurentncluster-2
+
+- name: modify and rename node
+ na_ontap_node:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ location: SF2
+ from_name: laurentn-vsim1
+ name: laurentncluster-2
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapNode(object):
+ """
+ Rename and modify node
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ location=dict(required=False, type='str'),
+ asset_tag=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = OntapRestAPI(self.module)
+
+ # some attributes are not supported in REST implementation
+ unsupported_rest_properties = ['asset_tag']
+ self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
+ if not self.use_rest:
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ return
+
+ def update_node_details(self, uuid, modify):
+ api = 'cluster/nodes/%s' % uuid
+ data = {}
+ if 'from_name' in self.parameters:
+ data['name'] = self.parameters['name']
+ if 'location' in self.parameters:
+ data['location'] = self.parameters['location']
+ if not data:
+ self.module.fail_json(msg='Nothing to update in the modified attributes: %s' % modify)
+ response, error = self.rest_api.patch(api, body=data)
+ response, error = rrh.check_for_error_and_job_results(api, response, error, self.rest_api)
+ if error:
+ self.module.fail_json(msg='Error while modifying node details: %s' % error)
+
+ def modify_node(self, modify=None, uuid=None):
+ """
+ Modify an existing node
+ :return: none
+ """
+ if self.use_rest:
+ self.update_node_details(uuid, modify)
+ else:
+ node_obj = netapp_utils.zapi.NaElement('system-node-modify')
+ node_obj.add_new_child('node', self.parameters['name'])
+ if 'location' in self.parameters:
+ node_obj.add_new_child('node-location', self.parameters['location'])
+ if 'asset_tag' in self.parameters:
+ node_obj.add_new_child('node-asset-tag', self.parameters['asset_tag'])
+ try:
+ self.cluster.invoke_successfully(node_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying node: %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def rename_node(self):
+ """
+ Rename an existing node
+ :return: none
+ """
+ node_obj = netapp_utils.zapi.NaElement('system-node-rename')
+ node_obj.add_new_child('node', self.parameters['from_name'])
+ node_obj.add_new_child('new-name', self.parameters['name'])
+ try:
+ self.cluster.invoke_successfully(node_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error renaming node: %s' %
+ (to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_node(self, name):
+ if self.use_rest:
+ api = 'cluster/nodes'
+ query = {
+ 'fields': 'name,uuid,location',
+ 'name': name
+ }
+ message, error = self.rest_api.get(api, query)
+ node, error = rrh.check_for_0_or_1_records(api, message, error)
+ if error:
+ self.module.fail_json(msg='Error while fetching node details: %s' % error)
+ if node:
+ if 'location' not in message['records'][0]:
+ node_location = ''
+ else:
+ node_location = message['records'][0]['location']
+ return dict(
+ name=message['records'][0]['name'],
+ uuid=message['records'][0]['uuid'],
+ location=node_location)
+ return None
+ else:
+ node_obj = netapp_utils.zapi.NaElement('system-node-get')
+ node_obj.add_new_child('node', name)
+ try:
+ result = self.cluster.invoke_successfully(node_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ if to_native(error.code) == "13115":
+ # 13115 (EINVALIDINPUTERROR) if the node does not exist
+ return None
+ else:
+ self.module.fail_json(msg=to_native(
+ error), exception=traceback.format_exc())
+ attributes = result.get_child_by_name('attributes')
+ if attributes is not None:
+ node_info = attributes.get_child_by_name('node-details-info')
+ node_location = node_info.get_child_content('node-location')
+ node_location = node_location if node_location is not None else ''
+ node_tag = node_info.get_child_content('node-tag')
+ node_tag = node_tag if node_tag is not None else ''
+ return dict(
+ name=node_info['node'],
+ location=node_location,
+ asset_tag=node_tag)
+ return None
+
+ def apply(self):
+ from_exists = None
+ modify = None
+ uuid = None
+ current = self.get_node(self.parameters['name'])
+ if current is None and 'from_name' in self.parameters:
+ from_exists = self.get_node(self.parameters['from_name'])
+ if from_exists is None:
+ self.module.fail_json(msg='Node not found: %s' % self.parameters['from_name'])
+ uuid = from_exists['uuid'] if 'uuid' in from_exists else None
+ # since from_exists contains the node name, modify will at least contain the node name if a rename is required.
+ modify = self.na_helper.get_modified_attributes(from_exists, self.parameters)
+ elif current is not None:
+ uuid = current['uuid'] if 'uuid' in current else None
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ allowed_options = ['name', 'location']
+ if not self.use_rest:
+ allowed_options.append('asset_tag')
+ if modify and any(x not in allowed_options for x in modify):
+ self.module.fail_json(msg='Too many modified attributes found: %s, allowed: %s' % (modify, allowed_options))
+ if current is None and from_exists is None:
+ msg = 'from_name: %s' % self.parameters.get('from_name') if 'from_name' in self.parameters \
+ else 'name: %s' % self.parameters['name']
+ self.module.fail_json(msg='Node not found: %s' % msg)
+ if self.na_helper.changed:
+ if not self.module.check_mode:
+ if not self.use_rest:
+ if 'name' in modify:
+ self.rename_node()
+ modify.pop('name')
+ if modify:
+ self.modify_node(modify, uuid)
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Start, Stop and Enable node services.
+ """
+ obj = NetAppOntapNode()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py
new file mode 100644
index 000000000..90c6cf655
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = """
+module: na_ontap_ntfs_dacl
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp Ontap create, delete or modify NTFS DACL (discretionary access control list)
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+- Create, modify, or destroy a NTFS DACL
+
+options:
+ state:
+ description:
+ - Whether the specified NTFS DACL should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the NTFS DACL.
+ required: true
+ type: str
+
+ security_descriptor:
+ description:
+ - Specifies the NTFS security descriptor.
+ required: true
+ type: str
+
+ access_type:
+ description:
+    - Specifies the DACL ACE's access type.
+ choices: ['allow', 'deny']
+ required: true
+ type: str
+
+ account:
+ description:
+ - Specifies DACL ACE's SID or domain account name of NTFS security descriptor.
+ required: true
+ type: str
+
+ rights:
+ description:
+ - Specifies DACL ACE's access rights. Mutually exclusive with advanced_access_rights.
+ choices: ['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write']
+ type: str
+
+ apply_to:
+ description:
+    - Specifies where to apply the DACL entry.
+ choices: ['this_folder', 'sub_folders', 'files']
+ type: list
+ elements: str
+
+ advanced_access_rights:
+ description:
+ - Specifies DACL ACE's Advanced access rights. Mutually exclusive with rights.
+ choices: ['read_data', 'write_data', 'append_data', 'read_ea', 'write_ea', 'execute_file', 'delete_child',
+ 'read_attr', 'write_attr', 'delete', 'read_perm', 'write_perm', 'write_owner', 'full_control']
+ type: list
+ elements: str
+
+"""
+
+EXAMPLES = """
+ - name: Add NTFS DACL
+ na_ontap_ntfs_dacl:
+ state: present
+ vserver: SVM1
+ security_descriptor: ansible_sd
+ access_type: allow
+ account: DOMAIN\\Account
+ rights: modify
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+
+ - name: Modify NTFS DACL
+ na_ontap_ntfs_dacl:
+ state: present
+ vserver: SVM1
+ security_descriptor: ansible_sd
+ access_type: full_control
+ account: DOMAIN\\Account
+ rights: modify
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Remove NTFS DACL
+ na_ontap_ntfs_dacl:
+ state: absent
+ vserver: SVM1
+ security_descriptor: ansible_sd
+ account: DOMAIN\\Account
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapNtfsDacl(object):
+    """
+    Creates, Modifies and Destroys an NTFS DACL
+    """
+
+    def __init__(self):
+        """
+        Initialize the Ontap NTFS DACL class: build the argument spec,
+        validate module parameters, and set up the ZAPI connection.
+        """
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            security_descriptor=dict(required=True, type='str'),
+            access_type=dict(required=True, choices=['allow', 'deny'], type='str'),
+            account=dict(required=True, type='str'),
+            rights=dict(required=False,
+                        choices=['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write'],
+                        type='str'),
+            apply_to=dict(required=False, choices=['this_folder', 'sub_folders', 'files'], type='list', elements='str'),
+            advanced_access_rights=dict(required=False,
+                                        choices=['read_data', 'write_data', 'append_data', 'read_ea', 'write_ea',
+                                                 'execute_file', 'delete_child', 'read_attr', 'write_attr', 'delete',
+                                                 'read_perm', 'write_perm', 'write_owner', 'full_control'],
+                                        type='list', elements='str'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True,
+            mutually_exclusive=[('rights', 'advanced_access_rights')],
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        # This module is ZAPI-only: fail early if netapp_lib is missing.
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg='The python NetApp-Lib module is required')
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_dacl(self):
+        """
+        Fetch the DACL entry matching vserver, security descriptor, access type and account.
+        :return: dict describing the DACL entry, or None if not found.
+        """
+
+        dacl_entry = None
+        advanced_access_list = None
+
+        dacl_get_iter = netapp_utils.zapi.NaElement('file-directory-security-ntfs-dacl-get-iter')
+        dacl_info = netapp_utils.zapi.NaElement('file-directory-security-ntfs-dacl')
+        dacl_info.add_new_child('vserver', self.parameters['vserver'])
+        dacl_info.add_new_child('ntfs-sd', self.parameters['security_descriptor'])
+        dacl_info.add_new_child('access-type', self.parameters['access_type'])
+        dacl_info.add_new_child('account', self.parameters['account'])
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(dacl_info)
+        dacl_get_iter.add_child_elem(query)
+        try:
+            result = self.server.invoke_successfully(dacl_get_iter, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching %s DACL for account %s for security descriptor %s: %s' % (
+                self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'],
+                to_native(error)), exception=traceback.format_exc())
+
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            attributes_list = result.get_child_by_name('attributes-list')
+
+            if attributes_list is None:
+                return None
+
+            dacl = attributes_list.get_child_by_name('file-directory-security-ntfs-dacl')
+
+            # Collect the inheritance levels the ACE applies to.
+            apply_to_list = []
+            apply_to = dacl.get_child_by_name('apply-to')
+            for apply_child in apply_to.get_children():
+                inheritance_level = apply_child.get_content()
+
+                apply_to_list.append(inheritance_level)
+
+            if dacl.get_child_by_name('advanced-rights'):
+
+                advanced_access_list = []
+                advanced_access = dacl.get_child_by_name('advanced-rights')
+                for right in advanced_access.get_children():
+                    advanced_access_right = right.get_content()
+                    advanced_right = {
+                        'advanced_access_rights': advanced_access_right
+                    }
+                    advanced_access_list.append(advanced_right)
+
+            dacl_entry = {
+                'access_type': dacl.get_child_content('access-type'),
+                'account': dacl.get_child_content('account'),
+                'apply_to': apply_to_list,
+                'security_descriptor': dacl.get_child_content('ntfs-sd'),
+                'readable_access_rights': dacl.get_child_content('readable-access-rights'),
+                'vserver': dacl.get_child_content('vserver'),
+            }
+
+            # NOTE(review): advanced rights are stored under key 'advanced_rights' (list of dicts),
+            # while the module parameter is 'advanced_access_rights' (list of strings) - it looks
+            # like get_modified_attributes may not compare these as intended; confirm against helper.
+            if advanced_access_list is not None:
+                dacl_entry['advanced_rights'] = advanced_access_list
+            else:
+                dacl_entry['rights'] = dacl.get_child_content('rights')
+        return dacl_entry
+
+    def add_dacl(self):
+        """
+        Adds a new NTFS DACL to an existing NTFS security descriptor
+        """
+
+        dacl_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-add")
+        dacl_obj.add_new_child("access-type", self.parameters['access_type'])
+        dacl_obj.add_new_child("account", self.parameters['account'])
+        dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor'])
+
+        # One of the two mutually exclusive rights options must be supplied on create.
+        if 'rights' not in self.parameters.keys() and 'advanced_access_rights' not in self.parameters.keys():
+            self.module.fail_json(msg='Either rights or advanced_access_rights must be specified.')
+
+        if self.parameters.get('apply_to'):
+            apply_to_obj = netapp_utils.zapi.NaElement("apply-to")
+
+            for apply_entry in self.parameters['apply_to']:
+                apply_to_obj.add_new_child('inheritance-level', apply_entry)
+            dacl_obj.add_child_elem(apply_to_obj)
+
+        if self.parameters.get('advanced_access_rights'):
+            access_rights_obj = netapp_utils.zapi.NaElement("advanced-rights")
+
+            for right in self.parameters['advanced_access_rights']:
+                access_rights_obj.add_new_child('advanced-access-rights', right)
+
+            dacl_obj.add_child_elem(access_rights_obj)
+
+        if self.parameters.get('rights'):
+            dacl_obj.add_new_child("rights", self.parameters['rights'])
+
+        try:
+            self.server.invoke_successfully(dacl_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error adding %s DACL for account %s for security descriptor %s: %s' % (
+                self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'], to_native(error)),
+                exception=traceback.format_exc())
+
+    def remove_dacl(self):
+        """
+        Deletes a NTFS DACL from an existing NTFS security descriptor
+        """
+        dacl_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-remove")
+        dacl_obj.add_new_child("access-type", self.parameters['access_type'])
+        dacl_obj.add_new_child("account", self.parameters['account'])
+        dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor'])
+
+        try:
+            self.server.invoke_successfully(dacl_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting %s DACL for account %s for security descriptor %s: %s' % (
+                self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'], to_native(error)),
+                exception=traceback.format_exc())
+
+    def modify_dacl(self):
+        """
+        Modifies a NTFS DACL on an existing NTFS security descriptor
+        """
+
+        dacl_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-modify")
+        dacl_obj.add_new_child("access-type", self.parameters['access_type'])
+        dacl_obj.add_new_child("account", self.parameters['account'])
+        dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor'])
+
+        if self.parameters.get('apply_to'):
+            apply_to_obj = netapp_utils.zapi.NaElement("apply-to")
+
+            for apply_entry in self.parameters['apply_to']:
+                apply_to_obj.add_new_child('inheritance-level', apply_entry)
+            dacl_obj.add_child_elem(apply_to_obj)
+
+        if self.parameters.get('advanced_access_rights'):
+            access_rights_obj = netapp_utils.zapi.NaElement("advanced-rights")
+
+            for right in self.parameters['advanced_access_rights']:
+                access_rights_obj.add_new_child('advanced-access-rights', right)
+
+            dacl_obj.add_child_elem(access_rights_obj)
+
+        if self.parameters.get('rights'):
+            dacl_obj.add_new_child("rights", self.parameters['rights'])
+
+        try:
+            self.server.invoke_successfully(dacl_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying %s DACL for account %s for security descriptor %s: %s' % (
+                self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'],
+                to_native(error)), exception=traceback.format_exc())
+
+    def apply(self):
+        """
+        Compare the current DACL with the desired state and create, delete
+        or modify it accordingly (honoring check mode).
+        """
+        current, modify = self.get_dacl(), None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None and self.parameters['state'] == 'present':
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+        if self.na_helper.changed:
+            if self.module.check_mode:
+                pass
+            else:
+                if cd_action == 'create':
+                    self.add_dacl()
+                elif cd_action == 'delete':
+                    self.remove_dacl()
+                elif modify:
+                    self.modify_dacl()
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Create the NetApp ONTAP NTFS DACL object and apply the requested state.
+    """
+    obj = NetAppOntapNtfsDacl()
+    obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py
new file mode 100644
index 000000000..d0abf1e57
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py
@@ -0,0 +1,288 @@
+#!/usr/bin/python
+
+# (c) 2020-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = """
+
+module: na_ontap_ntfs_sd
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP create, delete or modify NTFS security descriptor
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+ - Create, modify or destroy NTFS security descriptor
+
+options:
+ state:
+ description:
+ - Whether the specified NTFS security descriptor should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the NTFS security descriptor.
+ required: true
+ type: str
+
+ name:
+ description:
+ - Specifies the NTFS security descriptor name. Not modifiable.
+ required: true
+ type: str
+
+ owner:
+ description:
+ - Specifies the owner's SID or domain account of the NTFS security descriptor.
+ - Need to provide the full path of the owner.
+ type: str
+
+ group:
+ description:
+ - Specifies the group's SID or domain account of the NTFS security descriptor.
+ - Need to provide the full path of the group.
+ required: false
+ type: str
+
+ control_flags_raw:
+ description:
+ - Specifies the security descriptor control flags.
+ - 1... .... .... .... = Self Relative
+ - .0.. .... .... .... = RM Control Valid
+ - ..0. .... .... .... = SACL Protected
+ - ...0 .... .... .... = DACL Protected
+ - .... 0... .... .... = SACL Inherited
+ - .... .0.. .... .... = DACL Inherited
+ - .... ..0. .... .... = SACL Inherit Required
+ - .... ...0 .... .... = DACL Inherit Required
+ - .... .... ..0. .... = SACL Defaulted
+ - .... .... ...0 .... = SACL Present
+ - .... .... .... 0... = DACL Defaulted
+ - .... .... .... .1.. = DACL Present
+ - .... .... .... ..0. = Group Defaulted
+ - .... .... .... ...0 = Owner Defaulted
+ - At present only the following flags are honored. Others are ignored.
+ - ..0. .... .... .... = SACL Protected
+ - ...0 .... .... .... = DACL Protected
+ - .... .... ..0. .... = SACL Defaulted
+ - .... .... .... 0... = DACL Defaulted
+ - .... .... .... ..0. = Group Defaulted
+ - .... .... .... ...0 = Owner Defaulted
+ - Convert the 16 bit binary flags and convert to decimal for the input.
+ type: int
+
+"""
+
+EXAMPLES = """
+ - name: Create NTFS Security Descriptor
+ na_ontap_ntfs_sd:
+ state: present
+ vserver: SVM1
+ name: ansible_sd
+ owner: DOMAIN\\Account
+ group: DOMAIN\\Group
+ control_flags_raw: 0
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Modify NTFS Security Descriptor
+ na_ontap_ntfs_sd:
+ state: present
+ vserver: SVM1
+ name: ansible_sd
+ owner: DOMAIN\\Account
+ group: DOMAIN\\Group
+ control_flags_raw: 0
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete NTFS Security Descriptor
+ na_ontap_ntfs_sd:
+ state: absent
+ vserver: SVM1
+ name: ansible_sd
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapNtfsSd(object):
+    """
+    Creates, Modifies and Destroys a NTFS security descriptor
+    """
+
+    def __init__(self):
+        """
+        Initialize the Ontap NTFS Security Descriptor class: build the
+        argument spec, validate parameters, and set up the ZAPI connection.
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            name=dict(required=True, type='str'),
+            owner=dict(required=False, type='str'),
+            group=dict(required=False, type='str'),
+            control_flags_raw=dict(required=False, type='int'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True,
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        # This module is ZAPI-only: fail early if netapp_lib is missing.
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg='The python NetApp-Lib module is required')
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_ntfs_sd(self):
+        """
+        Fetch the NTFS security descriptor matching vserver and name.
+        :return: dict describing the security descriptor, or None if not found.
+        """
+
+        ntfs_sd_entry, result = None, None
+
+        ntfs_sd_get_iter = netapp_utils.zapi.NaElement('file-directory-security-ntfs-get-iter')
+        ntfs_sd_info = netapp_utils.zapi.NaElement('file-directory-security-ntfs')
+        ntfs_sd_info.add_new_child('vserver', self.parameters['vserver'])
+        ntfs_sd_info.add_new_child('ntfs-sd', self.parameters['name'])
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(ntfs_sd_info)
+        ntfs_sd_get_iter.add_child_elem(query)
+        try:
+            result = self.server.invoke_successfully(ntfs_sd_get_iter, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching NTFS security descriptor %s : %s'
+                                      % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            attributes_list = result.get_child_by_name('attributes-list')
+            ntfs_sd = attributes_list.get_child_by_name('file-directory-security-ntfs')
+            ntfs_sd_entry = {
+                'vserver': ntfs_sd.get_child_content('vserver'),
+                'name': ntfs_sd.get_child_content('ntfs-sd'),
+                'owner': ntfs_sd.get_child_content('owner'),
+                'group': ntfs_sd.get_child_content('group'),
+                'control_flags_raw': ntfs_sd.get_child_content('control-flags-raw'),
+            }
+            # ZAPI returns all values as strings; convert to int to match the module parameter type.
+            if ntfs_sd_entry.get('control_flags_raw'):
+                ntfs_sd_entry['control_flags_raw'] = int(ntfs_sd_entry['control_flags_raw'])
+            return ntfs_sd_entry
+        return None
+
+    def add_ntfs_sd(self):
+        """
+        Adds a new NTFS security descriptor
+        """
+
+        ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-create")
+        ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name'])
+
+        # control_flags_raw may legitimately be 0, so test against None rather than truthiness.
+        if self.parameters.get('control_flags_raw') is not None:
+            ntfs_sd_obj.add_new_child("control-flags-raw", str(self.parameters['control_flags_raw']))
+
+        if self.parameters.get('owner'):
+            ntfs_sd_obj.add_new_child("owner", self.parameters['owner'])
+
+        if self.parameters.get('group'):
+            ntfs_sd_obj.add_new_child("group", self.parameters['group'])
+
+        try:
+            self.server.invoke_successfully(ntfs_sd_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(
+                msg='Error creating NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)),
+                exception=traceback.format_exc())
+
+    def remove_ntfs_sd(self):
+        """
+        Deletes a NTFS security descriptor
+        """
+        ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-delete")
+        ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name'])
+        try:
+            self.server.invoke_successfully(ntfs_sd_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def modify_ntfs_sd(self):
+        """
+        Modifies a NTFS security descriptor
+        """
+
+        ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-modify")
+        ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name'])
+
+        # control_flags_raw may legitimately be 0, so test against None rather than truthiness.
+        if self.parameters.get('control_flags_raw') is not None:
+            ntfs_sd_obj.add_new_child('control-flags-raw', str(self.parameters['control_flags_raw']))
+
+        if self.parameters.get('owner'):
+            ntfs_sd_obj.add_new_child('owner', self.parameters['owner'])
+
+        if self.parameters.get('group'):
+            ntfs_sd_obj.add_new_child('group', self.parameters['group'])
+
+        try:
+            self.server.invoke_successfully(ntfs_sd_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(
+                msg='Error modifying NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)),
+                exception=traceback.format_exc())
+
+    def apply(self):
+        """
+        Compare the current security descriptor with the desired state and
+        create, delete or modify it accordingly (honoring check mode).
+        """
+        current, modify = self.get_ntfs_sd(), None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None and self.parameters['state'] == 'present':
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.add_ntfs_sd()
+            elif cd_action == 'delete':
+                self.remove_ntfs_sd()
+            elif modify:
+                self.modify_ntfs_sd()
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Creates, deletes and modifies NTFS security descriptor
+    """
+    obj = NetAppOntapNtfsSd()
+    obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py
new file mode 100644
index 000000000..dc8a000e8
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: na_ontap_ntp
+short_description: NetApp ONTAP NTP server
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create or delete or modify NTP server in ONTAP
+options:
+ state:
+ description:
+ - Whether the specified NTP server should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+ server_name:
+ description:
+ - The name of the NTP server to manage.
+ required: True
+ type: str
+ version:
+ description:
+    - Specifies the NTP protocol version to use with this NTP server.
+ choices: ['auto', '3', '4']
+ default: 'auto'
+ type: str
+ key_id:
+ description:
+ - The symmetric authentication key ID being used for this time server.
+ type: int
+ version_added: 21.21.0
+"""
+
+EXAMPLES = """
+ - name: Create NTP server
+ na_ontap_ntp:
+ state: present
+ version: auto
+ key_id: 1
+ server_name: "{{ server_name }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete NTP server
+ na_ontap_ntp:
+ state: absent
+ server_name: "{{ server_name }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapNTPServer:
+    """ object initialize and class methods """
+    def __init__(self):
+        """
+        Build the argument spec, validate parameters, and select REST or ZAPI.
+        REST requires ONTAP 9.7+; otherwise the module falls back to ZAPI.
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            server_name=dict(required=True, type='str'),
+            version=dict(required=False, type='str', default='auto',
+                         choices=['auto', '3', '4']),
+            key_id=dict(required=False, type='int'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 7):
+            msg = 'REST requires ONTAP 9.7 or later for na_ontap_ntp'
+            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg="the python NetApp-Lib module is required")
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def get_ntp_server(self):
+        """
+        Return details about the ntp server
+        :param:
+            name : Name of the server_name
+        :return: Details about the ntp server. None if not found.
+        :rtype: dict
+        """
+        if self.use_rest:
+            return self.get_ntp_server_rest()
+        ntp_iter = netapp_utils.zapi.NaElement('ntp-server-get-iter')
+        ntp_info = netapp_utils.zapi.NaElement('ntp-server-info')
+        ntp_info.add_new_child('server-name', self.parameters['server_name'])
+
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(ntp_info)
+
+        ntp_iter.add_child_elem(query)
+        result = self.server.invoke_successfully(ntp_iter, True)
+        return_value = None
+
+        if result.get_child_by_name('num-records') and \
+                int(result.get_child_content('num-records')) == 1:
+
+            ntp_server_name = result.get_child_by_name('attributes-list').\
+                get_child_by_name('ntp-server-info').\
+                get_child_content('server-name')
+            server_version = result.get_child_by_name('attributes-list').\
+                get_child_by_name('ntp-server-info').\
+                get_child_content('version')
+            server_key_id = result.get_child_by_name('attributes-list').\
+                get_child_by_name('ntp-server-info').\
+                get_child_content('key-id')
+            return_value = {
+                'server-name': ntp_server_name,
+                'version': server_version,
+                # ZAPI returns strings; normalize a missing key id to 0.
+                'key_id': int(server_key_id) if server_key_id is not None else 0,
+            }
+
+        return return_value
+
+    def get_ntp_server_rest(self):
+        """Return server, version and key id for the NTP server using REST, or None if not found."""
+        api = 'cluster/ntp/servers'
+        options = {'server': self.parameters['server_name'],
+                   'fields': 'server,version,key.id'}
+        record, error = rest_generic.get_one_record(self.rest_api, api, options)
+        if error:
+            self.module.fail_json(msg=error)
+        if record:
+            return {
+                'server': self.na_helper.safe_get(record, ['server']),
+                'version': self.na_helper.safe_get(record, ['version']),
+                'key_id': self.na_helper.safe_get(record, ['key', 'id']),
+            }
+        return None
+
+    def create_ntp_server(self):
+        """
+        create ntp server.
+        """
+        if self.use_rest:
+            return self.create_ntp_server_rest()
+        ntp_server_create = netapp_utils.zapi.NaElement.create_node_with_children(
+            'ntp-server-create', **{'server-name': self.parameters['server_name'],
+                                    'version': self.parameters['version']
+                                    })
+        if self.parameters.get('key_id'):
+            ntp_server_create.add_new_child("key-id", str(self.parameters['key_id']))
+
+        try:
+            self.server.invoke_successfully(ntp_server_create,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating ntp server %s: %s'
+                                  % (self.parameters['server_name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def create_ntp_server_rest(self):
+        """Create the NTP server using REST."""
+        api = 'cluster/ntp/servers'
+        params = {
+            'server': self.parameters['server_name'],
+            'version': self.parameters['version']
+        }
+        if self.parameters.get('key_id'):
+            params['key'] = {'id': self.parameters['key_id']}
+        dummy, error = rest_generic.post_async(self.rest_api, api, params)
+        if error:
+            self.module.fail_json(msg=error)
+
+    def delete_ntp_server(self):
+        """
+        delete ntp server.
+        """
+        if self.use_rest:
+            return self.delete_ntp_server_rest()
+        ntp_server_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+            'ntp-server-delete', **{'server-name': self.parameters['server_name']})
+
+        try:
+            self.server.invoke_successfully(ntp_server_delete,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting ntp server %s: %s'
+                                  % (self.parameters['server_name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_ntp_server_rest(self):
+        """Delete the NTP server using REST."""
+        dummy, error = rest_generic.delete_async(self.rest_api, 'cluster/ntp/servers', self.parameters['server_name'])
+        if error:
+            self.module.fail_json(msg=error)
+
+    def modify_ntp_server(self, modify):
+        """
+        modify the ntp server
+        """
+        if self.use_rest:
+            return self.modify_ntp_server_rest(modify)
+        # NOTE: with ZAPI the current 'version' parameter is always sent,
+        # even when only key_id changed.
+        ntp_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+            'ntp-server-modify',
+            **{'server-name': self.parameters['server_name'], 'version': self.parameters['version']})
+        if modify.get('key_id'):
+            ntp_modify.add_new_child("key-id", str(self.parameters['key_id']))
+        try:
+            self.server.invoke_successfully(ntp_modify,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying version for ntp server %s: %s'
+                                  % (self.parameters['server_name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def modify_ntp_server_rest(self, modify):
+        """Modify version and/or key id of the NTP server using REST; only changed fields are patched."""
+        body = {}
+        if modify.get('version'):
+            body['version'] = modify['version']
+        if modify.get('key_id'):
+            body['key'] = {'id': modify['key_id']}
+        if body:
+            dummy, error = rest_generic.patch_async(self.rest_api, 'cluster/ntp/servers', self.parameters['server_name'], body)
+            if error:
+                self.module.fail_json(msg=error)
+
+    def apply(self):
+        """Apply action to ntp-server"""
+
+        modify = None
+        current = self.get_ntp_server()
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None and self.parameters['state'] == 'present':
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.create_ntp_server()
+            elif cd_action == 'delete':
+                self.delete_ntp_server()
+            elif modify:
+                self.modify_ntp_server(modify)
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """ Instantiate the NTP server object and apply the requested state """
+    ntp_obj = NetAppOntapNTPServer()
+    ntp_obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp_key.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp_key.py
new file mode 100644
index 000000000..cba2754dc
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp_key.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: na_ontap_ntp_key
+short_description: NetApp ONTAP NTP key
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.21.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create or delete or modify NTP key in ONTAP
+options:
+ state:
+ description:
+ - Whether the specified NTP key should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+ id:
+ description:
+ - NTP symmetric authentication key ID. The ID must be in the range 1 to 65535.
+ required: True
+ type: int
+ digest_type:
+ description:
+ - NTP symmetric authentication key type. Only SHA1 currently supported.
+ choices: ['sha1']
+ type: str
+ required: True
+ value:
+ description:
+ - NTP symmetric authentication key value. The value must be exactly 40 hexadecimal digits for SHA1 keys.
+ type: str
+ required: True
+"""
+
+EXAMPLES = """
+ - name: Create NTP key
+ na_ontap_ntp_key:
+ state: present
+ digest_type: sha1
+ value: "{{ key_value }}"
+ id: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete NTP key
+ na_ontap_ntp_key:
+ state: absent
+ id: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapNTPKey:
    """Create, modify or delete an NTP symmetric authentication key (REST only, ONTAP 9.7+)."""

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            id=dict(required=True, type='int'),
            digest_type=dict(required=True, type='str', choices=['sha1']),
            value=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # This module has no ZAPI fallback: REST with ONTAP 9.7 or later is mandatory.
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_ntp_key', 9, 7)

    def get_ntp_key(self):
        """Return the NTP key record matching the requested id, or None when absent."""
        api = 'cluster/ntp/keys'
        # NOTE(review): 'value' is requested in fields, but ONTAP may not expose key
        # values on GET — confirm; if it is never returned, 'value' idempotency cannot work.
        options = {'id': self.parameters['id'],
                   'fields': 'id,digest_type,value'}
        record, error = rest_generic.get_one_record(self.rest_api, api, options)
        if error:
            self.module.fail_json(msg='Error fetching key with id %s: %s' % (self.parameters['id'], to_native(error)),
                                  exception=traceback.format_exc())
        return record

    def create_ntp_key(self):
        """Create the NTP key with the requested id, digest type and value."""
        api = 'cluster/ntp/keys'
        params = {
            'id': self.parameters['id'],
            'digest_type': self.parameters['digest_type'],
            'value': self.parameters['value']
        }
        dummy, error = rest_generic.post_async(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg='Error creating key with id %s: %s' % (self.parameters['id'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_ntp_key(self):
        """Delete the NTP key identified by id."""
        dummy, error = rest_generic.delete_async(self.rest_api, 'cluster/ntp/keys', str(self.parameters['id']))
        if error:
            self.module.fail_json(msg='Error deleting key with id %s: %s' % (self.parameters['id'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_ntp_key(self, modify):
        """PATCH digest_type and/or value when they differ from the current record."""
        body = {}
        if 'digest_type' in modify:
            body['digest_type'] = self.parameters['digest_type']
        if 'value' in modify:
            body['value'] = self.parameters['value']
        if body:
            dummy, error = rest_generic.patch_async(self.rest_api, 'cluster/ntp/keys', str(self.parameters['id']), body)
            if error:
                self.module.fail_json(msg='Error modifying key with id %s: %s' % (self.parameters['id'], to_native(error)),
                                      exception=traceback.format_exc())

    def apply(self):
        """Determine the required action (create/delete/modify), execute it, and exit."""
        current = self.get_ntp_key()
        # dropped the redundant 'cd_action = None' the original assigned just before this
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_ntp_key()
            elif cd_action == 'delete':
                self.delete_ntp_key()
            elif modify:
                self.modify_ntp_key(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Instantiate the NTP key module object and run the requested action."""
    key_module = NetAppOntapNTPKey()
    key_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py
new file mode 100644
index 000000000..3026a1781
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete NVMe Service
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_nvme
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified NVMe should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ status_admin:
+ description:
+ - Whether the status of NVMe should be up or down
+ type: bool
+short_description: "NetApp ONTAP Manage NVMe Service"
+version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create NVMe
+ netapp.ontap.na_ontap_nvme:
+ state: present
+ status_admin: False
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Modify NVMe
+ netapp.ontap.na_ontap_nvme:
+ state: present
+ status_admin: True
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete NVMe
+ netapp.ontap.na_ontap_nvme:
+ state: absent
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppONTAPNVMe:
    """
    Create, modify (status_admin) or delete the NVMe service on a vserver.

    Uses REST when the cluster supports it, otherwise falls back to ZAPI.
    """

    def __init__(self):
        # SVM UUID is discovered by get_nvme_rest() and reused for PATCH/DELETE.
        self.svm_uuid = None
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            status_admin=dict(required=False, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_nvme(self):
        """
        Get current NVMe service details.

        :return: dict with 'status_admin' if the service exists, None otherwise
        """
        if self.use_rest:
            return self.get_nvme_rest()
        nvme_get = netapp_utils.zapi.NaElement('nvme-get-iter')
        query = {
            'query': {
                'nvme-target-service-info': {
                    'vserver': self.parameters['vserver']
                }
            }
        }
        nvme_get.translate_struct(query)
        try:
            result = self.server.invoke_successfully(nvme_get, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching nvme info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            attributes_list = result.get_child_by_name('attributes-list')
            nvme_info = attributes_list.get_child_by_name('nvme-target-service-info')
            # ZAPI reports availability as a 'true'/'false' string; convert to bool
            return {'status_admin': self.na_helper.get_value_for_bool(True, nvme_info.get_child_content('is-available'))}
        return None

    def create_nvme(self):
        """Create the NVMe service on the vserver."""
        if self.use_rest:
            return self.create_nvme_rest()
        nvme_create = netapp_utils.zapi.NaElement('nvme-create')
        if self.parameters.get('status_admin') is not None:
            options = {'is-available': self.na_helper.get_value_for_bool(False, self.parameters['status_admin'])}
            nvme_create.translate_struct(options)
        try:
            self.server.invoke_successfully(nvme_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating nvme for vserver %s: %s'
                                      % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_nvme(self):
        """Delete the NVMe service from the vserver."""
        if self.use_rest:
            return self.delete_nvme_rest()
        nvme_delete = netapp_utils.zapi.NaElement('nvme-delete')
        try:
            self.server.invoke_successfully(nvme_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting nvme for vserver %s: %s'
                                      % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_nvme(self, status=None):
        """
        Modify the NVMe service administrative status.

        :param status: desired status; defaults to the 'status_admin' parameter.
                       apply() passes the ZAPI string 'false' when disabling before delete.
        """
        if status is None:
            status = self.parameters['status_admin']
        if self.use_rest:
            return self.modify_nvme_rest(status)
        options = {'is-available': status}
        nvme_modify = netapp_utils.zapi.NaElement('nvme-modify')
        nvme_modify.translate_struct(options)
        try:
            self.server.invoke_successfully(nvme_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying nvme for vserver %s: %s'
                                      % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_nvme_rest(self):
        """Return the REST service record (with 'enabled' renamed to 'status_admin'), or None; caches the SVM UUID."""
        api = 'protocols/nvme/services'
        params = {'svm.name': self.parameters['vserver'], 'fields': 'enabled'}
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            # include the underlying REST error for consistency with the other error paths
            self.module.fail_json(msg='Error fetching nvme info for vserver %s: %s'
                                      % (self.parameters['vserver'], to_native(error)))
        if record:
            self.svm_uuid = record['svm']['uuid']
            # expose the REST 'enabled' field under the module's 'status_admin' name
            record['status_admin'] = record.pop('enabled')
            return record
        return None

    def create_nvme_rest(self):
        """Create the NVMe service via REST."""
        api = 'protocols/nvme/services'
        body = {'svm.name': self.parameters['vserver']}
        if self.parameters.get('status_admin') is not None:
            body['enabled'] = self.parameters['status_admin']
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg='Error creating nvme for vserver %s: %s' % (self.parameters['vserver'],
                                                                                  to_native(error)),
                                  exception=traceback.format_exc())

    def delete_nvme_rest(self):
        """Delete the NVMe service via REST, addressed by the cached SVM UUID."""
        api = 'protocols/nvme/services'
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.svm_uuid)
        if error:
            self.module.fail_json(msg='Error deleting nvme for vserver %s: %s' % (self.parameters['vserver'],
                                                                                  to_native(error)),
                                  exception=traceback.format_exc())

    def modify_nvme_rest(self, status):
        """PATCH the service 'enabled' flag; accepts the ZAPI-style 'false' string used before delete."""
        if status == 'false':
            status = False
        api = 'protocols/nvme/services'
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, {'enabled': status})
        if error:
            # include the underlying REST error for consistency with the other error paths
            self.module.fail_json(msg='Error modifying nvme for vserver %s: %s'
                                      % (self.parameters['vserver'], to_native(error)))

    def apply(self):
        """Determine the required action, execute it (honoring check mode), and exit."""
        modify = None
        current = self.get_nvme()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.parameters.get('status_admin') is not None:
            if cd_action is None and self.parameters['state'] == 'present':
                modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_nvme()
            elif cd_action == 'delete':
                # NVMe status_admin needs to be down before deleting it
                self.modify_nvme('false')
                self.delete_nvme()
            elif modify:
                self.modify_nvme()
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Instantiate the NVMe service module and apply the requested state."""
    nvme_service = NetAppONTAPNVMe()
    nvme_service.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py
new file mode 100644
index 000000000..d328c4a0c
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete NVME namespace
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_nvme_namespace
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified namespace should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ ostype:
+ description:
+ - Specifies the ostype for initiators
+ choices: ['windows', 'linux', 'vmware', 'xen', 'hyper_v']
+ type: str
+ size:
+ description:
+ - Size in bytes.
+ Range is [0..2^63-1].
+ type: int
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ default: 'b'
+ path:
+ description:
+ - Namespace path.
+ required: true
+ type: str
+ block_size:
+ description:
+ - Size in bytes of a logical block. Possible values are 512 (Data ONTAP 9.6 and later), 4096. The default value is 4096.
+ choices: [512, 4096]
+ type: int
+ version_added: '20.5.0'
+short_description: "NetApp ONTAP Manage NVME Namespace"
+version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create NVME Namespace
+ netapp.ontap.na_ontap_nvme_namespace:
+ state: present
+ ostype: linux
+ path: /vol/ansible/test
+ size: 20
+ size_unit: mb
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Create NVME Namespace (Idempotency)
+ netapp.ontap.na_ontap_nvme_namespace:
+ state: present
+ ostype: linux
+ path: /vol/ansible/test
+ size: 20
+ size_unit: mb
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppONTAPNVMENamespace:
    """
    Create or delete an NVMe namespace on a vserver.

    Uses REST when the cluster supports it, otherwise falls back to ZAPI.
    """

    def __init__(self):

        # namespace UUID is discovered by get_namespace_rest() and reused for DELETE
        self.namespace_uuid = None
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            ostype=dict(required=False, type='str', choices=['windows', 'linux', 'vmware', 'xen', 'hyper_v']),
            path=dict(required=True, type='str'),
            size=dict(required=False, type='int'),
            size_unit=dict(default='b', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], type='str'),
            block_size=dict(required=False, choices=[512, 4096], type='int')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[('state', 'present', ['ostype', 'size'])],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        # normalize the requested size to bytes using the size_unit multiplier
        if self.parameters.get('size'):
            self.parameters['size'] = self.parameters['size'] * \
                netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']]

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_namespace(self):
        """
        Get current namespace details.

        :return: ZAPI result / REST record if the namespace exists, None otherwise
        """
        if self.use_rest:
            return self.get_namespace_rest()
        namespace_get = netapp_utils.zapi.NaElement('nvme-namespace-get-iter')
        query = {
            'query': {
                'nvme-namespace-info': {
                    'path': self.parameters['path'],
                    'vserver': self.parameters['vserver']
                }
            }
        }
        namespace_get.translate_struct(query)
        try:
            result = self.server.invoke_successfully(namespace_get, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching namespace info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            return result
        return None

    def create_namespace(self):
        """Create the NVMe namespace."""
        if self.use_rest:
            return self.create_namespace_rest()
        options = {'path': self.parameters['path'],
                   'ostype': self.parameters['ostype'],
                   'size': self.parameters['size']
                   }
        if self.parameters.get('block_size'):
            options['block-size'] = self.parameters['block_size']
        namespace_create = netapp_utils.zapi.NaElement('nvme-namespace-create')
        namespace_create.translate_struct(options)
        try:
            self.server.invoke_successfully(namespace_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating namespace for path %s: %s'
                                      % (self.parameters.get('path'), to_native(error)),
                                  exception=traceback.format_exc())

    def delete_namespace(self):
        """Delete the NVMe namespace."""
        if self.use_rest:
            return self.delete_namespace_rest()
        options = {'path': self.parameters['path']
                   }
        namespace_delete = netapp_utils.zapi.NaElement.create_node_with_children('nvme-namespace-delete', **options)
        try:
            self.server.invoke_successfully(namespace_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting namespace for path %s: %s'
                                      % (self.parameters.get('path'), to_native(error)),
                                  exception=traceback.format_exc())

    def get_namespace_rest(self):
        """Return the REST namespace record, or None; caches the namespace UUID for delete."""
        api = 'storage/namespaces'
        # NOTE(review): 'fields': 'enabled' looks copied from the NVMe services API;
        # 'enabled' is not an obvious namespace field — confirm the intended field list.
        params = {
            'svm.name': self.parameters['vserver'],
            'name': self.parameters['path'],
            'fields': 'enabled'
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            # include the underlying REST error for consistency with the other error paths
            self.module.fail_json(msg='Error fetching namespace info for vserver %s: %s'
                                      % (self.parameters['vserver'], to_native(error)))
        if record:
            self.namespace_uuid = record['uuid']
            return record
        return None

    def create_namespace_rest(self):
        """Create the NVMe namespace via REST."""
        api = 'storage/namespaces'
        body = {'svm.name': self.parameters['vserver'],
                'os_type': self.parameters['ostype'],
                'name': self.parameters['path'],
                'space.size': self.parameters['size']}
        if self.parameters.get('block_size') is not None:
            body['space.block_size'] = self.parameters['block_size']
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg='Error creating namespace for vserver %s: %s' % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_namespace_rest(self):
        """Delete the NVMe namespace via REST, addressed by the cached UUID."""
        api = 'storage/namespaces'
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.namespace_uuid)
        if error:
            self.module.fail_json(msg='Error deleting namespace for vserver %s: %s' % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Create or delete the namespace as required (no modify support), then exit."""
        current = self.get_namespace()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_namespace()
            elif cd_action == 'delete':
                self.delete_namespace()
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
        self.module.exit_json(**result)
+
+
def main():
    """Instantiate the NVMe namespace module and apply the requested state."""
    namespace_module = NetAppONTAPNVMENamespace()
    namespace_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py
new file mode 100644
index 000000000..7d76a81a7
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete NVME subsystem
+ - Associate(modify) host/map to NVME subsystem
+ - NVMe service should be existing in the data vserver with NVMe protocol as a pre-requisite
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_nvme_subsystem
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified subsystem should exist or not.
+ default: present
+ type: str
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+ subsystem:
+ description:
+ - Specifies the subsystem
+ required: true
+ type: str
+ ostype:
+ description:
+ - Specifies the ostype for initiators
+ choices: ['windows', 'linux', 'vmware', 'xen', 'hyper_v']
+ type: str
+ skip_host_check:
+ description:
+ - Skip host check
+ - Required to delete an NVMe Subsystem with attached NVMe namespaces
+ default: false
+ type: bool
+ skip_mapped_check:
+ description:
+ - Skip mapped namespace check
+ - Required to delete an NVMe Subsystem with attached NVMe namespaces
+ default: false
+ type: bool
+ hosts:
+ description:
+ - List of host NQNs (NVMe Qualification Name) associated to the controller.
+ type: list
+ elements: str
+ paths:
+ description:
+ - List of Namespace paths to be associated with the subsystem.
+ type: list
+ elements: str
+short_description: "NetApp ONTAP Manage NVME Subsystem"
+version_added: 2.8.0
+'''
+
+EXAMPLES = """
+
+ - name: Create NVME Subsystem
+ netapp.ontap.na_ontap_nvme_subsystem:
+ state: present
+ subsystem: test_sub
+ vserver: test_dest
+ ostype: linux
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete NVME Subsystem
+ netapp.ontap.na_ontap_nvme_subsystem:
+ state: absent
+ subsystem: test_sub
+ vserver: test_dest
+ skip_host_check: True
+ skip_mapped_check: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Associate NVME Subsystem host/map
+ netapp.ontap.na_ontap_nvme_subsystem:
+ state: present
+ subsystem: "{{ subsystem }}"
+ ostype: linux
+ hosts: nqn.1992-08.com.netapp:sn.3017cfc1e2ba11e89c55005056b36338:subsystem.ansible
+ paths: /vol/ansible/test,/vol/ansible/test1
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Modify NVME subsystem map
+ netapp.ontap.na_ontap_nvme_subsystem:
+ state: present
+ subsystem: test_sub
+ vserver: test_dest
+ skip_host_check: True
+ skip_mapped_check: True
+ paths: /vol/ansible/test
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppONTAPNVMESubsystem:
+ """
+ Class with NVME subsytem methods
+ """
+
    def __init__(self):
        """Define the argument spec, collect parameters, and select REST vs ZAPI transport."""

        # subsystem UUID is discovered by get_subsystem_rest() and reused by the REST host/map calls
        self.subsystem_uuid = None
        self.namespace_list = []
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            subsystem=dict(required=True, type='str'),
            ostype=dict(required=False, type='str', choices=['windows', 'linux', 'vmware', 'xen', 'hyper_v']),
            skip_host_check=dict(required=False, type='bool', default=False),
            skip_mapped_check=dict(required=False, type='bool', default=False),
            hosts=dict(required=False, type='list', elements='str'),
            paths=dict(required=False, type='list', elements='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        # note: unlike the sibling NVMe modules, the helper is given the module here
        self.na_helper = NetAppModule(self.module)
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        # ZAPI fallback requires the netapp-lib package and a vserver-scoped connection
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
    def get_subsystem(self):
        """
        Check whether the subsystem exists.
        :return: True if the subsystem exists (ZAPI path), the REST record via
                 get_subsystem_rest when REST is in use, None otherwise
        """
        if self.use_rest:
            return self.get_subsystem_rest()
        result = self.get_zapi_info('nvme-subsystem-get-iter', 'nvme-subsystem-info')
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            return True
        return None
+
+ def create_subsystem(self):
+ """
+ Create a NVME Subsystem
+ """
+ if self.use_rest:
+ return self.create_subsystem_rest()
+ options = {'subsystem': self.parameters['subsystem'],
+ 'ostype': self.parameters['ostype']
+ }
+ subsystem_create = netapp_utils.zapi.NaElement('nvme-subsystem-create')
+ subsystem_create.translate_struct(options)
+ try:
+ self.server.invoke_successfully(subsystem_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating subsystem for %s: %s'
+ % (self.parameters.get('subsystem'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_subsystem(self):
+ """
+ Delete a NVME subsystem
+ """
+ if self.use_rest:
+ return self.delete_subsystem_rest()
+ options = {'subsystem': self.parameters['subsystem'],
+ 'skip-host-check': 'true' if self.parameters.get('skip_host_check') else 'false',
+ 'skip-mapped-check': 'true' if self.parameters.get('skip_mapped_check') else 'false',
+ }
+ subsystem_delete = netapp_utils.zapi.NaElement.create_node_with_children('nvme-subsystem-delete', **options)
+ try:
+ self.server.invoke_successfully(subsystem_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting subsystem for %s: %s'
+ % (self.parameters.get('subsystem'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def get_subsystem_host_map(self, type):
+ """
+ Get current subsystem host details
+ :return: list if host exists, None otherwise
+ """
+ if type == 'hosts':
+ zapi_get, zapi_info, zapi_type = 'nvme-subsystem-host-get-iter', 'nvme-target-subsystem-host-info', 'host-nqn'
+ elif type == 'paths':
+ zapi_get, zapi_info, zapi_type = 'nvme-subsystem-map-get-iter', 'nvme-target-subsystem-map-info', 'path'
+ result = self.get_zapi_info(zapi_get, zapi_info, zapi_type)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attrs_list = result.get_child_by_name('attributes-list')
+ return_list = [item[zapi_type] for item in attrs_list.get_children()]
+ return {type: return_list}
+ return None
+
+ def get_zapi_info(self, zapi_get_method, zapi_info, zapi_type=None):
+ subsystem_get = netapp_utils.zapi.NaElement(zapi_get_method)
+ query = {
+ 'query': {
+ zapi_info: {
+ 'subsystem': self.parameters.get('subsystem'),
+ 'vserver': self.parameters.get('vserver')
+ }
+ }
+ }
+ subsystem_get.translate_struct(query)
+ qualifier = " %s" % zapi_type if zapi_type else ""
+ try:
+ result = self.server.invoke_successfully(subsystem_get, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching subsystem%s info: %s' % (qualifier, to_native(error)),
+ exception=traceback.format_exc())
+ return result
+
+ def add_subsystem_host_map(self, data, type):
+ """
+ Add a NVME Subsystem host/map
+ :param: data: list of hosts/paths to be added
+ :param: type: hosts/paths
+ """
+ if type == 'hosts':
+ zapi_add, zapi_type = 'nvme-subsystem-host-add', 'host-nqn'
+ elif type == 'paths':
+ zapi_add, zapi_type = 'nvme-subsystem-map-add', 'path'
+
+ for item in data:
+ options = {'subsystem': self.parameters['subsystem'],
+ zapi_type: item
+ }
+ subsystem_add = netapp_utils.zapi.NaElement.create_node_with_children(zapi_add, **options)
+ try:
+ self.server.invoke_successfully(subsystem_add, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding %s for subsystem %s: %s'
+ % (item, self.parameters.get('subsystem'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_subsystem_host_map(self, data, type):
+ """
+ Remove a NVME Subsystem host/map
+ :param: data: list of hosts/paths to be added
+ :param: type: hosts/paths
+ """
+ if type == 'hosts':
+ zapi_remove, zapi_type = 'nvme-subsystem-host-remove', 'host-nqn'
+ elif type == 'paths':
+ zapi_remove, zapi_type = 'nvme-subsystem-map-remove', 'path'
+
+ for item in data:
+ options = {'subsystem': self.parameters['subsystem'],
+ zapi_type: item
+ }
+ subsystem_remove = netapp_utils.zapi.NaElement.create_node_with_children(zapi_remove, **options)
+ try:
+ self.server.invoke_successfully(subsystem_remove, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing %s for subsystem %s: %s'
+ % (item, self.parameters.get('subsystem'), to_native(error)),
+ exception=traceback.format_exc())
+
+ def associate_host_map(self, types):
+ """
+ Check if there are hosts or paths to be associated with the subsystem
+ """
+ action_add_dict = {}
+ action_remove_dict = {}
+ for type in types:
+ current = None
+ if self.parameters.get(type):
+ if self.use_rest:
+ if self.subsystem_uuid:
+ current = self.get_subsystem_host_map_rest(type)
+ else:
+ current = self.get_subsystem_host_map(type)
+ if current:
+ add_items = self.na_helper.\
+ get_modified_attributes(current, self.parameters, get_list_diff=True).get(type)
+ remove_items = [item for item in current[type] if item not in self.parameters.get(type)]
+ else:
+ add_items = self.parameters[type]
+ remove_items = {}
+ if add_items:
+ action_add_dict[type] = add_items
+ self.na_helper.changed = True
+ if remove_items:
+ action_remove_dict[type] = remove_items
+ self.na_helper.changed = True
+ return action_add_dict, action_remove_dict
+
+ def modify_host_map(self, add_host_map, remove_host_map):
+ for type, data in sorted(add_host_map.items()):
+ if self.use_rest:
+ self.add_subsystem_host_map_rest(data, type)
+ else:
+ self.add_subsystem_host_map(data, type)
+ for type, data in sorted(remove_host_map.items()):
+ if self.use_rest:
+ self.remove_subsystem_host_map_rest(data, type)
+ else:
+ self.remove_subsystem_host_map(data, type)
+
+ def get_subsystem_rest(self):
+ api = 'protocols/nvme/subsystems'
+ params = {'svm.name': self.parameters['vserver'], 'name': self.parameters['subsystem']}
+ record, error = rest_generic.get_one_record(self.rest_api, api, params)
+ if error:
+ if self.na_helper.ignore_missing_vserver_on_delete(error):
+ return None
+ self.module.fail_json(msg='Error fetching subsystem info for vserver: %s, %s' % (self.parameters['vserver'], to_native(error)))
+ if record:
+ self.subsystem_uuid = record['uuid']
+ return record
+ return None
+
+ def get_subsystem_host_map_rest(self, type):
+ if type == 'hosts':
+ api = 'protocols/nvme/subsystems/%s/hosts' % self.subsystem_uuid
+ records, error = rest_generic.get_0_or_more_records(self.rest_api, api)
+ if error:
+ self.module.fail_json(msg='Error fetching subsystem host info for vserver: %s: %s' % (self.parameters['vserver'], to_native(error)))
+ if records is not None:
+ return {type: [record['nqn'] for record in records]}
+ return None
+ if type == 'paths':
+ api = 'protocols/nvme/subsystem-maps'
+ query = {'svm.name': self.parameters['vserver'], 'subsystem.name': self.parameters['subsystem']}
+ records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg='Error fetching subsystem map info for vserver: %s: %s' % (self.parameters['vserver'], to_native(error)))
+ if records is not None:
+ return_list = []
+ for each in records:
+ return_list.append(each['namespace']['name'])
+ self.namespace_list.append(each['namespace'])
+ return {type: return_list}
+ return None
+
+ def add_subsystem_host_map_rest(self, data, type):
+ if type == 'hosts':
+ records = [{'nqn': item} for item in data]
+ api = 'protocols/nvme/subsystems/%s/hosts' % self.subsystem_uuid
+ body = {'records': records}
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(
+ msg='Error adding %s for subsystem %s: %s' % (records, self.parameters['subsystem'], to_native(error)), exception=traceback.format_exc())
+ elif type == 'paths':
+ api = 'protocols/nvme/subsystem-maps'
+ for item in data:
+ body = {'subsystem.name': self.parameters['subsystem'],
+ 'svm.name': self.parameters['vserver'],
+ 'namespace.name': item
+ }
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(
+ msg='Error adding %s for subsystem %s: %s' % (item, self.parameters['subsystem'], to_native(error)), exception=traceback.format_exc())
+
+ def remove_subsystem_host_map_rest(self, data, type):
+ if type == 'hosts':
+ for item in data:
+ api = 'protocols/nvme/subsystems/%s/hosts/%s' % (self.subsystem_uuid, item)
+ dummy, error = rest_generic.delete_async(self.rest_api, api, None)
+ if error:
+ self.module.fail_json(msg='Error removing %s for subsystem %s: %s'
+ % (item, self.parameters['subsystem'], to_native(error)), exception=traceback.format_exc())
+ elif type == 'paths':
+ for item in data:
+ namespace_uuid = None
+ for each in self.namespace_list:
+ if each['name'] == item:
+ namespace_uuid = each['uuid']
+ api = 'protocols/nvme/subsystem-maps/%s/%s' % (self.subsystem_uuid, namespace_uuid)
+ body = {'subsystem.name': self.parameters['subsystem'],
+ 'svm.name': self.parameters['vserver'],
+ 'namespace.name': item
+ }
+ dummy, error = rest_generic.delete_async(self.rest_api, api, None, body=body)
+ if error:
+ self.module.fail_json(msg='Error removing %s for subsystem %s: %s'
+ % (item, self.parameters['subsystem'], to_native(error)), exception=traceback.format_exc())
+
+ def create_subsystem_rest(self):
+ api = 'protocols/nvme/subsystems'
+ body = {'svm.name': self.parameters['vserver'],
+ 'os_type': self.parameters['ostype'],
+ 'name': self.parameters['subsystem']}
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg='Error creating subsystem for vserver %s: %s' % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_subsystem_rest(self):
+ api = 'protocols/nvme/subsystems'
+ body = {'allow_delete_while_mapped': 'true' if self.parameters.get('skip_mapped_check') else 'false',
+ 'allow_delete_with_hosts': 'true' if self.parameters.get('skip_host_check') else 'false'}
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.subsystem_uuid, body=body)
+ if error:
+ self.module.fail_json(msg='Error deleting subsystem for vserver %s: %s' % (self.parameters['vserver'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Apply action to NVME subsystem
+ """
+ types = ['hosts', 'paths']
+ current = self.get_subsystem()
+ add_host_map, remove_host_map = dict(), dict()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action == 'create' and self.parameters.get('ostype') is None:
+ self.module.fail_json(msg="Error: Missing required parameter 'ostype' for creating subsystem")
+ if cd_action != 'delete' and self.parameters['state'] == 'present':
+ add_host_map, remove_host_map = self.associate_host_map(types)
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_subsystem()
+ self.get_subsystem()
+ self.modify_host_map(add_host_map, remove_host_map)
+ elif cd_action == 'delete':
+ self.delete_subsystem()
+ elif cd_action is None:
+ self.modify_host_map(add_host_map, remove_host_map)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+ self.module.exit_json(**result)
+
+
+def main():
+ """Execute action"""
+ community_obj = NetAppONTAPNVMESubsystem()
+ community_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py
new file mode 100644
index 000000000..32b3e7631
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+
+# (c) 2019-2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_object_store
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_object_store
+short_description: NetApp ONTAP manage object store config.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or delete object store config on ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified object store config should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the object store config to manage.
+ type: str
+
+ provider_type:
+ description:
+ - The name of the object store config provider.
+ type: str
+
+ server:
+ description:
+ - Fully qualified domain name of the object store config.
+ type: str
+
+ port:
+ description:
+ - Port number of the object store that ONTAP uses when establishing a connection.
+ type: int
+ version_added: 21.9.0
+
+ container:
+ description:
+ - Data bucket/container name used in S3 requests.
+ type: str
+
+ access_key:
+ description:
+ - Access key ID for AWS_S3 and SGWS provider types.
+ type: str
+
+ secret_password:
+ description:
+ - Secret access key for AWS_S3 and SGWS provider types.
+ type: str
+
+ certificate_validation_enabled:
+ description:
+ - Is SSL/TLS certificate validation enabled?
+ - If not specified, ONTAP will default to true.
+ type: bool
+ version_added: 21.9.0
+
+ ssl_enabled:
+ description:
+ - Is SSL enabled?
+ - If not specified, ONTAP will default to true.
+ type: bool
+ version_added: 21.9.0
+
+ change_password:
+ description:
+ - By default, the secret_password is used on create but ignored if the resource already exists.
+ - If set to true, the module always attempts to change the password as it cannot read the current value.
+ - When this is set to true, the module is not idempotent.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+ owner:
+ description:
+ - Owner of the target. Cannot be modified.
+ - With REST, allowed values are fabricpool or snapmirror. A target can be used by only one feature.
+ - With ZAPI, the only allowed value is fabricpool.
+ - If absent, fabricpool is assumed on creation.
+ type: str
+ version_added: 21.13.0
+'''
+
+EXAMPLES = """
+- name: object store Create
+ netapp.ontap.na_ontap_object_store:
+ state: present
+ name: ansible
+ provider_type: SGWS
+ server: abc
+ container: abc
+ access_key: s3.amazonaws.com
+ secret_password: abc
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+- name: object store Create
+ netapp.ontap.na_ontap_object_store:
+ state: absent
+ name: ansible
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapObjectStoreConfig():
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ change_password=dict(required=False, type='bool', default=False, no_log=False),
+ ))
+ # only fields that are readable through a GET request
+ rest_options = dict(
+ name=dict(required=True, type='str'),
+ provider_type=dict(required=False, type='str'),
+ server=dict(required=False, type='str'),
+ container=dict(required=False, type='str'),
+ access_key=dict(required=False, type='str', no_log=True),
+ port=dict(required=False, type='int'),
+ certificate_validation_enabled=dict(required=False, type='bool'),
+ ssl_enabled=dict(required=False, type='bool'),
+ owner=dict(required=False, type='str')
+ )
+ self.rest_get_fields = list(rest_options.keys())
+ # additional fields for POST/PATCH
+ rest_options.update(dict(
+ secret_password=dict(required=False, type='str', no_log=True),
+ ))
+ self.rest_all_fields = rest_options.keys()
+ self.argument_spec.update(rest_options)
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # API should be used for ONTAP 9.6 or higher, Zapi for lower version
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ elif not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ else:
+ if self.parameters.get('owner', 'fabricpool') != 'fabricpool':
+ self.module.fail_json(msg='Error: unsupported value for owner: %s when using ZAPI.' % self.parameters.get('owner'))
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_aggr_object_store(self):
+ """
+ Fetch details if object store config exists.
+ :return:
+ Dictionary of current details if object store config found
+ None if object store config is not found
+ """
+ if self.use_rest:
+ api = "cloud/targets"
+ query = {'name': self.parameters['name']}
+ fields = ','.join(self.rest_get_fields)
+ fields += ',uuid'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+ if error:
+ self.module.fail_json(msg='Error %s' % error)
+ return record
+ else:
+ aggr_object_store_get_iter = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-object-store-config-get', **{'object-store-name': self.parameters['name']})
+ try:
+ result = self.server.invoke_successfully(aggr_object_store_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ # Error 15661 denotes an object store not being found.
+ if to_native(error.code) == "15661":
+ return None
+ else:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+ info = self.na_helper.safe_get(result, ['attributes', 'aggr-object-store-config-info'])
+ if info:
+ zapi_to_rest = {
+ 'access_key': dict(key_list=['access-key'], convert_to=str),
+ 'certificate_validation_enabled': dict(key_list=['is-certificate-validation-enabled'], convert_to=bool),
+ 'container': dict(key_list=['s3-name'], convert_to=str),
+ 'name': dict(key_list=['object-store-name'], convert_to=str),
+ 'port': dict(key_list=['port'], convert_to=int),
+ 'provider_type': dict(key_list=['provider-type'], convert_to=str),
+ 'ssl_enabled': dict(key_list=['ssl-enabled'], convert_to=bool),
+ 'server': dict(key_list=['server'], convert_to=str)
+ }
+ results = {}
+ self.na_helper.zapi_get_attrs(info, zapi_to_rest, results)
+ return results
+ return None
+
+ def validate_and_build_body(self, modify=None):
+ if modify is None:
+ required_keys = set(['provider_type', 'server', 'container', 'access_key'])
+ if not required_keys.issubset(set(self.parameters.keys())):
+ self.module.fail_json(msg='Error provisioning object store %s: one of the following parameters are missing %s'
+ % (self.parameters['name'], ', '.join(required_keys)))
+ if not self.use_rest:
+ return None
+ params = self.parameters if modify is None else modify
+ body = {}
+ for key in (self.rest_all_fields):
+ if params.get(key) is not None:
+ body[key] = params[key]
+ if not modify and 'owner' not in body:
+ body['owner'] = 'fabricpool'
+ if modify and 'owner' in body:
+ self.module.fail_json(msg='Error modifying object store, owner cannot be changed. Found: %s.' % body['owner'])
+ return body
+
+ def create_aggr_object_store(self, body):
+ """
+ Create aggregate object store config
+ :return: None
+ """
+ if self.use_rest:
+ api = "cloud/targets"
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg='Error %s' % error)
+ else:
+ options = {'object-store-name': self.parameters['name'],
+ 'provider-type': self.parameters['provider_type'],
+ 'server': self.parameters['server'],
+ 's3-name': self.parameters['container'],
+ 'access-key': self.parameters['access_key']}
+ if self.parameters.get('secret_password'):
+ options['secret-password'] = self.parameters['secret_password']
+ if self.parameters.get('port') is not None:
+ options['port'] = str(self.parameters['port'])
+ if self.parameters.get('certificate_validation_enabled') is not None:
+ options['is-certificate-validation-enabled'] = str(self.parameters['certificate_validation_enabled']).lower()
+ if self.parameters.get('ssl_enabled') is not None:
+ options['ssl-enabled'] = str(self.parameters['ssl_enabled']).lower()
+ object_store_create = netapp_utils.zapi.NaElement.create_node_with_children('aggr-object-store-config-create', **options)
+
+ try:
+ self.server.invoke_successfully(object_store_create, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error provisioning object store config %s: %s"
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_aggr_object_store(self, body, uuid=None):
+ """
+ modify aggregate object store config
+ :return: None
+ """
+ api = "cloud/targets"
+ dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
+ if error:
+ self.module.fail_json(msg='Error %s' % error)
+
+ def delete_aggr_object_store(self, uuid=None):
+ """
+ Delete aggregate object store config
+ :return: None
+ """
+ if self.use_rest:
+ api = "cloud/targets"
+ dummy, error = rest_generic.delete_async(self.rest_api, api, uuid)
+ if error:
+ self.module.fail_json(msg='Error %s' % error)
+ else:
+ object_store_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-object-store-config-delete', **{'object-store-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(object_store_destroy,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error removing object store config %s: %s" %
+ (self.parameters['name'], to_native(error)), exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Apply action to the object store config
+ :return: None
+ """
+ modify = None
+ current = self.get_aggr_object_store()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.parameters['change_password'] and 'secret_password' in self.parameters:
+ if not modify:
+ modify = {}
+ modify['secret_password'] = self.parameters['secret_password']
+ self.na_helper.changed = True
+ self.module.warn('na_ontap_object_store is not idempotent when change_password is set to true')
+ if not self.use_rest and modify:
+ self.module.fail_json(msg="Error - modify is not supported with ZAPI: %s" % modify)
+ if cd_action == 'create' or modify:
+ body = self.validate_and_build_body(modify)
+
+ if self.na_helper.changed and not self.module.check_mode:
+ uuid = current['uuid'] if current and self.use_rest else None
+ if cd_action == 'create':
+ self.create_aggr_object_store(body)
+ elif cd_action == 'delete':
+ self.delete_aggr_object_store(uuid)
+ elif modify:
+ self.modify_aggr_object_store(body, uuid)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Create Object Store Config class instance and invoke apply
+ :return: None
+ """
+ obj_store = NetAppOntapObjectStoreConfig()
+ obj_store.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_partitions.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_partitions.py
new file mode 100644
index 000000000..33dd22e39
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_partitions.py
@@ -0,0 +1,415 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_partitions
+
+short_description: NetApp ONTAP Assign partitions and disks to nodes.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Assign the specified number of partitions or disks eligible for partitioning to a node.
+- There is some overlap between this module and the na_ontap_disks module.
+- If you don't have ADP v1 or v2 then you should be using the na_ontap_disks module to assign whole disks.
+- Partitions/disks are added in the following order
+- 1. Any unassigned partitions are added.
+- 2. Any unassigned disks of the correct type are added and will be partitioned when added to an aggregate if required.
+- 3. Any spare partner partitions will be re-assigned.
+- 4. Any partner spare disks will be re-assigned and be partitioned when added to an aggregate.
+- If you specify a partition_count less than the current number of partitions, then spare partitions will be unassigned.
+- If a previously partitioned disk has the partitions removed, and even if it is "slow zeroed" the system \
+ will consider it a shared partitioned disk rather than a spare.
+- In a root-data-data configuration (ADPv2) if you specify data1 as the partition_type then only P1 partitions will be counted.
+- Disk autoassign must be turned off before using this module to prevent the disks being reassigned automatically by the cluster.
+- This can be done through na_ontap_disk_options or via the cli "disk option modify -node <node_name> -autoassign off".
+
+options:
+ node:
+ required: true
+ type: str
+ description:
+ - Specifies the node that the partitions and disks should be assigned to.
+
+ partition_count:
+ required: true
+ type: int
+ description:
+ - Total number of partitions that should be assigned to the owner.
+
+ disk_type:
+ required: true
+ choices: ["ATA", "BSAS", "FCAL", "FSAS", "LUN", "MSATA", "SAS", "SSD", "SSD_NVM", "VMDISK", "unknown"]
+ type: str
+ description:
+ - The type of disk that the partitions that should use.
+
+ partition_type:
+ required: true
+ choices: ["data", "root", "data1", "data2"]
+ type: str
+ description:
+ - The type of partition being assigned, either root, data, data1 or data2.
+
+ partitioning_method:
+ required: true
+ choices: ["root_data", "root_data1_data2"]
+ type: str
+ description:
+ - The type of partition method being used, either root_data or root_data1_data2.
+
+ min_spares:
+ description:
+ - Minimum spares disks or partitions required per type for the node.
+ type: int
+
+'''
+
+EXAMPLES = """
+- name: Assign specified total partitions to node cluster-01
+ netapp.ontap.na_ontap_partitions:
+ node: cluster-01
+ partition_count: 56
+ disk_type: FSAS
+ partition_type: data
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class NetAppOntapPartitions():
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ node=dict(required=True, type='str'),
+ partition_count=dict(required=True, type='int'),
+ disk_type=dict(required=True, type='str', choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD_NVM', 'VMDISK', 'unknown']),
+ partition_type=dict(required=True, type='str', choices=['data1', 'data2', 'data', 'root']),
+ partitioning_method=dict(required=True, type='str', choices=['root_data1_data2', 'root_data']),
+ min_spares=dict(required=False, type='int')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # If min_spares is not specified min_spares is 1 if SSD, min_spares is 2 for any other disk type.
+ if 'min_spares' not in self.parameters:
+ if self.parameters['disk_type'] in ('SSD', 'SSD_NVM'):
+ self.parameters['min_spares'] = 1
+ else:
+ self.parameters['min_spares'] = 2
+
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_partitions', '9.6'))
+
+ def get_disks(self, container_type, node=None):
+ """
+ Check for owned disks, unassigned disks or spare disks.
+ Return: list of disks or an empty list
+ """
+ api = 'storage/disks'
+
+ if container_type == 'unassigned':
+ query = {
+ 'container_type': 'unassigned',
+ 'type': self.parameters['disk_type'],
+ 'fields': 'name'
+ }
+ if container_type == 'spare':
+ query = {
+ 'home_node.name': node,
+ 'container_type': 'spare',
+ 'type': self.parameters['disk_type'],
+ 'fields': 'name'
+ }
+ message, error = self.rest_api.get(api, query)
+ records, error = rrh.check_for_0_or_more_records(api, message, error)
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ return records if records else list()
+
+ def get_partitions(self, container_type, node=None):
+ """
+ Get partitions info
+ Return: list of partitions of a specified container type or None.
+ """
+ api = 'private/cli/storage/disk/partition'
+
+ query = {}
+
+ if container_type == 'spare':
+ query = {
+ 'fields': 'partition,container-type,disk-type,partitioning-method,home-node-name,is-root,owner-node-name',
+ 'home-node-name': node,
+ 'disk-type': self.parameters['disk_type'],
+ 'container-type': 'spare',
+ 'partitioning-method': self.parameters['partitioning_method']
+ }
+ if container_type == 'unassigned':
+ query = {
+ 'fields': 'partition,container-type,disk-type,partitioning-method,home-node-name,is-root,owner-node-name',
+ 'nodelist': node,
+ 'disk-type': self.parameters['disk_type'],
+ 'container-type': 'unassigned'
+ }
+ if container_type == 'owner':
+ query = {
+ 'fields': 'partition,container-type,disk-type,partitioning-method,home-node-name,is-root,owner-node-name',
+ 'home-node-name': node,
+ 'disk-type': self.parameters['disk_type'],
+ 'partitioning-method': self.parameters['partitioning_method']
+ }
+
+ if self.parameters['partition_type'] == 'root':
+ query['is-root'] = True
+ else:
+ query['is-root'] = False
+
+ message, error = self.rest_api.get(api, query)
+ records, error = rrh.check_for_0_or_more_records(api, message, error)
+ if error:
+ self.module.fail_json(msg=error)
+
+ if records:
+ if self.parameters['partitioning_method'] == 'root_data1_data2':
+ # get just the P1 or P2 partitions
+ data_partitions = []
+ for record in records:
+ if self.parameters['partition_type'] == 'data1' and record['partition'].endswith('P1'):
+ data_partitions.append(record)
+ elif self.parameters['partition_type'] == 'data2' and record['partition'].endswith('P2'):
+ data_partitions.append(record)
+ return data_partitions
+
+ return records
+ else:
+ return list()
+
+ def get_partner_node_name(self):
+ """
+ return: partner_node_name, str
+ """
+ api = 'cluster/nodes'
+ query = {
+ 'ha.partners.name': self.parameters['node']
+ }
+ message, error = self.rest_api.get(api, query)
+ records, error = rrh.check_for_0_or_more_records(api, message, error)
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ return records[0]['name'] if records else None
+
+ def assign_disks(self, disks):
+ """
+ Assign disks to node
+ """
+ api = 'private/cli/storage/disk/assign'
+ for disk in disks:
+ body = {
+ 'owner': self.parameters['node'],
+ 'disk': disk['name']
+ }
+
+ dummy, error = self.rest_api.post(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def unassign_disks(self, disks):
+ """
+ Unassign disks.
+ Disk autoassign must be turned off when removing ownership of a disk
+ """
+ api = 'private/cli/storage/disk/removeowner'
+ for disk in disks: # api requires 1 disk to be removed at a time.
+ body = {
+ 'disk': disk['name']
+ }
+ dummy, error = self.rest_api.post(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def assign_partitions(self, required_partitions):
+ """
+ Assign partitions to node
+ """
+ api = 'private/cli/storage/disk/partition/assign'
+ for required_partition in required_partitions:
+ body = {
+ 'owner': self.parameters['node'],
+ 'partition': required_partition['partition']
+ }
+
+ dummy, error = self.rest_api.post(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def unassign_partitions(self, required_partitions):
+ """
+ Unassign partitions from node
+ """
+ api = 'private/cli/storage/disk/partition/removeowner'
+ for required_partition in required_partitions:
+ body = {
+ 'partition': required_partition['partition']
+ }
+
+ dummy, error = self.rest_api.post(api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def determine_assignment(self, owned_partitions, own_spare_disks):
+ """
+ Determine which action to take
+ return: dict containing lists of the disks/partitions to be assigned/reassigned
+ """
+ # build dict of partitions and disks to be unassigned/assigned
+ assignment = {
+ 'required_unassigned_partitions': [],
+ 'required_partner_spare_disks': [],
+ 'required_partner_spare_partitions': [],
+ 'required_unassigned_disks': []
+ }
+
+ unassigned_partitions = self.get_partitions(container_type='unassigned', node=self.parameters['node'])
+ required_partitions = self.parameters['partition_count'] - (len(owned_partitions) + len(own_spare_disks))
+
+ # are there enough unassigned partitions to meet the requirement?
+ if required_partitions > len(unassigned_partitions):
+ assignment['required_unassigned_partitions'] = unassigned_partitions
+ # check for unassigned disks
+ unassigned_disks = self.get_disks(container_type='unassigned')
+ required_unassigned_disks = required_partitions - len(unassigned_partitions)
+ if required_unassigned_disks > len(unassigned_disks):
+ assignment['required_unassigned_disks'] = unassigned_disks
+ # not enough unassigned disks
+ required_partner_spare_partitions = required_unassigned_disks - len(unassigned_disks)
+ partner_node_name = self.get_partner_node_name()
+ if partner_node_name:
+ partner_spare_partitions = self.get_partitions(container_type='spare', node=partner_node_name)
+ partner_spare_disks = self.get_disks(container_type='spare', node=partner_node_name)
+ else:
+ partner_spare_partitions = []
+ partner_spare_disks = []
+ # check for partner spare partitions
+ if required_partner_spare_partitions <= (len(partner_spare_partitions) - self.parameters['min_spares']):
+ # we have enough spare partitions and dont need any disks
+ assignment['required_partner_spare_partitions'] = partner_spare_partitions[0:required_partner_spare_partitions]
+ else:
+ # we don't know if we can take all of the spare partitions as we don't know how many spare disks there are
+ # spare partitions != spare disks
+ if len(partner_spare_disks) >= self.parameters['min_spares']:
+ # we have enough spare disks so can use all spare partitions if required
+ if required_partner_spare_partitions <= len(partner_spare_partitions):
+ # now we know we have spare disks we can take as many spare partitions as we need
+ assignment['required_partner_spare_partitions'] = partner_spare_partitions[0:required_partner_spare_partitions]
+ else:
+ # we need to take some spare disks as well as using any spare partitions
+ required_partner_spare_disks = required_partner_spare_partitions - len(partner_spare_partitions)
+ required_partner_spare_partitions_count = required_partner_spare_partitions - required_partner_spare_disks
+ assignment['required_partner_spare_partitions'] = partner_spare_partitions[0:required_partner_spare_partitions_count]
+ if required_partner_spare_disks > len(partner_spare_disks) - self.parameters['min_spares']:
+ self.module.fail_json(msg='Not enough partner spare disks or partner spare partitions remain to fulfill the request')
+ else:
+ assignment['required_partner_spare_disks'] = partner_spare_disks[0:required_partner_spare_disks]
+ else:
+ self.module.fail_json(msg='Not enough partner spare disks or partner spare partitions remain to fulfill the request')
+ else:
+ assignment['required_unassigned_disks'] = unassigned_disks[0:required_unassigned_disks]
+ else:
+ assignment['required_unassigned_partitions'] = unassigned_partitions[0:required_partitions]
+
+ return assignment
+
+ def apply(self):
+ '''Apply action to partitions'''
+ changed = False
+
+ owned_partitions = self.get_partitions(container_type='owner', node=self.parameters['node'])
+ own_spare_disks = self.get_disks(container_type='spare', node=self.parameters['node'])
+
+ # are the required partitions more than the currently owned partitions and spare disks, if so we need to assign
+ if self.parameters['partition_count'] > (len(own_spare_disks) + len(owned_partitions)):
+ # which action needs to be taken
+ assignment = self.determine_assignment(owned_partitions=owned_partitions, own_spare_disks=own_spare_disks)
+
+ # now that we have calculated where the partitions and disks come from we can take action
+ if len(assignment['required_unassigned_partitions']) > 0:
+ changed = True
+ if not self.module.check_mode:
+ self.assign_partitions(assignment['required_unassigned_partitions'])
+
+ if len(assignment['required_unassigned_disks']) > 0:
+ changed = True
+ if not self.module.check_mode:
+ self.assign_disks(assignment['required_unassigned_disks'])
+
+ if len(assignment['required_partner_spare_partitions']) > 0:
+ changed = True
+ if not self.module.check_mode:
+ self.unassign_partitions(assignment['required_partner_spare_partitions'])
+ self.assign_partitions(assignment['required_partner_spare_partitions'])
+
+ if len(assignment['required_partner_spare_disks']) > 0:
+ changed = True
+ if not self.module.check_mode:
+ self.unassign_disks(assignment['required_partner_spare_disks'])
+ self.assign_disks(assignment['required_partner_spare_disks'])
+
+ # unassign
+ elif self.parameters['partition_count'] < len(owned_partitions):
+ spare_partitions = self.get_partitions(container_type='spare', node=self.parameters['node'])
+ unassign_partitions = len(owned_partitions) - self.parameters['partition_count']
+
+ if unassign_partitions > len(spare_partitions):
+ self.module.fail_json(msg='Not enough spare partitions exist fulfill the partition unassignment request')
+ elif (len(spare_partitions) - unassign_partitions + len(own_spare_disks)) < self.parameters['min_spares']:
+ self.module.fail_json(msg='Unassignment of specified partitions would leave node with less than the minimum number of spares')
+ else:
+ changed = True
+ if not self.module.check_mode:
+ self.unassign_partitions(spare_partitions[0:unassign_partitions])
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ ''' Create object and call apply '''
+ obj_aggr = NetAppOntapPartitions()
+ obj_aggr.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py
new file mode 100644
index 000000000..885bee277
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py
@@ -0,0 +1,583 @@
+#!/usr/bin/python
+
+# (c) 2019-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_ports
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_ports
+short_description: NetApp ONTAP add/remove ports
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Add or remove ports for broadcast domain and portset.
+
+options:
+ state:
+ description:
+ - Whether the specified port should be added or removed.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Name of the SVM.
+ - Specify this option when operating on portset.
+ type: str
+
+ names:
+ description:
+ - List of ports.
+ type: list
+ elements: str
+ required: true
+
+ resource_name:
+ description:
+ - name of the portset or broadcast domain.
+ type: str
+ required: true
+
+ resource_type:
+ description:
+ - type of the resource to add a port to or remove a port from.
+ - adding or removing ports in portset requires ONTAP version 9.9 or later in REST
+ choices: ['broadcast_domain', 'portset']
+ required: true
+ type: str
+
+ ipspace:
+ description:
+ - Specify the required ipspace for the broadcast domain.
+ - A domain ipspace can not be modified after the domain has been created.
+ type: str
+
+ portset_type:
+ description:
+ - Protocols accepted for portset.
+ choices: ['fcp', 'iscsi', 'mixed']
+ type: str
+
+'''
+
+EXAMPLES = '''
+
+ - name: broadcast domain remove port
+ tags:
+ - remove
+ netapp.ontap.na_ontap_ports:
+ state: absent
+ names: test-vsim1:e0d-1,test-vsim1:e0d-2
+ resource_type: broadcast_domain
+ resource_name: ansible_domain
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+ - name: broadcast domain add port
+ tags:
+ - add
+ netapp.ontap.na_ontap_ports:
+ state: present
+ names: test-vsim1:e0d-1,test-vsim1:e0d-2
+ resource_type: broadcast_domain
+ resource_name: ansible_domain
+ ipspace: Default
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+ - name: portset remove port
+ tags:
+ - remove
+ netapp.ontap.na_ontap_ports:
+ state: absent
+ names: lif_2
+ resource_type: portset
+ resource_name: portset_1
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+ - name: portset add port
+ tags:
+ - add
+ netapp.ontap.na_ontap_ports:
+ state: present
+ names: lif_2
+ resource_type: portset
+ resource_name: portset_1
+ portset_type: iscsi
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: user
+ password: password
+ https: False
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapPorts:
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ vserver=dict(required=False, type='str'),
+ names=dict(required=True, type='list', elements='str'),
+ resource_name=dict(required=True, type='str'),
+ resource_type=dict(required=True, type='str', choices=['broadcast_domain', 'portset']),
+ ipspace=dict(required=False, type='str'),
+ portset_type=dict(required=False, type='str', choices=['fcp', 'iscsi', 'mixed']),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('resource_type', 'portset', ['vserver']),
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # Set up Rest API
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+ self.desired_ports = None
+ self.desired_lifs = None
+
+ if self.use_rest and 'ipspace' not in self.parameters and self.parameters['resource_type'] == 'broadcast_domain':
+ error_msg = "Error: ipspace space is a required option with REST"
+ self.module.fail_json(msg=error_msg)
+
+ if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9) and self.parameters['resource_type'] == 'portset':
+ self.module.fail_json(msg='Error: adding or removing ports in portset requires ONTAP version 9.9 or later in REST')
+
+ if 'names' in self.parameters:
+ self.parameters['names'] = list(set([port.strip() for port in self.parameters['names']]))
+ if self.use_rest and self.parameters['resource_type'] == 'broadcast_domain':
+ self.desired_ports = self.get_ports_rest(self.parameters['names'])
+ if self.use_rest and self.parameters['resource_type'] == 'portset':
+ self.desired_lifs = self.get_san_lifs_rest(self.parameters['names'])
+
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ if self.parameters['resource_type'] == 'broadcast_domain':
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ elif self.parameters['resource_type'] == 'portset':
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def add_broadcast_domain_ports(self, ports):
+ """
+ Add broadcast domain ports
+ :param: ports to be added.
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['resource_name'])
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in ports:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error adding port for broadcast domain %s: %s' %
+ (self.parameters['resource_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def add_broadcast_domain_ports_rest(self, ports):
+ """
+ Add broadcast domain ports in rest.
+ :param: ports to be added or moved.
+ """
+ api = 'network/ethernet/ports'
+ body = {
+ 'broadcast_domain': {
+ 'name': self.parameters['resource_name'],
+ 'ipspace': {'name': self.parameters['ipspace']}
+ }
+ }
+ for port in ports:
+ dummy, error = rest_generic.patch_async(self.rest_api, api, port['uuid'], body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def remove_broadcast_domain_ports(self, ports):
+ """
+ Deletes broadcast domain ports
+ :param: ports to be removed.
+ """
+ domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports')
+ domain_obj.add_new_child("broadcast-domain", self.parameters['resource_name'])
+ if self.parameters.get('ipspace'):
+ domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
+ ports_obj = netapp_utils.zapi.NaElement('ports')
+ domain_obj.add_child_elem(ports_obj)
+ for port in ports:
+ ports_obj.add_new_child('net-qualified-port-name', port)
+ try:
+ self.server.invoke_successfully(domain_obj, True)
+ return True
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing port for broadcast domain %s: %s' %
+ (self.parameters['resource_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_broadcast_domain_ports_rest(self, ports, ipspace):
+ body = {'ports': ports}
+ api = "private/cli/network/port/broadcast-domain/remove-ports"
+ query = {'broadcast-domain': self.parameters['resource_name'], 'ipspace': ipspace}
+ response, error = rest_generic.patch_async(self.rest_api, api, None, body, query)
+ if error:
+ self.module.fail_json(msg='Error removing ports: %s' % error)
+
    def get_broadcast_domain_ports(self):
        """
        Return details about the broadcast domain ports.
        :return: Details about the broadcast domain ports. [] if not found.
        :rtype: list
        """
        if self.use_rest:
            return self.get_broadcast_domain_ports_rest()
        domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
        broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
        broadcast_domain_info.add_new_child('broadcast-domain', self.parameters['resource_name'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(broadcast_domain_info)
        domain_get_iter.add_child_elem(query)
        result = self.server.invoke_successfully(domain_get_iter, True)
        ports = []
        # exactly one record is expected since the query is keyed on the domain name
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info')
            domain_ports = domain_info.get_child_by_name('ports')
            # the 'ports' element is absent when the domain currently holds no ports
            if domain_ports is not None:
                ports = [port.get_child_content('port') for port in domain_ports.get_children()]
        return ports
+
+ def get_broadcast_domain_ports_rest(self):
+ """
+ Return details about the broadcast domain ports.
+ :return: Details about the broadcast domain ports. [] if not found.
+ :rtype: list
+ """
+ api = 'network/ethernet/broadcast-domains'
+ query = {'name': self.parameters['resource_name'], 'ipspace.name': self.parameters['ipspace']}
+ fields = 'ports'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+ if error:
+ self.module.fail_json(msg=error)
+ ports = []
+ if record and 'ports' in record:
+ ports = ['%s:%s' % (port['node']['name'], port['name']) for port in record['ports']]
+ return ports
+
+ def remove_portset_ports(self, port, portset_uuid=None):
+ """
+ Removes all existing ports from portset
+ :return: None
+ """
+ if self.use_rest:
+ return self.remove_portset_ports_rest(port, portset_uuid)
+ options = {'portset-name': self.parameters['resource_name'],
+ 'portset-port-name': port.strip()}
+
+ portset_modify = netapp_utils.zapi.NaElement.create_node_with_children('portset-remove', **options)
+
+ try:
+ self.server.invoke_successfully(portset_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing port in portset %s: %s' %
+ (self.parameters['resource_name'], to_native(error)), exception=traceback.format_exc())
+
+ def remove_portset_ports_rest(self, port, portset_uuid):
+ """
+ Removes all existing ports from portset
+ :return: None
+ """
+ api = 'protocols/san/portsets/%s/interfaces' % portset_uuid
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.desired_lifs[port]['uuid'])
+ if error:
+ self.module.fail_json(msg=error)
+
    def add_portset_ports(self, port):
        """
        Add a single port to the portset (ZAPI).
        :param port: name of the lif/port to add; surrounding whitespace is stripped.
        :return: None
        """
        options = {'portset-name': self.parameters['resource_name'],
                   'portset-port-name': port.strip()}

        portset_modify = netapp_utils.zapi.NaElement.create_node_with_children('portset-add', **options)

        try:
            self.server.invoke_successfully(portset_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error adding port in portset %s: %s' %
                                  (self.parameters['resource_name'], to_native(error)), exception=traceback.format_exc())
+
+ def add_portset_ports_rest(self, portset_uuid, ports_to_add):
+ """
+ Add the list of ports to portset
+ :return: None
+ """
+ api = 'protocols/san/portsets/%s/interfaces' % portset_uuid
+ body = {'records': []}
+ for port in ports_to_add:
+ body['records'].append({self.desired_lifs[port]['lif_type']: {'name': port}})
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def portset_get_iter(self):
+ """
+ Compose NaElement object to query current portset using vserver, portset-name and portset-type parameters
+ :return: NaElement object for portset-get-iter with query
+ """
+ portset_get = netapp_utils.zapi.NaElement('portset-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ portset_info = netapp_utils.zapi.NaElement('portset-info')
+ portset_info.add_new_child('vserver', self.parameters['vserver'])
+ portset_info.add_new_child('portset-name', self.parameters['resource_name'])
+ if self.parameters.get('portset_type'):
+ portset_info.add_new_child('portset-type', self.parameters['portset_type'])
+ query.add_child_elem(portset_info)
+ portset_get.add_child_elem(query)
+ return portset_get
+
    def portset_get(self):
        """
        Get current portset info
        :return: List of current ports if query successful, else return []
        """
        portset_get_iter = self.portset_get_iter()
        result, ports = None, []
        try:
            result = self.server.invoke_successfully(portset_get_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching portset %s: %s'
                                  % (self.parameters['resource_name'], to_native(error)),
                                  exception=traceback.format_exc())
        # return portset details
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
            portset_get_info = result.get_child_by_name('attributes-list').get_child_by_name('portset-info')
            # 'portset-port-info' is only present when the portset has at least one port
            if int(portset_get_info.get_child_content('portset-port-total')) > 0:
                port_info = portset_get_info.get_child_by_name('portset-port-info')
                ports = [port.get_content() for port in port_info.get_children()]
        return ports
+
    def portset_get_rest(self):
        """
        Get current portset info over REST.
        :return: dict with 'uuid' and 'ports' if the portset exists, else {}.
          Fails the module when the portset is missing and state is present.
        :rtype: dict
        """
        api = 'protocols/san/portsets'
        query = {
            'svm.name': self.parameters['vserver'],
            'name': self.parameters['resource_name']
        }
        if self.parameters.get('portset_type'):
            query['protocol'] = self.parameters['portset_type']
        fields = 'interfaces'
        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
        if error:
            self.module.fail_json(msg=error)
        current = {}
        if record:
            current['uuid'] = record['uuid']
            if 'interfaces' in record:
                # This will form ports list for fcp, iscsi and mixed protocols.
                # each interface holds its name under 'ip' or 'fc' depending on the protocol
                ports = [lif.get('ip', lif.get('fc'))['name'] for lif in record['interfaces']]
                current['ports'] = ports
        if not current and self.parameters['state'] == 'present':
            error_msg = "Error: Portset '%s' does not exist" % self.parameters['resource_name']
            self.module.fail_json(msg=error_msg)
        return current
+
+ def modify_broadcast_domain_ports(self):
+ """
+ compare current and desire ports. Call add or remove ports methods if needed.
+ :return: None.
+ """
+ current_ports = self.get_broadcast_domain_ports()
+ cd_ports = self.parameters['names']
+ if self.parameters['state'] == 'present':
+ ports_to_add = [port for port in cd_ports if port not in current_ports]
+ if len(ports_to_add) > 0:
+ if not self.module.check_mode:
+ if self.use_rest:
+ self.add_broadcast_domain_ports_rest(self.ports_to_add_from_desired(ports_to_add))
+ else:
+ self.add_broadcast_domain_ports(ports_to_add)
+ self.na_helper.changed = True
+
+ if self.parameters['state'] == 'absent':
+ ports_to_remove = [port for port in cd_ports if port in current_ports]
+ if len(ports_to_remove) > 0:
+ if not self.module.check_mode:
+ if self.use_rest:
+ self.remove_broadcast_domain_ports_rest(ports_to_remove, self.parameters['ipspace'])
+ else:
+ self.remove_broadcast_domain_ports(ports_to_remove)
+ self.na_helper.changed = True
+
    def modify_portset_ports(self):
        """
        Reconcile portset membership with the requested lif names.
        Adds missing lifs when state is present; removes matching lifs when state is absent.
        """
        uuid = None
        if self.use_rest:
            current = self.portset_get_rest()
            if 'uuid' in current:
                uuid = current['uuid']
            current_ports = current['ports'] if 'ports' in current else []
        else:
            current_ports = self.portset_get()
        cd_ports = self.parameters['names']
        if self.parameters['state'] == 'present':
            ports_to_add = [port for port in cd_ports if port not in current_ports]
            if len(ports_to_add) > 0:
                if not self.module.check_mode:
                    if self.use_rest:
                        # REST adds all lifs in one call
                        self.add_portset_ports_rest(uuid, ports_to_add)
                    else:
                        # ZAPI adds one port per call
                        for port in ports_to_add:
                            self.add_portset_ports(port)
                self.na_helper.changed = True

        if self.parameters['state'] == 'absent':
            ports_to_remove = [port for port in cd_ports if port in current_ports]
            if len(ports_to_remove) > 0:
                if not self.module.check_mode:
                    # removal is per-port for both REST and ZAPI
                    for port in ports_to_remove:
                        self.remove_portset_ports(port, uuid)
                self.na_helper.changed = True
+
+ def get_ports_rest(self, ports):
+ # list of desired ports not present in the node.
+ missing_ports = []
+ # list of uuid information of each desired port should present in broadcast domain.
+ desired_ports = []
+ for port in ports:
+ current = self.get_net_port_rest(port)
+ if current is None:
+ missing_ports.append(port)
+ else:
+ desired_ports.append(current)
+ # Error if any of provided ports are not found.
+ if missing_ports and self.parameters['state'] == 'present':
+ self.module.fail_json(msg='Error: ports: %s not found' % ', '.join(missing_ports))
+ return desired_ports
+
+ def get_net_port_rest(self, port):
+ if ':' not in port:
+ error_msg = "Error: Invalid value specified for port: %s, provide port name as node_name:port_name" % port
+ self.module.fail_json(msg=error_msg)
+ node_name, port_name = port.split(':')
+ api = 'network/ethernet/ports'
+ query = {
+ 'name': port_name,
+ 'node.name': node_name,
+ }
+ fields = 'name,uuid'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+ if error:
+ self.module.fail_json(msg=error)
+ if record:
+ current = {'uuid': record['uuid'], 'name': '%s:%s' % (record['node']['name'], record['name'])}
+ return current
+ return None
+
+ def ports_to_add_from_desired(self, ports):
+ ports_to_add = []
+ for port in ports:
+ for port_to_add in self.desired_ports:
+ if port == port_to_add['name']:
+ ports_to_add.append({'uuid': port_to_add['uuid']})
+ return ports_to_add
+
    def get_san_lifs_rest(self, san_lifs):
        """
        Resolve each desired lif name to its protocol type (ip or fc) and uuid.
        Fails the module when state is present and any lif is missing from the vserver.
        :return: dict keyed by lif name, each value holding 'lif_type' and 'uuid'.
        """
        # list of lifs not present in the vserver
        missing_lifs = []
        # dict with each key is lif name, value contains lif type - fc or ip and uuid.
        desired_lifs = {}
        record, record2, error, error2 = None, None, None, None
        for lif in san_lifs:
            # query 'ip' interfaces unless the portset is fcp-only
            if self.parameters.get('portset_type') in [None, 'mixed', 'iscsi']:
                record, error = self.get_san_lif_type(lif, 'ip')
            # query 'fc' interfaces unless the portset is iscsi-only
            if self.parameters.get('portset_type') in [None, 'mixed', 'fcp']:
                record2, error2 = self.get_san_lif_type(lif, 'fc')
            if error is None and error2 is not None and record:
                # ignore error on fc if ip interface is found
                error2 = None
            if error2 is None and error is not None and record2:
                # ignore error on ip if fc interface is found
                error = None
            if error or error2:
                errors = [to_native(err) for err in (error, error2) if err]
                self.module.fail_json(msg='Error fetching lifs details for %s: %s' % (lif, ' - '.join(errors)),
                                      exception=traceback.format_exc())
            if record:
                desired_lifs[lif] = {'lif_type': 'ip', 'uuid': record['uuid']}
            if record2:
                # fc wins when both protocols return a record for the same lif name
                desired_lifs[lif] = {'lif_type': 'fc', 'uuid': record2['uuid']}
            if record is None and record2 is None:
                missing_lifs.append(lif)
        if missing_lifs and self.parameters['state'] == 'present':
            error_msg = 'Error: lifs: %s of type %s not found in vserver %s' % \
                        (', '.join(missing_lifs), self.parameters.get('portset_type', 'fcp or iscsi'), self.parameters['vserver'])
            self.module.fail_json(msg=error_msg)
        return desired_lifs
+
+ def get_san_lif_type(self, lif, portset_type):
+ api = 'network/%s/interfaces' % portset_type
+ query = {'name': lif, 'svm.name': self.parameters['vserver']}
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ return record, error
+
+ def apply(self):
+ if self.parameters['resource_type'] == 'broadcast_domain':
+ self.modify_broadcast_domain_ports()
+ elif self.parameters['resource_type'] == 'portset':
+ self.modify_portset_ports()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Instantiate the ports module object and run the reconciliation."""
    portset_obj = NetAppOntapPorts()
    portset_obj.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py
new file mode 100644
index 000000000..2132a1b0b
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py
@@ -0,0 +1,423 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP Create/Delete portset
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete ONTAP portset, modify ports in a portset.
+ - Modify type(protocol) is not supported in ONTAP.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_portset
+options:
+ state:
+ description:
+ - If you want to create a portset.
+ default: present
+ type: str
+ vserver:
+ required: true
+ description:
+ - Name of the SVM.
+ type: str
+ name:
+ required: true
+ description:
+ - Name of the port set to create.
+ type: str
+ type:
+ description:
+ - Required for create in ZAPI.
+ - Default value is mixed if not specified at the time of creation in REST.
+ - Protocols accepted for this portset.
+ choices: ['fcp', 'iscsi', 'mixed']
+ type: str
+ force:
+ description:
+ - If 'false' or not specified, the request will fail if there are any igroups bound to this portset.
+ - If 'true', forcibly destroy the portset, even if there are existing igroup bindings.
+ type: bool
+ default: False
+ ports:
+ description:
+ - Specify the ports associated with this portset. Should be comma separated.
+ - It represents the expected state of a list of ports at any time, and replaces the current value of ports.
+ - Adds a port if it is specified in expected state but not in current state.
+ - Deletes a port if it is in current state but not in expected state.
+ type: list
+ elements: str
+version_added: 2.8.0
+
+'''
+
+EXAMPLES = """
+ - name: Create Portset
+ netapp.ontap.na_ontap_portset:
+ state: present
+ vserver: vserver_name
+ name: portset_name
+ ports: a1
+ type: "{{ protocol type }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+
+ - name: Modify ports in portset
+ netapp.ontap.na_ontap_portset:
+ state: present
+ vserver: vserver_name
+ name: portset_name
+ ports: a1,a2
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+
+ - name: Delete Portset
+ netapp.ontap.na_ontap_portset:
+ state: absent
+ vserver: vserver_name
+ name: portset_name
+ force: True
+ type: "{{ protocol type }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppONTAPPortset:
+ """
+ Methods to create or delete portset
+ """
+
    def __init__(self):
        """Set up module arguments, validate the ports list, and select REST vs ZAPI."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', default='present'),
            vserver=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            type=dict(required=False, type='str', choices=[
                'fcp', 'iscsi', 'mixed']),
            force=dict(required=False, type='bool', default=False),
            ports=dict(required=False, type='list', elements='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        if 'ports' in self.parameters:
            # strip surrounding whitespace and drop duplicates; an empty name is invalid for present
            self.parameters['ports'] = list(set([port.strip() for port in self.parameters['ports']]))
            if '' in self.parameters['ports'] and self.parameters['state'] == 'present':
                self.module.fail_json(msg="Error: invalid value specified for ports")

        # Setup REST API.
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # portset uuid and per-lif type/uuid cache, populated from REST queries
        self.uuid, self.lifs_info = None, {}
        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
            # portset REST APIs only exist in ONTAP 9.9.1+; honor use_rest=always/auto accordingly
            msg = 'REST requires ONTAP 9.9.1 or later for portset APIs.'
            if self.parameters['use_rest'].lower() == 'always':
                self.module.fail_json(msg='Error: %s' % msg)
            if self.parameters['use_rest'].lower() == 'auto':
                self.module.warn('Falling back to ZAPI: %s' % msg)
            self.use_rest = False

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def portset_get_iter(self):
+ """
+ Compose NaElement object to query current portset using vserver, portset-name and portset-type parameters
+ :return: NaElement object for portset-get-iter with query
+ """
+ portset_get = netapp_utils.zapi.NaElement('portset-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ portset_info = netapp_utils.zapi.NaElement('portset-info')
+ portset_info.add_new_child('vserver', self.parameters['vserver'])
+ portset_info.add_new_child('portset-name', self.parameters['name'])
+ query.add_child_elem(portset_info)
+ portset_get.add_child_elem(query)
+ return portset_get
+
    def portset_get(self):
        """
        Get current portset info
        :return: Dictionary of current portset details ('type' and 'ports') if query successful, else return None
        """
        if self.use_rest:
            return self.portset_get_rest()
        portset_get_iter = self.portset_get_iter()
        result, portset_info = None, dict()
        try:
            result = self.server.invoke_successfully(portset_get_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching portset %s: %s'
                                  % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        # return portset details
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
            portset_get_info = result.get_child_by_name('attributes-list').get_child_by_name('portset-info')
            portset_info['type'] = portset_get_info.get_child_content('portset-type')
            # 'portset-port-info' is only present when the portset has at least one port
            if int(portset_get_info.get_child_content('portset-port-total')) > 0:
                ports = portset_get_info.get_child_by_name('portset-port-info')
                portset_info['ports'] = [port.get_content() for port in ports.get_children()]
            else:
                portset_info['ports'] = []
            return portset_info
        return None
+
    def create_portset(self):
        """
        Create a portset
        'type' is mandatory for the ZAPI path only; REST defaults it server-side.
        """
        if self.use_rest:
            return self.create_portset_rest()
        if self.parameters.get('type') is None:
            self.module.fail_json(msg='Error: Missing required parameter for create (type)')
        portset_info = netapp_utils.zapi.NaElement("portset-create")
        portset_info.add_new_child("portset-name", self.parameters['name'])
        portset_info.add_new_child("portset-type", self.parameters['type'])
        try:
            self.server.invoke_successfully(
                portset_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error creating portset %s: %s" %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
+
+ def delete_portset(self):
+ """
+ Delete a portset
+ """
+ if self.use_rest:
+ return self.delete_portset_rest()
+ portset_info = netapp_utils.zapi.NaElement("portset-destroy")
+ portset_info.add_new_child("portset-name", self.parameters['name'])
+ if self.parameters.get('force'):
+ portset_info.add_new_child("force", str(self.parameters['force']))
+ try:
+ self.server.invoke_successfully(
+ portset_info, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error deleting portset %s: %s" %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def remove_ports(self, ports):
+ """
+ Removes all existing ports from portset
+ :return: None
+ """
+ for port in ports:
+ self.modify_port(port, 'portset-remove', 'removing')
+
+ def add_ports(self, ports=None):
+ """
+ Add the list of ports to portset
+ :return: None
+ """
+ if ports is None:
+ ports = self.parameters.get('ports')
+ # don't add if ports is None
+ if ports is None:
+ return
+ for port in ports:
+ self.modify_port(port, 'portset-add', 'adding')
+
    def modify_port(self, port, zapi, action):
        """
        Add or remove a port to/from a portset
        :param port: name of the port to operate on.
        :param zapi: ZAPI name, 'portset-add' or 'portset-remove'.
        :param action: verb used in the error message ('adding' / 'removing').
        """
        options = {'portset-name': self.parameters['name'],
                   'portset-port-name': port}

        portset_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)

        try:
            self.server.invoke_successfully(portset_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error %s port in portset %s: %s' % (action, self.parameters['name'],
                                                                           to_native(error)),
                                  exception=traceback.format_exc())
+
+ def portset_get_rest(self):
+ api = "protocols/san/portsets"
+ query = {'name': self.parameters['name'], 'svm.name': self.parameters['vserver']}
+ fields = 'uuid,protocol,interfaces'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+ if error:
+ self.module.fail_json(msg='Error fetching portset %s: %s'
+ % (self.parameters['name'], to_native(error)))
+ portset_info = None
+ if record:
+ portset_info = self.form_portset_info(record)
+ return portset_info
+
+ def form_portset_info(self, record):
+ self.uuid = record['uuid']
+ # if type is not set, assign current type
+ # for avoiding incompatible network interface error in modify portset.
+ if self.parameters.get('type') is None:
+ self.parameters['type'] = record['protocol']
+ portset_info = {
+ 'type': record['protocol'],
+ 'ports': []
+ }
+ if 'interfaces' in record:
+ for lif in record['interfaces']:
+ for key, value in lif.items():
+ if key in ['fc', 'ip']:
+ # add current lifs type and uuid to self.lifs for modify and delete purpose.
+ self.lifs_info[value['name']] = {'lif_type': key, 'uuid': value['uuid']}
+ # This will form ports list for fcp, iscsi and mixed protocols.
+ portset_info['ports'].append(value['name'])
+ return portset_info
+
+ def create_portset_rest(self):
+ api = "protocols/san/portsets"
+ body = {'name': self.parameters['name'], 'svm.name': self.parameters['vserver']}
+ if 'type' in self.parameters:
+ body['protocol'] = self.parameters['type']
+ if self.lifs_info:
+ body['interfaces'] = [{self.lifs_info[lif]['lif_type']: {'name': lif}} for lif in self.lifs_info]
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg="Error creating portset %s: %s" %
+ (self.parameters['name'], to_native(error)))
+
+ def delete_portset_rest(self):
+ api = "protocols/san/portsets"
+ # Default value is False if 'force' not in parameters.
+ query = {'allow_delete_while_bound': self.parameters.get('force', False)}
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid, query)
+ if error:
+ self.module.fail_json(msg="Error deleting portset %s: %s" %
+ (self.parameters['name'], to_native(error)))
+
+ def modify_portset_rest(self, ports_to_add, ports_to_remove):
+ if ports_to_add:
+ self.add_ports_to_portset(ports_to_add)
+ for port in ports_to_remove:
+ self.remove_port_from_portset(port)
+
+ def add_ports_to_portset(self, ports_to_add):
+ api = 'protocols/san/portsets/%s/interfaces' % self.uuid
+ body = {'records': [{self.lifs_info[port]['lif_type']: {'name': port}} for port in ports_to_add]}
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg='Error adding port in portset %s: %s' % (self.parameters['name'],
+ to_native(error)))
+
+ def remove_port_from_portset(self, port_to_remove):
+ api = 'protocols/san/portsets/%s/interfaces' % self.uuid
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.lifs_info[port_to_remove]['uuid'])
+ if error:
+ self.module.fail_json(msg='Error removing port in portset %s: %s' % (self.parameters['name'],
+ to_native(error)))
+
    def get_san_lifs_rest(self, san_lifs):
        """Resolve type ('ip' or 'fc') and uuid for each lif name, caching results in self.lifs_info.

        Fails the module when a lookup errors out on every applicable protocol,
        or when any lif is missing from the vserver while state is 'present'.
        """
        # list of lifs not present in the vserver
        missing_lifs = []
        record, record2, error, error2 = None, None, None, None
        for lif in san_lifs:
            # query the 'ip' endpoint unless the portset is fcp-only
            if self.parameters.get('type') in [None, 'mixed', 'iscsi']:
                record, error = self.get_san_lif_type_uuid(lif, 'ip')
            # query the 'fc' endpoint unless the portset is iscsi-only
            if self.parameters.get('type') in [None, 'mixed', 'fcp']:
                record2, error2 = self.get_san_lif_type_uuid(lif, 'fc')
            if error is None and error2 is not None and record:
                # ignore error on fc if ip interface is found
                error2 = None
            if error2 is None and error is not None and record2:
                # ignore error on ip if fc interface is found
                error = None
            if error or error2:
                errors = [to_native(err) for err in (error, error2) if err]
                self.module.fail_json(msg='Error fetching lifs details for %s: %s' % (lif, ' - '.join(errors)),
                                      exception=traceback.format_exc())
            if record:
                self.lifs_info[lif] = {'lif_type': 'ip', 'uuid': record['uuid']}
            # when a name matches both an ip and an fc lif, fc wins (assigned last)
            if record2:
                self.lifs_info[lif] = {'lif_type': 'fc', 'uuid': record2['uuid']}
            if record is None and record2 is None:
                missing_lifs.append(lif)
        if missing_lifs and self.parameters['state'] == 'present':
            error_msg = 'Error: lifs: %s of type %s not found in vserver %s' % \
                        (', '.join(missing_lifs), self.parameters.get('type', 'fcp or iscsi'), self.parameters['vserver'])
            self.module.fail_json(msg=error_msg)
+
+ def get_san_lif_type_uuid(self, lif, portset_type):
+ api = 'network/%s/interfaces' % portset_type
+ query = {'name': lif, 'svm.name': self.parameters['vserver']}
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ return record, error
+
    def apply(self):
        """
        Applies action from playbook: create, delete or modify the portset.
        """
        current, modify = self.portset_get(), None
        # get lifs type and uuid which is not present in current.
        if self.use_rest and self.parameters['state'] == 'present':
            self.get_san_lifs_rest([port for port in self.parameters['ports'] if port not in self.lifs_info])
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            # changing the protocol of an existing portset is not supported
            if self.parameters.get('type') and self.parameters['type'] != current['type']:
                self.module.fail_json(msg="modify protocol(type) not supported and %s already exists in vserver %s under different type" %
                                          (self.parameters['name'], self.parameters['vserver']))
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_portset()
                # REST handles create and add ports in create api call itself.
                if not self.use_rest:
                    self.add_ports()
            elif cd_action == 'delete':
                self.delete_portset()
            elif modify:
                # compute membership delta between desired and current ports
                add_ports = set(self.parameters['ports']) - set(current['ports'])
                remove_ports = set(current['ports']) - set(self.parameters['ports'])
                if self.use_rest:
                    self.modify_portset_rest(add_ports, remove_ports)
                else:
                    self.add_ports(add_ports)
                    self.remove_ports(remove_ports)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """
    Execute action from playbook
    """
    NetAppONTAPPortset().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_publickey.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_publickey.py
new file mode 100644
index 000000000..420238389
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_publickey.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_publickey
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_publickey
+
+short_description: NetApp ONTAP publickey configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Add, modify, or remove publickeys.
+ - Requires ONTAP 9.7 or later, and only supports REST.
+
+options:
+ state:
+ description:
+ - Whether the specified publickey should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+ account:
+ description:
+ - The name of the user account.
+ required: true
+ type: str
+ comment:
+ description:
+ - Optional comment for the public key.
+ type: str
+ delete_all:
+ description:
+ - If index is not present, with state=absent, delete all public key for this user account.
+ type: bool
+ default: false
+ index:
+ description:
+ - Index number for the public key.
+ - If index is not present, with state=present, the public key is always added, using the next available index.
+ - If index is not present, with state=present, the module is not idempotent.
+ - If index is not present, with state=absent, if only one key is found, it is deleted. Otherwise an error is reported.
+ - See also C(delete_all) option.
+ type: int
+ public_key:
+ description:
+ - The public key.
+ type: str
+ vserver:
+ description:
+ - The name of the vserver to use.
+ - Omit this option for cluster scoped user accounts.
+ type: str
+
+notes:
+ - This module supports check_mode.
+ - This module is not idempotent if index is omitted.
+'''
+
+EXAMPLES = """
+
+ - name: Create publickey
+ netapp.ontap.na_ontap_publickey:
+ state: present
+ account: SampleUser
+ index: 0
+ public_key: "{{ netapp_publickey }}"
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete single publickey
+ netapp.ontap.na_ontap_publickey:
+ state: absent
+ account: SampleUser
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify single publickey
+ netapp.ontap.na_ontap_publickey:
+ state: present
+ account: SampleUser
+ comment: ssh key for XXXX
+ index: 0
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+cd_action:
+ description: whether a public key is created or deleted.
+ returned: success
+ type: str
+
+modify:
+ description: attributes that were modified if the key already exists.
+ returned: success
+ type: dict
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapPublicKey:
    """
    Common operations to manage public keys.

    Only REST is supported; requires ONTAP 9.7 or later.
    """

    def __init__(self):
        """Set up the argument spec, validate options, and verify REST support."""
        self.use_rest = False
        argument_spec = netapp_utils.na_ontap_host_argument_spec()
        argument_spec.update(dict(
            state=dict(type='str', choices=['present', 'absent'], default='present'),
            account=dict(required=True, type='str'),
            comment=dict(type='str'),
            delete_all=dict(type='bool', default=False),
            index=dict(type='int'),
            public_key=dict(type='str'),
            vserver=dict(type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=argument_spec,
            mutually_exclusive=[
                ('delete_all', 'index')
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # REST API is required
        self.rest_api = OntapRestAPI(self.module)
        # check version - fails the module when REST or ONTAP 9.7 is not available
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_publickey', 9, 7)

    def get_public_keys(self):
        """Return public key records matching account (and vserver/index when set).

        Returns a list of flattened record dicts; empty list when nothing matches.
        """
        api = 'security/authentication/publickeys'
        query = {
            'account.name': self.parameters['account'],
            'fields': 'account,owner,index,public_key,comment'
        }
        if self.parameters.get('vserver') is None:
            # vserver is empty for cluster scoped accounts
            query['scope'] = 'cluster'
        else:
            query['owner.name'] = self.parameters['vserver']

        if self.parameters.get('index') is not None:
            query['index'] = self.parameters['index']

        response, error = self.rest_api.get(api, query)
        if self.parameters.get('index') is not None:
            # with an index, at most one record can match
            record, error = rrh.check_for_0_or_1_records(api, response, error)
            records = [record]
        else:
            records, error = rrh.check_for_0_or_more_records(api, response, error)
        if error:
            msg = "Error in get_public_key: %s" % error
            self.module.fail_json(msg=msg)
        if records is None or records == [None]:
            # normalize "no match" to an empty list
            records = []
        # flatten {'account': {'name': 'some_name'}} into {'account': 'some_name'} to match input parameters
        return [dict([(k, v if k != 'account' else v['name']) for k, v in record.items()]) for record in records]

    def create_public_key(self):
        """Create a new public key for the account; index and comment are optional."""
        api = 'security/authentication/publickeys'
        body = {
            'account.name': self.parameters['account'],
            'public_key': self.parameters['public_key']
        }
        if self.parameters.get('vserver') is not None:
            # vserver is empty for cluster scoped accounts
            body['owner.name'] = self.parameters['vserver']
        for attr in ('comment', 'index'):
            value = self.parameters.get(attr)
            if value is not None:
                body[attr] = value

        dummy, error = self.rest_api.post(api, body)
        if error:
            msg = "Error in create_public_key: %s" % error
            self.module.fail_json(msg=msg)

    def modify_public_key(self, current, modify):
        """Update comment and/or public_key on an existing key.

        current: existing record - provides owner uuid, account and index for the URL.
        modify: attributes to change; only comment and public_key are supported.
        """
        # not supported in 2.6
        # sourcery skip: dict-comprehension
        api = 'security/authentication/publickeys/%s/%s/%d' % (current['owner']['uuid'], current['account'], current['index'])
        body = {}
        modify_copy = dict(modify)
        for key in modify:
            if key in ('comment', 'public_key'):
                body[key] = modify_copy.pop(key)
        if modify_copy:
            # anything left over is not a modifiable attribute
            msg = 'Error: attributes not supported in modify: %s' % modify_copy
            self.module.fail_json(msg=msg)
        if not body:
            msg = 'Error: nothing to change - modify called with: %s' % modify
            self.module.fail_json(msg=msg)
        if 'public_key' not in body:
            # if not present, REST API reports 502 Server Error: Proxy Error for url
            body['public_key'] = current['public_key']

        dummy, error = self.rest_api.patch(api, body)
        if error:
            msg = "Error in modify_public_key: %s" % error
            self.module.fail_json(msg=msg)

    def delete_public_key(self, current):
        """Delete the public key identified by owner uuid, account name and index."""
        api = 'security/authentication/publickeys/%s/%s/%d' % (current['owner']['uuid'], current['account'], current['index'])
        dummy, error = self.rest_api.delete(api)
        if error:
            msg = "Error in delete_public_key: %s" % error
            self.module.fail_json(msg=msg)

    def get_actions(self):
        """Determines whether a create, delete, modify action is required
        If index is provided, we expect to find 0 or 1 record.
        If index is not provided:
        1. As documented in ONTAP, a create without index should add a new public key.
           This is not idempotent, and this rules out a modify operation.
        2. When state is absent, if a single record is found, we assume a delete.
        3. When state is absent, if more than one record is found, a delete action is rejected with 1 exception:
           we added a delete_all option, so that all existing keys can be deleted.

        Returns (cd_action, modify, records).
        """
        cd_action, current, modify = None, None, None
        if self.parameters['state'] == 'present' and self.parameters.get('index') is None:
            # always create, by keeping current as None
            self.module.warn('Module is not idempotent if index is not provided with state=present.')
            records = []
        else:
            records = self.get_public_keys()
            if len(records) > 1:
                if self.parameters['state'] == 'absent' and self.parameters.get('delete_all'):
                    cd_action = 'delete_all'
                    self.na_helper.changed = True
                else:
                    msg = 'index is required as more than one public_key exists for user account %s: ' % self.parameters['account']
                    msg += str(records)
                    self.module.fail_json(msg='Error: %s' % msg)
            elif len(records) == 1:
                current = records[0]

        if cd_action is None:
            cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None:
            if current and 'comment' not in current:
                # force an entry as REST does not return anything if no comment was set
                current['comment'] = ''
            modify = self.na_helper.get_modified_attributes(current, self.parameters)

        return cd_action, modify, records

    def apply(self):
        """Run the module: compute required actions and execute them unless in check_mode."""
        cd_action, modify, records = self.get_actions()

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_public_key()
            elif cd_action in ('delete', 'delete_all'):
                # there is exactly 1 record for delete
                # and 2 or more records for delete_all
                for record in records:
                    self.delete_public_key(record)
            elif modify:
                # there is exactly 1 record for modify
                self.modify_public_key(records[0], modify)

        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Entry point: instantiate the module object and run it."""
    NetAppOntapPublicKey().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py
new file mode 100644
index 000000000..62499fc5e
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py
@@ -0,0 +1,323 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_qos_adaptive_policy_group
+short_description: NetApp ONTAP Adaptive Quality of Service policy group.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+version_added: 2.9.0
+author: NetApp Ansible Team (@joshedmonds) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create, destroy, modify, or rename an Adaptive QoS policy group on NetApp ONTAP. Module is based on the standard QoS policy group module.
+
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified policy group should exist or not.
+ default: 'present'
+ type: str
+
+ name:
+ description:
+ - The name of the policy group to manage.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ from_name:
+ description:
+ - Name of the existing policy group to be renamed to name.
+ type: str
+
+ absolute_min_iops:
+ description:
+ - Absolute minimum IOPS defined by this policy.
+ type: str
+
+ expected_iops:
+ description:
+ - Minimum expected IOPS defined by this policy.
+ type: str
+
+ peak_iops:
+ description:
+ - Maximum possible IOPS per allocated or used TB|GB.
+ type: str
+
+ peak_iops_allocation:
+ choices: ['allocated_space', 'used_space']
+ description:
+ - Whether peak_iops is specified by allocated or used space.
+ default: 'used_space'
+ type: str
+
+ force:
+ type: bool
+ default: False
+ description:
+ - Setting to 'true' forces the deletion of the workloads associated with the policy group along with the policy group.
+'''
+
+EXAMPLES = """
+ - name: create adaptive qos policy group
+ netapp.ontap.na_ontap_qos_adaptive_policy_group:
+ state: present
+ name: aq_policy_1
+ vserver: policy_vserver
+ absolute_min_iops: 70IOPS
+ expected_iops: 100IOPS/TB
+ peak_iops: 250IOPS/TB
+ peak_iops_allocation: allocated_space
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: modify adaptive qos policy group expected iops
+ netapp.ontap.na_ontap_qos_adaptive_policy_group:
+ state: present
+ name: aq_policy_1
+ vserver: policy_vserver
+ absolute_min_iops: 70IOPS
+ expected_iops: 125IOPS/TB
+ peak_iops: 250IOPS/TB
+ peak_iops_allocation: allocated_space
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: modify adaptive qos policy group peak iops allocation
+ netapp.ontap.na_ontap_qos_adaptive_policy_group:
+ state: present
+ name: aq_policy_1
+ vserver: policy_vserver
+ absolute_min_iops: 70IOPS
+ expected_iops: 125IOPS/TB
+ peak_iops: 250IOPS/TB
+ peak_iops_allocation: used_space
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: delete qos policy group
+ netapp.ontap.na_ontap_qos_adaptive_policy_group:
+ state: absent
+ name: aq_policy_1
+ vserver: policy_vserver
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
class NetAppOntapAdaptiveQosPolicyGroup:
    """
    Create, delete, modify and rename an adaptive QoS policy group (ZAPI only).
    """
    def __init__(self):
        """
        Initialize the Ontap qos policy group class: argument spec, deprecation
        fallback to ZAPI, and ZAPI server connection.
        """
        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            from_name=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
            absolute_min_iops=dict(required=False, type='str'),
            expected_iops=dict(required=False, type='str'),
            peak_iops=dict(required=False, type='str'),
            peak_iops_allocation=dict(choices=['allocated_space', 'used_space'], default='used_space'),
            force=dict(required=False, type='bool', default=False)
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # this module is deprecated in favor of na_ontap_qos_policy_group (REST)
        self.na_helper.module_replaces('na_ontap_qos_policy_group', self.module)
        msg = 'The module only supports ZAPI and is deprecated; netapp.ontap.na_ontap_qos_policy_group should be used instead.'
        self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)

        if not netapp_utils.has_netapp_lib():
            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_policy_group(self, policy_group_name=None):
        """
        Return details of a policy group.
        :param policy_group_name: policy group name, defaults to self.parameters['name'].
        :return: policy group details, or None when not found.
        :rtype: dict.
        """
        if policy_group_name is None:
            policy_group_name = self.parameters['name']
        policy_group_get_iter = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-get-iter')
        policy_group_info = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-info')
        policy_group_info.add_new_child('policy-group', policy_group_name)
        policy_group_info.add_new_child('vserver', self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(policy_group_info)
        policy_group_get_iter.add_child_elem(query)
        result = self.server.invoke_successfully(policy_group_get_iter, True)
        policy_group_detail = None

        # only accept an exact single match
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
            policy_info = result.get_child_by_name('attributes-list').get_child_by_name('qos-adaptive-policy-group-info')

            policy_group_detail = {
                'name': policy_info.get_child_content('policy-group'),
                'vserver': policy_info.get_child_content('vserver'),
                'absolute_min_iops': policy_info.get_child_content('absolute-min-iops'),
                'expected_iops': policy_info.get_child_content('expected-iops'),
                'peak_iops': policy_info.get_child_content('peak-iops'),
                'peak_iops_allocation': policy_info.get_child_content('peak-iops-allocation')
            }
        return policy_group_detail

    def create_policy_group(self):
        """
        create a policy group name.
        """
        policy_group = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-create')
        policy_group.add_new_child('policy-group', self.parameters['name'])
        policy_group.add_new_child('vserver', self.parameters['vserver'])
        if self.parameters.get('absolute_min_iops'):
            policy_group.add_new_child('absolute-min-iops', self.parameters['absolute_min_iops'])
        if self.parameters.get('expected_iops'):
            policy_group.add_new_child('expected-iops', self.parameters['expected_iops'])
        if self.parameters.get('peak_iops'):
            policy_group.add_new_child('peak-iops', self.parameters['peak_iops'])
        if self.parameters.get('peak_iops_allocation'):
            policy_group.add_new_child('peak-iops-allocation', self.parameters['peak_iops_allocation'])
        try:
            self.server.invoke_successfully(policy_group, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating adaptive qos policy group %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_policy_group(self, policy_group=None):
        """
        delete an existing policy group.
        :param policy_group: policy group name, defaults to self.parameters['name'].
        """
        if policy_group is None:
            policy_group = self.parameters['name']
        policy_group_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-delete')
        policy_group_obj.add_new_child('policy-group', policy_group)
        if self.parameters.get('force'):
            # force deletes the associated workloads along with the policy group
            policy_group_obj.add_new_child('force', str(self.parameters['force']))
        try:
            self.server.invoke_successfully(policy_group_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting adaptive qos policy group %s: %s' %
                                  (policy_group, to_native(error)),
                                  exception=traceback.format_exc())

    def modify_policy_group(self):
        """
        Modify policy group.  Sends all throughput options currently set in
        self.parameters in a single ZAPI call.
        """
        policy_group_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-modify')
        policy_group_obj.add_new_child('policy-group', self.parameters['name'])
        if self.parameters.get('absolute_min_iops'):
            policy_group_obj.add_new_child('absolute-min-iops', self.parameters['absolute_min_iops'])
        if self.parameters.get('expected_iops'):
            policy_group_obj.add_new_child('expected-iops', self.parameters['expected_iops'])
        if self.parameters.get('peak_iops'):
            policy_group_obj.add_new_child('peak-iops', self.parameters['peak_iops'])
        if self.parameters.get('peak_iops_allocation'):
            policy_group_obj.add_new_child('peak-iops-allocation', self.parameters['peak_iops_allocation'])
        try:
            self.server.invoke_successfully(policy_group_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying adaptive qos policy group %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def rename_policy_group(self):
        """
        Rename policy group name (from_name -> name).
        """
        rename_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-rename')
        rename_obj.add_new_child('new-name', self.parameters['name'])
        rename_obj.add_new_child('policy-group-name', self.parameters['from_name'])
        try:
            self.server.invoke_successfully(rename_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error renaming adaptive qos policy group %s: %s' %
                                  (self.parameters['from_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_helper(self, modify):
        """
        helper method to modify policy group.
        :param modify: modified attributes.

        A single qos-adaptive-policy-group-modify call already carries every
        throughput option, so it is issued at most once - previously it was
        repeated once per changed attribute, sending redundant ZAPI requests.
        """
        if any(attribute in ('absolute_min_iops', 'expected_iops', 'peak_iops', 'peak_iops_allocation')
               for attribute in modify):
            self.modify_policy_group()

    def apply(self):
        """
        Run module based on playbook
        """
        current, rename = self.get_policy_group(), None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action == 'create' and self.parameters.get('from_name'):
            # form current with from_name.
            current = self.get_policy_group(self.parameters['from_name'])
            if current is None:
                self.module.fail_json(msg='Error: qos adaptive policy igroup with from_name=%s not found' % self.parameters.get('from_name'))
            # allow for rename and check for modify with current from from_name.
            rename, cd_action = True, None
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
        if self.na_helper.changed and not self.module.check_mode:
            if rename:
                self.rename_policy_group()
            if cd_action == 'create':
                self.create_policy_group()
            elif cd_action == 'delete':
                self.delete_policy_group()
            elif modify:
                self.modify_helper(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    '''Apply vserver operations from playbook'''
    NetAppOntapAdaptiveQosPolicyGroup().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py
new file mode 100644
index 000000000..8628efd46
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py
@@ -0,0 +1,579 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_qos_policy_group
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_qos_policy_group
+short_description: NetApp ONTAP manage policy group in Quality of Service.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create, destroy, modify, or rename QoS policy group on NetApp ONTAP.
+ - With ZAPI, only fixed QoS policy group is supported.
+ - With REST, both fixed and adaptive QoS policy group are supported.
+
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified policy group should exist or not.
+ default: 'present'
+ type: str
+
+ name:
+ description:
+ - The name of the policy group to manage.
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the existing policy group to be renamed to name.
+ type: str
+
+ max_throughput:
+ description:
+ - Maximum throughput defined by this policy.
+ - Not supported with REST, use C(fixed_qos_options).
+ type: str
+
+ min_throughput:
+ description:
+ - Minimum throughput defined by this policy.
+ - Not supported with REST, use C(fixed_qos_options).
+ type: str
+
+ is_shared:
+ description:
+ - Whether the SLOs of the policy group are shared between the workloads or if the SLOs are applied separately to each workload.
+ - Not supported with REST, use C(fixed_qos_options).
+ type: bool
+ version_added: 20.12.0
+
+ force:
+ type: bool
+ description:
+ - Setting to 'true' forces the deletion of the workloads associated with the policy group along with the policy group.
+ - Not supported with REST.
+
+ fixed_qos_options:
+ version_added: 21.19.0
+ type: dict
+ description:
+ - Set Minimum and Maximum throughput defined by this policy.
+ - Only supported with REST.
+ - Required one of throughtput options when creating qos_policy.
+ suboptions:
+ capacity_shared:
+ description:
+ - Whether the SLOs of the policy group are shared between the workloads or if the SLOs are applied separately to each workload.
+ - Default value is False if not used in creating qos policy.
+ type: bool
+ max_throughput_iops:
+ description:
+ - Maximum throughput defined by this policy. It is specified in terms of IOPS.
+ - 0 means no maximum throughput is enforced.
+ type: int
+ max_throughput_mbps:
+ description:
+ - Maximum throughput defined by this policy. It is specified in terms of Mbps.
+ - 0 means no maximum throughput is enforced.
+ type: int
+ min_throughput_iops:
+ description:
+ - Minimum throughput defined by this policy. It is specified in terms of IOPS.
+ - 0 means no minimum throughput is enforced.
+ - These floors are not guaranteed on non-AFF platforms or when FabricPool tiering policies are set.
+ type: int
+ min_throughput_mbps:
+ description:
+ - Minimum throughput defined by this policy. It is specified in terms of Mbps.
+ - 0 means no minimum throughput is enforced.
+ - Requires ONTAP 9.8 or later, and REST support.
+ type: int
+
+ adaptive_qos_options:
+ version_added: 21.19.0
+ type: dict
+ description:
+ - Adaptive QoS policy-groups define measurable service level objectives (SLOs) that adjust based on the storage object used space
+ and the storage object allocated space.
+ - Only supported with REST.
+ suboptions:
+ absolute_min_iops:
+ description:
+ - Specifies the absolute minimum IOPS that is used as an override when the expected_iops is less than this value.
+ - These floors are not guaranteed on non-AFF platforms or when FabricPool tiering policies are set.
+ type: int
+ required: true
+ expected_iops:
+ description:
+ - Expected IOPS. Specifies the minimum expected IOPS per TB allocated based on the storage object allocated size.
+ - These floors are not guaranteed on non-AFF platforms or when FabricPool tiering policies are set.
+ type: int
+ required: true
+ peak_iops:
+ description:
+ - Peak IOPS. Specifies the maximum possible IOPS per TB allocated based on the storage object allocated size or
+ the storage object used size.
+ type: int
+ required: true
+ block_size:
+ description:
+ - Specifies the block size.
+ - Requires ONTAP 9.10.1 or later.
+ type: str
+ required: false
+ choices: ['any', '4k', '8k', '16k', '32k', '64k', '128k']
+ version_added: 22.6.0
+'''
+
+EXAMPLES = """
+ - name: create qos policy group in ZAPI.
+ netapp.ontap.na_ontap_qos_policy_group:
+ state: present
+ name: policy_1
+ vserver: policy_vserver
+ max_throughput: 800KB/s,800iops
+ min_throughput: 100iops
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+ use_rest: never
+
+ - name: modify qos policy group max throughput in ZAPI.
+ netapp.ontap.na_ontap_qos_policy_group:
+ state: present
+ name: policy_1
+ vserver: policy_vserver
+ max_throughput: 900KB/s,800iops
+ min_throughput: 100iops
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+ use_rest: never
+
+ - name: delete qos policy group
+ netapp.ontap.na_ontap_qos_policy_group:
+ state: absent
+ name: policy_1
+ vserver: policy_vserver
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+
+ - name: create qos policy group in REST.
+ netapp.ontap.na_ontap_qos_policy_group:
+ state: present
+ name: policy_1
+ vserver: policy_vserver
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+ use_rest: always
+ fixed_qos_options:
+ max_throughput_iops: 800
+ max_throughput_mbps: 200
+ min_throughput_iops: 500
+ min_throughput_mbps: 100
+ capacity_shared: True
+
+ - name: modify qos policy max_throughput in REST.
+ netapp.ontap.na_ontap_qos_policy_group:
+ state: present
+ name: policy_1
+ vserver: policy_vserver
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+ use_rest: always
+ fixed_qos_options:
+ max_throughput_iops: 1000
+ max_throughput_mbps: 300
+
+ - name: create adaptive qos policy group in REST.
+ netapp.ontap.na_ontap_qos_policy_group:
+ state: present
+ name: adaptive_policy
+ vserver: policy_vserver
+ hostname: 10.193.78.30
+ username: admin
+ password: netapp1!
+ use_rest: always
+ adaptive_qos_options:
+ absolute_min_iops: 100
+ expected_iops: 200
+ peak_iops: 500
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapQosPolicyGroup:
    """
    Create, delete, modify and rename a policy group.

    Throughput limits are expressed either as ZAPI-style strings
    (max_throughput/min_throughput) or as REST-style dictionaries
    (fixed_qos_options/adaptive_qos_options); the argument spec marks
    the two styles mutually exclusive.
    """
    def __init__(self):
        """
        Initialize the Ontap qos policy group class.

        Builds the argument spec, decides between REST and ZAPI, enforces
        minimum ONTAP versions for version-gated REST options, and creates
        the ZAPI server connection when REST is not used.
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            from_name=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
            max_throughput=dict(required=False, type='str'),
            min_throughput=dict(required=False, type='str'),
            is_shared=dict(required=False, type='bool'),
            force=dict(required=False, type='bool'),
            fixed_qos_options=dict(required=False, type='dict', options=dict(
                capacity_shared=dict(required=False, type='bool'),
                max_throughput_iops=dict(required=False, type='int'),
                max_throughput_mbps=dict(required=False, type='int'),
                min_throughput_iops=dict(required=False, type='int'),
                min_throughput_mbps=dict(required=False, type='int')
            )),
            adaptive_qos_options=dict(required=False, type='dict', options=dict(
                absolute_min_iops=dict(required=True, type='int'),
                expected_iops=dict(required=True, type='int'),
                peak_iops=dict(required=True, type='int'),
                block_size=dict(required=False, type='str', choices=['any', '4k', '8k', '16k', '32k', '64k', '128k'])
            ))
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            # ZAPI-style throughput strings and REST-style option dicts cannot be mixed.
            mutually_exclusive=[
                ['max_throughput', 'fixed_qos_options'],
                ['min_throughput', 'fixed_qos_options'],
                ['max_throughput', 'adaptive_qos_options'],
                ['min_throughput', 'adaptive_qos_options'],
                ['fixed_qos_options', 'adaptive_qos_options'],
                ['is_shared', 'adaptive_qos_options'],
                ['is_shared', 'fixed_qos_options']
            ]
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # Set up Rest API
        self.rest_api = OntapRestAPI(self.module)
        # these options only exist in ZAPI; supplying any of them forces the ZAPI code path.
        unsupported_rest_properties = ['is_shared', 'max_throughput', 'min_throughput', 'force']
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)

        # version gates only matter when creating/modifying (state == present).
        if self.use_rest and self.parameters['state'] == 'present':
            if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8) and \
                    self.na_helper.safe_get(self.parameters, ['fixed_qos_options', 'min_throughput_mbps']):
                self.module.fail_json(msg="Minimum version of ONTAP for 'fixed_qos_options.min_throughput_mbps' is (9, 8, 0)")
            if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1) and \
                    self.na_helper.safe_get(self.parameters, ['adaptive_qos_options', 'block_size']):
                self.module.fail_json(msg="Minimum version of ONTAP for 'adaptive_qos_options.block_size' is (9, 10, 1)")
        # uuid of the policy group, set by get_policy_group_rest and used by REST PATCH/DELETE.
        self.uuid = None

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            # adaptive policies have a dedicated ZAPI module.
            if 'adaptive_qos_options' in self.parameters:
                self.module.fail_json(msg="Error: use 'na_ontap_qos_adaptive_policy_group' module for create/modify/delete adaptive policy with ZAPI")
            if 'fixed_qos_options' in self.parameters and self.parameters['state'] == 'present':
                self.module.fail_json(msg="Error: 'fixed_qos_options' not supported with ZAPI, use 'max_throughput' and 'min_throughput'")
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
            # default value for force is false in ZAPI.
            self.parameters['force'] = False

    def get_policy_group(self, policy_group_name=None):
        """
        Return details of a policy group.
        :param policy_group_name: policy group name, defaults to self.parameters['name'].
        :return: policy group details, or None if not found.
        :rtype: dict.
        """
        if policy_group_name is None:
            policy_group_name = self.parameters['name']
        if self.use_rest:
            return self.get_policy_group_rest(policy_group_name)
        policy_group_get_iter = netapp_utils.zapi.NaElement('qos-policy-group-get-iter')
        policy_group_info = netapp_utils.zapi.NaElement('qos-policy-group-info')
        policy_group_info.add_new_child('policy-group', policy_group_name)
        policy_group_info.add_new_child('vserver', self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(policy_group_info)
        policy_group_get_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(policy_group_get_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching qos policy group %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        policy_group_detail = None

        # only accept an exact, single match.
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
            policy_info = result.get_child_by_name('attributes-list').get_child_by_name('qos-policy-group-info')

            policy_group_detail = {
                'name': policy_info.get_child_content('policy-group'),
                'vserver': policy_info.get_child_content('vserver'),
                'max_throughput': policy_info.get_child_content('max-throughput'),
                'min_throughput': policy_info.get_child_content('min-throughput'),
                'is_shared': self.na_helper.get_value_for_bool(True, policy_info.get_child_content('is-shared'))
            }
        return policy_group_detail

    def get_policy_group_rest(self, policy_group_name):
        """
        Return details of a policy group using REST, or None if not found.

        Side effect: caches the policy uuid in self.uuid for later PATCH/DELETE calls.
        """
        api = 'storage/qos/policies'
        query = {
            'name': policy_group_name,
            'svm.name': self.parameters['vserver']
        }
        fields = 'name,svm'
        # only request the option group the user is managing.
        if 'fixed_qos_options' in self.parameters:
            fields += ',fixed'
        elif 'adaptive_qos_options' in self.parameters:
            fields += ',adaptive'
        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
        if error:
            self.module.fail_json(msg='Error fetching qos policy group %s: %s' %
                                  (self.parameters['name'], error))
        current = None
        if record:
            self.uuid = record['uuid']
            current = {
                'name': record['name'],
                'vserver': record['svm']['name']
            }

            if 'fixed' in record:
                current['fixed_qos_options'] = {}
                for fixed_qos_option in ['capacity_shared', 'max_throughput_iops', 'max_throughput_mbps', 'min_throughput_iops']:
                    current['fixed_qos_options'][fixed_qos_option] = record['fixed'].get(fixed_qos_option)
                # min_throughput_mbps is version gated (ONTAP 9.8+), only compare it when requested.
                if self.na_helper.safe_get(self.parameters, ['fixed_qos_options', 'min_throughput_mbps']):
                    current['fixed_qos_options']['min_throughput_mbps'] = record['fixed'].get('min_throughput_mbps')

            if 'adaptive' in record:
                current['adaptive_qos_options'] = {}
                for adaptive_qos_option in ['absolute_min_iops', 'expected_iops', 'peak_iops', 'block_size']:
                    current['adaptive_qos_options'][adaptive_qos_option] = record['adaptive'].get(adaptive_qos_option)
        return current

    def create_policy_group(self):
        """
        create a policy group name.
        """
        if self.use_rest:
            return self.create_policy_group_rest()
        policy_group = netapp_utils.zapi.NaElement('qos-policy-group-create')
        policy_group.add_new_child('policy-group', self.parameters['name'])
        policy_group.add_new_child('vserver', self.parameters['vserver'])
        if self.parameters.get('max_throughput'):
            policy_group.add_new_child('max-throughput', self.parameters['max_throughput'])
        if self.parameters.get('min_throughput'):
            policy_group.add_new_child('min-throughput', self.parameters['min_throughput'])
        if self.parameters.get('is_shared') is not None:
            policy_group.add_new_child('is-shared', self.na_helper.get_value_for_bool(False, self.parameters['is_shared']))
        try:
            self.server.invoke_successfully(policy_group, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating qos policy group %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def create_policy_group_rest(self):
        """
        Create a fixed or adaptive policy group using REST.
        """
        api = 'storage/qos/policies'
        body = {
            'name': self.parameters['name'],
            'svm.name': self.parameters['vserver']
        }
        if 'fixed_qos_options' in self.parameters:
            body['fixed'] = self.na_helper.filter_out_none_entries(self.parameters['fixed_qos_options'])
            # default value for capacity_shared is False in REST.
            if self.na_helper.safe_get(body, ['fixed', 'capacity_shared']) is None:
                body['fixed']['capacity_shared'] = False
        else:
            body['adaptive'] = self.na_helper.filter_out_none_entries(self.parameters['adaptive_qos_options'])
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg='Error creating qos policy group %s: %s' %
                                  (self.parameters['name'], error))

    def delete_policy_group(self, policy_group=None):
        """
        delete an existing policy group.
        :param policy_group: policy group name, defaults to self.parameters['name'].
        """
        if self.use_rest:
            return self.delete_policy_group_rest()
        if policy_group is None:
            policy_group = self.parameters['name']
        policy_group_obj = netapp_utils.zapi.NaElement('qos-policy-group-delete')
        policy_group_obj.add_new_child('policy-group', policy_group)
        if self.parameters.get('force'):
            policy_group_obj.add_new_child('force', str(self.parameters['force']))
        try:
            self.server.invoke_successfully(policy_group_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting qos policy group %s: %s' %
                                  (policy_group, to_native(error)),
                                  exception=traceback.format_exc())

    def delete_policy_group_rest(self):
        """
        Delete a policy group using REST; relies on self.uuid set by get_policy_group_rest.
        """
        api = 'storage/qos/policies'
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid)
        if error:
            self.module.fail_json(msg='Error deleting qos policy group %s: %s' %
                                  (self.parameters['name'], error))

    def modify_policy_group(self, modify):
        """
        Modify policy group.
        :param modify: dict of changed attributes (from get_modified_attributes).
        """
        if self.use_rest:
            return self.modify_policy_group_rest(modify)
        policy_group_obj = netapp_utils.zapi.NaElement('qos-policy-group-modify')
        policy_group_obj.add_new_child('policy-group', self.parameters['name'])
        if self.parameters.get('max_throughput'):
            policy_group_obj.add_new_child('max-throughput', self.parameters['max_throughput'])
        if self.parameters.get('min_throughput'):
            policy_group_obj.add_new_child('min-throughput', self.parameters['min_throughput'])
        try:
            self.server.invoke_successfully(policy_group_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying qos policy group %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_policy_group_rest(self, modify):
        """
        Modify a policy group using REST.

        For fixed policies only the changed attributes are sent; for adaptive
        policies the full desired option set is sent.
        """
        api = 'storage/qos/policies'
        body = {}
        if 'fixed_qos_options' in modify:
            body['fixed'] = modify['fixed_qos_options']
        else:
            body['adaptive'] = self.parameters['adaptive_qos_options']
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
        if error:
            self.module.fail_json(msg='Error modifying qos policy group %s: %s' %
                                  (self.parameters['name'], error))

    def rename_policy_group(self):
        """
        Rename policy group name.
        """
        if self.use_rest:
            return self.rename_policy_group_rest()
        rename_obj = netapp_utils.zapi.NaElement('qos-policy-group-rename')
        rename_obj.add_new_child('new-name', self.parameters['name'])
        rename_obj.add_new_child('policy-group-name', self.parameters['from_name'])
        try:
            self.server.invoke_successfully(rename_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error renaming qos policy group %s: %s' %
                                  (self.parameters['from_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def rename_policy_group_rest(self):
        """
        Rename a policy group using REST; a PATCH on the uuid with the new name.
        """
        api = 'storage/qos/policies'
        body = {'name': self.parameters['name']}
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
        if error:
            self.module.fail_json(msg='Error renaming qos policy group %s: %s' %
                                  (self.parameters['from_name'], error))

    def modify_helper(self, modify):
        """
        helper method to modify policy group.
        :param modify: modified attributes.
        """
        # only throughput-related attributes are modifiable; anything else is ignored here.
        if any(
            attribute in modify
            for attribute in ['max_throughput', 'min_throughput', 'fixed_qos_options', 'adaptive_qos_options']
        ):
            self.modify_policy_group(modify)

    def validate_adaptive_or_fixed_qos_options(self):
        """
        Return True when a REST create request lacks the required throughput
        options, None otherwise.
        """
        error = None
        # one of the fixed throughput option required in create qos_policy.
        if 'fixed_qos_options' in self.parameters:
            fixed_options = ['max_throughput_iops', 'max_throughput_mbps', 'min_throughput_iops', 'min_throughput_mbps']
            if not any(x in self.na_helper.filter_out_none_entries(self.parameters['fixed_qos_options']) for x in fixed_options):
                error = True
        # error if both fixed_qos_options or adaptive_qos_options not present in creating qos policy.
        elif self.parameters.get('fixed_qos_options', self.parameters.get('adaptive_qos_options')) is None:
            error = True
        return error

    def apply(self):
        """
        Run module based on playbook
        """
        current = self.get_policy_group()
        rename, cd_action = None, None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action == 'create' and self.parameters.get('from_name'):
            # create policy by renaming an existing one
            old_policy = self.get_policy_group(self.parameters['from_name'])
            rename = self.na_helper.is_rename_action(old_policy, current)
            if rename:
                # treat the rename source as the current state so differences become modify.
                current = old_policy
                cd_action = None
            if rename is None:
                # neither from_name nor name exists: nothing to rename from.
                self.module.fail_json(msg='Error renaming qos policy group: cannot find %s' %
                                      self.parameters['from_name'])
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else {}
        # sharing cannot be changed on an existing policy group.
        if 'is_shared' in modify or self.na_helper.safe_get(modify, ['fixed_qos_options', 'capacity_shared']) is not None:
            self.module.fail_json(msg="Error cannot modify '%s' attribute." %
                                  ('is_shared' if 'is_shared' in modify else 'fixed_qos_options.capacity_shared'))
        if self.use_rest and cd_action == 'create' and self.validate_adaptive_or_fixed_qos_options():
            error = "Error: atleast one throughput in 'fixed_qos_options' or all 'adaptive_qos_options' required in creating qos_policy in REST."
            self.module.fail_json(msg=error)
        if self.na_helper.changed and not self.module.check_mode:
            if rename:
                self.rename_policy_group()
            if cd_action == 'create':
                self.create_policy_group()
            elif cd_action == 'delete':
                self.delete_policy_group()
            elif modify:
                self.modify_helper(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    '''Apply qos policy group operations from playbook'''
    # docstring fixed: this module manages qos policy groups, not vservers.
    qos_policy_group = NetAppOntapQosPolicyGroup()
    qos_policy_group.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py
new file mode 100644
index 000000000..3451078d7
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py
@@ -0,0 +1,462 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_qtree
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_qtree
+
+short_description: NetApp ONTAP manage qtrees
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create/Modify/Delete Qtrees.
+
+options:
+
+ state:
+ description:
+ - Whether the specified qtree should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - The name of the qtree to manage.
+ - With REST, this can also be a path.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the qtree to be renamed.
+ version_added: 2.7.0
+ type: str
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the qtree should exist on.
+ required: true
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+ export_policy:
+ description:
+ - The name of the export policy to apply.
+ version_added: 2.9.0
+ type: str
+
+ security_style:
+ description:
+ - The security style for the qtree.
+ choices: ['unix', 'ntfs', 'mixed']
+ type: str
+ version_added: 2.9.0
+
+ oplocks:
+ description:
+ - Whether the oplocks should be enabled or not for the qtree.
+ choices: ['enabled', 'disabled']
+ type: str
+ version_added: 2.9.0
+
+ unix_permissions:
+ description:
+ - File permissions bits of the qtree.
+ - Accepts either octal or string format.
+ - Examples 0777, 777 in octal and ---rwxrwxrwx, sstrwxrwxrwx, rwxrwxrwx in string format.
+ version_added: 2.9.0
+ type: str
+
+ force_delete:
+ description:
+ - Whether the qtree should be deleted even if files still exist.
+      - Note that the default of true reflects the REST API behavior.
+      - A value of false is not supported with REST.
+ type: bool
+ default: true
+ version_added: 20.8.0
+
+ wait_for_completion:
+ description:
+ - Only applicable for REST. When using ZAPI, the deletion is always synchronous.
+ - Deleting a qtree may take time if many files need to be deleted.
+ - Set this parameter to 'true' for synchronous execution during delete.
+ - Set this parameter to 'false' for asynchronous execution.
+      - For asynchronous execution, the module exits as soon as the request is sent, and the qtree is deleted in the background.
+ type: bool
+ default: true
+ version_added: 2.9.0
+
+ time_out:
+ description:
+ - Maximum time to wait for qtree deletion in seconds when wait_for_completion is True.
+ - Error out if task is not completed in defined time.
+ - Default is set to 3 minutes.
+ default: 180
+ type: int
+ version_added: 2.9.0
+
+ unix_user:
+ description:
+ - The user set as owner of the qtree.
+ - Only supported with REST and ONTAP 9.9 or later.
+ type: str
+ version_added: 21.21.0
+
+ unix_group:
+ description:
+ - The group set as owner of the qtree.
+ - Only supported with REST and ONTAP 9.9 or later.
+ type: str
+ version_added: 21.21.0
+
+'''
+
+EXAMPLES = """
+- name: Create Qtrees.
+ netapp.ontap.na_ontap_qtree:
+ state: present
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ export_policy: policyName
+ security_style: mixed
+ oplocks: disabled
+ unix_permissions: 0777
+ vserver: ansibleVServer
+ unix_user: user1
+ unix_group: group1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Rename Qtrees.
+ netapp.ontap.na_ontap_qtree:
+ state: present
+ from_name: ansibleQTree
+ name: ansibleQTree_rename
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: modify Qtrees unix_permissions using string format.
+ netapp.ontap.na_ontap_qtree:
+ state: present
+ name: ansibleQTree_rename
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ unix_permissions: sstrwxrwxrwx
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: delete Qtrees.
+ netapp.ontap.na_ontap_qtree:
+ state: absent
+ name: ansibleQTree_rename
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapQTree:
    '''Class with qtree operations'''

    def __init__(self):
        '''Build the argument spec, select REST or ZAPI, and set up the connection.'''
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            from_name=dict(required=False, type='str'),
            flexvol_name=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
            export_policy=dict(required=False, type='str'),
            security_style=dict(required=False, type='str', choices=['unix', 'ntfs', 'mixed']),
            oplocks=dict(required=False, type='str', choices=['enabled', 'disabled']),
            unix_permissions=dict(required=False, type='str'),
            force_delete=dict(required=False, type='bool', default=True),
            wait_for_completion=dict(required=False, type='bool', default=True),
            time_out=dict(required=False, type='int', default=180),
            unix_user=dict(required=False, type='str'),
            unix_group=dict(required=False, type='str')
        ))
        # volume uuid and qtree id are filled in by get_qtree (REST only) and
        # used to build REST PATCH/DELETE URLs.
        self.volume_uuid, self.qid = None, None
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['flexvol_name'])
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = OntapRestAPI(self.module)
        # oplocks is ZAPI only; unix_user/unix_group require REST on ONTAP 9.9+.
        unsupported_rest_properties = ['oplocks']
        partially_supported_rest_properties = [['unix_user', (9, 9)], ['unix_group', (9, 9)]]
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_qtree(self, name=None):
        """
        Checks if the qtree exists.
        :param:
            name : qtree name, defaults to self.parameters['name'].
        :return:
            Details about the qtree
            None if qtree is not found
        :rtype: dict or None
        """
        if name is None:
            name = self.parameters['name']
        if self.use_rest:
            api = "storage/qtrees"
            query = {'fields': 'export_policy,unix_permissions,security_style,volume',
                     'svm.name': self.parameters['vserver'],
                     'volume': self.parameters['flexvol_name'],
                     # quoting the name forces an exact match in the REST query
                     # (with REST, the name may be a path with special characters).
                     'name': '"' + name + '"'}
            if 'unix_user' in self.parameters:
                query['fields'] += ',user.name'
            if 'unix_group' in self.parameters:
                query['fields'] += ',group.name'
            record, error = rest_generic.get_one_record(self.rest_api, api, query)
            if error:
                msg = "Error fetching qtree: %s" % error
                self.module.fail_json(msg=msg)
            if record:
                self.volume_uuid = record['volume']['uuid']
                self.qid = str(record['id'])
                return {
                    'name': record['name'],
                    'export_policy': self.na_helper.safe_get(record, ['export_policy', 'name']),
                    'security_style': self.na_helper.safe_get(record, ['security_style']),
                    'unix_permissions': str(self.na_helper.safe_get(record, ['unix_permissions'])),
                    'unix_user': self.na_helper.safe_get(record, ['user', 'name']),
                    'unix_group': self.na_helper.safe_get(record, ['group', 'name'])
                }
            return None

        qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter')
        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
            'qtree-info', **{'vserver': self.parameters['vserver'],
                             'volume': self.parameters['flexvol_name'],
                             'qtree': name})
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        qtree_list_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(qtree_list_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching qtree: %s' % to_native(error),
                                  exception=traceback.format_exc())
        return_q = None
        if (result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1):
            return_q = {'export_policy': result['attributes-list']['qtree-info']['export-policy'],
                        'oplocks': result['attributes-list']['qtree-info']['oplocks'],
                        'security_style': result['attributes-list']['qtree-info']['security-style']}

            # 'mode' may be absent from the ZAPI reply; normalize to '' for comparison.
            value = self.na_helper.safe_get(result, ['attributes-list', 'qtree-info', 'mode'])
            return_q['unix_permissions'] = value if value is not None else ''

        return return_q

    def create_qtree(self):
        """
        Create a qtree
        """
        if self.use_rest:
            api = "storage/qtrees"
            body = {'volume': {'name': self.parameters['flexvol_name']},
                    'svm': {'name': self.parameters['vserver']}}
            body.update(self.form_create_modify_body_rest())
            query = dict(return_timeout=10)
            dummy, error = rest_generic.post_async(self.rest_api, api, body, query)
            if error:
                if "job reported error:" in error and "entry doesn't exist" in error:
                    # ignore RBAC issue with FSx - BURT1525998
                    self.module.warn('Ignoring job status, assuming success.')
                    return
                self.module.fail_json(msg='Error creating qtree %s: %s' % (self.parameters['name'], error))
        else:
            self.create_or_modify_qtree_zapi('qtree-create', "Error creating qtree %s: %s")

    def delete_qtree(self):
        """
        Delete a qtree
        """
        if self.use_rest:
            api = "storage/qtrees/%s" % self.volume_uuid
            query = {'return_timeout': 3}
            response, error = rest_generic.delete_async(self.rest_api, api, self.qid, query)
            # with wait_for_completion, poll the deletion job to surface late errors.
            if self.parameters['wait_for_completion']:
                dummy, error = rrh.check_for_error_and_job_results(api, response, error, self.rest_api)
            if error:
                self.module.fail_json(msg='Error deleting qtree %s: %s' % (self.parameters['name'], error))

        else:
            path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name'])
            options = {'qtree': path}
            if self.parameters['force_delete']:
                options['force'] = "true"
            qtree_delete = netapp_utils.zapi.NaElement.create_node_with_children(
                'qtree-delete', **options)

            try:
                self.server.invoke_successfully(qtree_delete,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg="Error deleting qtree %s: %s" % (path, to_native(error)),
                                      exception=traceback.format_exc())

    def rename_qtree(self):
        """
        Rename a qtree
        """
        if self.use_rest:
            # with REST the qtree id is the key, so a rename is handled as a modify.
            error = 'Internal error, use modify with REST'
            self.module.fail_json(msg=error)
        else:
            path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['from_name'])
            new_path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name'])
            qtree_rename = netapp_utils.zapi.NaElement.create_node_with_children(
                'qtree-rename', **{'qtree': path,
                                   'new-qtree-name': new_path})

            try:
                self.server.invoke_successfully(qtree_rename,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg="Error renaming qtree %s: %s"
                                      % (self.parameters['from_name'], to_native(error)),
                                      exception=traceback.format_exc())

    def modify_qtree(self):
        """
        Modify a qtree
        """
        if self.use_rest:
            body = self.form_create_modify_body_rest()
            api = "storage/qtrees/%s" % self.volume_uuid
            query = dict(return_timeout=10)
            dummy, error = rest_generic.patch_async(self.rest_api, api, self.qid, body, query)
            if error:
                self.module.fail_json(msg='Error modifying qtree %s: %s' % (self.parameters['name'], error))
        else:
            self.create_or_modify_qtree_zapi('qtree-modify', 'Error modifying qtree %s: %s')

    def create_or_modify_qtree_zapi(self, zapi_request_name, error_message):
        """
        Shared ZAPI helper for qtree-create and qtree-modify.
        :param zapi_request_name: ZAPI request name ('qtree-create' or 'qtree-modify').
        :param error_message: printf-style message with (name, error) placeholders.
        """
        options = {'qtree': self.parameters['name'], 'volume': self.parameters['flexvol_name']}

        if self.parameters.get('export_policy'):
            options['export-policy'] = self.parameters['export_policy']
        if self.parameters.get('security_style'):
            options['security-style'] = self.parameters['security_style']
        if self.parameters.get('oplocks'):
            options['oplocks'] = self.parameters['oplocks']
        if self.parameters.get('unix_permissions'):
            options['mode'] = self.parameters['unix_permissions']
        zapi_request = netapp_utils.zapi.NaElement.create_node_with_children(zapi_request_name, **options)

        try:
            self.server.invoke_successfully(zapi_request, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg=(error_message % (self.parameters['name'], to_native(error))), exception=traceback.format_exc())

    def form_create_modify_body_rest(self):
        """
        Build the REST body shared by create and modify from the module parameters.
        :return: dict suitable for POST/PATCH on storage/qtrees.
        """
        body = {'name': self.parameters['name']}
        if self.parameters.get('security_style'):
            body['security_style'] = self.parameters['security_style']
        if self.parameters.get('unix_permissions'):
            body['unix_permissions'] = self.parameters['unix_permissions']
        if self.parameters.get('export_policy'):
            body['export_policy'] = {'name': self.parameters['export_policy']}
        if self.parameters.get('unix_user'):
            body['user'] = {'name': self.parameters['unix_user']}
        if self.parameters.get('unix_group'):
            body['group'] = {'name': self.parameters['unix_group']}
        return body

    def apply(self):
        '''Call create/delete/modify/rename operations'''
        current = self.get_qtree()
        rename, cd_action, modify = None, None, None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action == 'create' and self.parameters.get('from_name'):
            # create by renaming an existing qtree; the source must exist.
            current = self.get_qtree(self.parameters['from_name'])
            if current is None:
                self.module.fail_json(msg="Error renaming: qtree %s does not exist" % self.parameters['from_name'])
            cd_action = None
            if not self.use_rest:
                # modify can change the name for REST, as UUID is the key.
                rename = True

        if cd_action is None:
            octal_value = current.get('unix_permissions') if current else None
            # drop unix_permissions when the octal and string forms already match,
            # so it does not show up as a spurious modification.
            if self.parameters.get('unix_permissions')\
               and self.na_helper.compare_chmod_value(octal_value, self.parameters['unix_permissions']):
                del self.parameters['unix_permissions']
            modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.use_rest and cd_action == 'delete' and not self.parameters['force_delete']:
            self.module.fail_json(msg='Error: force_delete option is not supported for REST, unless set to true.')

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_qtree()
            elif cd_action == 'delete':
                self.delete_qtree()
            else:
                if rename:
                    self.rename_qtree()
                if modify:
                    self.modify_qtree()
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    '''Apply qtree operations from playbook'''
    module_obj = NetAppOntapQTree()
    module_obj.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py
new file mode 100644
index 000000000..d2604c62c
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_quota_policy
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_quota_policy
+short_description: NetApp Ontap create, assign, rename or delete quota policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+version_added: '19.11.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create, assign, rename or delete the quota policy
+ - This module only supports ZAPI and is deprecated.
+ - The final version of ONTAP to support ZAPI is 9.12.1.
+options:
+ state:
+ description:
+ - Whether the specified quota policy should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the quota policy.
+ required: true
+ type: str
+
+ name:
+ description:
+ - Specifies the quota policy name to create or rename to.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name of the existing quota policy to be renamed to name.
+ type: str
+
+ auto_assign:
+ description:
+ - when true, assign the policy to the vserver, whether it is newly created, renamed, or already exists.
+ - when true, the policy identified by name replaces the already assigned policy.
+ - when false, the policy is created if it does not already exist but is not assigned.
+ type: bool
+ default: true
+ version_added: 20.12.0
+"""
+
+EXAMPLES = """
+ - name: Create quota policy
+ na_ontap_quota_policy:
+ state: present
+ vserver: SVM1
+ name: ansible_policy
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Rename quota policy
+ na_ontap_quota_policy:
+ state: present
+ vserver: SVM1
+ name: new_ansible
+ from_name: ansible
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete quota policy
+ na_ontap_quota_policy:
+ state: absent
+ vserver: SVM1
+ name: ansible_policy
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils import zapis_svm
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapQuotaPolicy(object):
+    """
+    Create, assign, rename or delete a quota policy.
+
+    ZAPI-only module: uses quota-policy-* ZAPI calls, and the zapis_svm
+    helpers to read and assign the vserver's active quota policy.
+    """
+
+    def __init__(self):
+        """
+        Initialize the ONTAP quota policy class
+        """
+
+        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            name=dict(required=True, type='str'),
+            from_name=dict(required=False, type='str'),
+            auto_assign=dict(required=False, type='bool', default=True),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            required_if=[
+                ('state', 'present', ['name', 'vserver'])
+            ],
+            supports_check_mode=True
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # emit the ZAPI deprecation warning for this module
+        self.na_helper.module_deprecated(self.module)
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg='The python NetApp-Lib module is required')
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_quota_policy(self, policy_name=None):
+        """Return {'name': ...} for the named policy (defaults to self.parameters['name']), or None if not found."""
+
+        if policy_name is None:
+            policy_name = self.parameters['name']
+
+        return_value = None
+        quota_policy_get_iter = netapp_utils.zapi.NaElement('quota-policy-get-iter')
+        quota_policy_info = netapp_utils.zapi.NaElement('quota-policy-info')
+        quota_policy_info.add_new_child('policy-name', policy_name)
+        quota_policy_info.add_new_child('vserver', self.parameters['vserver'])
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(quota_policy_info)
+        quota_policy_get_iter.add_child_elem(query)
+        try:
+            result = self.server.invoke_successfully(quota_policy_get_iter, True)
+            if result.get_child_by_name('attributes-list'):
+                quota_policy_attributes = result['attributes-list']['quota-policy-info']
+                return_value = {
+                    'name': quota_policy_attributes['policy-name']
+                }
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching quota policy %s: %s' % (policy_name, to_native(error)),
+                                  exception=traceback.format_exc())
+        return return_value
+
+    def create_quota_policy(self):
+        """
+        Creates a new quota policy
+        """
+        quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-create")
+        quota_policy_obj.add_new_child("policy-name", self.parameters['name'])
+        quota_policy_obj.add_new_child("vserver", self.parameters['vserver'])
+        try:
+            self.server.invoke_successfully(quota_policy_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating quota policy %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_quota_policy(self):
+        """
+        Deletes a quota policy
+        """
+        quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-delete")
+        quota_policy_obj.add_new_child("policy-name", self.parameters['name'])
+        try:
+            self.server.invoke_successfully(quota_policy_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting quota policy %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def rename_quota_policy(self):
+        """
+        Rename a quota policy (from_name -> name)
+        """
+        quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-rename")
+        quota_policy_obj.add_new_child("policy-name", self.parameters['from_name'])
+        quota_policy_obj.add_new_child("vserver", self.parameters['vserver'])
+        quota_policy_obj.add_new_child("new-policy-name", self.parameters['name'])
+        try:
+            self.server.invoke_successfully(quota_policy_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error renaming quota policy %s: %s' % (self.parameters['from_name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def apply(self):
+        """Determine the required action (create/rename/delete/assign) and execute it."""
+        current = self.get_quota_policy()
+        # rename and create are mutually exclusive
+        rename, cd_action = None, None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action == 'create' and self.parameters.get('from_name'):
+            # create policy by renaming it
+            rename = self.na_helper.is_rename_action(self.get_quota_policy(self.parameters['from_name']), current)
+            if rename is None:
+                self.module.fail_json(msg='Error renaming quota policy: %s does not exist.' % self.parameters['from_name'])
+
+        # check if policy should be assigned
+        assign_policy = cd_action == 'create' and self.parameters['auto_assign']
+        if cd_action is None and current and self.parameters['auto_assign']:
+            # find out if the existing policy needs to be changed
+            svm = zapis_svm.get_vserver(self.server, self.parameters['vserver'])
+            if svm.get('quota_policy') != self.parameters['name']:
+                assign_policy = True
+                self.na_helper.changed = True
+        if cd_action == 'delete':
+            # can't delete if already assigned
+            svm = zapis_svm.get_vserver(self.server, self.parameters['vserver'])
+            if svm.get('quota_policy') == self.parameters['name']:
+                self.module.fail_json(msg='Error policy %s cannot be deleted as it is assigned to the vserver %s' %
+                                      (self.parameters['name'], self.parameters['vserver']))
+
+        if self.na_helper.changed and not self.module.check_mode:
+            if rename:
+                self.rename_quota_policy()
+            elif cd_action == 'create':
+                self.create_quota_policy()
+            elif cd_action == 'delete':
+                self.delete_quota_policy()
+            if assign_policy:
+                # assignment is done by modifying the vserver's quota_policy attribute
+                zapis_svm.modify_vserver(self.server, self.module, self.parameters['vserver'], modify=dict(quota_policy=self.parameters['name']))
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Create the NetApp ONTAP quota policy object and apply the requested state.
+    """
+    obj = NetAppOntapQuotaPolicy()
+    obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py
new file mode 100644
index 000000000..1aca89feb
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py
@@ -0,0 +1,890 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+'''
+na_ontap_quotas
+'''
+
+
+DOCUMENTATION = '''
+module: na_ontap_quotas
+short_description: NetApp ONTAP Quotas
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Set/Modify/Delete quota on ONTAP
+options:
+ state:
+ description:
+ - Whether the specified quota should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ vserver:
+ required: true
+ description:
+ - Name of the vserver to use.
+ type: str
+ volume:
+ description:
+ - The name of the volume that the quota resides on.
+ required: true
+ type: str
+ quota_target:
+ description:
+ - The quota target of the type specified.
+ - Required to create or modify a rule.
+ - users and group takes quota_target value in REST.
+ - For default user and group quota rules, the quota_target must be specified as "".
+ type: str
+ qtree:
+ description:
+ - Name of the qtree for the quota.
+ - For user or group rules, it can be the qtree name or "" if no qtree.
+ - For tree type rules, this field must be "".
+ default: ""
+ type: str
+ type:
+ description:
+ - The type of quota rule
+ - Required to create or modify a rule.
+ choices: ['user', 'group', 'tree']
+ type: str
+ policy:
+ description:
+ - Name of the quota policy from which the quota rule should be obtained.
+ - Only supported with ZAPI.
+ - Multiple alternative quota policies (active and backup) are not supported in REST.
+ - REST manages the quota rules of the active policy.
+ type: str
+ set_quota_status:
+ description:
+ - Whether the specified volume should have quota status on or off.
+ type: bool
+ perform_user_mapping:
+ description:
+ - Whether quota management will perform user mapping for the user specified in quota-target.
+ - User mapping can be specified only for a user quota rule.
+ type: bool
+ aliases: ['user_mapping']
+ version_added: 20.12.0
+ file_limit:
+ description:
+ - The number of files that the target can have.
+ - use '-' to reset file limit.
+ type: str
+ disk_limit:
+ description:
+ - The amount of disk space that is reserved for the target.
+ - Expects a number followed with B (for bytes), KB, MB, GB, TB.
+ - If the unit is not present KB is used by default.
+ - Examples - 10MB, 20GB, 1TB, 20B, 10.
+ - In REST, if limit is less than 1024 bytes, the value is rounded up to 1024 bytes.
+ - use '-' to reset disk limit.
+ type: str
+ soft_file_limit:
+ description:
+ - The number of files the target would have to exceed before a message is logged and an SNMP trap is generated.
+ - use '-' to reset soft file limit.
+ type: str
+ soft_disk_limit:
+ description:
+ - The amount of disk space the target would have to exceed before a message is logged and an SNMP trap is generated.
+ - See C(disk_limit) for format description.
+ - In REST, if limit is less than 1024 bytes, the value is rounded up to 1024 bytes.
+ - use '-' to reset soft disk limit.
+ type: str
+ threshold:
+ description:
+ - The amount of disk space the target would have to exceed before a message is logged.
+ - See C(disk_limit) for format description.
+ - Only supported with ZAPI.
+ type: str
+ activate_quota_on_change:
+ description:
+ - Method to use to activate quota on a change.
+ - Default value is 'resize' in ZAPI.
+ - With REST, Changes to quota rule limits C(file_limit), C(disk_limit), C(soft_file_limit), and C(soft_disk_limit) are applied automatically
+ without requiring a quota resize operation.
+ choices: ['resize', 'reinitialize', 'none']
+ type: str
+ version_added: 20.12.0
+
+'''
+
+EXAMPLES = """
+ - name: Create quota rule in ZAPI.
+ netapp.ontap.na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: user1
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Resize quota
+ netapp.ontap.na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: user1
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ activate_quota_on_change: resize
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Reinitialize quota
+ netapp.ontap.na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: user1
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ activate_quota_on_change: reinitialize
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: modify quota
+ netapp.ontap.na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: user1
+ type: user
+ policy: ansible
+ file_limit: 2
+ disk_limit: 3
+ threshold: 3
+ set_quota_status: False
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete quota
+ netapp.ontap.na_ontap_quotas:
+ state: absent
+ vserver: ansible
+ volume: ansible
+ quota_target: /vol/ansible
+ type: user
+ policy: ansible
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Add/Set quota rule for type user in REST.
+ netapp.ontap.na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: "user1,user2"
+ qtree: qtree
+ type: user
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Modify quota reset file limit and modify disk limit.
+ netapp.ontap.na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: "user1,user2"
+ qtree: qtree
+ type: user
+ file_limit: "-"
+ disk_limit: 100
+ set_quota_status: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Add/Set quota rule for type group in REST.
+ netapp.ontap.na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: group1
+ qtree: qtree
+ type: group
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Add/Set quota rule for type qtree in REST.
+ netapp.ontap.na_ontap_quotas:
+ state: present
+ vserver: ansible
+ volume: ansible
+ quota_target: qtree1
+      type: tree
+ file_limit: 2
+ disk_limit: 3
+ set_quota_status: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+
+import time
+import traceback
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class NetAppONTAPQuotas:
+ '''Class with quotas methods'''
+
+    def __init__(self):
+        """Set up the argument spec, select REST vs ZAPI, and validate/normalize parameters."""
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            volume=dict(required=True, type='str'),
+            quota_target=dict(required=False, type='str'),
+            qtree=dict(required=False, type='str', default=""),
+            type=dict(required=False, type='str', choices=['user', 'group', 'tree']),
+            policy=dict(required=False, type='str'),
+            set_quota_status=dict(required=False, type='bool'),
+            perform_user_mapping=dict(required=False, type='bool', aliases=['user_mapping']),
+            file_limit=dict(required=False, type='str'),
+            disk_limit=dict(required=False, type='str'),
+            soft_file_limit=dict(required=False, type='str'),
+            soft_disk_limit=dict(required=False, type='str'),
+            threshold=dict(required=False, type='str'),
+            activate_quota_on_change=dict(required=False, type='str', choices=['resize', 'reinitialize', 'none'])
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True,
+            # all rule attributes require a quota_target and type to identify the rule
+            required_by={
+                'policy': ['quota_target', 'type'],
+                'perform_user_mapping': ['quota_target', 'type'],
+                'file_limit': ['quota_target', 'type'],
+                'disk_limit': ['quota_target', 'type'],
+                'soft_file_limit': ['quota_target', 'type'],
+                'soft_disk_limit': ['quota_target', 'type'],
+                'threshold': ['quota_target', 'type'],
+            },
+            required_together=[('quota_target', 'type')]
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # Set up Rest API; fall back to ZAPI when ZAPI-only options are in use
+        self.rest_api = netapp_utils.OntapRestAPI(self.module)
+        unsupported_rest_properties = ['policy', 'threshold']
+        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
+        self.volume_uuid = None  # volume UUID after quota rule creation, used for on or off quota status
+        self.quota_uuid = None  # UUID of the matched quota rule (REST), set by get_quotas_rest
+        self.warn_msg = None
+        self.validate_parameters_ZAPI_REST()
+
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def validate_parameters_ZAPI_REST(self):
+ if self.use_rest:
+ if self.parameters.get('type') == 'tree':
+ if self.parameters['qtree']:
+ self.module.fail_json(msg="Error: Qtree cannot be specified for a tree type rule, it should be ''.")
+ # valid qtree name for ZAPI is /vol/vol_name/qtree_name and REST is qtree_name.
+ if '/' in self.parameters.get('quota_target', ''):
+ self.parameters['quota_target'] = self.parameters['quota_target'].split('/')[-1]
+ for quota_limit in ['file_limit', 'disk_limit', 'soft_file_limit', 'soft_disk_limit']:
+ if self.parameters.get(quota_limit) == '-1':
+ self.parameters[quota_limit] = '-'
+ else:
+ # converted blank parameter to * as shown in vsim
+ if self.parameters.get('quota_target') == "":
+ self.parameters['quota_target'] = '*'
+ if not self.parameters.get('activate_quota_on_change'):
+ self.parameters['activate_quota_on_change'] = 'resize'
+ size_format_error_message = "input string is not a valid size format. A valid size format is constructed as" \
+ "<integer><size unit>. For example, '10MB', '10KB'. Only numeric input is also valid." \
+ "The default unit size is KB."
+ if self.parameters.get('disk_limit') and self.parameters['disk_limit'] != '-' and not self.convert_to_kb_or_bytes('disk_limit'):
+ self.module.fail_json(msg='disk_limit %s' % size_format_error_message)
+ if self.parameters.get('soft_disk_limit') and self.parameters['soft_disk_limit'] != '-' and not self.convert_to_kb_or_bytes('soft_disk_limit'):
+ self.module.fail_json(msg='soft_disk_limit %s' % size_format_error_message)
+ if self.parameters.get('threshold') and self.parameters['threshold'] != '-' and not self.convert_to_kb_or_bytes('threshold'):
+ self.module.fail_json(msg='threshold %s' % size_format_error_message)
+
+    def get_quota_status(self):
+        """
+        Return details about the quota status (ZAPI only).
+        :param:
+            name : volume name
+        :return: status of the quota ('status' field of the quota-status ZAPI reply).
+        :rtype: dict
+        """
+        quota_status_get = netapp_utils.zapi.NaElement('quota-status')
+        quota_status_get.translate_struct({
+            'volume': self.parameters['volume']
+        })
+        try:
+            result = self.server.invoke_successfully(quota_status_get, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching quotas status info: %s' % to_native(error),
+                                  exception=traceback.format_exc())
+        return result['status']
+
+    def get_quotas_with_retry(self, get_request, policy):
+        """Invoke a quota-list-entries-iter request, optionally scoped to a policy.
+
+        Returns (result, return_values): on success, result is the ZAPI reply and
+        return_values is None; on the known '13001:success' ZAPI quirk (policy not
+        set), result is None and return_values is the outcome of retrying per policy.
+        """
+        return_values = None
+        if policy is not None:
+            get_request['query']['quota-entry'].add_new_child('policy', policy)
+        try:
+            result = self.server.invoke_successfully(get_request, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            # Bypass a potential issue in ZAPI when policy is not set in the query
+            # https://github.com/ansible-collections/netapp.ontap/issues/4
+            # BURT1076601 Loop detected in next() for table quota_rules_zapi
+            if policy is None and 'Reason - 13001:success' in to_native(error):
+                result = None
+                return_values = self.debug_quota_get_error(error)
+            else:
+                self.module.fail_json(msg='Error fetching quotas info for policy %s: %s'
+                                          % (policy, to_native(error)),
+                                      exception=traceback.format_exc())
+        return result, return_values
+
+    def get_quotas(self, policy=None):
+        """
+        Get quota details (ZAPI).
+        :return: dict of quota rule attributes if the rule exists, None otherwise
+        """
+        if self.parameters.get('type') is None:
+            # a rule cannot be identified without a type
+            return None
+        if policy is None:
+            policy = self.parameters.get('policy')
+        quota_get = netapp_utils.zapi.NaElement('quota-list-entries-iter')
+        query = {
+            'query': {
+                'quota-entry': {
+                    'volume': self.parameters['volume'],
+                    'quota-target': self.parameters['quota_target'],
+                    'quota-type': self.parameters['type'],
+                    'vserver': self.parameters['vserver'],
+                    'qtree': self.parameters['qtree'] or '""'
+                }
+            }
+        }
+        quota_get.translate_struct(query)
+        result, return_values = self.get_quotas_with_retry(quota_get, policy)
+        if result is None:
+            # the retry path already computed the answer (see get_quotas_with_retry)
+            return return_values
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            # if quota-target is '*', the query treats it as a wildcard.  But a blank entry is represented as '*'.
+            # Hence the need to loop through all records to find a match.
+            for quota_entry in result.get_child_by_name('attributes-list').get_children():
+                quota_target = quota_entry.get_child_content('quota-target')
+                if quota_target == self.parameters['quota_target']:
+                    return_values = {'volume': quota_entry.get_child_content('volume'),
+                                     'file_limit': quota_entry.get_child_content('file-limit'),
+                                     'disk_limit': quota_entry.get_child_content('disk-limit'),
+                                     'soft_file_limit': quota_entry.get_child_content('soft-file-limit'),
+                                     'soft_disk_limit': quota_entry.get_child_content('soft-disk-limit'),
+                                     'threshold': quota_entry.get_child_content('threshold')}
+                    # perform-user-mapping is optional in the reply; only report it when present
+                    value = self.na_helper.safe_get(quota_entry, ['perform-user-mapping'])
+                    if value is not None:
+                        return_values['perform_user_mapping'] = self.na_helper.get_value_for_bool(True, value)
+                    return return_values
+        return None
+
+    def get_quota_policies(self):
+        """
+        Get list of quota policies for the vserver (ZAPI).
+        :return: list of quota policy names (empty list if None found)
+        """
+        quota_policy_get = netapp_utils.zapi.NaElement('quota-policy-get-iter')
+        query = {
+            'query': {
+                'quota-policy-info': {
+                    'vserver': self.parameters['vserver']
+                }
+            }
+        }
+        quota_policy_get.translate_struct(query)
+        try:
+            result = self.server.invoke_successfully(quota_policy_get, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching quota policies: %s' % to_native(error),
+                                  exception=traceback.format_exc())
+        return ([policy['policy-name'] for policy in result['attributes-list'].get_children()]
+                if result.get_child_by_name('attributes-list')
+                else [])
+
+    def debug_quota_get_error(self, error):
+        """Work around the '13001:success' ZAPI error by retrying the query per policy.
+
+        When exactly one policy exists, return the quota info found for it (with a
+        warning); otherwise fail with the original error plus the per-policy details.
+        """
+        policies = self.get_quota_policies()
+        entries = {}
+        for policy in policies:
+            entries[policy] = self.get_quotas(policy)
+        if len(policies) == 1:
+            # single policy: the retried result is authoritative
+            self.module.warn('retried with success using policy="%s" on "13001:success" ZAPI error.' % policy)
+            return entries[policies[0]]
+        self.module.fail_json(msg='Error fetching quotas info: %s - current vserver policies: %s, details: %s'
+                                  % (to_native(error), policies, entries))
+
+    def quota_entry_set(self):
+        """
+        Adds a quota entry (ZAPI quota-set-entry).
+        """
+        options = {'volume': self.parameters['volume'],
+                   'quota-target': self.parameters['quota_target'],
+                   'quota-type': self.parameters['type'],
+                   'qtree': self.parameters['qtree']}
+
+        # fill in the optional limit/mapping attributes from module parameters
+        self.set_zapi_options(options)
+        if self.parameters.get('policy'):
+            options['policy'] = self.parameters['policy']
+        set_entry = netapp_utils.zapi.NaElement.create_node_with_children(
+            'quota-set-entry', **options)
+        try:
+            self.server.invoke_successfully(set_entry, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error adding/modifying quota entry %s: %s'
+                                  % (self.parameters['volume'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def quota_entry_delete(self):
+        """
+        Deletes a quota entry (ZAPI quota-delete-entry).
+        """
+        options = {'volume': self.parameters['volume'],
+                   'quota-target': self.parameters['quota_target'],
+                   'quota-type': self.parameters['type'],
+                   'qtree': self.parameters['qtree']}
+        set_entry = netapp_utils.zapi.NaElement.create_node_with_children(
+            'quota-delete-entry', **options)
+        if self.parameters.get('policy'):
+            set_entry.add_new_child('policy', self.parameters['policy'])
+        try:
+            self.server.invoke_successfully(set_entry, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting quota entry %s: %s'
+                                  % (self.parameters['volume'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def quota_entry_modify(self, modify_attrs):
+        """
+        Modifies a quota entry (ZAPI quota-modify-entry).
+
+        :param modify_attrs: dict of changed attributes (snake_case keys); keys are
+            converted in place to the dashed form expected by ZAPI.
+        """
+        for key in list(modify_attrs):
+            modify_attrs[key.replace("_", "-")] = modify_attrs.pop(key)
+        options = {'volume': self.parameters['volume'],
+                   'quota-target': self.parameters['quota_target'],
+                   'quota-type': self.parameters['type'],
+                   'qtree': self.parameters['qtree']}
+        options.update(modify_attrs)
+        self.set_zapi_options(options)
+        if self.parameters.get('policy'):
+            options['policy'] = str(self.parameters['policy'])
+        modify_entry = netapp_utils.zapi.NaElement.create_node_with_children(
+            'quota-modify-entry', **options)
+        try:
+            self.server.invoke_successfully(modify_entry, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying quota entry %s: %s'
+                                  % (self.parameters['volume'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def set_zapi_options(self, options):
+        """Copy the optional limit/mapping parameters into a ZAPI options dict (mutated in place)."""
+        if self.parameters.get('file_limit'):
+            options['file-limit'] = self.parameters['file_limit']
+        if self.parameters.get('disk_limit'):
+            options['disk-limit'] = self.parameters['disk_limit']
+        # explicit None check: False is a valid value for perform_user_mapping
+        if self.parameters.get('perform_user_mapping') is not None:
+            options['perform-user-mapping'] = str(self.parameters['perform_user_mapping'])
+        if self.parameters.get('soft_file_limit'):
+            options['soft-file-limit'] = self.parameters['soft_file_limit']
+        if self.parameters.get('soft_disk_limit'):
+            options['soft-disk-limit'] = self.parameters['soft_disk_limit']
+        if self.parameters.get('threshold'):
+            options['threshold'] = self.parameters['threshold']
+
+    def on_or_off_quota(self, status, cd_action=None):
+        """
+        Turn quota on or off for the volume (ZAPI).
+
+        :param status: ZAPI call name, 'quota-on' or 'quota-off'.
+        :param cd_action: current create/delete action, used to tolerate the
+            'no valid quota rules' error after deleting the last rule.
+        """
+        quota = netapp_utils.zapi.NaElement.create_node_with_children(
+            status, **{'volume': self.parameters['volume']})
+        try:
+            self.server.invoke_successfully(quota,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            if cd_action == 'delete' and status == 'quota-on' and '14958:No valid quota rules found' in to_native(error):
+                # ignore error on quota-on, as all rules have been deleted
+                self.module.warn('Last rule deleted, quota is off.')
+                return
+            self.module.fail_json(msg='Error setting %s for %s: %s'
+                                  % (status, self.parameters['volume'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def resize_quota(self, cd_action=None):
+        """
+        Resize quota on the volume (ZAPI quota-resize).
+
+        :param cd_action: current create/delete action, used to tolerate the
+            'no valid quota rules' error after deleting the last rule.
+        """
+        quota = netapp_utils.zapi.NaElement.create_node_with_children(
+            'quota-resize', **{'volume': self.parameters['volume']})
+        try:
+            self.server.invoke_successfully(quota,
+                                            enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            if cd_action == 'delete' and '14958:No valid quota rules found' in to_native(error):
+                # ignore error on quota-resize, as all rules have been deleted
+                self.module.warn('Last rule deleted, but quota is on as resize is not allowed.')
+                return
+            self.module.fail_json(msg='Error setting %s for %s: %s'
+                                  % ('quota-resize', self.parameters['volume'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def get_quotas_rest(self):
+        """
+        Retrieves quotas with rest API.
+        If type is user then it returns all possible combinations of user name records.
+        Report api is used to fetch file and disk limit info
+
+        Side effects: when a matching rule is found, caches its volume UUID in
+        self.volume_uuid and rule UUID in self.quota_uuid for later REST calls.
+        :return: dict of current limits (as strings, '-' when unset), or None.
+        """
+        if not self.use_rest:
+            # ZAPI path
+            return self.get_quotas()
+        query = {'svm.name': self.parameters.get('vserver'),
+                 'volume.name': self.parameters.get('volume'),
+                 'type': self.parameters.get('type'),
+                 'fields': 'svm.uuid,'
+                           'svm.name,'
+                           'space.hard_limit,'
+                           'files.hard_limit,'
+                           'user_mapping,'
+                           'qtree.name,'
+                           'type,'
+                           'space.soft_limit,'
+                           'files.soft_limit,'
+                           'volume.uuid,'
+                           'users.name,'
+                           'group.name,'}
+
+        # set qtree name in query for type user and group if not ''.
+        if self.parameters['qtree']:
+            query['qtree.name'] = self.parameters['qtree']
+        if self.parameters.get('quota_target'):
+            # the REST field carrying the target depends on the rule type
+            type = self.parameters['type']
+            field_name = 'users.name' if type == 'user' else 'group.name' if type == 'group' else 'qtree.name'
+            query[field_name] = self.parameters['quota_target']
+        api = 'storage/quota/rules'
+        # If type: user, get quota rules api returns users which has name starts with input target user names.
+        # Example of users list in a record:
+        # users: [{'name': 'quota_user'}], users: [{'name': 'quota_user'}, {'name': 'quota'}]
+        records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query)
+        if error:
+            self.module.fail_json(msg="Error on getting quota rule info: %s" % error)
+        if records:
+            record = None
+            for item in records:
+                # along with user/group, qtree should also match to get current quota.
+                # for type user/group if qtree is not set in create, its not returned in GET, make desired qtree None if ''.
+                desired_qtree = self.parameters['qtree'] if self.parameters.get('qtree') else None
+                current_qtree = self.na_helper.safe_get(item, ['qtree', 'name'])
+                type = self.parameters.get('type')
+                if type in ['user', 'group']:
+                    if desired_qtree != current_qtree:
+                        continue
+                    if type == 'user':
+                        # user rules match only when the user name sets are identical
+                        desired_users = self.parameters['quota_target'].split(',')
+                        current_users = [user['name'] for user in item['users']]
+                        if set(current_users) == set(desired_users):
+                            record = item
+                            break
+                    elif item['group']['name'] == self.parameters['quota_target']:
+                        record = item
+                        break
+                # for type tree, desired quota_target should match current tree.
+                elif type == 'tree' and current_qtree == self.parameters['quota_target']:
+                    record = item
+                    break
+            if record:
+                self.volume_uuid = record['volume']['uuid']
+                self.quota_uuid = record['uuid']
+                current = {
+                    'soft_file_limit': self.na_helper.safe_get(record, ['files', 'soft_limit']),
+                    'disk_limit': self.na_helper.safe_get(record, ['space', 'hard_limit']),
+                    'soft_disk_limit': self.na_helper.safe_get(record, ['space', 'soft_limit']),
+                    'file_limit': self.na_helper.safe_get(record, ['files', 'hard_limit']),
+                    'perform_user_mapping': self.na_helper.safe_get(record, ['user_mapping']),
+                }
+                # Rest allows reset quota limits using '-', convert None to '-' to avoid idempotent issue.
+                current['soft_file_limit'] = '-' if current['soft_file_limit'] is None else str(current['soft_file_limit'])
+                current['disk_limit'] = '-' if current['disk_limit'] is None else str(current['disk_limit'])
+                current['soft_disk_limit'] = '-' if current['soft_disk_limit'] is None else str(current['soft_disk_limit'])
+                current['file_limit'] = '-' if current['file_limit'] is None else str(current['file_limit'])
+                return current
+        return None
+
    def quota_entry_set_rest(self):
        """
        Create a quota policy rule with the REST API (falls back to ZAPI when REST is not in use).

        For type 'user', quota_target may be a comma separated list of user names.
        For type 'tree', quota_target is the qtree name itself.
        Value for user, group and qtree should be passed as ''.
        On success (or on the tolerated warning codes) self.volume_uuid is populated for follow-up calls.
        """
        if not self.use_rest:
            return self.quota_entry_set()
        body = {'svm.name': self.parameters.get('vserver'),
                'volume.name': self.parameters.get('volume'),
                'type': self.parameters.get('type'),
                'qtree.name': self.parameters.get('qtree')}
        quota_target = self.parameters.get('quota_target')
        # REST expects a list of user names for multi-user rules; the module input is comma separated.
        if self.parameters.get('type') == 'user':
            body['users.name'] = quota_target.split(',')
        elif self.parameters.get('type') == 'group':
            body['group.name'] = quota_target
        # for tree rules the target is the qtree, overriding the qtree.name set from parameters above.
        if self.parameters.get('type') == 'tree':
            body['qtree.name'] = quota_target
        if 'file_limit' in self.parameters:
            body['files.hard_limit'] = self.parameters.get('file_limit')
        if 'soft_file_limit' in self.parameters:
            body['files.soft_limit'] = self.parameters.get('soft_file_limit')
        if 'disk_limit' in self.parameters:
            body['space.hard_limit'] = self.parameters.get('disk_limit')
        if 'soft_disk_limit' in self.parameters:
            body['space.soft_limit'] = self.parameters.get('soft_disk_limit')
        if 'perform_user_mapping' in self.parameters:
            body['user_mapping'] = self.parameters.get('perform_user_mapping')
        query = {'return_records': 'true'}  # in order to capture UUID
        api = 'storage/quota/rules'
        response, error = rest_generic.post_async(self.rest_api, api, body, query)
        if error:
            if "job reported error:" in error and "entry doesn't exist" in error:
                # ignore RBAC issue with FSx - BURT1525998
                self.module.warn('Ignoring job status, assuming success.')
            elif '5308568' in error:
                # code: 5308568 requires quota to be disabled/enabled to take effect.
                # code: 5308571 - rule created, but to make it active reinitialize quota.
                # reinitialize will disable/enable quota.
                self.form_warn_msg_rest('create', '5308568')
            elif '5308571' in error:
                self.form_warn_msg_rest('create', '5308571')
            else:
                self.module.fail_json(msg="Error on creating quotas rule: %s" % error)
        # fetch volume uuid as response will be None if above code error occurs.
        self.volume_uuid = self.get_quota_status_or_volume_id_rest(get_volume=True)
        # skip fetching volume uuid from response if volume_uuid already populated.
        if not self.volume_uuid and response:
            record, error = rrh.check_for_0_or_1_records(api, response, error, query)
            if not error and record and not record['volume']['uuid']:
                error = 'volume uuid key not present in %s:' % record
            if error:
                self.module.fail_json(msg='Error on getting volume uuid: %s' % error)
            if record:
                self.volume_uuid = record['volume']['uuid']
+
+ def quota_entry_delete_rest(self):
+ """
+ quota_entry_delete with rest API.
+ """
+ if not self.use_rest:
+ return self.quota_entry_delete()
+ api = 'storage/quota/rules'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.quota_uuid)
+ if error is not None:
+ # delete operation succeeded, but reinitialize is required.
+ # code: 5308569 requires quota to be disabled/enabled to take effect.
+ # code: 5308572 error occurs when trying to delete last rule.
+ if '5308569' in error:
+ self.form_warn_msg_rest('delete', '5308569')
+ elif '5308572' in error:
+ self.form_warn_msg_rest('delete', '5308572')
+ else:
+ self.module.fail_json(msg="Error on deleting quotas rule: %s" % error)
+
+ def quota_entry_modify_rest(self, modify_quota):
+ """
+ quota_entry_modify with rest API.
+ User mapping cannot be turned on for multiuser quota rules.
+ """
+ if not self.use_rest:
+ return self.quota_entry_modify(modify_quota)
+ body = {}
+ if 'disk_limit' in modify_quota:
+ body['space.hard_limit'] = modify_quota['disk_limit']
+ if 'file_limit' in modify_quota:
+ body['files.hard_limit'] = modify_quota['file_limit']
+ if 'soft_disk_limit' in modify_quota:
+ body['space.soft_limit'] = modify_quota['soft_disk_limit']
+ if 'soft_file_limit' in modify_quota:
+ body['files.soft_limit'] = modify_quota['soft_file_limit']
+ if 'perform_user_mapping' in modify_quota:
+ body['user_mapping'] = modify_quota['perform_user_mapping']
+ api = 'storage/quota/rules'
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.quota_uuid, body)
+ if error is not None:
+ # limits are modified but internal error, require reinitialize quota.
+ if '5308567' in error:
+ self.form_warn_msg_rest('modify', '5308567')
+ else:
+ self.module.fail_json(msg="Error on modifying quotas rule: %s" % error)
+
+ def get_quota_status_or_volume_id_rest(self, get_volume=None):
+ """
+ Get the status info on or off
+ """
+ if not self.use_rest:
+ return self.get_quota_status()
+ api = 'storage/volumes'
+ params = {'name': self.parameters['volume'],
+ 'svm.name': self.parameters['vserver'],
+ 'fields': 'quota.state,uuid'}
+ record, error = rest_generic.get_one_record(self.rest_api, api, params)
+ if error:
+ msg = "volume uuid" if get_volume else "quota status info"
+ self.module.fail_json(msg="Error on getting %s: %s" % (msg, error))
+ if record:
+ return record['uuid'] if get_volume else record['quota']['state']
+ self.module.fail_json(msg="Error: Volume %s in SVM %s does not exist" % (self.parameters['volume'], self.parameters['vserver']))
+
+ def on_or_off_quota_rest(self, status, cd_action=None):
+ """
+ quota_entry_modify quota status with rest API.
+ """
+ if not self.use_rest:
+ return self.on_or_off_quota(status, cd_action)
+ body = {}
+ body['quota.enabled'] = status == 'quota-on'
+ api = 'storage/volumes'
+ if not self.volume_uuid:
+ self.volume_uuid = self.get_quota_status_or_volume_id_rest(get_volume=True)
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.volume_uuid, body)
+ if error is not None:
+ self.module.fail_json(msg='Error setting %s for %s: %s'
+ % (status, self.parameters['volume'], to_native(error)))
+
+ def form_warn_msg_rest(self, action, code):
+ start_msg = "Quota policy rule %s opertation succeeded. " % action
+ end_msg = "reinitialize(disable and enable again) the quota for volume %s " \
+ "in SVM %s." % (self.parameters['volume'], self.parameters['vserver'])
+ msg = 'unexpected code: %s' % code
+ if code == '5308572':
+ msg = "However the rule is still being enforced. To stop enforcing, "
+ if code in ['5308568', '5308569', '5308567']:
+ msg = "However quota resize failed due to an internal error. To make quotas active, "
+ if code == '5308571':
+ msg = "but quota resize is skipped. To make quotas active, "
+ self.warn_msg = start_msg + msg + end_msg
+
    def apply(self):
        """
        Apply action to quotas: create/delete/modify the rule as needed, then adjust the
        volume quota status (on/off/resize/reinitialize) and report the result.
        """
        cd_action = None
        modify_quota_status = None
        modify_quota = None
        current = self.get_quotas_rest()
        # type is required to identify a specific rule; without it only status changes apply.
        if self.parameters.get('type') is not None:
            cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None:
            modify_quota = self.na_helper.get_modified_attributes(current, self.parameters)
        quota_status = self.get_quota_status_or_volume_id_rest()
        if 'set_quota_status' in self.parameters and quota_status is not None:
            # if 'set_quota_status' == True in create, sometimes there is delay in status update from 'initializing' -> 'on'.
            # if quota_status == 'on' and options(set_quota_status == True and activate_quota_on_change == 'resize'),
            # sometimes there is delay in status update from 'resizing' -> 'on'
            set_quota_status = True if quota_status in ('on', 'resizing', 'initializing') else False
            quota_status_action = self.na_helper.get_modified_attributes({'set_quota_status': set_quota_status}, self.parameters)
            if quota_status_action:
                modify_quota_status = 'quota-on' if quota_status_action['set_quota_status'] else 'quota-off'
        # when rules changed but no explicit on/off was requested, honor activate_quota_on_change.
        if (self.parameters.get('activate_quota_on_change') in ['resize', 'reinitialize']
                and (cd_action is not None or modify_quota is not None)
                and modify_quota_status is None
                and quota_status in ('on', None)):
            modify_quota_status = self.parameters['activate_quota_on_change']
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.quota_entry_set_rest()
            elif cd_action == 'delete':
                self.quota_entry_delete_rest()
            elif modify_quota:
                self.quota_entry_modify_rest(modify_quota)
            if modify_quota_status in ['quota-off', 'quota-on']:
                self.on_or_off_quota_rest(modify_quota_status)
            elif modify_quota_status == 'resize':
                # REST resizes implicitly on rule changes; explicit resize is ZAPI only.
                if not self.use_rest:
                    self.resize_quota(cd_action)
            elif modify_quota_status == 'reinitialize':
                self.on_or_off_quota_rest('quota-off')
                time.sleep(10)  # status switch interval
                self.on_or_off_quota_rest('quota-on', cd_action)
            # if warn message and quota not reinitialize, throw warnings to reinitialize in REST.
            if self.warn_msg and modify_quota_status != 'reinitialize':
                self.module.warn(self.warn_msg)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify_quota, extra_responses={'modify_quota_status': modify_quota_status})
        self.module.exit_json(**result)
+
+ def convert_to_kb_or_bytes(self, option):
+ """
+ convert input to kb, and set to self.parameters.
+ :param option: disk_limit or soft_disk_limit.
+ :return: boolean if it can be converted.
+ """
+ self.parameters[option].replace(' ', '')
+ slices = re.findall(r"\d+|\D+", self.parameters[option])
+ if len(slices) < 1 or len(slices) > 2:
+ return False
+ if not slices[0].isdigit():
+ return False
+ if len(slices) > 1 and slices[1].lower() not in ['b', 'kb', 'mb', 'gb', 'tb']:
+ return False
+ # force kb as the default unit for REST
+ if len(slices) == 1 and self.use_rest:
+ slices = (slices[0], 'kb')
+ if len(slices) > 1:
+ if not self.use_rest:
+ # conversion to KB
+ self.parameters[option] = str(int(slices[0]) * netapp_utils.POW2_BYTE_MAP[slices[1].lower()] // 1024)
+ else:
+ # conversion to Bytes
+ self.parameters[option] = str(int(slices[0]) * netapp_utils.POW2_BYTE_MAP[slices[1].lower()])
+ if self.use_rest:
+ # Rounding off the converted bytes
+ self.parameters[option] = str(((int(self.parameters[option]) + 1023) // 1024) * 1024)
+ return True
+
+
def main():
    '''Execute action'''
    NetAppONTAPQuotas().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py
new file mode 100644
index 000000000..544057b2c
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+
+# (c) 2019-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_rest_cli
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Run CLI commands on ONTAP through REST api/private/cli/.
+  - This module can run as admin or vsadmin and requires HTTP application to be enabled.
+ - Access permissions can be customized using ONTAP rest-role.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_rest_cli
+short_description: NetApp ONTAP run any CLI command using REST api/private/cli/
+version_added: 2.9.0
+options:
+ command:
+ description:
+ - a string command.
+ required: true
+ type: str
+ verb:
+ description:
+ - a string indicating which api call to run
+ - OPTIONS is useful to know which verbs are supported by the REST API
+ choices: ['GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS']
+ required: true
+ type: str
+ params:
+ description:
+ - a dictionary of parameters to pass into the api call
+ type: dict
+ body:
+ description:
+ - a dictionary for info specification
+ type: dict
+'''
+
+EXAMPLES = """
+ - name: run ontap rest cli command
+ netapp.ontap.na_ontap_rest_cli:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: 'version'
+ verb: 'GET'
+
+ - name: run ontap rest cli command
+ netapp.ontap.na_ontap_rest_cli:
+ hostname: "{{ hostname }}"
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ command: 'security/login/motd'
+ verb: 'PATCH'
+ params: {'vserver': 'ansibleSVM'}
+ body: {'message': 'test'}
+
+ - name: set option
+ netapp.ontap.na_ontap_rest_cli:
+ command: options
+ verb: PATCH
+ params:
+ option_name: lldp.enable
+ body:
+ option_value: "on"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+
+
class NetAppONTAPCommandREST():
    ''' Runs an arbitrary CLI command through the REST api/private/cli/ endpoint. '''

    def __init__(self):
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            command=dict(required=True, type='str'),
            verb=dict(required=True, type='str', choices=['GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS']),
            params=dict(required=False, type='dict'),
            body=dict(required=False, type='dict')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.rest_api = OntapRestAPI(self.module)
        # cache the playbook inputs as attributes for later use
        options = self.module.params
        self.command = options['command']
        self.verb = options['verb']
        self.params = options['params']
        self.body = options['body']

        if not self.rest_api.is_rest():
            msg = 'failed to connect to REST over %s: %s' % (options['hostname'], self.rest_api.errors)
            self.module.fail_json(msg=msg + '. Use na_ontap_command for non-rest CLI.')
        self.use_rest = True

    def run_command(self):
        """Dispatch the REST call matching self.verb and return its raw response."""
        api = "private/cli/" + self.command
        dispatch = {
            'POST': (self.rest_api.post, (api, self.body, self.params)),
            'GET': (self.rest_api.get, (api, self.params)),
            'PATCH': (self.rest_api.patch, (api, self.body, self.params)),
            'DELETE': (self.rest_api.delete, (api, self.body, self.params)),
            'OPTIONS': (self.rest_api.options, (api, self.params)),
        }
        if self.verb not in dispatch:
            # should be unreachable: verb is constrained by the argument spec choices.
            self.module.fail_json(msg='Error: unexpected verb %s' % self.verb,
                                  exception=traceback.format_exc())
        method, arguments = dispatch[self.verb]
        message, error = method(*arguments)
        if error:
            self.module.fail_json(msg='Error: %s' % error)
        return message

    def apply(self):
        ''' calls the command and returns raw output '''
        # read-only verbs never report a change.
        changed = self.verb not in ('GET', 'OPTIONS')
        if self.module.check_mode:
            output = "Would run command: '%s'" % str(self.command)
        else:
            output = self.run_command()
        self.module.exit_json(changed=changed, msg=output)
+
+
def main():
    """
    Execute action from playbook
    """
    NetAppONTAPCommandREST().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py
new file mode 100644
index 000000000..b1b5b6dae
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py
@@ -0,0 +1,1138 @@
+#!/usr/bin/python
+
+# (c) 2020-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" NetApp ONTAP Info using REST APIs """
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_rest_info
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+short_description: NetApp ONTAP information gatherer using REST APIs
+description:
+ - This module allows you to gather various information about ONTAP configuration using REST APIs
+version_added: 20.5.0
+notes:
+ - I(security_login_role_config_info) there is no REST equivalent.
+ - I(security_login_role_info) there is no REST equivalent.
+ - I(security_key_manager_key_info) there is no REST equivalent.
+ - I(vserver_motd_info) there is no REST equivalent.
+ - I(vserver_login_banner_info) there is no REST equivalent.
+ - I(vscan_connection_extended_stats_info) there is no REST equivalent.
+ - I(env_sensors_info) there is no REST equivalent.
+ - I(fcp_adapter_info) there is no REST equivalent.
+ - I(net_dev_discovery_info) there is no REST equivalent.
+ - I(net_failover_group_info) there is no REST equivalent.
+ - I(net_firewall_info) there is no REST equivalent.
+ - I(ntfs_dacl_info) there is no REST equivalent.
+ - I(ntfs_sd_info) there is no REST equivalent.
+  - I(role_info) there is no REST equivalent.
+  - I(subsys_health_info) there is no REST equivalent.
+  - I(volume_move_target_aggr_info) there is no REST equivalent.
+
+options:
+ state:
+ type: str
+ description:
+ - deprecated as of 21.1.0.
+ - this option was ignored and continues to be ignored.
+ gather_subset:
+ type: list
+ elements: str
+ description:
+ - When supplied, this argument will restrict the information collected to a given subset.
+ - Either the REST API or the ZAPI info name can be given. Possible values for this argument include
+ - application/applications or application_info
+ - application/consistency-groups
+ - application/templates or application_template_info
+ - cloud/targets or cloud_targets_info
+ - cluster
+ - cluster/chassis or cluster_chassis_info
+ - cluster/counter/tables
+ - cluster/fireware/history
+ - cluster/jobs or cluster_jobs_info
+ - cluster/licensing/capacity-pools
+ - cluster/licensing/license-managers
+ - cluster/licensing/licenses or license_info
+ - cluster/mediators
+ - cluster/metrics or cluster_metrics_info
+ - cluster/metrocluster or metrocluster_info
+ - cluster/metrocluster/diagnostics or cluster_metrocluster_diagnostics or metrocluster_check_info
+ - cluster/metrocluster/dr-groups
+ - cluster/metrocluster/interconnects
+ - cluster/metrocluster/nodes or metrocluster-node-get-iter
+ - cluster/metrocluster/operations
+ - cluster/metrocluster/svms
+ - cluster/nodes or cluster_node_info or sysconfig_info
+ - cluster/ntp/keys
+ - cluster/ntp/servers or ntp_server_info
+ - cluster/peers or cluster_peer_info
+ - cluster/schedules or cluster_schedules or job_schedule_cron_info
+ - cluster/sensors
+ - cluster/software or ontap_system_version or cluster_image_info
+ - cluster/software/download or cluster_software_download
+ - cluster/software/history or cluster_software_history
+ - cluster/software/packages or cluster_software_packages
+ - cluster/web
+ - name-services/cache/group-membership/settings
+ - name-services/cache/host/settings
+ - name-services/cache/netgroup/settings
+ - name-services/cache/setting
+ - name-services/cache/unix-group/settings
+ - name-services/dns or svm_dns_config_info or net_dns_info
+ - name-services/ldap or svm_ldap_config_info or ldap_client or ldap_config
+ - name-services/ldap-schemas
+ - name-services/local-hosts
+ - name-services/name-mappings or svm_name_mapping_config_info
+ - name-services/nis or svm_nis_config_info
+ - name-services/unix-groups
+ - name-services/unix-users
+ - network/ethernet/broadcast-domains or broadcast_domains_info or net_port_broadcast_domain_info
+ - network/ethernet/ports or network_ports_info or net_port_info
+ - network/ethernet/switch/ports
+ - network/ethernet/switches or cluster_switch_info
+ - network/fc/fabrics
+ - network/fc/interfaces
+ - network/fc/logins or san_fc_logins_info
+ - network/fc/ports
+ - network/fc/wwpn-aliases or san_fc_wppn-aliases or fcp_alias_info
+ - network/http-proxy
+ - network/ip/bgp/peer-groups
+ - network/ip/interfaces or ip_interfaces_info or net_interface_info
+ - network/ip/routes or ip_routes_info or net_routes_info
+ - network/ip/service-policies or ip_service_policies or net_interface_service_policy_info
+ - network/ip/subnets
+ - network/ipspaces or network_ipspaces_info or net_ipspaces_info
+ - private/support/alerts or sys_cluster_alerts
+ - private/cli/vserver/security/file-directory or file_directory_security
+ - protocols/active-directory
+ - protocols/audit
+ - protocols/cifs/connections
+ - protocols/cifs/domains
+ - protocols/cifs/group-policies
+ - protocols/cifs/home-directory/search-paths or cifs_home_directory_info
+ - protocols/cifs/local-groups
+ - protocols/cifs/local-users
+ - protocols/cifs/netbios
+ - protocols/cifs/services or cifs_services_info or cifs_options_info
+ - protocols/cifs/session/files
+ - protocols/cifs/sessions
+ - protocols/cifs/shadow-copies
+ - protocols/cifs/shadowcopy-sets
+ - protocols/cifs/shares or cifs_share_info
+ - protocols/cifs/users-and-groups/privileges
+ - protocols/cifs/unix-symlink-mapping
+ - protocols/fpolicy
+ - protocols/locks
+ - protocols/ndmp
+ - protocols/ndmp/nodes
+ - protocols/ndmp/sessions
+ - protocols/ndmp/svms
+ - protocols/nfs/connected-clients
+ - protocols/nfs/connected-client-maps
+ - protocols/nfs/connected-client-settings
+ - protocols/nfs/export-policies or export_policy_info
+ - protocols/nfs/export-policies/rules B(Requires the owning_resource to be set)
+ - protocols/nfs/kerberos/interfaces
+ - protocols/nfs/kerberos/realms or kerberos_realm_info
+ - protocols/nfs/services or vserver_nfs_info or nfs_info
+ - protocols/nvme/interfaces or nvme_interface_info
+ - protocols/nvme/services or nvme_info
+ - protocols/nvme/subsystems or nvme_subsystem_info
+ - protocols/nvme/subsystem-controllers
+ - protocols/nvme/subsystem-maps
+ - protocols/s3/buckets
+ - protocols/s3/services
+ - protocols/san/fcp/services or san_fcp_services or fcp_service_info
+ - protocols/san/igroups or nitiator_groups_info or igroup_info
+ - protocols/san/iscsi/credentials or san_iscsi_credentials
+ - protocols/san/iscsi/services or san_iscsi_services or iscsi_service_info
+ - protocols/san/iscsi/sessions
+ - protocols/san/lun-maps or san_lun_maps or lun_map_info
+ - protocols/san/portsets
+ - protocols/san/vvol-bindings
+ - protocols/vscan or vscan_status_info or vscan_info
+ - protocols/vscan/on-access-policies B(Requires the owning_resource to be set)
+ - protocols/vscan/on-demand-policies B(Requires the owning_resource to be set)
+ - protocols/vscan/scanner-pools B(Requires the owning_resource to be set)
+ - protocols/vscan/server-status or vscan_connection_status_all_info
+ - security
+ - security/accounts or security_login_info or security_login_account_info
+ - security/anti-ransomware/suspects
+ - security/audit
+ - security/audit/destinations or cluster_log_forwarding_info
+ - security/audit/messages
+ - security/authentication/cluster/ad-proxy
+ - security/authentication/cluster/ldap
+ - security/authentication/cluster/nis
+ - security/authentication/cluster/saml-sp
+ - security/authentication/publickeys
+ - security/aws-kms
+ - security/azure-key-vaults
+ - security/certificates
+ - security/gcp-kms
+ - security/ipsec
+ - security/ipsec/ca-certificates
+ - security/ipsec/policies
+ - security/ipsec/security-associations
+ - security/key-manager-configs
+ - security/key-managers
+ - security/key-stores
+ - security/login/messages
+ - security/multi-admin-verify
+ - security/multi-admin-verify/approval-groups
+ - security/multi-admin-verify/requests
+ - security/multi-admin-verify/rules
+ - security/roles or security_login_rest_role_info
+ - security/ssh
+ - security/ssh/svms
+ - snapmirror/policies or snapmirror_policy_info
+ - snapmirror/relationships or snapmirror_info
+ - storage/aggregates or aggregate_info
+ - storage/bridges or storage_bridge_info
+ - storage/cluster
+ - storage/disks or disk_info
+ - storage/file/clone/split-loads
+ - storage/file/clone/split-status
+ - storage/file/clone/tokens
+ - storage/file/moves
+ - storage/flexcache/flexcaches or storage_flexcaches_info
+ - storage/flexcache/origins or storage_flexcaches_origin_info
+ - storage/luns or storage_luns_info or lun_info (if serial_number is present, serial_hex and naa_id are computed)
+ - storage/namespaces or storage_NVMe_namespaces or nvme_namespace_info
+ - storage/pools
+ - storage/ports or storage_ports_info
+ - storage/qos/policies or storage_qos_policies or qos_policy_info or qos_adaptive_policy_info
+ - storage/qos/workloads
+ - storage/qtrees or storage_qtrees_config or qtree_info
+ - storage/quota/reports or storage_quota_reports or quota_report_info
+ - storage/quota/rules or storage_quota_policy_rules
+ - storage/shelves or storage_shelves_config or shelf_info
+ - storage/snaplock/audit-logs
+ - storage/snaplock/compliance-clocks
+ - storage/snaplock/event-retention/operations
+ - storage/snaplock/event-retention/policies
+ - storage/snaplock/file-fingerprints
+ - storage/snaplock/litigations
+ - storage/snapshot-policies or storage_snapshot_policies or snapshot_policy_info
+ - storage/switches
+ - storage/tape-devices
+ - storage/volumes or volume_info
+ - storage/volumes/snapshots B(Requires the owning_resource to be set)
+ - storage/volume-efficiency-policies or sis_policy_info
+ - support/autosupport or autosupport_config_info
+ - support/autosupport/check or autosupport_check_info
+ - support/autosupport/messages or autosupport_messages_history
+ - support/auto-update
+ - support/auto-update/configurations
+ - support/auto-update/updates
+ - support/configuration-backup
+ - support/configuration-backup/backups
+ - support/coredump/coredumps
+ - support/ems or support_ems_config
+ - support/ems/destinations or event_notification_info or event_notification_destination_info
+ - support/ems/events or support_ems_events
+ - support/ems/filters or support_ems_filters
+ - support/ems/messages
+ - support/snmp
+ - support/snmp/traphosts
+ - support/snmp/users
+ - svm/migrations
+ - svm/peers or svm_peers_info or vserver_peer_info
+ - svm/peer-permissions or svm_peer-permissions_info
+ - svm/svms or vserver_info
+ - B(The following do not have direct Rest API equivalent)
+ - aggr_efficiency_info
+ - cifs_vserver_security_info
+ - clock_info
+ - cluster_identity_info
+ - net_vlan_info
+ - sis_info
+ - snapmirror_destination_info
+ - system_node_info
+ - volume_space_info
+ - Can specify a list of values to include a larger subset.
+ - REST APIs are supported with ONTAP 9.6 onwards.
+ default: "demo"
+ max_records:
+ type: int
+ description:
+ - Maximum number of records returned in a single call.
+ default: 1024
+ fields:
+ type: list
+ elements: str
+ description:
+ - Request specific fields from subset.
+ - Recommended - '<list of fields>' to return specified fields, only one subset will be allowed.
+ - Discouraged - '*' to return all the fields, one or more subsets are allowed. This option can be used for discovery, but is discouraged in production.
+      - Strongly discouraged - '**' to return all the fields, one or more subsets are allowed.
+ This option can put an extra load on the system and should not be used in production.
+ - Limited - '' to return default fields, generally the properties that uniquely identify the record (keys).
+ Other data is not returned by default and need to be explicitly called for using the field name or *.
+ - If the option is not present, return default fields for that API (see '' above).
+ version_added: '20.6.0'
+ parameters:
+ description:
+ - Allows for any rest option to be passed in
+ type: dict
+ version_added: '20.7.0'
+ use_python_keys:
+ description:
+ - If true, I(/) in the returned dictionary keys are translated to I(_).
+ - It makes it possible to use a . notation when processing the output.
+ - For instance I(ontap_info["svm/svms"]) can be accessed as I(ontap_info.svm_svms).
+ type: bool
+ default: false
+ version_added: '21.9.0'
+ owning_resource:
+ description:
+      - Some resources cannot be accessed directly. You need to select them based on the owner or parent. For instance, volume for a snapshot.
+ - The following subsets require an owning resource, and the following suboptions when uuid is not present.
+ - <storage/volumes/snapshots> B(volume_name) is the volume name, B(svm_name) is the owning vserver name for the volume.
+ - <protocols/nfs/export-policies/rules> B(policy_name) is the name of the policy, B(svm_name) is the owning vserver name for the policy,
+ B(rule_index) is the rule index.
+ - <protocols/vscan/on-access-policies> B(svm_name) is the owning vserver name for the vscan
+ - <protocols/vscan/on-demand-policies> B(svm_name) is the owning vserver name for the vscan
+ - <protocols/vscan/scanner-pools> B(svm_name) is the owning vserver name for the vscan
+ type: dict
+ version_added: '21.19.0'
+ ignore_api_errors:
+ description:
+ - List of substrings.
+ - If a substring is contained in an error message when fetching a subset, the module does not fail and the error is reported in the subset.
+ type: list
+ elements: str
+ version_added: '21.23.0'
+'''
+
+EXAMPLES = '''
+- name: run ONTAP gather facts for vserver info
+ netapp.ontap.na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - svm/svms
+
+- name: run ONTAP gather facts for aggregate info and volume info
+ netapp.ontap.na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - storage/aggregates
+ - storage/volumes
+
+- name: run ONTAP gather facts for all subsets
+ netapp.ontap.na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - all
+
+- name: run ONTAP gather facts for aggregate info and volume info with fields section
+ netapp.ontap.na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ fields:
+ - '*'
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - storage/aggregates
+ - storage/volumes
+
+- name: run ONTAP gather facts for aggregate info with specified fields
+ netapp.ontap.na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ fields:
+ - 'uuid'
+ - 'name'
+ - 'node'
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - storage/aggregates
+ parameters:
+ recommend:
+ true
+
+- name: Get Snapshot info (owning_resource example)
+ netapp.ontap.na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ fields:
+ - '*'
+ validate_certs: false
+ use_rest: Always
+ gather_subset:
+ - storage/volumes/snapshots
+ owning_resource:
+ volume_name: volume_name
+ svm_name: svm_name
+
+- name: run ONTAP gather facts for volume info with query on name and state
+ netapp.ontap.na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ gather_subset:
+ - storage/volumes
+ parameters:
+ name: ansible*
+ state: online
+
+- name: run ONTAP gather fact to get DACLs
+ netapp.ontap.na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ gather_subset:
+ - file_directory_security
+ parameters:
+ vserver: svm1
+ path: /vol1/qtree1
+ use_python_keys: true
+
+- name: get ip network interface info.
+ netapp.ontap.na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ gather_subset:
+ - ip_interfaces_info
+ parameters:
+ location.failover: home_node_only
+ location.node.name: ontap_cluster
+ service_policy.name: default-data-files
+
+- name: get aggregate info
+ netapp.ontap.na_ontap_rest_info:
+ hostname: "1.2.3.4"
+ username: "testuser"
+ password: "test-password"
+ https: true
+ validate_certs: false
+ gather_subset:
+ - aggregate_info
+ parameters:
+ node.name: ontap_cluster
+ block_storage.primary.raid_type: raid_dp
+
+# assuming module_defaults is used to set hostname, username, ...
+- name: run demo subset using custom vsadmin role
+ netapp.ontap.na_ontap_rest_info:
+ gather_subset:
+ - demo
+ force_ontap_version: 9.8
+ ignore_api_errors:
+ - 'not authorized for that command'
+
+# reports: {"cluster/nodes": {"error": {"code": "6", "message": "not authorized for that command"}}
+'''
+
+import codecs
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text, to_bytes
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_owning_resource, rest_vserver
+
+
+class NetAppONTAPGatherInfo(object):
+ '''Class with gather info methods'''
+
+    def __init__(self):
+        """
+        Parse arguments, set up state variables,
+        check parameters and ensure the requests module is installed.
+
+        Builds the module argument spec on top of the shared ONTAP host spec,
+        instantiates the REST client, and fails early if the target cluster
+        does not support REST (minimum ONTAP 9.6).
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            # 'state' is accepted only for backward compatibility; it is
+            # reported as deprecated in apply().
+            state=dict(type='str', required=False),
+            gather_subset=dict(default=['demo'], type='list', elements='str', required=False),
+            max_records=dict(type='int', default=1024, required=False),
+            fields=dict(type='list', elements='str', required=False),
+            parameters=dict(type='dict', required=False),
+            use_python_keys=dict(type='bool', default=False),
+            owning_resource=dict(type='dict', required=False),
+            ignore_api_errors=dict(type='list', elements='str', required=False),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # comma-separated fields string; populated in apply() from the
+        # 'fields' option when provided.
+        self.fields = ''
+
+        self.rest_api = OntapRestAPI(self.module)
+        # This module is REST-only: fail if ONTAP < 9.6 or REST is unusable.
+        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_rest_info', 9, 6, 0)
+
+    def get_subset_info(self, gather_subset_info, default_fields=None):
+        """
+        Gather ONTAP information for the given subset using REST APIs.
+
+        :param gather_subset_info: dict describing the subset; must contain
+            'api_call', may contain 'post' (trigger a POST first) and 'fields'.
+        :param default_fields: extra fields always requested for this subset,
+            prepended to the user-supplied fields.
+        :return: the REST response dict on success, the error message string
+            for tolerated errors, or {'error': ...} for ignored error patterns.
+            Fails the module for any other REST error.
+        """
+
+        api = gather_subset_info['api_call']
+        # Some subsets (e.g. metrocluster diagnostics) require a POST to
+        # generate the data before it can be read with GET.
+        if gather_subset_info.pop('post', False):
+            self.run_post(gather_subset_info)
+        # Precedence for the 'fields' query parameter:
+        # default_fields (+ user fields) > subset-specific fields > user fields.
+        if default_fields:
+            fields = default_fields + ',' + self.fields
+        elif 'fields' in gather_subset_info:
+            fields = gather_subset_info['fields']
+        else:
+            fields = self.fields
+
+        data = {'max_records': self.parameters['max_records'], 'fields': fields}
+
+        # Delete the fields record from data if it is a private/cli API call.
+        # The private_cli_fields method handles the fields for API calls using the private/cli endpoint.
+        if '/private/cli' in api:
+            del data['fields']
+
+        # allow for passing in any additional rest api fields
+        if self.parameters.get('parameters'):
+            for each in self.parameters['parameters']:
+                data[each] = self.parameters['parameters'][each]
+
+        gathered_ontap_info, error = self.rest_api.get(api, data)
+
+        if not error:
+            return gathered_ontap_info
+
+        # If the API doesn't exist (using an older system), we don't want to fail the task.
+        # Error code 3 means the API endpoint is unknown on this ONTAP release.
+        if int(error.get('code', 0)) == 3 or (
+            # if Aggr recommender can't make a recommendation, it will fail with the following error code, don't fail the task.
+            int(error.get('code', 0)) == 19726344 and "No recommendation can be made for this cluster" in error.get('message')):
+            return error.get('message')
+
+        # Do not fail on error
+        # NOTE(review): assumes error always carries a 'message' key here;
+        # 'pattern in None' would raise TypeError otherwise — TODO confirm.
+        for error_pattern in self.parameters.get('ignore_api_errors', []):
+            if error_pattern in error.get('message'):
+                return {'error': error}
+        # Fail the module if error occurs from REST APIs call
+        # Error code 6 means the user is not authorized for this API.
+        if int(error.get('code', 0)) == 6:
+            error = "Error: %s user is not authorized to make %s api call" % (self.parameters.get('username'), api)
+        self.module.fail_json(msg=error)
+
+    @staticmethod
+    def strip_dacls(response):
+        """
+        Extract DACL ACEs from a private/cli file-directory response.
+
+        Returns a list of dicts with 'access_type' and 'user_or_group' keys,
+        an empty list when the marker is present but no ACEs follow,
+        or None when no DACL section is found.
+        """
+        # Use 'DACL - ACE' as a marker for the start of the list of DACLS in the descriptor.
+        # NOTE(review): assumes response['records'] is non-empty; an empty
+        # result would raise IndexError here — TODO confirm callers guarantee it.
+        if 'acls' not in response['records'][0]:
+            return None
+        if 'DACL - ACEs' not in response['records'][0]['acls']:
+            return None
+        index = response['records'][0]['acls'].index('DACL - ACEs')
+        # Everything after the marker line is a DACL entry.
+        dacls = response['records'][0]['acls'][(index + 1):]
+
+        dacl_list = []
+        if dacls:
+            for dacl in dacls:
+                # The '-' marker is the start of the DACL, the '-0x' marker is the end of the DACL.
+                start_hyphen = dacl.index('-') + 1
+                first_hyphen_removed = dacl[start_hyphen:]
+                end_hyphen = first_hyphen_removed.index('-0x')
+                # Text before the first hyphen is the access type (ALLOW/DENY).
+                dacl_dict = {'access_type': dacl[:start_hyphen - 1].strip()}
+                dacl_dict['user_or_group'] = first_hyphen_removed[:end_hyphen]
+                dacl_list.append(dacl_dict)
+        return dacl_list
+
+    def run_post(self, gather_subset_info):
+        """
+        Issue a POST for subsets that must generate data before it can be read
+        (e.g. cluster/metrocluster/diagnostics), then wait for the async job.
+
+        POST errors are silently ignored (returns None); job errors fail the module.
+        """
+        api = gather_subset_info['api_call']
+        post_return, error = self.rest_api.post(api, None)
+        if error:
+            # Best-effort: if the POST fails, fall through and let the GET report.
+            return None
+        # The POST returns a job reference; poll it every 5 seconds.
+        dummy, error = self.rest_api.wait_on_job(post_return['job'], increment=5)
+        if error:
+            # TODO: Handle errors that are not errors
+            self.module.fail_json(msg="%s" % error)
+
+    def get_next_records(self, api):
+        """
+        Gather the next page of ONTAP information for the specified api.
+
+        :param api: the '_links.next.href' path (with the '/api' prefix removed).
+        :return: the REST response dict for that page; fails the module on error.
+        """
+
+        # No query parameters: the 'next' href already encodes pagination state.
+        data = {}
+        gather_subset_info, error = self.rest_api.get(api, data)
+
+        if error:
+            self.module.fail_json(msg=error)
+
+        return gather_subset_info
+
+    def private_cli_fields(self, api):
+        '''
+        Compute the 'fields' value for private/cli endpoints.
+
+        The private cli endpoint does not allow '*' to be entered.
+        If fields='*' or fields are not included within the playbook, the API call will be populated to return all possible fields.
+        If fields is entered into the playbook the fields entered will be used when calling the API.
+        '''
+        if 'fields' not in self.parameters or '*' in self.parameters['fields'] or '**' in self.parameters['fields']:
+            # Only two private/cli endpoints are supported; each has a fixed
+            # "all fields" list since wildcards are rejected by the endpoint.
+            if api == 'support/autosupport/check':
+                fields = 'node,corrective-action,status,error-detail,check-type,check-category'
+            elif api == 'private/cli/vserver/security/file-directory':
+                fields = 'acls'
+            else:
+                self.module.fail_json(msg='Internal error, no field for %s' % api)
+        else:
+            fields = ','.join(self.parameters['fields'])
+        return fields
+
+    def convert_subsets(self):
+        """
+        Convert legacy *_info subset names to their REST API equivalents.
+
+        :return: list of subsets where each entry is either a REST path string
+            or a [rest_path, default_fields] pair for legacy subsets that imply
+            a specific field selection. Unknown names pass through unchanged.
+        """
+        info_to_rest_mapping = {
+            "aggregate_info": "storage/aggregates",
+            "aggr_efficiency_info": ['storage/aggregates', 'space.efficiency,name,node'],
+            "application_info": "application/applications",
+            "application_template_info": "application/templates",
+            "autosupport_check_info": "support/autosupport/check",
+            "autosupport_config_info": "support/autosupport",
+            "autosupport_messages_history": "support/autosupport/messages",
+            "broadcast_domains_info": "network/ethernet/broadcast-domains",
+            "cifs_home_directory_info": "protocols/cifs/home-directory/search-paths",
+            "cifs_options_info": "protocols/cifs/services",
+            "cifs_services_info": "protocols/cifs/services",
+            "cifs_share_info": "protocols/cifs/shares",
+            "cifs_vserver_security_info": ["protocols/cifs/services", "security.encrypt_dc_connection,"
+                                                                     "security.kdc_encryption,security.smb_signing,"
+                                                                     "security.smb_encryption,"
+                                                                     "security.lm_compatibility_level,svm.name"],
+            "clock_info": ["cluster/nodes", "date"],
+            "cloud_targets_info": "cloud/targets",
+            "cluster_chassis_info": "cluster/chassis",
+            "cluster_identity_info": ["cluster", "contact,location,name,uuid"],
+            "cluster_image_info": "cluster/software",
+            "cluster_jobs_info": "cluster/jobs",
+            "cluster_log_forwarding_info": "security/audit/destinations",
+            "cluster_metrocluster_diagnostics": "cluster/metrocluster/diagnostics",
+            "cluster_metrics_info": "cluster/metrics",
+            "cluster_node_info": "cluster/nodes",
+            "cluster_peer_info": "cluster/peers",
+            "cluster_schedules": "cluster/schedules",
+            "cluster_software_download": "cluster/software/download",
+            "cluster_software_history": "cluster/software/history",
+            "cluster_software_packages": "cluster/software/packages",
+            "cluster_switch_info": "network/ethernet/switches",
+            "disk_info": "storage/disks",
+            "event_notification_info": "support/ems/destinations",
+            "event_notification_destination_info": "support/ems/destinations",
+            "export_policy_info": "protocols/nfs/export-policies",
+            "fcp_alias_info": "network/fc/wwpn-aliases",
+            "fcp_service_info": "protocols/san/fcp/services",
+            "file_directory_security": "private/cli/vserver/security/file-directory",
+            "igroup_info": "protocols/san/igroups",
+            "initiator_groups_info": "protocols/san/igroups",
+            "ip_interfaces_info": "network/ip/interfaces",
+            "ip_routes_info": "network/ip/routes",
+            "ip_service_policies": "network/ip/service-policies",
+            "iscsi_service_info": "protocols/san/iscsi/services",
+            "job_schedule_cron_info": "cluster/schedules",
+            "kerberos_realm_info": "protocols/nfs/kerberos/realms",
+            "ldap_client": "name-services/ldap",
+            "ldap_config": "name-services/ldap",
+            "license_info": "cluster/licensing/licenses",
+            "lun_info": "storage/luns",
+            "lun_map_info": "protocols/san/lun-maps",
+            "net_dns_info": "name-services/dns",
+            "net_interface_info": "network/ip/interfaces",
+            "net_interface_service_policy_info": "network/ip/service-policies",
+            "net_port_broadcast_domain_info": "network/ethernet/broadcast-domains",
+            "net_port_info": "network/ethernet/ports",
+            "net_routes_info": "network/ip/routes",
+            "net_ipspaces_info": "network/ipspaces",
+            "net_vlan_info": ["network/ethernet/ports", "name,node.name,vlan.base_port,vlan.tag"],
+            "network_ipspaces_info": "network/ipspaces",
+            "network_ports_info": "network/ethernet/ports",
+            "nfs_info": "protocols/nfs/services",
+            "ntp_server_info": "cluster/ntp/servers",
+            "nvme_info": "protocols/nvme/services",
+            "nvme_interface_info": "protocols/nvme/interfaces",
+            "nvme_namespace_info": "storage/namespaces",
+            "nvme_subsystem_info": "protocols/nvme/subsystems",
+            "metrocluster_info": "cluster/metrocluster",
+            "metrocluster_node_info": "cluster/metrocluster/nodes",
+            "metrocluster_check_info": "cluster/metrocluster/diagnostics",
+            "ontap_system_version": "cluster/software",
+            "quota_report_info": "storage/quota/reports",
+            "qos_policy_info": "storage/qos/policies",
+            "qos_adaptive_policy_info": "storage/qos/policies",
+            "qtree_info": "storage/qtrees",
+            "san_fc_logins_info": "network/fc/logins",
+            "san_fc_wppn-aliases": "network/fc/wwpn-aliases",
+            "san_fcp_services": "protocols/san/fcp/services",
+            "san_iscsi_credentials": "protocols/san/iscsi/credentials",
+            "san_iscsi_services": "protocols/san/iscsi/services",
+            "san_lun_maps": "protocols/san/lun-maps",
+            "security_login_account_info": "security/accounts",
+            "security_login_info": "security/accounts",
+            "security_login_rest_role_info": "security/roles",
+            "shelf_info": "storage/shelves",
+            "sis_info": ["storage/volumes", "efficiency.compression,efficiency.cross_volume_dedupe,"
+                                            "efficiency.cross_volume_dedupe,efficiency.compaction,"
+                                            "efficiency.compression,efficiency.dedupe,efficiency.policy.name,"
+                                            "efficiency.schedule,svm.name"],
+            "sis_policy_info": "storage/volume-efficiency-policies",
+            "snapmirror_destination_info": ["snapmirror/relationships", "destination.path,destination.svm.name,"
+                                                                       "destination.svm.uuid,policy.type,uuid,state,"
+                                                                       "source.path,source.svm.name,source.svm.uuid,"
+                                                                       "transfer.bytes_transferred"],
+            "snapmirror_info": "snapmirror/relationships",
+            "snapmirror_policy_info": "snapmirror/policies",
+            "snapshot_policy_info": "storage/snapshot-policies",
+            "storage_bridge_info": "storage/bridges",
+            "storage_flexcaches_info": "storage/flexcache/flexcaches",
+            "storage_flexcaches_origin_info": "storage/flexcache/origins",
+            "storage_luns_info": "storage/luns",
+            "storage_NVMe_namespaces": "storage/namespaces",
+            "storage_ports_info": "storage/ports",
+            "storage_qos_policies": "storage/qos/policies",
+            "storage_qtrees_config": "storage/qtrees",
+            "storage_quota_reports": "storage/quota/reports",
+            "storage_quota_policy_rules": "storage/quota/rules",
+            "storage_shelves_config": "storage/shelves",
+            "storage_snapshot_policies": "storage/snapshot-policies",
+            "support_ems_config": "support/ems",
+            "support_ems_events": "support/ems/events",
+            "support_ems_filters": "support/ems/filters",
+            "svm_dns_config_info": "name-services/dns",
+            "svm_ldap_config_info": "name-services/ldap",
+            "svm_name_mapping_config_info": "name-services/name-mappings",
+            "svm_nis_config_info": "name-services/nis",
+            "svm_peers_info": "svm/peers",
+            "svm_peer-permissions_info": "svm/peer-permissions",
+            "sysconfig_info": "cluster/nodes",
+            "system_node_info": ["cluster/nodes", "controller.cpu.firmware_release,controller.failed_fan.count,"
+                                                  "controller.failed_fan.message,"
+                                                  "controller.failed_power_supply.count,"
+                                                  "controller.failed_power_supply.message,"
+                                                  "controller.over_temperature,is_all_flash_optimized,"
+                                                  "is_all_flash_select_optimized,is_capacity_optimized,state,name,"
+                                                  "location,model,nvram.id,owner,serial_number,storage_configuration,"
+                                                  "system_id,uptime,uuid,vendor_serial_number,nvram.battery_state,"
+                                                  "version,vm.provider_type"],
+            "sys_cluster_alerts": "private/support/alerts",
+            "vserver_info": "svm/svms",
+            "vserver_peer_info": "svm/peers",
+            "vserver_nfs_info": "protocols/nfs/services",
+            "volume_info": "storage/volumes",
+            "volume_space_info": ["storage/volumes", 'space.logical_space.available,space.logical_space.used,'
+                                                     'space.logical_space.used_percent,space.snapshot.reserve_size,'
+                                                     'space.snapshot.reserve_percent,space.used,name,svm.name'],
+            "vscan_connection_status_all_info": "protocols/vscan/server-status",
+            "vscan_info": "protocols/vscan",
+            "vscan_status_info": "protocols/vscan"
+        }
+        # Map legacy *_info names to their REST equivalents; also make sure we don't add a duplicate
+        subsets = []
+        for subset in self.parameters['gather_subset']:
+            if subset in info_to_rest_mapping:
+                if info_to_rest_mapping[subset] not in subsets:
+                    subsets.append(info_to_rest_mapping[subset])
+            elif subset not in subsets:
+                subsets.append(subset)
+        return subsets
+
+    def add_naa_id(self, info):
+        ''' Mutate LUN records in place, deriving 'serial_hex' and 'naa_id'
+        from each LUN's serial number.
+
+        https://kb.netapp.com/Advice_and_Troubleshooting/Data_Storage_Systems/FlexPod_with_Infrastructure_Automation/
+        How_to_match__LUNs_NAA_number_to_its_serial_number
+        '''
+        if info and 'records' in info:
+            for lun in info['records']:
+                if 'serial_number' in lun:
+                    # NAA id is the fixed NetApp prefix followed by the
+                    # hex-encoded serial number.
+                    hexlify = codecs.getencoder('hex')
+                    lun['serial_hex'] = to_text(hexlify(to_bytes(lun['serial_number']))[0])
+                    lun['naa_id'] = 'naa.600a0980' + lun['serial_hex']
+
+    def augment_subset_info(self, subset, subset_info):
+        """
+        Post-process subset results that need reshaping before being returned.
+        """
+        if subset == 'private/cli/vserver/security/file-directory':
+            # creates a new list of dicts (DACL entries replace the raw response)
+            subset_info = self.strip_dacls(subset_info)
+        if subset == 'storage/luns':
+            # mutates the existing dicts (adds serial_hex / naa_id keys)
+            self.add_naa_id(subset_info)
+        return subset_info
+
+    def get_ontap_subset_info_all(self, subset, default_fields, get_ontap_subset_info):
+        """ Iteratively get all records for a subset, following pagination links.
+
+        :param subset: REST path (or mapped key) being gathered.
+        :param default_fields: extra fields implied by a legacy subset name, or None.
+        :param get_ontap_subset_info: dict of supported subsets; fails the
+            module if subset is not a key of it.
+        """
+        try:
+            # Verify whether the supported subset passed
+            specified_subset = get_ontap_subset_info[subset]
+        except KeyError:
+            self.module.fail_json(msg="Specified subset %s is not found, supported subsets are %s" %
+                                      (subset, list(get_ontap_subset_info.keys())))
+        if 'api_call' not in specified_subset:
+            # By default the subset name is also the REST endpoint.
+            specified_subset['api_call'] = subset
+        subset_info = self.get_subset_info(specified_subset, default_fields)
+
+        if subset_info is not None and isinstance(subset_info, dict) and '_links' in subset_info:
+            while subset_info['_links'].get('next'):
+                # Get all the set of records if next link found in subset_info for the specified subset
+                next_api = subset_info['_links']['next']['href']
+                gathered_subset_info = self.get_next_records(next_api.replace('/api', ''))
+
+                # Update the subset info for the specified subset
+                subset_info['_links'] = gathered_subset_info['_links']
+                subset_info['records'].extend(gathered_subset_info['records'])
+
+            # metrocluster doesn't have a records field, so we need to skip this
+            if subset_info.get('records') is not None:
+                # Getting total number of records
+                subset_info['num_records'] = len(subset_info['records'])
+
+        return self.augment_subset_info(subset, subset_info)
+
+    def apply(self):
+        """
+        Perform pre-checks, call functions and exit.
+
+        Builds the table of supported subsets (with minimum ONTAP versions),
+        expands 'all'/'demo', warns about unsupported subsets, gathers each
+        requested subset, and exits with the results under 'ontap_info'.
+        """
+
+        # Defining gather_subset and appropriate api_call.
+        # Empty dict means: endpoint == subset name, available on all
+        # supported versions; 'version' is the minimum (major, minor[, patch]).
+        get_ontap_subset_info = {
+            'application/applications': {},
+            'application/consistency-groups': {'version': (9, 10, 1)},
+            'application/templates': {},
+            'cloud/targets': {},
+            'cluster': {},
+            'cluster/chassis': {},
+            'cluster/counter/tables': {'version': (9, 11, 1)},
+            # NOTE(review): 'fireware' looks like a typo for 'firmware' — the
+            # REST endpoint is cluster/firmware/history; verify upstream.
+            'cluster/fireware/history': {'version': (9, 8)},
+            'cluster/jobs': {},
+            'cluster/licensing/capacity-pools': {'version': (9, 8)},
+            'cluster/licensing/license-managers': {'version': (9, 8)},
+            'cluster/licensing/licenses': {},
+            'cluster/mediators': {'version': (9, 8)},
+            'cluster/metrics': {},
+            'cluster/metrocluster': {'version': (9, 8)},
+            'cluster/metrocluster/diagnostics': {
+                'version': (9, 8),
+                'post': True
+            },
+            'cluster/metrocluster/dr-groups': {'version': (9, 8)},
+            'cluster/metrocluster/interconnects': {'version': (9, 8)},
+            'cluster/metrocluster/nodes': {'version': (9, 8)},
+            'cluster/metrocluster/operations': {'version': (9, 8)},
+            'cluster/metrocluster/svms': {'version': (9, 11, 1)},
+            'cluster/nodes': {},
+            'cluster/ntp/keys': {'version': (9, 7)},
+            'cluster/ntp/servers': {'version': (9, 7)},
+            'cluster/peers': {},
+            'cluster/schedules': {},
+            'cluster/sensors': {'version': (9, 11, 1)},
+            'cluster/software': {},
+            'cluster/software/download': {'version': (9, 7)},
+            'cluster/software/history': {},
+            'cluster/software/packages': {},
+            'cluster/web': {'version': (9, 10, 1)},
+            'name-services/cache/group-membership/settings': {'version': (9, 11, 1)},
+            'name-services/cache/host/settings': {'version': (9, 11, 1)},
+            'name-services/cache/netgroup/settings': {'version': (9, 11, 1)},
+            'name-services/cache/setting': {'version': (9, 11, 1)},
+            'name-services/cache/unix-group/settings': {'version': (9, 11, 1)},
+            'name-services/dns': {},
+            'name-services/ldap': {},
+            'name-services/ldap-schemas': {'version': (9, 11, 1)},
+            'name-services/local-hosts': {'version': (9, 10, 1)},
+            'name-services/name-mappings': {},
+            'name-services/nis': {},
+            'name-services/unix-groups': {'version': (9, 9)},
+            'name-services/unix-users': {'version': (9, 9)},
+            'network/ethernet/broadcast-domains': {},
+            'network/ethernet/ports': {},
+            'network/ethernet/switch/ports': {'version': (9, 8)},
+            'network/ethernet/switches': {'version': (9, 8)},
+            'network/fc/fabrics': {'version': (9, 11, 1)},
+            'network/fc/interfaces': {},
+            'network/fc/logins': {},
+            'network/fc/ports': {},
+            'network/fc/wwpn-aliases': {},
+            'network/http-proxy': {'version': (9, 7)},
+            'network/ip/bgp/peer-groups': {'version': (9, 7)},
+            'network/ip/interfaces': {},
+            'network/ip/routes': {},
+            'network/ip/service-policies': {},
+            'network/ip/subnets': {'version': (9, 11, 1)},
+            'network/ipspaces': {},
+            'private/support/alerts': {},
+            'protocols/active-directory': {'version': (9, 12, 1)},
+            'protocols/audit': {},
+            'protocols/cifs/connections': {'version': (9, 11, 1)},
+            'protocols/cifs/domains': {'version': (9, 10, 1)},
+            'protocols/cifs/group-policies': {'version': (9, 12, 1)},
+            'protocols/cifs/home-directory/search-paths': {},
+            'protocols/cifs/local-groups': {'version': (9, 9)},
+            'protocols/cifs/local-users': {'version': (9, 9)},
+            'protocols/cifs/netbios': {'version': (9, 11, 1)},
+            'protocols/cifs/services': {},
+            'protocols/cifs/session/files': {'version': (9, 11, 1)},
+            'protocols/cifs/sessions': {'version': (9, 8)},
+            'protocols/cifs/shadow-copies': {'version': (9, 11, 1)},
+            'protocols/cifs/shadowcopy-sets': {'version': (9, 11, 1)},
+            'protocols/cifs/shares': {},
+            'protocols/cifs/unix-symlink-mapping': {},
+            'protocols/cifs/users-and-groups/privileges': {'version': (9, 9)},
+            'protocols/fpolicy': {},
+            'protocols/locks': {'version': (9, 10, 1)},
+            'protocols/ndmp': {'version': (9, 7)},
+            'protocols/ndmp/nodes': {'version': (9, 7)},
+            'protocols/ndmp/sessions': {'version': (9, 7)},
+            'protocols/ndmp/svms': {'version': (9, 7)},
+            'protocols/nfs/connected-clients': {'version': (9, 7)},
+            'protocols/nfs/connected-client-maps': {'version': (9, 11, 1)},
+            'protocols/nfs/connected-client-settings': {'version': (9, 12, 1)},
+            'protocols/nfs/export-policies': {},
+            'protocols/nfs/kerberos/interfaces': {},
+            'protocols/nfs/kerberos/realms': {},
+            'protocols/nfs/services': {},
+            'protocols/nvme/interfaces': {},
+            'protocols/nvme/services': {},
+            'protocols/nvme/subsystem-controllers': {},
+            'protocols/nvme/subsystem-maps': {},
+            'protocols/nvme/subsystems': {},
+            'protocols/s3/buckets': {'version': (9, 7)},
+            'protocols/s3/services': {'version': (9, 7)},
+            'protocols/san/fcp/services': {},
+            'protocols/san/igroups': {},
+            'protocols/san/iscsi/credentials': {},
+            'protocols/san/iscsi/services': {},
+            'protocols/san/iscsi/sessions': {},
+            'protocols/san/lun-maps': {},
+            'protocols/san/portsets': {'version': (9, 9)},
+            'protocols/san/vvol-bindings': {'version': (9, 10, 1)},
+            'protocols/vscan/server-status': {},
+            'protocols/vscan': {},
+            'security': {'version': (9, 7)},
+            'security/accounts': {},
+            'security/anti-ransomware/suspects': {'version': (9, 10, 1)},
+            'security/audit': {},
+            'security/audit/destinations': {},
+            'security/audit/messages': {},
+            'security/authentication/cluster/ad-proxy': {'version': (9, 7)},
+            'security/authentication/cluster/ldap': {},
+            'security/authentication/cluster/nis': {},
+            'security/authentication/cluster/saml-sp': {},
+            'security/authentication/publickeys': {'version': (9, 7)},
+            'security/aws-kms': {'version': (9, 12, 1)},
+            'security/azure-key-vaults': {'version': (9, 8)},
+            'security/certificates': {},
+            'security/gcp-kms': {'version': (9, 9)},
+            'security/ipsec': {'version': (9, 8)},
+            'security/ipsec/ca-certificates': {'version': (9, 10, 1)},
+            'security/ipsec/policies': {'version': (9, 8)},
+            'security/ipsec/security-associations': {'version': (9, 8)},
+            'security/key-manager-configs': {'version': (9, 10, 1)},
+            'security/key-managers': {},
+            'security/key-stores': {'version': (9, 10, 1)},
+            'security/login/messages': {},
+            'security/multi-admin-verify': {'version': (9, 11, 1)},
+            'security/multi-admin-verify/approval-groups': {'version': (9, 11, 1)},
+            'security/multi-admin-verify/requests': {'version': (9, 11, 1)},
+            'security/multi-admin-verify/rules': {'version': (9, 11, 1)},
+            'security/roles': {},
+            'security/ssh': {'version': (9, 7)},
+            'security/ssh/svms': {'version': (9, 10, 1)},
+            'snapmirror/policies': {},
+            'snapmirror/relationships': {},
+            'storage/aggregates': {},
+            'storage/bridges': {'version': (9, 9)},
+            'storage/cluster': {},
+            'storage/disks': {},
+            'storage/file/clone/split-loads': {'version': (9, 10, 1)},
+            'storage/file/clone/split-status': {'version': (9, 10, 1)},
+            'storage/file/clone/tokens': {'version': (9, 10, 1)},
+            'storage/file/moves': {'version': (9, 11, 1)},
+            'storage/flexcache/flexcaches': {},
+            'storage/flexcache/origins': {},
+            'storage/luns': {},
+            'storage/namespaces': {},
+            'storage/pools': {'version': (9, 11, 1)},
+            'storage/ports': {},
+            'storage/qos/policies': {},
+            'storage/qos/workloads': {'version': (9, 10, 1)},
+            'storage/qtrees': {},
+            'storage/quota/reports': {},
+            'storage/quota/rules': {},
+            'storage/shelves': {},
+            'storage/snaplock/audit-logs': {'version': (9, 7)},
+            'storage/snaplock/compliance-clocks': {'version': (9, 7)},
+            'storage/snaplock/event-retention/operations': {'version': (9, 7)},
+            'storage/snaplock/event-retention/policies': {'version': (9, 7)},
+            'storage/snaplock/file-fingerprints': {'version': (9, 7)},
+            'storage/snaplock/litigations': {'version': (9, 7)},
+            'storage/snapshot-policies': {},
+            'storage/switches': {'version': (9, 9)},
+            'storage/tape-devices': {'version': (9, 9)},
+            'storage/volumes': {},
+            'storage/volume-efficiency-policies': {'version': (9, 8)},
+            'support/autosupport': {},
+            # This subset is served by a private/cli endpoint, so the
+            # api_call and fields are fixed rather than derived from the name.
+            'support/autosupport/check': {
+                'api_call': '/private/cli/system/node/autosupport/check/details',
+                'fields': self.private_cli_fields('support/autosupport/check'),
+            },
+            'support/autosupport/messages': {},
+            'support/auto-update': {'version': (9, 10, 1)},
+            'support/auto-update/configurations': {'version': (9, 10, 1)},
+            'support/auto-update/updates': {'version': (9, 10, 1)},
+            'support/configuration-backup': {},
+            'support/configuration-backup/backups': {'version': (9, 7)},
+            'support/coredump/coredumps': {'version': (9, 10, 1)},
+            'support/ems': {},
+            'support/ems/destinations': {},
+            'support/ems/events': {},
+            'support/ems/filters': {},
+            'support/ems/messages': {},
+            'support/snmp': {'version': (9, 7)},
+            'support/snmp/traphosts': {'version': (9, 7)},
+            'support/snmp/users': {'version': (9, 7)},
+            'svm/migrations': {'version': (9, 10, 1)},
+            'svm/peers': {},
+            'svm/peer-permissions': {},
+            'svm/svms': {}
+        }
+        # file_directory_security needs a private/cli endpoint; register it
+        # only when requested since its fields depend on user parameters.
+        if 'gather_subset' in self.parameters and (
+            'private/cli/vserver/security/file-directory' in self.parameters['gather_subset']
+            or 'file_directory_security' in self.parameters['gather_subset']
+        ):
+            get_ontap_subset_info['private/cli/vserver/security/file-directory'] = {
+                'api_call': 'private/cli/vserver/security/file-directory',
+                'fields': self.private_cli_fields('private/cli/vserver/security/file-directory')
+            }
+        if 'all' in self.parameters['gather_subset']:
+            # If all in subset list, get the information of all subsets
+            self.parameters['gather_subset'] = sorted(get_ontap_subset_info.keys())
+        if 'demo' in self.parameters['gather_subset']:
+            # 'demo' (also the default) gathers a small representative set.
+            self.parameters['gather_subset'] = ['cluster/software', 'svm/svms', 'cluster/nodes']
+        get_ontap_subset_info = self.add_uuid_subsets(get_ontap_subset_info)
+
+        length_of_subsets = len(self.parameters['gather_subset'])
+        # Removes version-unsupported subsets from gather_subset (with a warning).
+        unsupported_subsets = self.subset_version_warning(get_ontap_subset_info)
+
+        if self.parameters.get('fields'):
+            if '**' in self.parameters.get('fields'):
+                self.module.warn('Using ** can put an extra load on the system and should not be used in production')
+            # If multiple fields specified to return, convert list to string
+            self.fields = ','.join(self.parameters.get('fields'))
+
+            if self.fields not in ('*', '**') and length_of_subsets > 1:
+                # Restrict gather subsets to one subset if fields section is list_of_fields
+                self.module.fail_json(msg="Error: fields: %s, only one subset will be allowed." % self.parameters.get('fields'))
+        converted_subsets = self.convert_subsets()
+
+        result_message = {}
+        for subset in converted_subsets:
+            # Legacy subsets may carry implied default fields as [name, fields].
+            subset, default_fields = subset if isinstance(subset, list) else (subset, None)
+            result_message[subset] = self.get_ontap_subset_info_all(subset, default_fields, get_ontap_subset_info)
+        for subset in unsupported_subsets:
+            result_message[subset] = '%s requires ONTAP %s' % (subset, get_ontap_subset_info[subset]['version'])
+
+        results = {'changed': False}
+        if self.parameters.get('state') is not None:
+            results['state'] = self.parameters['state']
+            results['warnings'] = "option 'state' is deprecated."
+        if self.parameters['use_python_keys']:
+            # Make keys usable as python identifiers: '/' and '-' become '_'.
+            new_dict = dict((key.replace('/', '_'), value) for (key, value) in result_message.items())
+            new_dict = dict((key.replace('-', '_'), value) for (key, value) in new_dict.items())
+            result_message = new_dict
+        self.module.exit_json(ontap_info=result_message, **results)
+
+    def subset_version_warning(self, get_ontap_subset_info):
+        """
+        Warn about (and drop) requested subsets the target ONTAP version
+        does not support.
+
+        :return: list of removed subset names; also mutates
+            self.parameters['gather_subset'] in place.
+        """
+        # If a user requests a subset that their version of ONTAP does not support give them a warning (but don't fail)
+        unsupported_subset = []
+        warn_message = ''
+        user_version = self.rest_api.get_ontap_version()
+        for subset in self.parameters['gather_subset']:
+            # Tuple comparison: (9, 11, 1) > (9, 8) etc.
+            if subset in get_ontap_subset_info and 'version' in get_ontap_subset_info[subset] and get_ontap_subset_info[subset]['version'] > user_version:
+                warn_message += '%s requires %s, ' % (subset, get_ontap_subset_info[subset]['version'])
+                # remove subset so info doesn't fail for a bad subset
+                unsupported_subset.append(subset)
+                self.parameters['gather_subset'].remove(subset)
+        if warn_message != '':
+            self.module.warn('The following subset have been removed from your query as they are not supported on your version of ONTAP %s' % warn_message)
+        return unsupported_subset
+
+    def add_uuid_subsets(self, get_ontap_subset_info):
+        """
+        Register subsets whose REST endpoint embeds an owning resource's UUID
+        (volume snapshots, export-policy rules, vscan policies).
+
+        Each requires specific keys in the 'owning_resource' option; missing or
+        extra keys fail the module via check_error_values().
+        """
+        params = self.parameters.get('owning_resource')
+        if 'gather_subset' in self.parameters:
+            if 'storage/volumes/snapshots' in self.parameters['gather_subset']:
+                self.check_error_values('storage/volumes/snapshots', params, ['volume_name', 'svm_name'])
+                volume_uuid = rest_owning_resource.get_volume_uuid(self.rest_api, self.parameters['owning_resource']['volume_name'],
+                                                                   self.parameters['owning_resource']['svm_name'], self.module)
+                if volume_uuid:
+                    get_ontap_subset_info['storage/volumes/snapshots'] = {'api_call': 'storage/volumes/%s/snapshots' % volume_uuid}
+            if 'protocols/nfs/export-policies/rules' in self.parameters['gather_subset']:
+                self.check_error_values('protocols/nfs/export-policies/rules', params, ['policy_name', 'svm_name', 'rule_index'])
+                policy_id = rest_owning_resource.get_export_policy_id(self.rest_api, self.parameters['owning_resource']['policy_name'],
+                                                                      self.parameters['owning_resource']['svm_name'], self.module)
+                if policy_id:
+                    get_ontap_subset_info['protocols/nfs/export-policies/rules'] = {
+                        'api_call': 'protocols/nfs/export-policies/%s/rules/%s' % (policy_id, self.parameters['owning_resource']['rule_index']),
+                    }
+            # The three vscan subsets only need the owning SVM's UUID.
+            if 'protocols/vscan/on-access-policies' in self.parameters['gather_subset']:
+                self.add_vserver_owning_resource('protocols/vscan/on-access-policies', params, 'protocols/vscan/%s/on-access-policies', get_ontap_subset_info)
+            if 'protocols/vscan/on-demand-policies' in self.parameters['gather_subset']:
+                self.add_vserver_owning_resource('protocols/vscan/on-demand-policies', params, 'protocols/vscan/%s/on-demand-policies', get_ontap_subset_info)
+            if 'protocols/vscan/scanner-pools' in self.parameters['gather_subset']:
+                self.add_vserver_owning_resource('protocols/vscan/scanner-pools', params, 'protocols/vscan/%s/scanner-pools', get_ontap_subset_info)
+        return get_ontap_subset_info
+
+    def add_vserver_owning_resource(self, subset, params, api, get_ontap_subset_info):
+        """
+        Register a subset whose endpoint embeds the owning SVM's UUID.
+
+        :param api: endpoint template with a single '%s' placeholder for the UUID.
+        """
+        self.check_error_values(subset, params, ['svm_name'])
+        # Third argument True: fail the module if the SVM is not found.
+        svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['owning_resource']['svm_name'], self.module, True)
+        if svm_uuid:
+            get_ontap_subset_info[subset] = {'api_call': api % svm_uuid}
+
+    def check_error_values(self, api, params, items):
+        """
+        Fail the module unless the owning_resource dict contains exactly the
+        required keys (no more, no fewer) for the given subset.
+        """
+        error = not params or sorted(list(params.keys())) != sorted(items)
+        if error:
+            self.module.fail_json(msg="Error: %s are required for %s" % (', '.join(items), api))
+
+
+def main():
+    """
+    Main function: instantiate the gatherer and run it (apply() exits the module).
+    """
+    obj = NetAppONTAPGatherInfo()
+    obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py
new file mode 100644
index 000000000..7bfd63b71
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py
@@ -0,0 +1,393 @@
+#!/usr/bin/python
+'''
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Call a REST API on ONTAP.
+ - Cluster REST API are run using a cluster admin account.
+ - Vserver REST API can be run using a vsadmin account or using vserver tunneling (cluster admin with I(vserver_) options).
+ - In case of success, a json dictionary is returned as C(response).
+ - In case of a REST API error, C(status_code), C(error_code), C(error_message) are set to help with diagnosing the issue,
+ - and the call is reported as an error ('failed').
+ - Other errors (eg connection issues) are reported as Ansible error.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_restit
+short_description: NetApp ONTAP Run any REST API on ONTAP
+version_added: "20.4.0"
+options:
+ api:
+ description:
+ - The REST API to call (eg I(cluster/software), I(svms/svm)).
+ required: true
+ type: str
+ method:
+ description:
+ - The REST method to use.
+ default: GET
+ type: str
+ query:
+ description:
+ - A list of dictionaries for the query parameters
+ type: dict
+ body:
+ description:
+ - A dictionary for the info parameter
+ type: dict
+ aliases: ['info']
+ vserver_name:
+ description:
+ - if provided, forces vserver tunneling. username identifies a cluster admin account.
+ type: str
+ vserver_uuid:
+ description:
+ - if provided, forces vserver tunneling. username identifies a cluster admin account.
+ type: str
+ hal_linking:
+ description:
+ - if true, HAL-encoded links are returned in the response.
+ default: false
+ type: bool
+ wait_for_completion:
+ description:
+ - when true, POST/PATCH/DELETE can be handled synchronously and asynchronously.
+      - if the response indicates that a job is in progress, the job status is checked periodically until it completes.
+ - when false, the call returns immediately.
+ type: bool
+ default: false
+ version_added: 21.14.0
+ files:
+ description:
+ - A dictionary for the parameters when using multipart/form-data.
+ - This is very infrequently needed, but required to write a file (see examples)
+ - When present, requests will automatically set the Content-Type header to multipart/form-data.
+ type: dict
+ version_added: 21.24.0
+ accept_header:
+ description:
+ - Value for the Accept request HTTP header.
+ - This is very infrequently needed, but required to read a file (see examples).
+ - For most cases, omit this field. Set it to "multipart/form-data" when expecting such a format.
+ - By default the module is using "application/json" or "application/hal+json" when hal_linking is true.
+ type: str
+ version_added: 21.24.0
+'''
+
+EXAMPLES = """
+-
+ name: Ontap REST API
+ hosts: localhost
+ gather_facts: False
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ admin_ip }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ https: true
+ validate_certs: false
+ svm_login: &svm_login
+ hostname: "{{ svm_admin_ip }}"
+ username: "{{ svm_admin_username }}"
+ password: "{{ svm_admin_password }}"
+ https: true
+ validate_certs: false
+
+ tasks:
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: cluster/software
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: cluster/software
+ query:
+ fields: version
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: svm/svms
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as cluster admin
+ na_ontap_restit:
+ <<: *login
+ api: svm/svms
+ query:
+ fields: aggregates,cifs,nfs,uuid
+ query_fields: name
+ query: trident_svm
+ hal_linking: true
+ register: result
+ - debug: var=result
+
+ - name: run ontap REST API command as vsadmin
+ na_ontap_restit:
+ <<: *svm_login
+ api: svm/svms
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: run ontap REST API command as vserver tunneling
+ na_ontap_restit:
+ <<: *login
+ api: storage/volumes
+ vserver_name: ansibleSVM
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+ - set_fact:
+ uuid: "{{ result.response.records | json_query(get_uuid) }}"
+ vars:
+ get_uuid: "[? name=='deleteme_ln1'].uuid"
+ - debug: var=uuid
+
+ - name: run ontap REST API command as DELETE method with vserver tunneling
+ na_ontap_restit:
+ <<: *login
+ api: "storage/volumes/{{ uuid[0] }}"
+ method: DELETE
+ vserver_name: ansibleSVM
+ query:
+ return_timeout: 60
+ register: result
+ when: uuid|length == 1
+ - debug: var=result
+ - assert: { that: result.skipped|default(false) or result.status_code|default(404) == 200, quiet: True }
+
+ - name: run ontap REST API command as POST method with vserver tunneling
+ na_ontap_restit:
+ <<: *login
+ api: storage/volumes
+ method: POST
+ vserver_name: ansibleSVM
+ query:
+ return_records: "true"
+ return_timeout: 60
+ body:
+ name: deleteme_ln1
+ aggregates:
+ - name: aggr1
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==201, quiet: True }
+
+ - name: run ontap REST API command as DELETE method with vserver tunneling
+ # delete test volume if present
+ na_ontap_restit:
+ <<: *login
+ api: "storage/volumes/{{ result.response.records[0].uuid }}"
+ method: DELETE
+ vserver_name: ansibleSVM
+ query:
+ return_timeout: 60
+ register: result
+ - debug: var=result
+ - assert: { that: result.status_code==200, quiet: True }
+
+ - name: create a file
+ # assuming credentials are set using module_defaults
+ na_ontap_restit:
+ api: storage/volumes/f3c003cb-2974-11ed-b2f8-005056b38dae/files/laurent123.txt
+ method: post
+ files: {'data': 'some data'}
+
+ - name: read a file
+ # assuming credentials are set using module_defaults
+ na_ontap_restit:
+ api: storage/volumes/f3c003cb-2974-11ed-b2f8-005056b38dae/files/laurent123.txt
+ method: get
+ accept_header: "multipart/form-data"
+ query:
+ length: 100
+
+# error cases
+ - name: run ontap REST API command
+ na_ontap_restit:
+ <<: *login
+ api: unknown/endpoint
+ register: result
+ ignore_errors: True
+ - debug: var=result
+ - assert: { that: result.status_code==404, quiet: True }
+
+"""
+
+RETURN = """
+response:
+ description:
+ - If successful, a json dictionary returned by the REST API.
+ - If the REST API was executed but failed, an empty dictionary.
+ - Not present if the REST API call cannot be performed.
+ returned: On success
+ type: dict
+status_code:
+ description:
+ - The http status code.
+ - When wait_for_completion is True, this is forced to 0.
+ returned: Always
+ type: str
+error_code:
+ description:
+ - If the REST API was executed but failed, the error code set by the REST API.
+ - Not present if successful, or if the REST API call cannot be performed.
+ returned: On error
+ type: str
+error_message:
+ description:
+ - If the REST API was executed but failed, the error message set by the REST API.
+ - Not present if successful, or if the REST API call cannot be performed.
+ returned: On error
+ type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppONTAPRestAPI(object):
+    ''' calls a REST API command '''
+
+    def __init__(self):
+        # Start from the shared ONTAP connection argument spec and add the
+        # module-specific options (endpoint, method, body, tunneling, ...).
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            api=dict(required=True, type='str'),
+            method=dict(required=False, type='str', default='GET'),
+            query=dict(required=False, type='dict'),
+            body=dict(required=False, type='dict', aliases=['info']),
+            vserver_name=dict(required=False, type='str'),
+            vserver_uuid=dict(required=False, type='str'),
+            hal_linking=dict(required=False, type='bool', default=False),
+            wait_for_completion=dict(required=False, type='bool', default=False),
+            # to support very infrequent form-data format
+            files=dict(required=False, type='dict'),
+            accept_header=dict(required=False, type='str'),
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True,
+        )
+        parameters = self.module.params
+        # set up state variables
+        self.api = parameters['api']
+        self.method = parameters['method']
+        self.query = parameters['query']
+        self.body = parameters['body']
+        self.vserver_name = parameters['vserver_name']
+        self.vserver_uuid = parameters['vserver_uuid']
+        self.hal_linking = parameters['hal_linking']
+        self.wait_for_completion = parameters['wait_for_completion']
+        self.files = parameters['files']
+        self.accept_header = parameters['accept_header']
+
+        self.rest_api = OntapRestAPI(self.module)
+
+        # Default the Accept header based on hal_linking when not set explicitly.
+        if self.accept_header is None:
+            self.accept_header = 'application/hal+json' if self.hal_linking else 'application/json'
+
+    def build_headers(self):
+        # Build the HTTP headers; vserver_name/vserver_uuid enable vserver tunneling.
+        return self.rest_api.build_headers(accept=self.accept_header, vserver_name=self.vserver_name, vserver_uuid=self.vserver_uuid)
+
+    def fail_on_error(self, status, response, error):
+        # Report a REST failure through fail_json, exposing status_code,
+        # error_message and error_code to the playbook for diagnosis.
+        # No-op when error is falsy.
+        if error:
+            if isinstance(error, dict):
+                # NOTE: pop() mutates the error dict; whatever remains after
+                # removing message/code is still reported in msg below.
+                error_message = error.pop('message', None)
+                error_code = error.pop('code', None)
+                if not error:
+                    # we exhausted the dictionary
+                    error = 'check error_message and error_code for details.'
+            else:
+                error_message = error
+                error_code = None
+
+            msg = "Error when calling '%s': %s" % (self.api, str(error))
+            self.module.fail_json(msg=msg, status_code=status, response=response, error_message=error_message, error_code=error_code)
+
+    def run_api(self):
+        ''' calls the REST API '''
+        # TODO, log usage
+        status, response, error = self.rest_api.send_request(self.method, self.api, self.query, self.body, self.build_headers(), self.files)
+        self.fail_on_error(status, response, error)
+
+        return status, response
+
+    def run_api_async(self):
+        ''' calls the REST API '''
+        # TODO, log usage
+
+        # Dispatch to the *_async helper matching the HTTP method; these helpers
+        # poll the returned job until completion.  Unsupported methods warn and
+        # fall back to a plain synchronous call.
+        args = [self.rest_api, self.api]
+        kwargs = {}
+        if self.method.upper() == 'POST':
+            method = rest_generic.post_async
+            kwargs['body'] = self.body
+            kwargs['files'] = self.files
+        elif self.method.upper() == 'PATCH':
+            method = rest_generic.patch_async
+            args.append(None)  # uuid should be provided in the API
+            kwargs['body'] = self.body
+            kwargs['files'] = self.files
+        elif self.method.upper() == 'DELETE':
+            method = rest_generic.delete_async
+            args.append(None)  # uuid should be provided in the API
+        else:
+            self.module.warn('wait_for_completion ignored for %s method.' % self.method)
+            return self.run_api()
+
+        kwargs.update({
+            'raw_error': True,
+            'headers': self.build_headers()
+        })
+        if self.query:
+            kwargs['query'] = self.query
+        response, error = method(*args, **kwargs)
+        self.fail_on_error(0, response, error)
+
+        # The async helpers do not surface an HTTP status code, so force 0
+        # (matching the documented behavior of wait_for_completion).
+        return 0, response
+
+    def apply(self):
+        ''' calls the api and returns json output '''
+        if self.module.check_mode:
+            status_code, response = None, {'check_mode': 'would run %s %s' % (self.method, self.api)}
+        elif self.wait_for_completion:
+            status_code, response = self.run_api_async()
+        else:
+            status_code, response = self.run_api()
+        # NOTE(review): changed is reported as True unconditionally, even for a
+        # GET or check_mode run — the module cannot tell whether state changed.
+        self.module.exit_json(changed=True, status_code=status_code, response=response)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ restapi = NetAppONTAPRestAPI()
+ restapi.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_buckets.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_buckets.py
new file mode 100644
index 000000000..3e7197933
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_buckets.py
@@ -0,0 +1,586 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_s3_buckets
+short_description: NetApp ONTAP S3 Buckets
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.19.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete, or modify S3 buckets on NetApp ONTAP.
+
+options:
+ state:
+ description:
+ - Whether the specified S3 bucket should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - The name of the S3 or NAS bucket.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ aggregates:
+ description:
+ - List of aggregates names to use for the S3 bucket.
+ - This option is not supported when I(type=nas).
+ type: list
+ elements: str
+
+ constituents_per_aggregate:
+ description:
+ - Number of constituents per aggregate.
+ - This option is not supported when I(type=nas).
+ type: int
+
+ size:
+ description:
+ - Size of the S3 bucket in bytes.
+ - This option is not supported when I(type=nas).
+ type: int
+
+ comment:
+ description:
+ - Comment for the S3 bucket.
+ type: str
+
+ type:
+ description:
+    - Specifies the bucket type. Valid values are "s3" and "nas".
+ type: str
+ choices: ['s3', 'nas']
+ version_added: 22.6.0
+
+ nas_path:
+ description:
+ - Specifies the NAS path to which the nas bucket corresponds to.
+ type: str
+ version_added: 22.7.0
+
+ policy:
+ description:
+ - Access policy uses the Amazon Web Services (AWS) policy language syntax to allow S3 tenants to create access policies to their data
+ type: dict
+ suboptions:
+ statements:
+ description:
+ - Policy statements are built using this structure to specify permissions
+ - Grant <Effect> to allow/deny <Principal> to perform <Action> on <Resource> when <Condition> applies
+ type: list
+ elements: dict
+ suboptions:
+ sid:
+ description: Statement ID
+ type: str
+ resources:
+ description:
+ - The bucket and any object it contains.
+ - The wildcard characters * and ? can be used to form a regular expression for specifying a resource.
+ type: list
+ elements: str
+ actions:
+ description:
+ - You can specify * to mean all actions, or a list of one or more of the following
+ - GetObject
+ - PutObject
+ - DeleteObject
+ - ListBucket
+ - GetBucketAcl
+ - GetObjectAcl
+ - ListBucketMultipartUploads
+ - ListMultipartUploadParts
+ type: list
+ elements: str
+ effect:
+ description: The statement may allow or deny access
+ type: str
+ choices:
+ - allow
+ - deny
+ principals:
+ description: A list of one or more S3 users or groups.
+ type: list
+ elements: str
+ conditions:
+ description: Conditions for when a policy is in effect.
+ type: list
+ elements: dict
+ suboptions:
+ operator:
+ description:
+ - The operator to use for the condition.
+ type: str
+ choices:
+ - ip_address
+ - not_ip_address
+ - string_equals
+ - string_not_equals
+ - string_equals_ignore_case
+ - string_not_equals_ignore_case
+ - string_like
+ - string_not_like
+ - numeric_equals
+ - numeric_not_equals
+ - numeric_greater_than
+ - numeric_greater_than_equals
+ - numeric_less_than
+ - numeric_less_than_equals
+ max_keys:
+ description:
+ - The maximum number of keys that can be returned in a request.
+ type: list
+ elements: str
+ delimiters:
+ description:
+ - The delimiter used to identify a prefix in a list of objects.
+ type: list
+ elements: str
+ source_ips:
+ description:
+ - The source IP address of the request.
+ type: list
+ elements: str
+ prefixes:
+ description:
+ - The prefixes of the objects that you want to list.
+ type: list
+ elements: str
+ usernames:
+ description:
+ - The user names that you want to allow to access the bucket.
+ type: list
+ elements: str
+
+ qos_policy:
+ description:
+ - A policy group defines measurable service level objectives (SLOs) that apply to the storage objects with which the policy group is associated.
+    - If you do not assign a policy group to a bucket, the system will not monitor and control the traffic to it.
+ - This option is not supported when I(type=nas).
+ type: dict
+ suboptions:
+ max_throughput_iops:
+ description: The maximum throughput in IOPS.
+ type: int
+ max_throughput_mbps:
+ description: The maximum throughput in MBPS.
+ type: int
+ min_throughput_iops:
+ description: The minimum throughput in IOPS.
+ type: int
+ min_throughput_mbps:
+ description: The minimum throughput in MBPS.
+ type: int
+ name:
+ description: The QoS policy group name. This is mutually exclusive with other QoS attributes.
+ type: str
+
+ audit_event_selector:
+ description:
+ - Audit event selector allows you to specify access and permission types to audit.
+ - This option is not supported when I(type=nas).
+ type: dict
+ suboptions:
+ access:
+ description:
+ - specifies the type of event access to be audited, read-only, write-only or all (default is all).
+ type: str
+ choices:
+ - read
+ - write
+ - all
+ permission:
+ description:
+ - specifies the type of event permission to be audited, allow-only, deny-only or all (default is all).
+ type: str
+ choices:
+ - allow
+ - deny
+ - all
+notes:
+ - module will try to set desired C(audit_event_selector) if the bucket is not configured with audit_event_selector options,
+ but may not take effect if there is no audit configuration present in vserver.
+'''
+
+EXAMPLES = """
+ - name: Create S3 bucket
+ netapp.ontap.na_ontap_s3_buckets:
+ state: present
+ name: carchi-test-bucket
+ comment: carchi8py was here
+ size: 838860800
+ vserver: ansibleSVM
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ use_rest: always
+
+ - name: Create S3 bucket with a policy
+ netapp.ontap.na_ontap_s3_buckets:
+ state: present
+ name: carchi-test-bucket
+ comment: carchi8py was here
+ size: 838860800
+ policy:
+ statements:
+ - sid: FullAccessToUser1
+ resources:
+ - bucket1
+ - bucket1/*
+ actions:
+ - GetObject
+ - PutObject
+ - DeleteObject
+ - ListBucket
+ effect: allow
+ conditions:
+ - operator: ip_address
+ max_keys:
+ - 1000
+ delimiters:
+ - "/"
+ source_ips:
+ - 1.1.1.1
+ - 1.2.2.0/24
+ prefixes:
+ - prex
+ usernames:
+ - user1
+ principals:
+ - user1
+ - group/grp1
+ vserver: ansibleSVM
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ use_rest: always
+
+ - name: Delete S3 bucket
+ netapp.ontap.na_ontap_s3_buckets:
+ state: absent
+ name: carchi-test-bucket
+ vserver: ansibleSVM
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ use_rest: always
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapS3Buckets:
+    """Create, delete, or modify S3/NAS buckets on ONTAP via the REST API."""
+
+    def __init__(self):
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            name=dict(required=True, type='str'),
+            vserver=dict(required=True, type='str'),
+            aggregates=dict(required=False, type='list', elements='str'),
+            constituents_per_aggregate=dict(required=False, type='int'),
+            size=dict(required=False, type='int'),
+            comment=dict(required=False, type='str'),
+            type=dict(required=False, type='str', choices=['s3', 'nas']),
+            nas_path=dict(required=False, type='str'),
+            policy=dict(type='dict', options=dict(
+                statements=dict(type='list', elements='dict', options=dict(
+                    sid=dict(required=False, type='str'),
+                    resources=dict(required=False, type='list', elements='str'),
+                    actions=dict(required=False, type='list', elements='str'),
+                    effect=dict(required=False, type='str', choices=['allow', 'deny']),
+                    conditions=dict(type='list', elements='dict', options=dict(
+                        operator=dict(required=False, type='str', choices=['ip_address',
+                                                                           'not_ip_address',
+                                                                           'string_equals',
+                                                                           'string_not_equals',
+                                                                           'string_equals_ignore_case',
+                                                                           'string_not_equals_ignore_case',
+                                                                           'string_like',
+                                                                           'string_not_like',
+                                                                           'numeric_equals',
+                                                                           'numeric_not_equals',
+                                                                           'numeric_greater_than',
+                                                                           'numeric_greater_than_equals',
+                                                                           'numeric_less_than',
+                                                                           'numeric_less_than_equals']),
+                        max_keys=dict(required=False, type='list', elements='str', no_log=False),
+                        delimiters=dict(required=False, type='list', elements='str'),
+                        source_ips=dict(required=False, type='list', elements='str'),
+                        prefixes=dict(required=False, type='list', elements='str'),
+                        usernames=dict(required=False, type='list', elements='str'))),
+                    principals=dict(type='list', elements='str')
+                )))),
+            qos_policy=dict(type='dict', options=dict(
+                max_throughput_iops=dict(type='int'),
+                max_throughput_mbps=dict(type='int'),
+                name=dict(type='str'),
+                min_throughput_iops=dict(type='int'),
+                min_throughput_mbps=dict(type='int'),
+            )),
+            audit_event_selector=dict(type='dict', options=dict(
+                access=dict(type='str', choices=['read', 'write', 'all']),
+                permission=dict(type='str', choices=['allow', 'deny', 'all']))),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        # uuids resolved from the current bucket record (see set_uuid).
+        self.svm_uuid = None
+        self.uuid = None
+        self.volume_uuid = None
+        self.na_helper = NetAppModule(self.module)
+        self.parameters = self.na_helper.check_and_set_parameters(self.module)
+
+        self.rest_api = netapp_utils.OntapRestAPI(self.module)
+        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_s3_bucket', 9, 8)
+        partially_supported_rest_properties = [['audit_event_selector', (9, 10, 1)], ['type', (9, 12, 1)], ['nas_path', (9, 12, 1)]]
+        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties)
+        # few keys in policy.statements will be configured with default value if not set in create.
+        # so removing None entries to avoid idempotent issue in next run.
+        if self.parameters.get('policy'):
+            # below keys can be reset with empty list.
+            # - statements.
+            # - conditions.
+            # - actions.
+            # - principals.
+            self.parameters['policy'] = self.na_helper.filter_out_none_entries(self.parameters['policy'], True)
+            for statement in self.parameters['policy'].get('statements', []):
+                if {} in self.parameters['policy']['statements']:
+                    self.module.fail_json(msg="Error: cannot set empty dict for policy statements.")
+                # a lone "*" resource expands to the bucket and all its objects.
+                if len(statement.get('resources', [])) == 1 and statement['resources'] == ["*"]:
+                    statement['resources'] = [self.parameters['name'], self.parameters['name'] + '/*']
+                for condition in statement.get('conditions', []):
+                    updated_ips = []
+                    for ip in condition.get('source_ips', []):
+                        if '/' in ip:
+                            updated_ips.append(ip)
+                        else:
+                            # if cidr notation not set in each ip, append /32.
+                            # cidr unset ip address will return with /32 in next run.
+                            updated_ips.append(ip + '/32')
+                    if updated_ips:
+                        condition['source_ips'] = updated_ips
+
+    def get_s3_bucket(self):
+        """Fetch the bucket record; return a normalized dict or None if absent."""
+        api = 'protocols/s3/buckets'
+        fields = 'name,svm.name,size,comment,volume.uuid,policy,policy.statements,qos_policy'
+        # audit_event_selector/type/nas_path are only supported on newer ONTAP releases.
+        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 12, 1):
+            fields += ',audit_event_selector,type,nas_path'
+        elif self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
+            fields += ',audit_event_selector'
+        params = {'name': self.parameters['name'],
+                  'svm.name': self.parameters['vserver'],
+                  'fields': fields}
+        record, error = rest_generic.get_one_record(self.rest_api, api, params)
+        if error:
+            self.module.fail_json(msg='Error fetching S3 bucket %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+        return self.form_current(record) if record else None
+
+    def form_current(self, record):
+        """Normalize a REST record into the shape used for idempotency comparison."""
+        self.set_uuid(record)
+        body = {
+            'comment': self.na_helper.safe_get(record, ['comment']),
+            'size': self.na_helper.safe_get(record, ['size']),
+            'policy': self.na_helper.safe_get(record, ['policy']),
+            'qos_policy': self.na_helper.safe_get(record, ['qos_policy']),
+            'audit_event_selector': self.na_helper.safe_get(record, ['audit_event_selector']),
+            'type': self.na_helper.safe_get(record, ['type']),
+            'nas_path': self.na_helper.safe_get(record, ['nas_path'])
+        }
+        if body['policy']:
+            for policy_statement in body['policy'].get('statements', []):
+                # So we treat SID as a String as it can accept Words, or Numbers.
+                # ONTAP will return it as a String, unless it is just
+                # numbers then it is returned as an INT.
+                policy_statement['sid'] = str(policy_statement['sid'])
+                # setting keys in each condition to None if not present to avoid idempotency issue.
+                if not policy_statement.get('conditions'):
+                    policy_statement['conditions'] = []
+                else:
+                    for condition in policy_statement['conditions']:
+                        condition['delimiters'] = condition.get('delimiters')
+                        condition['max_keys'] = condition.get('max_keys')
+                        condition['operator'] = condition.get('operator')
+                        condition['prefixes'] = condition.get('prefixes')
+                        condition['source_ips'] = condition.get('source_ips')
+                        condition['usernames'] = condition.get('usernames')
+        # empty [] is used to reset policy statements.
+        # setting policy statements to [] to avoid idempotency issue.
+        else:
+            body['policy'] = {'statements': []}
+        return body
+
+    def set_uuid(self, record):
+        """Cache bucket, SVM, and (for s3 buckets) volume uuids from the record."""
+        self.uuid = record['uuid']
+        self.svm_uuid = record['svm']['uuid']
+        # volume key is not returned for NAS buckets.
+        self.volume_uuid = self.na_helper.safe_get(record, ['volume', 'uuid'])
+
+    def create_s3_bucket(self):
+        """POST a new bucket; fails the module on REST error."""
+        api = 'protocols/s3/buckets'
+        body = {'svm.name': self.parameters['vserver'], 'name': self.parameters['name']}
+        body.update(self.form_create_or_modify_body())
+        dummy, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=120)
+        if error:
+            self.module.fail_json(msg='Error creating S3 bucket %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_s3_bucket(self):
+        """DELETE the bucket identified by the cached svm/bucket uuids."""
+        api = 'protocols/s3/buckets'
+        uuids = '%s/%s' % (self.svm_uuid, self.uuid)
+        dummy, error = rest_generic.delete_async(self.rest_api, api, uuids, job_timeout=120)
+        if error:
+            self.module.fail_json(msg='Error deleting S3 bucket %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def modify_s3_bucket(self, modify):
+        """PATCH the bucket with only the attributes that need to change."""
+        api = 'protocols/s3/buckets'
+        uuids = '%s/%s' % (self.svm_uuid, self.uuid)
+        body = self.form_create_or_modify_body(modify)
+        dummy, error = rest_generic.patch_async(self.rest_api, api, uuids, body, job_timeout=120)
+        if error:
+            self.module.fail_json(msg='Error modifying S3 bucket %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def form_create_or_modify_body(self, params=None):
+        """Build the REST body from params (defaults to the module parameters)."""
+        if params is None:
+            params = self.parameters
+        body = {}
+        options = ['aggregates', 'constituents_per_aggregate', 'size', 'comment', 'type', 'nas_path', 'policy']
+        for option in options:
+            if option in params:
+                body[option] = params[option]
+        # drop suboptions left unset so they are not sent as null.
+        if 'qos_policy' in params:
+            body['qos_policy'] = self.na_helper.filter_out_none_entries(params['qos_policy'])
+        if 'audit_event_selector' in params:
+            body['audit_event_selector'] = self.na_helper.filter_out_none_entries(params['audit_event_selector'])
+        return body
+
+    def check_volume_aggr(self):
+        """Return True when the desired aggregates differ from the backing volume's."""
+        api = 'storage/volumes/%s' % self.volume_uuid
+        params = {'fields': 'aggregates.name'}
+        record, error = rest_generic.get_one_record(self.rest_api, api, params)
+        if error:
+            self.module.fail_json(msg=error)
+        aggr_names = [aggr['name'] for aggr in record['aggregates']]
+        if self.parameters.get('aggregates'):
+            if sorted(aggr_names) != sorted(self.parameters['aggregates']):
+                return True
+        return False
+
+    def validate_modify_required(self, modify, current):
+        """Decide whether a policy-statements diff is a real change or mere reordering."""
+        # if desired statement length different than current, allow modify.
+        if len(modify['policy']['statements']) != len(current['policy']['statements']):
+            return True
+        match_found = []
+        for statement in modify['policy']['statements']:
+            for index, current_statement in enumerate(current['policy']['statements']):
+                # continue to next if the current statement already has a match.
+                if index in match_found:
+                    continue
+                statement_modified = self.na_helper.get_modified_attributes(current_statement, statement)
+                # no modify required, match found for the statement.
+                # break the loop and check next desired policy statement has match.
+                if not statement_modified:
+                    match_found.append(index)
+                    break
+                # match not found, switch to next current statement and continue to find desired statement is present.
+                if len(statement_modified) > 1:
+                    continue
+                # 'conditions' key in policy.statements is list type, each element is dict.
+                # if the len of the desired conditions different than current, allow for modify.
+                # check for modify if 'conditions' is the only key present in statement_modified.
+                # check for difference in each modify[policy.statements[index][conditions] with current[policy.statements[index][conditions].
+                if statement_modified.get('conditions'):
+                    if not current_statement['conditions']:
+                        continue
+                    if len(statement_modified.get('conditions')) != len(current_statement['conditions']):
+                        continue
+
+                    # each condition should be checked for modify based on the operator key.
+                    def require_modify(desired, current):
+                        for condition in desired:
+                            # operator is a required field for condition, if not present, REST will throw error.
+                            if condition.get('operator'):
+                                for current_condition in current:
+                                    if condition['operator'] == current_condition['operator']:
+                                        condition_modified = self.na_helper.get_modified_attributes(current_condition, condition)
+                                        if condition_modified:
+                                            return True
+                            else:
+                                return True
+                    if not require_modify(statement_modified['conditions'], current_statement['conditions']):
+                        match_found.append(index)
+                        break
+        # allow modify
+        # - if not match found
+        # - if only partial policy statements has match found.
+        return not match_found or len(match_found) != len(modify['policy']['statements'])
+
+    def apply(self):
+        """Compare current vs desired state and create/modify/delete accordingly."""
+        current = self.get_s3_bucket()
+        cd_action, modify = None, None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None:
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+            # bucket type is immutable once created.
+            if modify.get('type'):
+                self.module.fail_json(msg='Error: cannot modify bucket type.')
+            if len(modify) == 1 and 'policy' in modify and current.get('policy'):
+                if modify['policy'].get('statements'):
+                    self.na_helper.changed = self.validate_modify_required(modify, current)
+                    if not self.na_helper.changed:
+                        modify = False
+        # volume uuid returned only for s3 buckets.
+        if current and self.volume_uuid and self.check_volume_aggr():
+            self.module.fail_json(msg='Aggregates cannot be modified for S3 bucket %s' % self.parameters['name'])
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.create_s3_bucket()
+            if cd_action == 'delete':
+                self.delete_s3_bucket()
+            if modify:
+                self.modify_s3_bucket(modify)
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+        self.module.exit_json(**result)
+
+
+def main():
+ '''Apply volume operations from playbook'''
+ obj = NetAppOntapS3Buckets()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_groups.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_groups.py
new file mode 100644
index 000000000..e4b8d57f8
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_groups.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+
+# (c) 2022-2023, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_s3_groups
+short_description: NetApp ONTAP S3 groups
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.21.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete, or modify S3 groups on NetApp ONTAP.
+
+options:
+ state:
+ description:
+ - Whether the specified S3 group should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - The name of the S3 group.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ comment:
+ description:
+ - comment about the group
+ type: str
+
+ users:
+    description: List of users to add to the group
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: username
+ type: str
+
+ policies:
+    description: Policies to add to the group
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: policy name
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create and modify a S3 Group
+ netapp.ontap.na_ontap_s3_groups:
+ state: present
+ name: dev-group
+ comment: group for devs
+ vserver: ansibleSVM
+ users:
+ - name: carchi8py
+ - name: carchi8py2
+ policies:
+ - name: allow_policy
+ - name: deny_policy
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ use_rest: always
+
+ - name: Delete a S3 Group
+ netapp.ontap.na_ontap_s3_groups:
+ state: absent
+ name: dev-group
+ vserver: ansibleSVM
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ use_rest: always
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver
+
+
class NetAppOntapS3Groups:
    """Create, modify or delete S3 groups on an ONTAP vserver (REST only, ONTAP 9.8+)."""

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            comment=dict(required=False, type='str'),
            users=dict(required=False, type='list', elements='dict', options=dict(
                name=dict(required=False, type='str'))),
            policies=dict(required=False, type='list', elements='dict', options=dict(
                name=dict(required=False, type='str')))))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
        )
        # svm_uuid is resolved from the vserver name; group_id is set when the group is found.
        self.svm_uuid = None
        self.group_id = None
        self.na_helper = NetAppModule(self.module)
        self.parameters = self.na_helper.check_and_set_parameters(self.module)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # S3 support requires ONTAP 9.8 or later, and this module is REST-only.
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_s3_groups', 9, 8)

    def get_s3_groups(self):
        """Return the current group as a normalized dict, or None if it does not exist.

        Side effects: resolves self.svm_uuid and records self.group_id when found.
        """
        self.get_svm_uuid()
        api = 'protocols/s3/services/%s/groups' % self.svm_uuid
        fields = ','.join(('name',
                           'comment',
                           'users.name',
                           'policies.name'))
        params = {'name': self.parameters['name'],
                  'fields': fields}
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg='Error fetching S3 groups %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        if record:
            self.group_id = record.get('id')
            return self.form_current(record)
        return record

    @staticmethod
    def form_current(record):
        """Normalize the REST record, keeping only the keys this module manages.

        The API returns extra keys (such as _links) in each user and policy record,
        which would make get_modified_attributes() report spurious changes.
        """
        current = {
            'comment': record.get('comment'),
            'users': [],
            'policies': [],
        }
        if record.get('users'):
            for user in record['users']:
                current['users'].append({'name': user['name']})
        if record.get('policies'):
            for policy in record['policies']:
                current['policies'].append({'name': policy['name']})
        return current

    def create_s3_groups(self):
        """POST a new group; apply() guarantees users and policies are provided."""
        api = 'protocols/s3/services/%s/groups' % self.svm_uuid
        body = {'name': self.parameters['name'],
                'users': self.parameters['users'],
                'policies': self.parameters['policies']}
        if self.parameters.get('comment'):
            body['comment'] = self.parameters['comment']
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg='Error creating S3 groups %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_s3_groups(self):
        """DELETE the group identified by self.group_id."""
        api = 'protocols/s3/services/%s/groups' % self.svm_uuid
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.group_id)
        if error:
            self.module.fail_json(msg='Error deleting S3 group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_s3_groups(self, modify):
        """PATCH only the attributes reported as modified (desired values from parameters)."""
        api = 'protocols/s3/services/%s/groups' % self.svm_uuid
        body = {}
        if modify.get('comment') is not None:
            body['comment'] = self.parameters['comment']
        if modify.get('users') is not None:
            body['users'] = self.parameters['users']
        if modify.get('policies') is not None:
            body['policies'] = self.parameters['policies']
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.group_id, body)
        if error:
            self.module.fail_json(msg='Error modifying S3 group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_svm_uuid(self):
        """Resolve the vserver name into self.svm_uuid.

        get_vserver_uuid() is called with error_on_none=True, so it fails the module
        itself when the vserver is not found; the error return is not needed here.
        """
        record, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True)
        self.svm_uuid = record

    def apply(self):
        """Idempotent driver: decide on create/delete/modify and report the result."""
        current = self.get_s3_groups()
        cd_action, modify = None, None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        # Both users and policies must be supplied when creating or patching a group.
        if cd_action == 'create' and (self.na_helper.safe_get(self.parameters, ['users']) is None
                                      or self.na_helper.safe_get(self.parameters, ['policies']) is None):
            self.module.fail_json(msg='policies and users are required for creating a group.')
        if modify and (self.na_helper.safe_get(self.parameters, ['users']) is None
                       or self.na_helper.safe_get(self.parameters, ['policies']) is None):
            self.module.fail_json(msg='policies and users can not be empty when modifying a group.')
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_s3_groups()
            if cd_action == 'delete':
                self.delete_s3_groups()
            if modify:
                self.modify_s3_groups(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Instantiate the S3 groups module object and apply the requested state."""
    NetAppOntapS3Groups().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_policies.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_policies.py
new file mode 100644
index 000000000..7b54efbbe
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_policies.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_s3_policies
+short_description: NetApp ONTAP S3 Policies
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.21.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete, or modify S3 Policies on NetApp ONTAP.
+
+options:
+ state:
+ description:
+ - Whether the specified S3 policy should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - The name of the S3 policy.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ comment:
+ description:
+ - comment about the policy
+ type: str
+
+ statements:
+ description:
+ - Policy statements are built using this structure to specify permissions
+ - Grant <Effect> to allow/deny <Principal> to perform <Action> on <Resource> when <Condition> applies
+ type: list
+ elements: dict
+ suboptions:
+ sid:
+ description: Statement ID
+ type: str
+ required: true
+ resources:
+ description:
+ - The bucket and any object it contains.
+ - The wildcard characters * and ? can be used to form a regular expression for specifying a resource.
+ type: list
+ elements: str
+ required: true
+ actions:
+ description:
+ - You can specify * to mean all actions, or a list of one or more of the following
+ - GetObject
+ - PutObject
+ - DeleteObject
+ - ListBucket
+ - GetBucketAcl
+ - GetObjectAcl
+ - ListBucketMultipartUploads
+ - ListMultipartUploadParts
+ type: list
+ elements: str
+ required: true
+ effect:
+ description: The statement may allow or deny access
+ type: str
+ choices:
+ - allow
+ - deny
+ required: true
+'''
+EXAMPLES = """
+ - name: Create and modify a S3 policy
+ netapp.ontap.na_ontap_s3_policies:
+ state: present
+ name: carchi-s3-policy
+ comment: carchi8py was here
+ vserver: ansibleSVM
+ statements:
+ - sid: 1
+ resources:
+ - "bucket1"
+ - "bucket1/*"
+ actions:
+ - "*"
+ effect:
+ allow
+ - sid: 2
+ resources:
+ - "bucket2"
+ - "bucket2/*"
+ actions:
+ - "*"
+ effect:
+ allow
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ use_rest: always
+
+ - name: delete S3 policy
+ netapp.ontap.na_ontap_s3_policies:
+ state: absent
+ name: carchi-s3-policy
+ vserver: ansibleSVM
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ use_rest: always
+"""
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver
+
+
class NetAppOntapS3Policies:
    """Create, modify or delete S3 access policies on an ONTAP vserver (REST only, ONTAP 9.8+)."""

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            comment=dict(required=False, type='str'),
            statements=dict(type='list', elements='dict', options=dict(
                sid=dict(required=True, type='str'),
                resources=dict(required=True, type='list', elements='str'),
                actions=dict(required=True, type='list', elements='str'),
                effect=dict(required=True, type='str', choices=['allow', 'deny']),
            ))))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        # svm_uuid is resolved from the vserver name before any policy API call.
        self.svm_uuid = None
        # NOTE(review): self.uuid appears unused within this class -- confirm before removing.
        self.uuid = None
        self.na_helper = NetAppModule(self.module)
        self.parameters = self.na_helper.check_and_set_parameters(self.module)
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # S3 support requires ONTAP 9.8 or later; this module is REST-only.
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_s3_policies', 9, 8)

    def get_s3_policies(self):
        """Return the current policy record for this vserver, or None if absent.

        Side effect: resolves self.svm_uuid first.
        """
        self.get_svm_uuid()
        api = 'protocols/s3/services/%s/policies' % self.svm_uuid
        fields = ','.join(('name',
                           'comment',
                           'statements'))
        params = {'name': self.parameters['name'],
                  'fields': fields}
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg='Error fetching S3 policy %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        # sid accepts a string or a number; the API echoes an int back when a numeric
        # sid was sent, so normalize to str to compare with the (string) module option.
        if record:
            for each in record['statements']:
                each['sid'] = str(each['sid'])
        return record

    def get_svm_uuid(self):
        """Resolve the vserver name into self.svm_uuid (helper fails the module if not found)."""
        uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True)
        self.svm_uuid = uuid

    def create_s3_policies(self):
        """POST a new policy with optional comment and statements."""
        api = 'protocols/s3/services/%s/policies' % self.svm_uuid
        body = {'name': self.parameters['name']}
        if self.parameters.get('comment'):
            body['comment'] = self.parameters['comment']
        if self.parameters.get('statements'):
            body['statements'] = self.parameters['statements']
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg='Error creating S3 policy %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_s3_policies(self):
        """DELETE the policy; the policy name is the REST resource key."""
        api = 'protocols/s3/services/%s/policies' % self.svm_uuid
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['name'])
        if error:
            self.module.fail_json(msg='Error deleting S3 policy %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_s3_policies(self, modify):
        """PATCH the policy with the desired comment/statements.

        NOTE(review): statements are taken from self.parameters (not from modify), so
        they are re-sent whenever supplied -- presumably intentional; confirm.
        """
        api = 'protocols/s3/services/%s/policies' % self.svm_uuid
        body = {}
        if modify.get('comment'):
            body['comment'] = self.parameters['comment']
        if self.parameters.get('statements'):
            body['statements'] = self.parameters['statements']
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body)
        if error:
            self.module.fail_json(msg='Error modifying S3 policy %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Idempotent driver: decide on create/delete/modify and report the result."""
        current = self.get_s3_policies()
        modify = None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_s3_policies()
            if cd_action == 'delete':
                self.delete_s3_policies()
            if modify:
                self.modify_s3_policies(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Module entry point: create the S3 policies object and apply the desired state."""
    policies_module = NetAppOntapS3Policies()
    policies_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_services.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_services.py
new file mode 100644
index 000000000..ff5feb722
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_services.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_s3_services
+short_description: NetApp ONTAP S3 services
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.20.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete or modify S3 Service
+
+options:
+ state:
+ description:
+    - Whether the specified S3 service should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - The name of the S3 service.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ enabled:
+ description:
+ - enable or disable the service
+ type: bool
+
+ comment:
+ description:
+ - comment about the service
+ type: str
+
+ certificate_name:
+ description:
+ - name of https certificate to use for the service
+ type: str
+'''
+
+EXAMPLES = """
+ - name: create or modify s3 service
+ na_ontap_s3_services:
+ state: present
+ name: carchi-test
+ vserver: ansibleSVM
+ comment: not enabled
+ enabled: False
+ certificate_name: ansibleSVM_16E1C1284D889609
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ use_rest: always
+
+ - name: delete s3 service
+ na_ontap_s3_services:
+ state: absent
+ name: carchi-test
+ vserver: ansibleSVM
+ certificate_name: ansibleSVM_16E1C1284D889609
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ use_rest: always
+"""
+
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapS3Services:
    """Create, modify or delete the S3 service on an ONTAP vserver (REST only, ONTAP 9.8+)."""

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            enabled=dict(required=False, type='bool'),
            vserver=dict(required=True, type='str'),
            comment=dict(required=False, type='str'),
            certificate_name=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        # UUID of the owning SVM; filled in from the service record by set_uuids().
        self.svm_uuid = None
        self.na_helper = NetAppModule(self.module)
        self.parameters = self.na_helper.check_and_set_parameters(self.module)

        self.rest_api = OntapRestAPI(self.module)
        partially_supported_rest_properties = []  # TODO: remove this list (and simplify the is_rest call) if it stays empty
        self.use_rest = self.rest_api.is_rest(partially_supported_rest_properties=partially_supported_rest_properties,
                                              parameters=self.parameters)

        # S3 support requires ONTAP 9.8 or later; this module is REST-only.
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_s3_services', 9, 8)

    def get_s3_service(self):
        """Return the current S3 service record (with flattened certificate_name) or None.

        Side effect: records the owning SVM uuid via set_uuids() when a record is found.
        """
        api = 'protocols/s3/services'
        fields = ','.join(('name',
                           'enabled',
                           'svm.uuid',
                           'comment',
                           'certificate.name'))

        params = {
            'name': self.parameters['name'],
            'svm.name': self.parameters['vserver'],
            'fields': fields
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg='Error fetching S3 service %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        if record:
            # Flatten certificate.name so it lines up with the certificate_name option
            # when computing modified attributes.
            if self.na_helper.safe_get(record, ['certificate', 'name']):
                record['certificate_name'] = self.na_helper.safe_get(record, ['certificate', 'name'])
            return self.set_uuids(record)
        return None

    def create_s3_service(self):
        """POST a new S3 service on the vserver, with the optional attributes provided."""
        api = 'protocols/s3/services'
        body = {'svm.name': self.parameters['vserver'], 'name': self.parameters['name']}
        # 'enabled' is a bool: test against None so that False is still sent.
        if self.parameters.get('enabled') is not None:
            body['enabled'] = self.parameters['enabled']
        if self.parameters.get('comment'):
            body['comment'] = self.parameters['comment']
        if self.parameters.get('certificate_name'):
            body['certificate.name'] = self.parameters['certificate_name']
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg='Error creating S3 service %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_s3_service(self):
        """DELETE the S3 service, keyed by the owning SVM uuid."""
        api = 'protocols/s3/services'
        # The REST default is to delete all users and empty buckets attached to a service.
        # That would not be idempotent, so this is switched to False.
        # Second issue: with delete_all: True the call reports success, but ONTAP still
        # shows the service until the job deleting buckets/users/groups completes.
        body = {'delete_all': False}
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.svm_uuid, body=body)
        if error:
            self.module.fail_json(msg='Error deleting S3 service %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_s3_service(self, modify):
        """PATCH the modified service attributes.

        Once the service is created, buckets and users cannot be modified through the
        service API -- only through the user/group/bucket modules.
        """
        api = 'protocols/s3/services'
        body = {'name': self.parameters['name']}
        if modify.get('enabled') is not None:
            body['enabled'] = self.parameters['enabled']
        if modify.get('comment'):
            body['comment'] = self.parameters['comment']
        if modify.get('certificate_name'):
            body['certificate.name'] = self.parameters['certificate_name']
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body)
        if error:
            self.module.fail_json(msg='Error modifying S3 service %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def set_uuids(self, record):
        """Remember the owning SVM uuid from the record and return the record unchanged."""
        self.svm_uuid = record['svm']['uuid']
        return record

    def apply(self):
        """Idempotent driver: decide on create/delete/modify and report the result."""
        current = self.get_s3_service()
        cd_action, modify = None, None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_s3_service()
            if cd_action == 'delete':
                self.delete_s3_service()
            if modify:
                self.modify_s3_service(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Module entry point: build the S3 services object and apply the requested state."""
    NetAppOntapS3Services().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_users.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_users.py
new file mode 100644
index 000000000..d3a0efd64
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_users.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_s3_users
+short_description: NetApp ONTAP S3 users
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.20.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete, or modify S3 users on NetApp ONTAP.
+
+options:
+ state:
+ description:
+ - Whether the specified S3 user should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - The name of the S3 user.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ comment:
+ description:
+ - comment about the user
+ type: str
+'''
+
+EXAMPLES = """
+ - name: create or modify s3 user
+ na_ontap_s3_users:
+ state: present
+ name: carchi8py
+ vserver: ansibleSVM
+ comment: not enabled
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ use_rest: always
+
+ - name: delete s3 user
+ na_ontap_s3_users:
+ state: absent
+ name: carchi8py
+ vserver: ansibleSVM
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+ use_rest: always
+"""
+
+RETURN = """
+secret_key:
+ description: secret_key generated for the user
+ returned: on creation of user
+ type: str
+access_key:
+ description: access_key generated for the user
+ returned: on creation of user
+ type: str
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver
+
+
class NetAppOntapS3Users:
    """Manage (create, modify, delete) S3 users on an ONTAP vserver via REST."""

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update({
            'state': {'required': False, 'type': 'str', 'choices': ['present', 'absent'], 'default': 'present'},
            'vserver': {'required': True, 'type': 'str'},
            'name': {'required': True, 'type': 'str'},
            'comment': {'required': False, 'type': 'str'},
        })
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.svm_uuid = None
        self.na_helper = NetAppModule(self.module)
        self.parameters = self.na_helper.check_and_set_parameters(self.module)
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # REST-only module: S3 requires ONTAP 9.8 or later.
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_s3_users', 9, 8)

    def get_s3_user(self):
        """Look up the S3 user on the vserver; return its record or None."""
        self.get_svm_uuid()
        api = 'protocols/s3/services/%s/users' % self.svm_uuid
        query = {'name': self.parameters['name'], 'fields': 'name,comment'}
        user_record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            self.module.fail_json(msg='Error fetching S3 user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        return user_record

    def get_svm_uuid(self):
        """Translate the vserver name into its UUID (helper fails the module if not found)."""
        self.svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True)

    def create_s3_user(self):
        """POST a new S3 user and return the created record (it carries the generated keys)."""
        api = 'protocols/s3/services/%s/users' % self.svm_uuid
        body = {'name': self.parameters['name']}
        if self.parameters.get('comment'):
            body['comment'] = self.parameters['comment']
        reply, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg='Error creating S3 user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        if reply.get('num_records') == 1:
            return reply.get('records')[0]
        self.module.fail_json(msg='Error creating S3 user %s' % self.parameters['name'], exception=traceback.format_exc())

    def delete_s3_user(self):
        """DELETE the S3 user; the user name is the REST resource key."""
        api = 'protocols/s3/services/%s/users' % self.svm_uuid
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['name'])
        if error:
            self.module.fail_json(msg='Error deleting S3 user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_s3_user(self, modify):
        """PATCH the S3 user with the modified attributes (only the comment here)."""
        api = 'protocols/s3/services/%s/users' % self.svm_uuid
        body = {}
        if modify.get('comment'):
            body['comment'] = self.parameters['comment']
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body)
        if error:
            self.module.fail_json(msg='Error modifying S3 user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def parse_response(self, response):
        """Extract (secret_key, access_key) from a create response; (None, None) otherwise."""
        if response is None:
            return None, None
        return response.get('secret_key'), response.get('access_key')

    def apply(self):
        """Idempotent driver: decide on create/delete/modify and report the result."""
        current = self.get_s3_user()
        response = None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                response = self.create_s3_user()
            elif cd_action == 'delete':
                self.delete_s3_user()
            elif modify:
                self.modify_s3_user(modify)
        secret_key, access_key = self.parse_response(response)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify,
                                              extra_responses={'secret_key': secret_key,
                                                               'access_key': access_key})
        self.module.exit_json(**result)
+
+
def main():
    """Module entry point: build the S3 users object and apply the requested state."""
    users_module = NetAppOntapS3Users()
    users_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py
new file mode 100644
index 000000000..c7131fe5e
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py
@@ -0,0 +1,468 @@
+#!/usr/bin/python
+
+# (c) 2020-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_security_certificates
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_security_certificates
+short_description: NetApp ONTAP manage security certificates.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.7.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Install or delete security certificates on ONTAP. (Create and sign will come in a second iteration)
+
+options:
+
+ state:
+ description:
+ - Whether the specified security certificate should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ common_name:
+ description:
+ - Common name of the certificate.
+ - Required for create and install.
+ - If name is present, ignored for sign and delete.
+ - If name is absent or ignored, required for sign and delete.
+ type: str
+
+ name:
+ description:
+ - The unique name of the security certificate per SVM.
+ - This parameter is not supported for ONTAP 9.6 or 9.7, as the REST API does not support it.
+ - If present with ONTAP 9.6 or 9.7, it is ignored by default, see I(ignore_name_if_not_supported).
+ - It is strongly recommended to use name for newer releases of ONTAP.
+ type: str
+
+ svm:
+ description:
+ - The name of the SVM (vserver).
+ - If present, the certificate is installed in the SVM.
+ - If absent, the certificate is installed in the cluster.
+ type: str
+ aliases:
+ - vserver
+
+ type:
+ description:
+ - Type of certificate
+ - Required for create and install.
+ - If name is present, ignored for sign and delete.
+ - If name is absent or ignored, required for sign and delete.
+ choices: ['client', 'server', 'client_ca', 'server_ca', 'root_ca']
+ type: str
+
+ public_certificate:
+ description:
+ - Public key certificate in PEM format.
+ - Required when installing a certificate. Ignored otherwise.
+ type: str
+
+ private_key:
+ description:
+ - Private key certificate in PEM format.
+ - Required when installing a CA-signed certificate. Ignored otherwise.
+ type: str
+
+ signing_request:
+ description:
+ - If present, the certificate identified by name and svm is used to sign the request.
+ - A signed certificate is returned.
+ type: str
+
+ expiry_time:
+ description:
+ - Certificate expiration time. Specifying an expiration time is recommended when creating a certificate.
+ - Can be provided when signing a certificate.
+ type: str
+
+ key_size:
+ description:
+ - Key size of the certificate in bits. Specifying a strong key size is recommended when creating a certificate.
+ - Ignored for sign and delete.
+ type: int
+
+ hash_function:
+ description:
+ - Hashing function. Can be provided when creating a self-signed certificate or when signing a certificate.
+ - Allowed values for create and sign are sha256, sha224, sha384, sha512.
+ type: str
+
+ intermediate_certificates:
+ description:
+ - Chain of intermediate Certificates in PEM format.
+ - Only valid when installing a certificate.
+ type: list
+ elements: str
+
+ ignore_name_if_not_supported:
+ description:
+ - ONTAP 9.6 and 9.7 REST API does not support I(name).
+ - If set to true, no error is reported if I(name) is present, and I(name) is not used.
+ type: bool
+ default: true
+ version_added: '20.8.0'
+
+notes:
+ - supports check mode.
+ - only supports REST. Requires ONTAP 9.6 or later, ONTAP 9.8 or later is recommended.
+'''
+
+EXAMPLES = """
+- name: install certificate
+ netapp.ontap.na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_common_name }}"
+ name: "{{ ontap_cert_name }}"
+ public_certificate: "{{ ssl_certificate }}"
+ type: client_ca
+ svm: "{{ vserver }}"
+
+# ignore svm option for cluster/admin vserver.
+- name: install certificate in cluster vserver.
+ netapp.ontap.na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_common_name }}"
+ name: "{{ ontap_cert_name }}"
+ public_certificate: "{{ ssl_certificate }}"
+ type: client_ca
+
+- name: create certificate
+ netapp.ontap.na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_root_common_name }}"
+ name: "{{ ontap_cert_name }}"
+ type: root_ca
+ svm: "{{ vserver }}"
+ expiry_time: P365DT # one year
+
+- name: sign certificate using newly created certificate
+ tags: sign_request
+ netapp.ontap.na_ontap_security_certificates:
+ # <<: *login
+ name: "{{ ontap_cert_name }}"
+ svm: "{{ vserver }}"
+ signing_request: |
+ -----BEGIN CERTIFICATE REQUEST-----
+ MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH
+ DAlTdW5ueXZhbGUxDzANBgNVBAoMBk5ldEFwcDCCASIwDQYJKoZIhvcNAQEBBQAD
+ ggEPADCCAQoCggEBALgXCj6Si/I4xLdV7wjWYTbt8jY20fQOjk/4E7yBT1vFBflE
+ ks6YDc6dhC2G18cnoj9E3DiR8lIHPoAlFB/VmBNDev3GZkbFlrbV7qYmf8OEx2H2
+ tAefgSP0jLmCHCN1yyhJoCG6FsAiD3tf6yoyFF6qS9ureGL0tCJJ/osx64WzUz+Q
+ EN8lx7VSxriEFMSjreXZDhUFaCdIYKKRENuEWyYvdy5cbBmczhuM8EP6peOVv5Hm
+ BJzPUDkq7oTtEHmttpATq2Y92qzNzETO0bXN5X/93AWri8/yEXdX+HEw1C/omtsE
+ jGsCXrCrIJ+DgUdT/GHNdBWlXl/cWGtEgEQ4vrUCAwEAAaAAMA0GCSqGSIb3DQEB
+ CwUAA4IBAQBjZNoQgr/JDm1T8zyRhLkl3zw4a16qKNu/MS7prqZHLVQgrptHRegU
+ Hbz11XoHfVOdbyuvtzEe95QsDd6FYCZ4qzZRF3se4IjMeqwdQZ5WP0/GFiwM8Uln
+ /0TCWjt759XMeUX7+wgOg5NRjJ660eWMXzu/UJf+vZO0Q2FiPIr13JvvY3TjT+9J
+ UUtK4r9PaUuOPN2YL9IQqSD3goh8302Qr3nBXUgjeUGLkgfUM5S39apund2hyTX2
+ JCLQsKr88pwU9iDho2tHLv/2QgLwNZLPu8V+7IGu6G4vB28lN4Uy7xbhxFOKtyWu
+ fK4sEdTw3B/aDN0tB8MHFdPYycNZsEac
+ -----END CERTIFICATE REQUEST-----
+ expiry_time: P180DT
+
+- name: delete certificate
+ netapp.ontap.na_ontap_security_certificates:
+ # <<: *cert_login
+ state: absent
+ name: "{{ ontap_cert_name }}"
+ svm: "{{ vserver }}"
+
+# For ONTAP 9.6 or 9.7, use common_name and type, in addition to, or in lieu of name
+- name: install certificate
+ netapp.ontap.na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_common_name }}"
+ public_certificate: "{{ ssl_certificate }}"
+ type: client_ca
+ svm: "{{ vserver }}"
+
+- name: create certificate
+ netapp.ontap.na_ontap_security_certificates:
+ # <<: *cert_login
+ common_name: "{{ ontap_cert_root_common_name }}"
+ type: root_ca
+ svm: "{{ vserver }}"
+ expiry_time: P365DT # one year
+
+- name: sign certificate using newly created certificate
+ tags: sign_request
+ netapp.ontap.na_ontap_security_certificates:
+ # <<: *login
+ common_name: "{{ ontap_cert_root_common_name }}"
+ type: root_ca
+ svm: "{{ vserver }}"
+ signing_request: |
+ -----BEGIN CERTIFICATE REQUEST-----
+ MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH
+ DAlTdW5ueXZhbGUxDzANBgNVBAoMBk5ldEFwcDCCASIwDQYJKoZIhvcNAQEBBQAD
+ ggEPADCCAQoCggEBALgXCj6Si/I4xLdV7wjWYTbt8jY20fQOjk/4E7yBT1vFBflE
+ ks6YDc6dhC2G18cnoj9E3DiR8lIHPoAlFB/VmBNDev3GZkbFlrbV7qYmf8OEx2H2
+ tAefgSP0jLmCHCN1yyhJoCG6FsAiD3tf6yoyFF6qS9ureGL0tCJJ/osx64WzUz+Q
+ EN8lx7VSxriEFMSjreXZDhUFaCdIYKKRENuEWyYvdy5cbBmczhuM8EP6peOVv5Hm
+ BJzPUDkq7oTtEHmttpATq2Y92qzNzETO0bXN5X/93AWri8/yEXdX+HEw1C/omtsE
+ jGsCXrCrIJ+DgUdT/GHNdBWlXl/cWGtEgEQ4vrUCAwEAAaAAMA0GCSqGSIb3DQEB
+ CwUAA4IBAQBjZNoQgr/JDm1T8zyRhLkl3zw4a16qKNu/MS7prqZHLVQgrptHRegU
+ Hbz11XoHfVOdbyuvtzEe95QsDd6FYCZ4qzZRF3se4IjMeqwdQZ5WP0/GFiwM8Uln
+ /0TCWjt759XMeUX7+wgOg5NRjJ660eWMXzu/UJf+vZO0Q2FiPIr13JvvY3TjT+9J
+ UUtK4r9PaUuOPN2YL9IQqSD3goh8302Qr3nBXUgjeUGLkgfUM5S39apund2hyTX2
+ JCLQsKr88pwU9iDho2tHLv/2QgLwNZLPu8V+7IGu6G4vB28lN4Uy7xbhxFOKtyWu
+ fK4sEdTw3B/aDN0tB8MHFdPYycNZsEac
+ -----END CERTIFICATE REQUEST-----
+ expiry_time: P180DT
+
+- name: delete certificate
+ netapp.ontap.na_ontap_security_certificates:
+ # <<: *cert_login
+ state: absent
+ common_name: "{{ ontap_cert_root_common_name }}"
+ type: root_ca
+ name: "{{ ontap_cert_name }}"
+ svm: "{{ vserver }}"
+"""
+
+RETURN = """
+ontap_info:
+ description: Returns public_certificate when signing, empty for create, install, and delete.
+ returned: always
+ type: dict
+ sample: '{
+ "ontap_info": {
+ "public_certificate": "-----BEGIN CERTIFICATE-----\n........-----END CERTIFICATE-----\n"
+ }
+ }'
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver
+
+
+class NetAppOntapSecurityCertificates:
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ common_name=dict(required=False, type='str'),
+ name=dict(required=False, type='str'),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ type=dict(required=False, choices=['client', 'server', 'client_ca', 'server_ca', 'root_ca']),
+ svm=dict(required=False, type='str', aliases=['vserver']),
+ public_certificate=dict(required=False, type='str'),
+ private_key=dict(required=False, type='str', no_log=True),
+ signing_request=dict(required=False, type='str'),
+ expiry_time=dict(required=False, type='str'),
+ key_size=dict(required=False, type='int'),
+ hash_function=dict(required=False, type='str'),
+ intermediate_certificates=dict(required=False, type='list', elements='str'),
+ ignore_name_if_not_supported=dict(required=False, type='bool', default=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if self.parameters.get('name') is None and (self.parameters.get('common_name') is None or self.parameters.get('type') is None):
+ error = "Error: 'name' or ('common_name' and 'type') are required parameters."
+ self.module.fail_json(msg=error)
+
+ # ONTAP 9.6 and 9.7 do not support name. We'll change this to True if we detect an issue.
+ self.ignore_name_param = False
+
+ # API should be used for ONTAP 9.6 or higher
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_security_certificates'))
+
+ def get_certificate(self):
+ """
+ Fetch uuid if certificate exists.
+ NOTE: because of a bug in ONTAP 9.6 and 9.7, name is not supported. We are
+    falling back to using common_name and type, but uniqueness is not guaranteed.
+ :return:
+ Dictionary if certificate with same name is found
+ None if not found
+ """
+ # REST allows setting cluster/admin svm in create certificate, but no records returned in GET.
+ # error if data svm not found
+ if 'svm' in self.parameters:
+ rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['svm'], self.module, True)
+
+ error = "'name' or ('common_name', 'type') are required."
+ for key in ('name', 'common_name'):
+ if self.parameters.get(key) is None:
+ continue
+ data = {'fields': 'uuid',
+ key: self.parameters[key],
+ }
+ if self.parameters.get('svm') is not None:
+ data['svm.name'] = self.parameters['svm']
+ else:
+ data['scope'] = 'cluster'
+ if key == 'common_name':
+ if self.parameters.get('type') is not None:
+ data['type'] = self.parameters['type']
+ else:
+ error = "When using 'common_name', 'type' is required."
+ break
+
+ api = "security/certificates"
+ message, error = self.rest_api.get(api, data)
+ if error:
+ try:
+ name_not_supported_error = (key == 'name') and (error['message'] == 'Unexpected argument "name".')
+ except (KeyError, TypeError):
+ name_not_supported_error = False
+ if name_not_supported_error:
+ if self.parameters['ignore_name_if_not_supported'] and self.parameters.get('common_name') is not None:
+ # let's attempt a retry using common_name
+ self.ignore_name_param = True
+ continue
+ error = "ONTAP 9.6 and 9.7 do not support 'name'. Use 'common_name' and 'type' as a work-around."
+ # report success, or any other error as is
+ break
+
+ if error:
+ self.module.fail_json(msg='Error calling API: %s - %s' % (api, error))
+
+ if len(message['records']) == 1:
+ return message['records'][0]
+ if len(message['records']) > 1:
+ error = 'Duplicate records with same common_name are preventing safe operations: %s' % repr(message)
+ self.module.fail_json(msg=error)
+ return None
+
+ def create_or_install_certificate(self, validate_only=False):
+ """
+ Create or install certificate
+ :return: message (should be empty dict)
+ """
+ required_keys = ['type', 'common_name']
+ if validate_only:
+ if not set(required_keys).issubset(set(self.parameters.keys())):
+ self.module.fail_json(msg='Error creating or installing certificate: one or more of the following options are missing: %s'
+ % (', '.join(required_keys)))
+ return
+
+ optional_keys = ['public_certificate', 'private_key', 'expiry_time', 'key_size', 'hash_function', 'intermediate_certificates']
+ if not self.ignore_name_param:
+ optional_keys.append('name')
+ # special key: svm
+
+ body = {}
+ if self.parameters.get('svm') is not None:
+ body['svm'] = {'name': self.parameters['svm']}
+ for key in required_keys + optional_keys:
+ if self.parameters.get(key) is not None:
+ body[key] = self.parameters[key]
+ api = "security/certificates"
+ message, error = self.rest_api.post(api, body)
+ if error:
+ if self.parameters.get('svm') is None and error.get('target') == 'uuid':
+ error['target'] = 'cluster'
+ if error.get('message') == 'duplicate entry':
+ error['message'] += '. Same certificate may already exist under a different name.'
+ self.module.fail_json(msg="Error creating or installing certificate: %s" % error)
+ return message
+
+ def sign_certificate(self, uuid):
+ """
+ sign certificate
+ :return: a dictionary with key "public_certificate"
+ """
+ api = "security/certificates/%s/sign" % uuid
+ body = {'signing_request': self.parameters['signing_request']}
+ optional_keys = ['expiry_time', 'hash_function']
+ for key in optional_keys:
+ if self.parameters.get(key) is not None:
+ body[key] = self.parameters[key]
+ message, error = self.rest_api.post(api, body)
+ if error:
+ self.module.fail_json(msg="Error signing certificate: %s" % error)
+ return message
+
+ def delete_certificate(self, uuid):
+ """
+ Delete certificate
+ :return: message (should be empty dict)
+ """
+ api = "security/certificates/%s" % uuid
+ message, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg="Error deleting certificate: %s" % error)
+ return message
+
+ def apply(self):
+ """
+ Apply action to create/install/sign/delete certificate
+ :return: None
+ """
+ # TODO: add telemetry for REST
+
+ current = self.get_certificate()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ message = None
+ if self.parameters.get('signing_request') is not None:
+ error = None
+ if self.parameters['state'] == 'absent':
+ error = "'signing_request' is not supported with 'state' set to 'absent'"
+ elif current is None:
+ scope = 'cluster' if self.parameters.get('svm') is None else "svm: %s" % self.parameters.get('svm')
+ error = "signing certificate with name '%s' not found on %s" % (self.parameters.get('name'), scope)
+ elif cd_action is not None:
+ error = "'signing_request' is exclusive with other actions: create, install, delete"
+ if error is not None:
+ self.module.fail_json(msg=error)
+ cd_action = 'sign'
+ self.na_helper.changed = True
+
+ if self.na_helper.changed and cd_action == 'create':
+ # validate as much as we can in check_mode or not
+ self.create_or_install_certificate(validate_only=True)
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ message = self.create_or_install_certificate()
+ elif cd_action == 'sign':
+ message = self.sign_certificate(current['uuid'])
+ elif cd_action == 'delete':
+ message = self.delete_certificate(current['uuid'])
+
+ results = netapp_utils.generate_result(self.na_helper.changed, cd_action, extra_responses={'ontap_info': message})
+ self.module.exit_json(**results)
+
+
+def main():
+ """
+ Create instance and invoke apply
+ :return: None
+ """
+ sec_cert = NetAppOntapSecurityCertificates()
+ sec_cert.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_config.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_config.py
new file mode 100644
index 000000000..aac0ea1d5
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_config.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+
+# (c) 2021-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_security_config
+short_description: NetApp ONTAP modify security config for SSL.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Modifies the security configuration for SSL.
+options:
+ name:
+ description:
+ - The type of FIPS compliant interface.
+ type: str
+ default: ssl
+
+ is_fips_enabled:
+ description:
+ - Enables or disables FIPS-compliant mode for the cluster.
+ - For REST, it requires ontap version 9.8.
+ type: bool
+
+ supported_ciphers:
+ description:
+ - Selects the supported cipher suites for the selected interface.
+ - This option is supported only in ZAPI.
+ type: str
+
+ supported_protocols:
+ description:
+      - Selects the supported protocols for the selected interface. The option supported_ciphers should not be specified if operating in FIPS-compliant mode.
+ - For REST, it requires ontap version 9.10.1 or later.
+ - Protocol versions can be removed only from lower versions.
+ - To remove protocol TLSv1 has to be removed first.
+ choices: ['TLSv1.3', 'TLSv1.2', 'TLSv1.1', 'TLSv1']
+ type: list
+ elements: str
+
+ supported_cipher_suites:
+ description:
+ - Names a cipher suite that the system can select during TLS handshakes.
+ - A list of available options can be found on the Internet Assigned Number Authority (IANA) website.
+ - To achieve idempotency all similar cipher_suites must be set.
+ - This option requires ontap version 9.10.1 or later.
+ type: list
+ elements: str
+ version_added: 22.4.0
+"""
+
+EXAMPLES = """
+ - name: Modify SSL Security Config - ZAPI
+ netapp.ontap.na_ontap_security_config:
+ name: ssl
+ is_fips_enabled: false
+ supported_ciphers: 'ALL:!LOW:!aNULL:!EXP:!eNULL:!3DES:!RC4:!SHA1'
+ supported_protocols: ['TLSv1.2', 'TLSv1.1', 'TLSv1']
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ontapi: "{{ ontap_info.ontap_info.ontap_version }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify SSL Security Config - REST
+ netapp.ontap.na_ontap_security_config:
+ is_fips_enabled: false
+ supported_protocols: ['TLSv1.2', 'TLSv1.1', 'TLSv1']
+ supported_cipher_suites: ['TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384']
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ontapi: "{{ ontap_info.ontap_info.ontap_version }}"
+ https: true
+ validate_certs: false
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapSecurityConfig:
+ """
+ Modifies SSL Security Config
+ """
+ def __init__(self):
+ """
+ Initialize the ONTAP Security Config class
+ """
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=False, type='str', default='ssl'),
+ is_fips_enabled=dict(required=False, type='bool'),
+ supported_ciphers=dict(required=False, type='str'),
+ supported_protocols=dict(required=False, type='list', elements='str', choices=['TLSv1.3', 'TLSv1.2', 'TLSv1.1', 'TLSv1']),
+ supported_cipher_suites=dict(required=False, type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ unsupported_rest_properties = ['supported_ciphers']
+ partially_supported_rest_properties = [['supported_cipher_suites', (9, 10, 1)], ['supported_protocols', (9, 10, 1)]]
+ self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
+ if self.use_rest and self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_config', 9, 8, 0):
+ msg = 'REST requires ONTAP 9.8 or later.'
+ self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
+
+ if not self.use_rest:
+ if self.parameters.get('supported_cipher_suites'):
+ self.module.fail_json(msg="Error: The option supported_cipher_suites is supported only with REST.")
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg='The python NetApp-Lib module is required')
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ # Supported_ciphers is supported in ZAPI only.
+ if 'is_fips_enabled' in self.parameters and 'supported_ciphers' in self.parameters:
+ # if fips is enabled, supported ciphers should not be specified.
+ if self.parameters['is_fips_enabled']:
+ self.module.fail_json(
+ msg='is_fips_enabled was specified as true and supported_ciphers was specified. \
+ If fips is enabled then supported ciphers should not be specified')
+
+ if 'supported_ciphers' in self.parameters:
+ self.parameters['supported_ciphers'] = self.parameters['supported_ciphers'].replace('\\', '')
+
+ if 'is_fips_enabled' in self.parameters and 'supported_protocols' in self.parameters:
+ # if fips is enabled, TLSv1 is not a supported protocol.
+ if self.parameters['is_fips_enabled'] and 'TLSv1' in self.parameters['supported_protocols']:
+ self.module.fail_json(
+ msg='is_fips_enabled was specified as true and TLSv1 was specified as a supported protocol. \
+ If fips is enabled then TLSv1 is not a supported protocol')
+ # if fips is enabled, TLSv1.1 is not a supported protocol.
+ if self.parameters['is_fips_enabled'] and 'TLSv1.1' in self.parameters['supported_protocols']:
+ self.module.fail_json(
+ msg='is_fips_enabled was specified as true and TLSv1.1 was specified as a supported protocol. \
+ If fips is enabled then TLSv1.1 is not a supported protocol')
+
+ def get_security_config(self):
+ """
+ Get the current security configuration
+ """
+ if self.use_rest:
+ return self.get_security_config_rest()
+
+ return_value = None
+
+ security_config_get_iter = netapp_utils.zapi.NaElement('security-config-get')
+ security_config_info = netapp_utils.zapi.NaElement('desired-attributes')
+ if 'is_fips_enabled' in self.parameters:
+ security_config_info.add_new_child(
+ 'is-fips-enabled', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_fips_enabled'])
+ )
+ if 'supported_ciphers' in self.parameters:
+ security_config_info.add_new_child('supported-ciphers', self.parameters['supported_ciphers'])
+ if 'supported_protocols' in self.parameters:
+ security_config_info.add_new_child('supported-protocols', ','.join(self.parameters['supported_protocols']))
+
+ security_config_get_iter.add_child_elem(security_config_info)
+ security_config_get_iter.add_new_child('interface', self.parameters['name'])
+ try:
+ result = self.server.invoke_successfully(security_config_get_iter, True)
+ security_supported_protocols = []
+ if result.get_child_by_name('attributes'):
+ attributes = result.get_child_by_name('attributes')
+ security_config_attributes = attributes.get_child_by_name('security-config-info')
+ supported_protocols = security_config_attributes.get_child_by_name('supported-protocols')
+ for supported_protocol in supported_protocols.get_children():
+ security_supported_protocols.append(supported_protocol.get_content())
+ return_value = {
+ 'name': security_config_attributes['interface'],
+ 'is_fips_enabled': self.na_helper.get_value_for_bool(from_zapi=True, value=security_config_attributes['is-fips-enabled']),
+ 'supported_ciphers': security_config_attributes['supported-ciphers'],
+ 'supported_protocols': security_supported_protocols,
+ }
+
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error getting security config for interface %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ return return_value
+
+ def modify_security_config(self, modify):
+ """
+ Modifies the security configuration.
+ """
+ if self.use_rest:
+ return self.modify_security_config_rest(modify)
+
+ security_config_obj = netapp_utils.zapi.NaElement("security-config-modify")
+ security_config_obj.add_new_child("interface", self.parameters['name'])
+ if 'is_fips_enabled' in self.parameters:
+ self.parameters['is_fips_enabled'] = self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_fips_enabled'])
+ security_config_obj.add_new_child('is-fips-enabled', self.parameters['is_fips_enabled'])
+ if 'supported_ciphers' in self.parameters:
+ security_config_obj.add_new_child('supported-ciphers', self.parameters['supported_ciphers'])
+ if 'supported_protocols' in self.parameters:
+ supported_protocol_obj = netapp_utils.zapi.NaElement("supported-protocols")
+ for protocol in self.parameters['supported_protocols']:
+ supported_protocol_obj.add_new_child('string', protocol)
+ security_config_obj.add_child_elem(supported_protocol_obj)
+ try:
+ self.server.invoke_successfully(security_config_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(
+ msg='Error modifying security config for interface %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc()
+ )
+
+ def get_security_config_rest(self):
+ """
+ Get the current security configuration
+ """
+ fields = 'fips.enabled,'
+ if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
+ fields += 'tls.cipher_suites,tls.protocol_versions'
+ record, error = rest_generic.get_one_record(self.rest_api, '/security', None, fields)
+ if error:
+ self.module.fail_json(msg="Error on getting security config: %s" % error)
+ if record:
+ return {
+ 'is_fips_enabled': self.na_helper.safe_get(record, ['fips', 'enabled']),
+ 'supported_cipher_suites': self.na_helper.safe_get(record, ['tls', 'cipher_suites']),
+ 'supported_protocols': self.na_helper.safe_get(record, ['tls', 'protocol_versions'])
+ }
+ return record
+
+ def modify_security_config_rest(self, modify):
+ """
+ Modify the current security configuration
+ """
+ body = {}
+ if 'is_fips_enabled' in modify:
+ body['fips.enabled'] = modify['is_fips_enabled']
+ if 'supported_cipher_suites' in modify:
+ body['tls.cipher_suites'] = modify['supported_cipher_suites']
+ if 'supported_protocols' in modify:
+ body['tls.protocol_versions'] = modify['supported_protocols']
+ record, error = rest_generic.patch_async(self.rest_api, '/security', None, body)
+ if error:
+ self.module.fail_json(msg="Error on modifying security config: %s" % error)
+
+ def apply(self):
+ current = self.get_security_config()
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed and not self.module.check_mode:
+ self.modify_security_config(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Creates the NetApp ONTAP security config object and runs the correct play task
+ """
+ obj = NetAppOntapSecurityConfig()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_ca_certificate.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_ca_certificate.py
new file mode 100644
index 000000000..6ddd78b90
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_ca_certificate.py
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc. GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_security_ipsec_ca_certificate
+short_description: NetApp ONTAP module to add or delete IPsec CA certificate.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '22.1.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create or delete security IPsec CA Certificate.
+options:
+ state:
+ description:
+ - Create or delete security IPsec CA Certificate.
+ - The certificate must already be installed on the system, for instance using na_ontap_security_certificates.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - Name of the CA certificate.
+ - Certificate must be already installed in svm or cluster scope.
+ type: str
+ required: true
+ svm:
+ description:
+ - Name of svm.
+ - If not set cluster scope is assumed.
+ type: str
+ required: false
+
+notes:
+ - Supports check_mode.
+ - Only supported with REST and requires ONTAP 9.10.1 or later.
+"""
+
+EXAMPLES = """
+ - name: Add IPsec CA certificate to svm.
+ netapp.ontap.na_ontap_security_ipsec_ca_certificate:
+ name: cert1
+ svm: ansibleSVM
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Delete IPsec CA certificate in svm.
+ netapp.ontap.na_ontap_security_ipsec_ca_certificate:
+ name: cert1
+ svm: ansibleSVM
+ state: absent
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Add IPsec CA certificate to cluster.
+ netapp.ontap.na_ontap_security_ipsec_ca_certificate:
+ name: cert2
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Delete IPsec CA certificate from cluster.
+ netapp.ontap.na_ontap_security_ipsec_ca_certificate:
+ name: cert2
+ state: absent
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapSecurityCACertificate:
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ svm=dict(required=False, type='str')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.uuid = None
+ self.na_helper = NetAppModule(self.module)
+ self.parameters = self.na_helper.check_and_set_parameters(self.module)
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_ipsec_ca_certificate', 9, 10, 1)
+
+ def get_certificate_uuid(self):
+ """Get certificate UUID."""
+ api = 'security/certificates'
+ query = {'name': self.parameters['name']}
+ if self.parameters.get('svm'):
+ query['svm.name'] = self.parameters['svm']
+ else:
+ query['scope'] = 'cluster'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, 'uuid')
+ if error:
+ self.module.fail_json(msg="Error fetching uuid for certificate %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if record:
+ return record['uuid']
+ return None
+
+ def get_ipsec_ca_certificate(self):
+ """GET IPsec CA certificate record"""
+ self.uuid = self.get_certificate_uuid()
+ if self.uuid is None:
+ if self.parameters['state'] == 'absent':
+ return None
+ svm_or_scope = self.parameters['svm'] if self.parameters.get('svm') else 'cluster'
+ self.module.fail_json(msg="Error: certificate %s is not installed in %s" % (self.parameters['name'], svm_or_scope))
+ api = 'security/ipsec/ca-certificates/%s' % self.uuid
+ record, error = rest_generic.get_one_record(self.rest_api, api)
+ if error:
+ # REST returns error if ipsec ca-certificates doesn't exist.
+ if "entry doesn't exist" in error:
+ return None
+ self.module.fail_json(msg="Error fetching security IPsec CA certificate %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ return record if record else None
+
+ def create_ipsec_ca_certificate(self):
+ """Create IPsec CA certifcate"""
+ api = 'security/ipsec/ca-certificates'
+ body = {'certificate.uuid': self.uuid}
+ if self.parameters.get('svm'):
+ body['svm.name'] = self.parameters['svm']
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg="Error adding security IPsec CA certificate %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_ipsec_ca_certificate(self):
+ """Delete IPSec CA certificate"""
+ api = 'security/ipsec/ca-certificates'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid)
+ if error:
+ self.module.fail_json(msg="Error deleting security IPsec CA certificate %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ current = self.get_ipsec_ca_certificate()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_ipsec_ca_certificate()
+ else:
+ self.delete_ipsec_ca_certificate()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ ipsec_ca_obj = NetAppOntapSecurityCACertificate()
+ ipsec_ca_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_config.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_config.py
new file mode 100644
index 000000000..ce2fde68f
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_config.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc. GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_security_ipsec_config
+short_description: NetApp ONTAP module to configure IPsec config.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '22.1.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Enable or disable IPsec config.
+ - Configure replay window.
+options:
+ state:
+ description:
+ - modify IPsec configuration, only present is supported.
+ choices: ['present']
+ type: str
+ default: present
+ enabled:
+ description:
+ - Indicates whether or not IPsec is enabled.
+ type: bool
+ required: false
+ replay_window:
+ description:
+      - Replay window size in packets, where 0 indicates that the replay window is disabled.
+ type: str
+ required: false
+ choices: ['0', '64', '128', '256', '512', '1024']
+
+notes:
+ - Supports check_mode.
+ - Only supported with REST and requires ONTAP 9.8 or later.
+"""
+
+EXAMPLES = """
+ - name: Enable IPsec config and set replay_window.
+ netapp.ontap.na_ontap_security_ipsec_config:
+ enabled: True
+ replay_window: 64
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Disable IPsec config.
+ netapp.ontap.na_ontap_security_ipsec_config:
+ enabled: False
+ replay_window: 64
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapSecurityIPsecConfig:
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present'], default='present'),
+ enabled=dict(required=False, type='bool'),
+ replay_window=dict(required=False, type='str', choices=['0', '64', '128', '256', '512', '1024'])
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.uuid = None
+ self.na_helper = NetAppModule(self.module)
+ self.parameters = self.na_helper.check_and_set_parameters(self.module)
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_ipsec_config:', 9, 8)
+
+ def get_security_ipsec_config(self):
+ """Get IPsec config details"""
+ record, error = rest_generic.get_one_record(self.rest_api, 'security/ipsec', None, 'enabled,replay_window')
+ if error:
+ self.module.fail_json(msg="Error fetching security IPsec config: %s" % to_native(error), exception=traceback.format_exc())
+ if record:
+ return {
+ 'enabled': record.get('enabled'),
+ 'replay_window': record.get('replay_window')
+ }
+ return None
+
+ def modify_security_ipsec_config(self, modify):
+ """
+ Modify security ipsec config
+ """
+ dummy, error = rest_generic.patch_async(self.rest_api, 'security/ipsec', None, modify)
+ if error:
+ self.module.fail_json(msg='Error modifying security IPsec config: %s.' % to_native(error), exception=traceback.format_exc())
+
+ def apply(self):
+ modify = self.na_helper.get_modified_attributes(self.get_security_ipsec_config(), self.parameters)
+ if self.na_helper.changed and not self.module.check_mode:
+ self.modify_security_ipsec_config(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ ipsec_config = NetAppOntapSecurityIPsecConfig()
+ ipsec_config.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_policy.py
new file mode 100644
index 000000000..e02e0df64
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_policy.py
@@ -0,0 +1,458 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc. GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_security_ipsec_policy
+short_description: NetApp ONTAP module to create, modify or delete security IPsec policy.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '22.1.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create, modify or delete security IPsec policy.
+options:
+ state:
+ description:
+ - Create or delete security IPsec policy.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ - Name of the security IPsec policy
+ type: str
+ required: true
+ action:
+ description:
+ - Action for the IPsec policy.
+ - Cannot modify after create.
+ type: str
+ choices: ['bypass', 'discard', 'esp_transport', 'esp_udp']
+ authentication_method:
+ description:
+ - Authentication method for the IPsec policy.
+ - Supported from 9.10.1 or later.
+ - Cannot modify after create.
+ type: str
+ choices: ['none', 'psk', 'pki']
+ certificate:
+ description:
+ - Certificate for the IPsec policy.
+ - Supported from 9.10.1 or later.
+ - Required when C(authentication_method) is 'pki' in create.
+ type: str
+ enabled:
+ description:
+ - Indicates whether or not the policy is enabled.
+ type: bool
+ ipspace:
+ description:
+ - IPspace name where C(svm) exist.
+ type: str
+ local_endpoint:
+ description:
+ - Local endpoint for the IPsec policy.
+ type: dict
+ suboptions:
+ address:
+ description:
+ - IPv4 or IPv6 address.
+ type: str
+ required: true
+ netmask:
+ description:
+ - Input as netmask length (16) or IPv4 mask (255.255.0.0).
+ - For IPv6, the default value is 64 with a valid range of 1 to 127.
+ type: str
+ required: true
+ port:
+ description:
+ - Application port to be covered by the IPsec policy, example 23.
+ type: str
+ remote_endpoint:
+ description:
+ - remote endpoint for the IPsec policy.
+ type: dict
+ suboptions:
+ address:
+ description:
+ - IPv4 or IPv6 address.
+ type: str
+ required: true
+ netmask:
+ description:
+ - Input as netmask length (16) or IPv4 mask (255.255.0.0).
+ - For IPv6, the default value is 64 with a valid range of 1 to 127.
+ type: str
+ required: true
+ port:
+ description:
+ - Application port to be covered by the IPsec policy, example 23 or 23-23.
+ type: str
+ local_identity:
+ description:
+ - local IKE endpoint's identity for authentication purpose.
+ type: str
+ remote_identity:
+ description:
+ - remote IKE endpoint's identity for authentication purpose.
+ type: str
+ protocol:
+ description:
+      - protocol to be protected by this policy.
+ - example 'any' or '0', 'tcp', 'udp' or protocol number.
+ type: str
+ secret_key:
+ description:
+ - Pre-shared key for IKE negotiation.
+ - Required when C(authentication_method) is 'psk' in create.
+ - Cannot modify after create.
+ type: str
+ svm:
+ description:
+ - The name of the SVM.
+ - Required when creating security IPsec policy.
+ type: str
+
+notes:
+ - Supports check_mode.
+ - Only supported with REST and requires ONTAP 9.8 or later.
+"""
+
+EXAMPLES = """
+ - name: Create security IPsec policy with pre-shared Keys.
+ netapp.ontap.na_ontap_security_ipsec_policy:
+ name: ipsec_policy_psk
+ ipspace: Default
+ svm: ansibleSVM
+ authentication_method: psk
+ secret_key: "{{ secret_key }}"
+ action: esp_transport
+ local_endpoint:
+ address: 10.23.43.23
+ netmask: 24
+ port: 201
+ remote_endpoint:
+ address: 10.23.43.30
+ netmask: 24
+ port: 205
+ protocol: tcp
+ enabled: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Create security IPsec policy with certificates.
+ netapp.ontap.na_ontap_security_ipsec_policy:
+ name: ipsec_policy_pki
+ ipspace: Default
+ svm: ansibleSVM
+ authentication_method: pki
+ certificate: "{{ cert_name }}"
+ action: esp_transport
+ local_endpoint:
+ address: 10.23.43.23
+ netmask: 24
+ port: 201
+ remote_endpoint:
+ address: 10.23.43.30
+ netmask: 24
+ port: 205
+ protocol: tcp
+ enabled: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Create security IPsec policy without psk or certificates.
+ netapp.ontap.na_ontap_security_ipsec_policy:
+ name: ipsec_policy_none
+ ipspace: Default
+ svm: ansibleSVM
+ action: bypass
+ local_endpoint:
+ address: 10.23.43.23
+ netmask: 24
+ port: 201
+ remote_endpoint:
+ address: 10.23.43.30
+ netmask: 24
+ port: 205
+ protocol: tcp
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Modify security IPsec policy local, remote end_point.
+ netapp.ontap.na_ontap_security_ipsec_policy:
+ name: ipsec_policy_pki
+ ipspace: Default
+ svm: ansibleSVM
+ authentication_method: pki
+ certificate: "{{ cert_name }}"
+ action: esp_transport
+ local_endpoint:
+ address: 10.23.43.50
+ netmask: 24
+ port: 201
+ remote_endpoint:
+ address: 10.23.43.60
+ netmask: 24
+ port: 205
+ protocol: tcp
+ enabled: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Modify security IPsec protocol, enable options.
+ netapp.ontap.na_ontap_security_ipsec_policy:
+ name: ipsec_policy_pki
+ ipspace: Default
+ svm: ansibleSVM
+ authentication_method: pki
+ certificate: "{{ cert_name }}"
+ action: esp_transport
+ local_endpoint:
+ address: 10.23.43.50
+ netmask: 24
+ port: 201
+ remote_endpoint:
+ address: 10.23.43.60
+ netmask: 24
+ port: 205
+ protocol: udp
+ enabled: false
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Delete security IPsec policy.
+ netapp.ontap.na_ontap_security_ipsec_policy:
+ name: ipsec_policy_pki
+ svm: ansibleSVM
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: "{{ validate_certs }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, netapp_ipaddress
+
+
+class NetAppOntapSecurityIPsecPolicy:
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ action=dict(required=False, type='str', choices=['bypass', 'discard', 'esp_transport', 'esp_udp']),
+ authentication_method=dict(required=False, type='str', choices=['none', 'psk', 'pki']),
+ certificate=dict(required=False, type='str'),
+ enabled=dict(required=False, type='bool'),
+ ipspace=dict(required=False, type='str'),
+ local_endpoint=dict(required=False, type='dict', options=dict(
+ address=dict(required=True, type='str'),
+ netmask=dict(required=True, type='str'),
+ port=dict(required=False, type='str')
+ )),
+ local_identity=dict(required=False, type='str'),
+ remote_identity=dict(required=False, type='str'),
+ protocol=dict(required=False, type='str'),
+ remote_endpoint=dict(required=False, type='dict', options=dict(
+ address=dict(required=True, type='str'),
+ netmask=dict(required=True, type='str'),
+ port=dict(required=False, type='str')
+ )),
+ secret_key=dict(required=False, type='str', no_log=True),
+ svm=dict(required=False, type='str')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ mutually_exclusive=[('secret_key', 'certificate')],
+ required_if=[
+ ('authentication_method', 'psk', ['secret_key']),
+ ('authentication_method', 'pki', ['certificate'])
+ ],
+ supports_check_mode=True
+ )
+ self.uuid = None
+ self.na_helper = NetAppModule(self.module)
+ self.parameters = self.na_helper.check_and_set_parameters(self.module)
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_ipsec_policy', 9, 8)
+ partially_supported_rest_properties = [['authentication_method', (9, 10, 1)], ['certificate', (9, 10, 1)]]
+ self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties)
+ self.parameters = self.na_helper.filter_out_none_entries(self.parameters)
+ if self.parameters['state'] == 'present':
+ self.validate_ipsec()
+
+ def validate_ipsec(self):
+ """
+ validate ipsec options.
+ """
+ for end_point in ['local_endpoint', 'remote_endpoint']:
+ if self.parameters.get(end_point):
+ self.parameters[end_point]['address'] = netapp_ipaddress.validate_and_compress_ip_address(self.parameters[end_point]['address'], self.module)
+ self.parameters[end_point]['netmask'] = str(netapp_ipaddress.netmask_to_netmask_length(self.parameters[end_point]['address'],
+ self.parameters[end_point]['netmask'], self.module))
+ # ONTAP returns port in port ranges. 120 set is returned as 120-120
+ if self.parameters[end_point].get('port') and '-' not in self.parameters[end_point]['port']:
+ self.parameters[end_point]['port'] = self.parameters[end_point]['port'] + '-' + self.parameters[end_point]['port']
+ # if action is bypass/discard and auth_method is psk/pki then security_key/certificate will be ignored in REST.
+ # so delete the authentication_method, secret_key and certificate to avoid idempotent issue.
+ if self.parameters.get('action') in ['bypass', 'discard'] and self.parameters.get('authentication_method') != 'none':
+ msg = "The IPsec action is %s, which does not provide packet protection. The authentication_method and " % self.parameters['action']
+ self.parameters.pop('authentication_method', None)
+ if self.parameters.get('secret_key'):
+ del self.parameters['secret_key']
+ self.module.warn(msg + 'secret_key options are ignored')
+ if self.parameters.get('certificate'):
+ del self.parameters['certificate']
+ self.module.warn(msg + 'certificate options are ignored')
+ # mapping protocol number to protocol to avoid idempotency issue.
+ protocols_info = {'6': 'tcp', '17': 'udp', '0': 'any'}
+ if self.parameters.get('protocol') in protocols_info:
+ self.parameters['protocol'] = protocols_info[self.parameters['protocol']]
+
+ def get_security_ipsec_policy(self):
+ """
+ Get security ipsec policy.
+ """
+ api = 'security/ipsec/policies'
+ query = {
+ 'name': self.parameters['name'],
+ 'fields': 'uuid,enabled,local_endpoint,local_identity,remote_identity,protocol,remote_endpoint,action'
+ }
+ if self.parameters.get('authentication_method'):
+ query['fields'] += ',authentication_method'
+ if self.parameters.get('certificate'):
+ query['fields'] += ',certificate'
+ if self.parameters.get('svm'):
+ query['svm.name'] = self.parameters['svm']
+ else:
+ query['scope'] = 'cluster'
+ # Cannot get current IPsec policy with ipspace - burt1519419
+ # if self.parameters.get('ipspace'):
+ # query['ipspace.name'] = self.parameters['ipspace']
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg='Error fetching security ipsec policy %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if record:
+ self.uuid = record['uuid']
+ return {
+ 'action': self.na_helper.safe_get(record, ['action']),
+ 'authentication_method': self.na_helper.safe_get(record, ['authentication_method']),
+ 'certificate': self.na_helper.safe_get(record, ['certificate', 'name']),
+ 'enabled': self.na_helper.safe_get(record, ['enabled']),
+ 'local_endpoint': self.na_helper.safe_get(record, ['local_endpoint']),
+ 'local_identity': self.na_helper.safe_get(record, ['local_identity']),
+ 'protocol': self.na_helper.safe_get(record, ['protocol']),
+ 'remote_endpoint': self.na_helper.safe_get(record, ['remote_endpoint']),
+ 'remote_identity': self.na_helper.safe_get(record, ['remote_identity'])
+ }
+ return None
+
+ def create_security_ipsec_policy(self):
+ """
+ Create security ipsec policy
+ """
+ api = 'security/ipsec/policies'
+ dummy, error = rest_generic.post_async(self.rest_api, api, self.form_create_modify_body())
+ if error:
+ self.module.fail_json(msg='Error creating security ipsec policy %s: %s.' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_security_ipsec_policy(self, modify):
+ """
+ Modify security ipsec policy.
+ """
+ api = 'security/ipsec/policies'
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, self.form_create_modify_body(modify))
+ if error:
+ self.module.fail_json(msg='Error modifying security ipsec policy %s: %s.' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_security_ipsec_policy(self):
+ """
+ Delete security ipsec policy.
+ """
+ api = 'security/ipsec/policies'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid)
+ if error:
+ self.module.fail_json(msg='Error deleting security ipsec policy %s: %s.' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def form_create_modify_body(self, params=None):
+ """
+ Returns body for create or modify.
+ """
+ if params is None:
+ params = self.parameters
+ body = {}
+ keys = ['name', 'action', 'authentication_method', 'enabled', 'secret_key',
+ 'local_endpoint', 'local_identity', 'remote_identity', 'protocol', 'remote_endpoint']
+ for key in keys:
+ if key in params:
+ body[key] = self.parameters[key]
+ if 'certificate' in params:
+ body['certificate.name'] = self.parameters['certificate']
+ if 'ipspace' in params:
+ body['ipspace.name'] = self.parameters['ipspace']
+ if 'svm' in params:
+ body['svm.name'] = self.parameters['svm']
+ return body
+
+ def apply(self):
+ current = self.get_security_ipsec_policy()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ error_keys = [key for key in modify if key in ['authentication_method', 'action']]
+ if error_keys:
+ plural = 's' if len(error_keys) > 1 else ''
+ self.module.fail_json(msg="Error: cannot modify option%s - %s." % (plural, ", ".join(error_keys)))
+ # Expected ONTAP to throw error but restarts instead, if try to set certificate where auth_method is none.
+ if modify.get('certificate') and current['authentication_method'] == 'none':
+ self.module.fail_json(msg="Error: cannot set certificate for IPsec policy where authentication_method is none")
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_security_ipsec_policy()
+ elif cd_action == 'delete':
+ self.delete_security_ipsec_policy()
+ else:
+ self.modify_security_ipsec_policy(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ ipsec_obj = NetAppOntapSecurityIPsecPolicy()
+ ipsec_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py
new file mode 100644
index 000000000..fcab16e40
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py
@@ -0,0 +1,640 @@
+#!/usr/bin/python
+
+# (c) 2019-2022, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+
+module: na_ontap_security_key_manager
+
+short_description: NetApp ONTAP security key manager.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Add or delete or setup key management on NetApp ONTAP.
+ - With ZAPI, this module is limited to adding or removing external key servers. It does not manage certificates.
+ - With REST, this module can create an external key manager and certificates are required for creation.
+ - With REST, onboard key manager is supported.
+
+options:
+
+ state:
+ description:
+ - Whether the specified key manager should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ ip_address:
+ description:
+ - The IP address of the external key management server.
+ - Mutually exclusive with external and onboard options.
+ - Required with ZAPI.
+ required: false
+ type: str
+
+ tcp_port:
+ description:
+ - The TCP port on which the key management server listens for incoming connections.
+ default: 5696
+ type: int
+
+ node:
+ description:
+ - The node which key management server runs on.
+ - Ignored, a warning is raised if present.
+ - Deprecated as of 21.22.0, as it was never used.
+ type: str
+
+ external:
+ description:
+ - Configures external key manager.
+ - Not supported with ZAPI.
+ - Mutually exclusive with ip_address and onboard.
+ type: dict
+ suboptions:
+ client_certificate:
+ description:
+ - Client certificate name (already installed in the cluster or SVM).
+ - Required when creating an external key manager.
+ type: str
+ server_ca_certificates:
+ description:
+ - List of server CA certificate names (already installed in the cluster or SVM).
+ - Required when creating an external key manager.
+ type: list
+ elements: str
+ servers:
+ description:
+ - List of external key servers for key management.
+ - Format - ip_address:port or FQDN:port. port defaults to the value of C(tcp_port) when not provided.
+ - The order in the list is not preserved if the key-manager already exists.
+ type: list
+ elements: str
+ version_added: 21.23.0
+
+ onboard:
+ description:
+ - Configures onboard key management.
+ - Not supported with ZAPI.
+      - Mutually exclusive with ip_address and external.
+ type: dict
+ suboptions:
+ from_passphrase:
+ description:
+ - The cluster-wide passphrase.
+          - Ignored if the onboard key manager does not already exist.
+ - Required to change the passphrase.
+ type: str
+ passphrase:
+ description:
+ - The cluster-wide passphrase.
+ type: str
+ synchronize:
+ description:
+ - Synchronizes missing onboard keys on any node in the cluster.
+ type: bool
+ default: false
+ version_added: 21.23.0
+
+ vserver:
+ description:
+ - SVM name when using an external key manager.
+ - Not supported for onboard key manager.
+ - Not supported with ZAPI.
+ type: str
+ version_added: 21.23.0
+
+notes:
+ - Though C(node) is accepted as a parameter, it is not used in the module.
+ - Supports check_mode.
+ - Only supported at cluster level with ZAPI, or for onboard.
+  - ZAPI support relies on deprecated APIs since ONTAP 9.6.
+"""
+
+EXAMPLES = """
+ # Assuming module_defaults are used to set up hostname, username, password, https, validate_certs
+
+ - name: Delete Key Manager
+ tags:
+ - delete
+ netapp.ontap.na_ontap_security_key_manager:
+ state: absent
+
+ - name: Add Key Manager - ZAPI
+ tags:
+ - add
+ netapp.ontap.na_ontap_security_key_manager:
+ ip_address: 0.0.0.0
+
+ - name: Add/Modify external Key Manager - REST
+ netapp.ontap.na_ontap_security_key_manager:
+ state: present
+ external:
+ servers: 10.10.10.10:5696
+ client_certificate: kmip_client
+ server_ca_certificates: kmip_ca
+ vserver: "{{ vserver | default(omit) }}"
+
+ - name: Add/Modify external Key Manager - REST
+ netapp.ontap.na_ontap_security_key_manager:
+ state: present
+ external:
+ servers: 10.10.10.10:5696,10.10.10.10:5697,10.10.10.11:5696
+ client_certificate: kmip_client
+ server_ca_certificates: kmip_ca
+ vserver: "{{ vserver | default(omit) }}"
+
+ - name: Add onboard Key Manager
+ netapp.ontap.na_ontap_security_key_manager:
+ state: present
+ onboard:
+ passphrase: "hello, le soleil brille, brille, brille!"
+
+ - name: Change passphrase for onboard Key Manager
+ netapp.ontap.na_ontap_security_key_manager:
+ state: present
+ onboard:
+ from_passphrase: "hello, le soleil brille, brille, brille!"
+ passphrase: "hello, le soleil brille, brille, brille! - 2"
+ synchronize: true
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapSecurityKeyManager:
+ """class with key manager operations"""
+
+ def __init__(self):
+ """Initialize module parameters"""
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ ip_address=dict(required=False, type='str'),
+ node=dict(required=False, type='str'),
+ tcp_port=dict(required=False, type='int', default=5696),
+ external=dict(type='dict', options=dict(
+ client_certificate=dict(type='str'),
+ server_ca_certificates=dict(type='list', elements='str'),
+ servers=dict(type='list', elements='str'),
+ )),
+ onboard=dict(type='dict', options=dict(
+ from_passphrase=dict(type='str', no_log=True),
+ passphrase=dict(type='str', no_log=True),
+ synchronize=dict(type='bool', default=False),
+ )),
+ vserver=dict(type='str'),
+ )
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ mutually_exclusive=[
+ ('external', 'onboard'),
+ ('ip_address', 'onboard'),
+ ('ip_address', 'external'),
+ ('onboard', 'vserver'),
+ ],
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ if 'node' in self.parameters:
+ self.module.warn('The option "node" is deprecated and should not be used.')
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if self.use_rest:
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_key_manager', 9, 7)
+ self.uuid = None
+ self.scope, self.resource = self.set_scope(self.parameters.get('vserver'))
+ # expand parameters to match REST returned info
+ self.update_parameters_rest()
+ else:
+ rest_only = [x for x in ('external', 'onboard', 'vserver') if x in self.parameters]
+ if rest_only:
+ self.module.fail_json(msg='Error: REST is required for %s option%s.'
+ % (', '.join(rest_only), 's' if len(rest_only) > 1 else ''))
+ if 'ip_address' not in self.parameters:
+ self.module.fail_json(msg='missing required arguments: ip_address')
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def add_port(self, server):
+ """ ONTAP automatically adds :5696 when the port is not present
+ We need to add it to make the module idempotent
+ """
+ return server if ':' in server else '%s:%s' % (server, self.parameters['tcp_port'])
+
+ def update_parameters_rest(self):
+ """ expand parameters to match REST returned info
+ transform legacy input
+ """
+ if self.scope == 'svm':
+ self.parameters['svm'] = {'name': self.parameters.pop('vserver')}
+ servers = self.na_helper.safe_get(self.parameters, ['external', 'servers'])
+ if servers:
+ # eliminate any empty entry and add port when needed
+ self.parameters['external']['servers'] = [{'server': self.add_port(server)} for server in servers if server]
+
+ ip_address = self.parameters.pop('ip_address', None)
+ if ip_address:
+ ip_address += ':%s' % self.parameters.pop('tcp_port')
+ self.parameters['external'] = {'servers': [{'server': ip_address}]}
+
+ @staticmethod
+ def set_scope(vserver):
+ """ define the scope, and a user friendly resource name"""
+ return (
+ 'cluster' if vserver is None else 'svm',
+ 'cluster' if vserver is None else 'vserver: %s' % vserver
+ )
+
+ def get_key_manager(self):
+ """
+ get key manager by ip address.
+ :return: a dict of key manager
+ """
+ if self.use_rest:
+ return self.get_key_manager_rest()
+ key_manager_info = netapp_utils.zapi.NaElement('security-key-manager-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'key-manager-info', **{'key-manager-ip-address': self.parameters['ip_address']})
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ key_manager_info.add_child_elem(query)
+
+ try:
+ result = self.cluster.invoke_successfully(key_manager_info, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching key manager: %s' % to_native(error),
+ exception=traceback.format_exc())
+
+ return_value = None
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
+ key_manager = result.get_child_by_name('attributes-list').get_child_by_name('key-manager-info')
+ return_value = {}
+ if key_manager.get_child_by_name('key-manager-ip-address'):
+ return_value['ip_address'] = key_manager.get_child_content('key-manager-ip-address')
+ if key_manager.get_child_by_name('key-manager-server-status'):
+ return_value['server_status'] = key_manager.get_child_content('key-manager-server-status')
+ if key_manager.get_child_by_name('key-manager-tcp-port'):
+ return_value['tcp_port'] = int(key_manager.get_child_content('key-manager-tcp-port'))
+
+ return return_value
+
+ def key_manager_setup(self):
+ """
+ set up external key manager.
+ deprecated as of ONTAP 9.6.
+ """
+ key_manager_setup = netapp_utils.zapi.NaElement('security-key-manager-setup')
+ # if specify on-boarding passphrase, it is on-boarding key management.
+        # if not, then it's external key management.
+ try:
+ self.cluster.invoke_successfully(key_manager_setup, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting up key manager: %s' % to_native(error),
+ exception=traceback.format_exc())
+
+ def create_key_manager(self):
+ """
+ add key manager.
+ """
+ if self.use_rest:
+ return self.create_key_manager_rest()
+ key_manager_create = netapp_utils.zapi.NaElement('security-key-manager-add')
+ key_manager_create.add_new_child('key-manager-ip-address', self.parameters['ip_address'])
+ if self.parameters.get('tcp_port'):
+ key_manager_create.add_new_child('key-manager-tcp-port', str(self.parameters['tcp_port']))
+ try:
+ self.cluster.invoke_successfully(key_manager_create, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating key manager: %s' % to_native(error),
+ exception=traceback.format_exc())
+
+ def delete_key_manager(self):
+ """
+ delete key manager.
+ """
+ if self.use_rest:
+ return self.delete_key_manager_rest()
+ key_manager_delete = netapp_utils.zapi.NaElement('security-key-manager-delete')
+ key_manager_delete.add_new_child('key-manager-ip-address', self.parameters['ip_address'])
+ try:
+ self.cluster.invoke_successfully(key_manager_delete, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting key manager: %s' % to_native(error),
+ exception=traceback.format_exc())
+
+ def _get_security_certificate_uuid_rest_any(self, query, fields):
+ api = 'security/certificates'
+ query['scope'] = self.scope
+ if self.scope == 'svm':
+ # try first at SVM level
+ query['svm.name'] = self.parameters['svm']['name']
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+ if record and error is None:
+ return record, error
+ # retry at cluster scope
+ del query['svm.name']
+ query['scope'] = 'cluster'
+ return rest_generic.get_one_record(self.rest_api, api, query, fields)
+
+ def get_security_certificate_uuid_rest_97(self, name, type):
+ query = {'common_name': name, 'type': type}
+ fields = 'uuid,common_name,type'
+ return self._get_security_certificate_uuid_rest_any(query, fields)
+
+ def get_security_certificate_uuid_rest_98(self, name):
+ query = {'name': name}
+ fields = 'uuid,name,common_name,type'
+ return self._get_security_certificate_uuid_rest_any(query, fields)
+
+ def get_security_certificate_uuid_rest(self, name, type):
+ if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8):
+ record, error = self.get_security_certificate_uuid_rest_98(name)
+ message = 'certificate %s not found, retrying with common_name and type %s.'\
+ % (name, type)
+ else:
+ record, error = None, None
+ message = 'name is not supported in 9.6 or 9.7, using common_name %s and type %s.'\
+ % (name, type)
+ if not error and not record:
+ self.module.warn(message)
+ record, error = self.get_security_certificate_uuid_rest_97(name, type)
+ if not error and not record:
+ error = 'not found'
+ if error:
+ self.module.fail_json(msg='Error fetching security certificate info for %s of type: %s on %s: %s.' % (name, type, self.resource, error))
+ return record['uuid']
+
+ def get_key_manager_rest(self):
+ api = 'security/key-managers'
+ query = {'scope': self.scope}
+ fields = 'status,external,uuid,onboard'
+ if self.scope == 'svm':
+ query['svm.name'] = self.parameters['svm']['name']
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+ if error:
+ if self.scope == 'svm' and 'SVM "%s" does not exist' % self.parameters['svm']['name'] in error:
+ return None
+ self.module.fail_json(msg='Error fetching key manager info for %s: %s' % (self.resource, error))
+ if record:
+ self.uuid = record['uuid']
+ if 'external' in record and (self.na_helper.safe_get(record, ['onboard', 'enabled']) is False):
+ del record['onboard']
+ if 'external' in record and 'servers' in record['external']:
+ # remove extra fields that are readonly and not relevant for modify
+ record['external']['servers'] = [{'server': server['server']} for server in record['external']['servers']]
+ self.na_helper.remove_hal_links(record)
+
+ return record
+
+ def create_body(self, params):
+ if 'external' in params:
+ body = {'external': self.na_helper.filter_out_none_entries(params['external'])}
+ elif 'onboard' in params:
+ body = {'onboard': self.na_helper.filter_out_none_entries(params['onboard'])}
+ body['onboard'].pop('from_passphrase', None)
+ else:
+ return
+ if 'svm' in self.parameters:
+ body['svm'] = self.na_helper.filter_out_none_entries(self.parameters['svm'])
+ return body
+
+ def create_key_manager_rest(self, retrying=None):
+ api = 'security/key-managers'
+ body = self.create_body(self.parameters)
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ # ONTAP returns no record if external manager is configured but no server is present
+ if not retrying and ('already has external key management configured' in error
+ or 'External key management already configured' in error):
+ self.module.warn("deleting and recreating key manager as no key server is configured.")
+ self.delete_key_manager_rest()
+ time.sleep(5)
+ return self.create_key_manager_rest('retrying')
+ resource = 'cluster' if self.parameters.get('vserver') is None else self.parameters['vserver']
+ self.module.fail_json(msg='Error creating key manager for %s: %s' % (resource, error))
+
+ def modify_key_manager_rest(self, modify, current=None, return_error=False):
+ # external key servers cannot be updated in PATCH, they are handled later
+ key_servers = self.na_helper.safe_get(modify, ['external', 'servers'])
+ if key_servers:
+ del modify['external']['servers']
+ if not modify['external']:
+ del modify['external']
+ if modify:
+ api = 'security/key-managers'
+ body = self.create_body(modify)
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
+ if error:
+ if return_error:
+ return error
+ resource = 'cluster' if self.parameters.get('vserver') is None else self.parameters['vserver']
+ self.module.fail_json(msg='Error modifying key manager for %s: %s' % (resource, error))
+ if key_servers:
+ self.update_key_server_list(current)
+ return None
+
+ def check_passphrase_rest(self, passphrase):
+ """ API does not return the passphrase
+ In order to check for idempotency, check if the desired passphrase is already active"""
+ params = {
+ 'onboard': {
+ 'existing_passphrase': passphrase,
+ 'passphrase': passphrase,
+ }
+ }
+ error = self.modify_key_manager_rest(params, return_error=True)
+ if not error:
+ return 'unexpected_success in check_passphrase_rest', error
+ if 'Cluster-wide passphrase is incorrect.' in error:
+ return 'incorrect_passphrase', error
+ if 'New passphrase cannot be same as the old passphrase.' in error:
+ return 'current_passphrase', error
+ self.module.warn('Unexpected response in check_passphrase_rest: %s' % error)
+ return 'unexpected_error in check_passphrase_rest', error
+
+ def delete_key_manager_rest(self):
+ api = 'security/key-managers'
+ if self.uuid is None:
+ # ONTAP does not return a record when an external key manager is configured without any external server
+ query = {'scope': self.scope}
+ if self.scope == 'svm':
+ query['svm.name'] = self.parameters['svm']['name']
+ else:
+ query = None
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid, query)
+ if error:
+ resource = 'cluster' if self.parameters.get('vserver') is None else self.parameters['vserver']
+ self.module.fail_json(msg='Error deleting key manager for %s: %s' % (resource, error))
+
+ def validate_delete_action(self, current):
+ return
+
+ def validate_modify(self, current, modify):
+ error = None if self.use_rest else 'modify is not supported with ZAPI, new values: %s, current values: %s' % (modify, current)
+ if error:
+ self.module.fail_json(msg='Error, cannot modify existing configuraton: %s' % error)
+
+ def substitute_certificate_uuids(self, params):
+ if 'external' not in params:
+ return
+ certificate = self.na_helper.safe_get(params, ['external', 'client_certificate'])
+ if certificate:
+ params['external']['client_certificate'] = {'uuid': self.get_security_certificate_uuid_rest(certificate, 'client')}
+ certificates = self.na_helper.safe_get(params, ['external', 'server_ca_certificates'])
+ if certificates:
+ params['external']['server_ca_certificates'] = [{'uuid': self.get_security_certificate_uuid_rest(certificate, 'server_ca')}
+ for certificate in certificates]
+
+ def is_passphrase_update_required(self, passphrase, from_passphrase):
+ check_new, __ = self.check_passphrase_rest(passphrase)
+ if check_new == 'current_passphrase':
+ return False
+ check_old, error = self.check_passphrase_rest(from_passphrase)
+ if check_old == 'incorrect_passphrase' and check_new == 'incorrect_passphrase':
+ self.module.fail_json(msg='Error: neither from_passphrase nor passphrase match installed passphrase: %s' % error)
+ # if check_old is current, we're good to change the passphrase. For other errors, we'll just try again, we already warned.
+ return True
+
+ def force_onboard_actions(self):
+ """ synchronize and passphrase are not returned in GET so we need to be creative """
+ if 'onboard' not in self.parameters:
+ return None, None
+ passphrase = self.na_helper.safe_get(self.parameters, ['onboard', 'passphrase'])
+ # do we need to synchronize
+ modify_sync = None
+ if self.na_helper.safe_get(self.parameters, ['onboard', 'synchronize']):
+ if passphrase is None:
+ self.module.fail_json(msg='Error: passphrase is required for synchronize.')
+ modify_sync = {'onboard': {
+ 'synchronize': True,
+ 'existing_passphrase': passphrase
+ }}
+ # do we need to update the passphrase
+ modify_passphrase = None
+ from_passphrase = self.na_helper.safe_get(self.parameters, ['onboard', 'from_passphrase'])
+ if passphrase and not from_passphrase:
+ self.module.warn('passphrase is ignored')
+ if not passphrase and from_passphrase and not modify_sync:
+ self.module.warn('from_passphrase is ignored')
+ if passphrase and from_passphrase and self.is_passphrase_update_required(passphrase, from_passphrase):
+ modify_passphrase = {'onboard': {
+ 'passphrase': passphrase,
+ 'existing_passphrase': from_passphrase
+ }}
+ # wrapping up
+ if modify_passphrase or modify_sync:
+ self.na_helper.changed = True
+ return modify_passphrase, modify_sync
+
+ def validate_type_change(self, current):
+ """present moving from onboard to external and reciprocally"""
+ error = None
+ if 'onboard' in current and 'external' in self.parameters:
+ error = 'onboard key-manager is already installed, it needs to be deleted first.'
+ if 'external' in current and 'onboard' in self.parameters:
+ error = 'external key-manager is already installed, it needs to be deleted first.'
+ if error:
+ self.module.fail_json(msg='Error, cannot modify existing configuraton: %s' % error)
+
+ def local_get_modified_attributes(self, current):
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if current and 'external' in self.parameters and not self.na_helper.safe_get(modify, ['external', 'servers']):
+ current_servers = self.na_helper.safe_get(current, ['external', 'servers'])
+ desired_servers = self.na_helper.safe_get(self.parameters, ['external', 'servers'])
+ # order matters for key servers
+ if current_servers != desired_servers:
+ if 'external' not in modify:
+ modify['external'] = {}
+ modify['external']['servers'] = desired_servers
+ self.na_helper.changed = True
+ return modify
+
+ def add_external_server_rest(self, server):
+ api = 'security/key-managers/%s/key-servers' % self.uuid
+ body = {
+ 'server': server
+ }
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg='Error adding external key server %s: %s' % (server, error))
+
+ def remove_external_server_rest(self, server):
+ api = 'security/key-managers/%s/key-servers' % self.uuid
+ dummy, error = rest_generic.delete_async(self.rest_api, api, server)
+ if error:
+ self.module.fail_json(msg='Error removing external key server %s: %s' % (server, error))
+
+ def update_key_server_list(self, current):
+ desired_servers = self.na_helper.safe_get(self.parameters, ['external', 'servers'])
+ if desired_servers is None:
+ return
+ desired_servers = [server['server'] for server in desired_servers]
+ current_servers = self.na_helper.safe_get(current, ['external', 'servers']) or []
+ current_servers = [server['server'] for server in current_servers]
+ for server in current_servers:
+ if server not in desired_servers:
+ self.remove_external_server_rest(server)
+ for server in desired_servers:
+ if server not in current_servers:
+ self.add_external_server_rest(server)
+
+ def apply(self):
+ if not self.use_rest:
+ self.key_manager_setup()
+ current = self.get_key_manager()
+ if current:
+ self.validate_type_change(current)
+ if self.parameters['state'] == 'present':
+ self.substitute_certificate_uuids(self.parameters)
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.local_get_modified_attributes(current) if cd_action is None else None
+ # with onboard, changing a passphrase or synchronizing cannot be done in the same PATCH request
+ modify_passphrase, modify_sync = self.force_onboard_actions() if cd_action is None and current else (None, None)
+ if cd_action == 'delete' and self.use_rest:
+ self.validate_delete_action(current)
+ if modify:
+ self.validate_modify(current, modify)
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_key_manager()
+ elif cd_action == 'delete':
+ self.delete_key_manager()
+ elif modify:
+ self.modify_key_manager_rest(modify, current)
+ elif modify_passphrase:
+ self.modify_key_manager_rest(modify_passphrase)
+ elif modify_sync:
+ self.modify_key_manager_rest(modify_sync)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+    """Apply security key manager operations from playbook"""
+ obj = NetAppOntapSecurityKeyManager()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ssh.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ssh.py
new file mode 100644
index 000000000..e1f21246c
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ssh.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+'''
+na_ontap_security_ssh
+'''
+
+
+DOCUMENTATION = '''
+module: na_ontap_security_ssh
+short_description: NetApp ONTAP security ssh
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.24.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Modify SSH server configuration of SVM on ONTAP
+options:
+ state:
+ description:
+ - SSH service is always enabled.
+ choices: ['present']
+ type: str
+ default: present
+ vserver:
+ description:
+ - Name of the vserver to use for vserver scope.
+ - If absent or null, cluster scope is assumed.
+ type: str
+ ciphers:
+ description:
+ - Ciphers for encrypting the data.
+ - Example list [ aes256_ctr, aes192_ctr, aes128_ctr, aes256_cbc, aes192_cbc ]
+ type: list
+ elements: str
+ key_exchange_algorithms:
+ description:
+ - Key exchange algorithms.
+ - Example list [ diffie_hellman_group_exchange_sha256, diffie_hellman_group14_sha1 ]
+ type: list
+ elements: str
+ mac_algorithms:
+ description:
+ - MAC algorithms.
+ - Example list [ hmac_sha1, hmac_sha2_512_etm ]
+ type: list
+ elements: str
+ max_authentication_retry_count:
+ description:
+ - Maximum authentication retries allowed before closing the connection.
+ - Minimum value is 2 and maximum is 6.
+ - Default value is 2.
+ type: int
+
+notes:
+ - Removing all SSH key exchange algorithms is not supported. SSH login would fail.
+ - This module is only for REST.
+'''
+
+EXAMPLES = """
+ - name: Modify SSH algorithms
+ netapp.ontap.na_ontap_security_ssh:
+ vserver: vserverName
+ ciphers: ["aes256_ctr", "aes192_ctr"]
+ key_exchange_algorithms: ["diffie_hellman_group_exchange_sha256"]
+ mac_algorithms: ["hmac_sha1"]
+ max_authentication_retry_count: 6
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify SSH algorithms at cluster level
+ netapp.ontap.na_ontap_security_ssh:
+ vserver:
+ ciphers: ["aes256_ctr", "aes192_ctr"]
+ key_exchange_algorithms: ["diffie_hellman_group_exchange_sha256"]
+ mac_algorithms: ["hmac_sha1"]
+ max_authentication_retry_count: 6
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify SSH algorithms at cluster level
+ netapp.ontap.na_ontap_security_ssh:
+ ciphers: ["aes256_ctr", "aes192_ctr"]
+ key_exchange_algorithms: ["diffie_hellman_group_exchange_sha256"]
+ mac_algorithms: ["hmac_sha1"]
+ max_authentication_retry_count: 6
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapSecuritySSH:
+ """ object initialize and class methods """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present'], default='present'),
+ vserver=dict(required=False, type='str'),
+ ciphers=dict(required=False, type='list', elements='str'),
+ key_exchange_algorithms=dict(required=False, type='list', elements='str', no_log=False),
+ mac_algorithms=dict(required=False, type='list', elements='str'),
+ max_authentication_retry_count=dict(required=False, type='int'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule(self)
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.svm_uuid = None
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_ssh', 9, 10, 1)
+ self.safe_strip()
+
+ def safe_strip(self):
+        """ strip leading and trailing spaces from each item and remove empty strings"""
+ for option in ('ciphers', 'key_exchange_algorithms', 'mac_algorithms'):
+ if option in self.parameters:
+ self.parameters[option] = [item.strip() for item in self.parameters[option] if len(item.strip())]
+ # Validation of input parameters
+ if self.parameters[option] == []:
+ self.module.fail_json(msg="Removing all SSH %s is not supported. SSH login would fail. "
+ "There must be at least one %s associated with the SSH configuration." % (option, option))
+ return
+
+ def get_security_ssh_rest(self):
+ '''
+ Retrieves the SSH server configuration for the SVM or cluster.
+ '''
+ fields = ['key_exchange_algorithms', 'ciphers', 'mac_algorithms', 'max_authentication_retry_count']
+ query = {}
+ if self.parameters.get('vserver'):
+ api = 'security/ssh/svms'
+ query['svm.name'] = self.parameters['vserver']
+ fields.append('svm.uuid')
+ else:
+ api = 'security/ssh'
+ query['fields'] = ','.join(fields)
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg=error)
+ return record
+
+ def modify_security_ssh_rest(self, modify):
+ '''
+ Updates the SSH server configuration for the specified SVM.
+ '''
+ if self.parameters.get('vserver'):
+ if self.svm_uuid is None:
+ self.module.fail_json(msg="Error: no uuid found for the SVM")
+ api = 'security/ssh/svms'
+ else:
+ api = 'security/ssh'
+ body = {}
+ for option in ('ciphers', 'key_exchange_algorithms', 'mac_algorithms', 'max_authentication_retry_count'):
+ if option in modify:
+ body[option] = modify[option]
+ if body:
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def apply(self):
+ current = self.get_security_ssh_rest()
+ self.svm_uuid = self.na_helper.safe_get(current, ['svm', 'uuid']) if current and self.parameters.get('vserver') else None
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if self.na_helper.changed and not self.module.check_mode:
+ self.modify_security_ssh_rest(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ """ Create object and call apply """
+ ssh_obj = NetAppOntapSecuritySSH()
+ ssh_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_policy.py
new file mode 100644
index 000000000..f2969f720
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_policy.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+
+# (c) 2021-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_service_policy
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_service_policy
+
+short_description: NetApp ONTAP service policy configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 21.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Add, modify, or remove service policies.
+ - This module requires ONTAP 9.8 or later, and only supports REST.
+
+options:
+ state:
+ description:
+ - Whether the specified service policy should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+ name:
+ description:
+ - The name of the service policy.
+ required: true
+ type: str
+ ipspace:
+ description:
+ - Name of the ipspace.
+ - Required for cluster-scoped service policies.
+ - Optional for SVM-scoped service policies.
+ type: str
+ services:
+ description:
+ - List of services to associate to this service policy.
+ - To remove all services, use "no_service". No other value is allowed when no_service is present.
+ - Note - not all versions of ONTAP support all values, and new ones may be added.
+      - See C(known_services) and C(additional_services) to address unknown service errors.
+ type: list
+ elements: str
+ vserver:
+ description:
+ - The name of the vserver to use.
+ - Omit this option for cluster scoped user accounts.
+ type: str
+ scope:
+ description:
+ - Set to "svm" for interfaces owned by an SVM. Otherwise, set to "cluster".
+ - svm is assumed if vserver is set.
+      - cluster is assumed if vserver is not set.
+ type: str
+ choices: ['cluster', 'svm']
+ known_services:
+ description:
+ - List of known services in 9.11.1
+ - An error is raised if any service in C(services) is not in this list or C(new_services).
+ - Modify this list to restrict the services you want to support if needed.
+ default: [cluster_core, intercluster_core, management_core, management_autosupport, management_bgp, management_ems, management_https, management_http,
+ management_ssh, management_portmap, data_core, data_nfs, data_cifs, data_flexcache, data_iscsi, data_s3_server, data_dns_server,
+ data_fpolicy_client, management_ntp_client, management_dns_client, management_ad_client, management_ldap_client, management_nis_client,
+ management_snmp_server, management_rsh_server, management_telnet_server, management_ntp_server, data_nvme_tcp, backup_ndmp_control]
+ type: list
+ elements: str
+ version_added: 22.0.0
+ additional_services:
+ description:
+ - As an alternative to updating the C(known_services), new services can be specified here.
+ type: list
+ elements: str
+ version_added: 22.0.0
+
+notes:
+ - This module supports check_mode.
+ - This module does not support 'allowed-addresses' as REST does not support it. It defaults to 0.0.0.0/0.
+'''
+
+EXAMPLES = """
+
+ - name: Create service policy
+ netapp.ontap.na_ontap_service_policy:
+ state: present
+ name: "{{ service_policy_name }}"
+ services:
+ - data_core
+ - data_nfs
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete single service policy
+ netapp.ontap.na_ontap_service_policy:
+ state: absent
+ name: "{{ service_policy_name }}"
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify single service policy
+ netapp.ontap.na_ontap_service_policy:
+ state: present
+ name: "{{ service_policy_name }}"
+ services:
+ - data_core
+ - data_nfs
+ - data_cifs
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify service policy, remove services
+ netapp.ontap.na_ontap_service_policy:
+ state: present
+ name: "{{ service_policy_name }}"
+ services:
+ - no_service
+ vserver: "{{ vserver }}"
+
+ - name: Modify service policy at cluster level
+ netapp.ontap.na_ontap_service_policy:
+ state: present
+ name: "{{ service_policy_name }}"
+ ipspace: ansibleIpspace
+ scope: cluster
+ services:
+ - management_core
+ - management_autosupport
+ - management_ems
+"""
+
+RETURN = """
+cd_action:
+  description: whether the service policy is created or deleted.
+ returned: success
+ type: str
+
+modify:
+  description: attributes that were modified if the service policy already exists.
+ returned: success
+ type: dict
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapServicePolicy:
+ """
+    Common operations to manage service policies.
+ """
+
+ def __init__(self):
+ self.use_rest = False
+ argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ ipspace=dict(type='str'),
+ scope=dict(type='str', choices=['cluster', 'svm']),
+ services=dict(type='list', elements='str'),
+ vserver=dict(type='str'),
+ known_services=dict(type='list', elements='str',
+ default=['cluster_core', 'intercluster_core', 'management_core', 'management_autosupport', 'management_bgp', 'management_ems',
+ 'management_https', 'management_http', 'management_ssh', 'management_portmap', 'data_core', 'data_nfs', 'data_cifs',
+ 'data_flexcache', 'data_iscsi', 'data_s3_server', 'data_dns_server', 'data_fpolicy_client', 'management_ntp_client',
+ 'management_dns_client', 'management_ad_client', 'management_ldap_client', 'management_nis_client',
+ 'management_snmp_server', 'management_rsh_server', 'management_telnet_server', 'management_ntp_server',
+ 'data_nvme_tcp', 'backup_ndmp_control']),
+ additional_services=dict(type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('scope', 'cluster', ['ipspace']),
+ ('scope', 'svm', ['vserver']),
+ ('vserver', None, ['ipspace']),
+ ],
+ required_one_of=[
+ ('ipspace', 'vserver')
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # REST API is required
+ self.rest_api = OntapRestAPI(self.module)
+ # check version
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_service_policy', 9, 8)
+ self.validate_inputs()
+
+ def validate_inputs(self):
+ services = self.parameters.get('services')
+ if services and 'no_service' in services:
+ if len(services) > 1:
+ self.module.fail_json(msg='Error: no other service can be present when no_service is specified. Got: %s' % services)
+ self.parameters['services'] = []
+ known_services = self.parameters.get('known_services', []) + self.parameters.get('additional_services', [])
+ unknown_services = [service for service in self.parameters.get('services', []) if service not in known_services]
+ if unknown_services:
+ plural = 's' if len(services) > 1 else ''
+ self.module.fail_json(msg='Error: unknown service%s: %s. New services may need to be added to "additional_services".'
+ % (plural, ','.join(unknown_services)))
+
+ scope = self.parameters.get('scope')
+ if scope is None:
+ self.parameters['scope'] = 'cluster' if self.parameters.get('vserver') is None else 'svm'
+ elif scope == 'cluster' and self.parameters.get('vserver') is not None:
+ self.module.fail_json(msg='Error: vserver cannot be set when "scope: cluster" is specified. Got: %s' % self.parameters.get('vserver'))
+ elif scope == 'svm' and self.parameters.get('vserver') is None:
+ self.module.fail_json(msg='Error: vserver cannot be None when "scope: svm" is specified.')
+
+ def get_service_policy(self):
+ api = 'network/ip/service-policies'
+ query = {
+ 'name': self.parameters['name'],
+ 'fields': 'name,uuid,ipspace,services,svm,scope'
+ }
+ if self.parameters.get('vserver') is None:
+            # vserver is empty for cluster
+ query['scope'] = 'cluster'
+ else:
+ query['svm.name'] = self.parameters['vserver']
+
+ if self.parameters.get('ipspace') is not None:
+ query['ipspace.name'] = self.parameters['ipspace']
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error:
+ msg = "Error in get_service_policy: %s" % error
+ self.module.fail_json(msg=msg)
+ if record:
+ return {
+ 'uuid': record['uuid'],
+ 'name': record['name'],
+ 'ipspace': record['ipspace']['name'],
+ 'scope': record['scope'],
+ 'vserver': self.na_helper.safe_get(record, ['svm', 'name']),
+ 'services': record['services']
+ }
+ return None
+
+ def create_service_policy(self):
+ api = 'network/ip/service-policies'
+ body = {
+ 'name': self.parameters['name']
+ }
+ if self.parameters.get('vserver') is not None:
+ body['svm.name'] = self.parameters['vserver']
+
+ for attr in ('ipspace', 'scope', 'services'):
+ value = self.parameters.get(attr)
+ if value is not None:
+ body[attr] = value
+
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ msg = "Error in create_service_policy: %s" % error
+ self.module.fail_json(msg=msg)
+
+ def modify_service_policy(self, current, modify):
+ # sourcery skip: dict-comprehension
+ api = 'network/ip/service-policies/%s' % current['uuid']
+ modify_copy = dict(modify)
+ body = {}
+ for key in modify:
+ if key in ('services',):
+ body[key] = modify_copy.pop(key)
+ if modify_copy:
+ msg = 'Error: attributes not supported in modify: %s' % modify_copy
+ self.module.fail_json(msg=msg)
+ if not body:
+ msg = 'Error: nothing to change - modify called with: %s' % modify
+ self.module.fail_json(msg=msg)
+
+ dummy, error = rest_generic.patch_async(self.rest_api, api, None, body)
+ if error:
+ msg = "Error in modify_service_policy: %s" % error
+ self.module.fail_json(msg=msg)
+
+ def delete_service_policy(self, current):
+ api = 'network/ip/service-policies/%s' % current['uuid']
+
+ dummy, error = rest_generic.delete_async(self.rest_api, api, None, None)
+ if error:
+ msg = "Error in delete_service_policy: %s" % error
+ self.module.fail_json(msg=msg)
+
+ def get_actions(self):
+ """Determines whether a create, delete, modify action is required
+ """
+ cd_action, modify, current = None, None, None
+ current = self.get_service_policy()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ return cd_action, modify, current
+
+ def apply(self):
+ cd_action, modify, current = self.get_actions()
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_service_policy()
+ elif cd_action == 'delete':
+ self.delete_service_policy(current)
+ elif modify:
+ self.modify_service_policy(current, modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify, extra_responses={'scope': self.module.params})
+ self.module.exit_json(**result)
+
+
+def main():
+ obj = NetAppOntapServicePolicy()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py
new file mode 100644
index 000000000..4c0856f17
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py
@@ -0,0 +1,391 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_service_processor_network
+short_description: NetApp ONTAP service processor network
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Modify an ONTAP service processor network
+options:
+ state:
+ description:
+ - Whether the specified service processor network should exist or not.
+ choices: ['present']
+ type: str
+ default: present
+ address_type:
+ description:
+ - Specify address class.
+ required: true
+ type: str
+ choices: ['ipv4', 'ipv6']
+ is_enabled:
+ description:
+ - Specify whether to enable or disable the service processor network.
+ - Required with ZAPI.
+ - Disable service processor network status not supported in REST.
+ - Setting C(ip_address), C(netmask) or C(prefix_length), C(gateway_ip_address) will enable sp network in REST.
+ type: bool
+ node:
+ description:
+ - The node where the service processor network should be enabled
+ required: true
+ type: str
+ dhcp:
+ description:
+ - Specify dhcp type.
+ - Setting C(dhcp=none) requires all of C(ip_address), C(netmask), C(gateway_ip_address), and at least one of their values must differ from the current configuration.
+ type: str
+ choices: ['v4', 'none']
+ gateway_ip_address:
+ description:
+ - Specify the gateway ip.
+ type: str
+ ip_address:
+ description:
+ - Specify the service processor ip address.
+ type: str
+ netmask:
+ description:
+ - Specify the service processor netmask.
+ type: str
+ prefix_length:
+ description:
+ - Specify the service processor prefix_length.
+ type: int
+ wait_for_completion:
+ description:
+ - Set this parameter to 'true' for synchronous execution (wait until SP status is successfully updated)
+ - Set this parameter to 'false' for asynchronous execution
+ - For asynchronous, execution exits as soon as the request is sent, without checking SP status
+ type: bool
+ default: false
+ version_added: 2.8.0
+'''
+
+EXAMPLES = """
+ - name: Modify Service Processor Network, enable dhcp.
+ netapp.ontap.na_ontap_service_processor_network:
+ state: present
+ address_type: ipv4
+ is_enabled: true
+ dhcp: v4
+ node: "{{ netapp_node }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+import time
+
+
+class NetAppOntapServiceProcessorNetwork:
+    """
+    Modify a Service Processor Network on an ONTAP node, using either ZAPI or REST.
+    """
+
+    def __init__(self):
+        """
+        Initialize the NetAppOntapServiceProcessorNetwork class:
+        build the argument spec, validate dhcp-related option combinations,
+        and set up either a REST or a ZAPI connection.
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present'], default='present'),
+            address_type=dict(required=True, type='str', choices=['ipv4', 'ipv6']),
+            is_enabled=dict(required=False, type='bool'),
+            node=dict(required=True, type='str'),
+            dhcp=dict(required=False, type='str', choices=['v4', 'none']),
+            gateway_ip_address=dict(required=False, type='str'),
+            ip_address=dict(required=False, type='str'),
+            netmask=dict(required=False, type='str'),
+            prefix_length=dict(required=False, type='int'),
+            wait_for_completion=dict(required=False, type='bool', default=False)
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True,
+            mutually_exclusive=[('netmask', 'prefix_length')]
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # Set up Rest API
+        self.rest_api = netapp_utils.OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+        # uuid (node uuid) and ipv4_or_ipv6 (REST interface field name) are populated
+        # later by get_service_processor_network_rest() when using REST.
+        self.uuid, self.ipv4_or_ipv6 = None, None
+        dhcp_mutual_options = ['ip_address', 'gateway_ip_address', 'netmask']
+        if self.parameters.get('dhcp') == 'v4':
+            # error if dhcp is set to v4 and address_type is ipv6.
+            if self.parameters['address_type'] == 'ipv6':
+                self.module.fail_json(msg="Error: dhcp cannot be set for address_type: ipv6.")
+            # error if dhcp is set to v4 and manual interface options are present.
+            if any(x in self.parameters for x in dhcp_mutual_options):
+                self.module.fail_json(msg="Error: set dhcp v4 or all of 'ip_address, gateway_ip_address, netmask'.")
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            # is_enabled is mandatory with ZAPI only; REST infers enablement from the other options.
+            if 'is_enabled' not in self.parameters:
+                self.module.fail_json(msg='missing required arguments: is_enabled in ZAPI')
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=None)
+            self.set_playbook_zapi_key_map()
+
+    def set_playbook_zapi_key_map(self):
+        # Map playbook option names to ZAPI element names, grouped by value type
+        # so values can be converted appropriately in both directions.
+        self.na_helper.zapi_string_keys = {
+            'address_type': 'address-type',
+            'node': 'node',
+            'dhcp': 'dhcp',
+            'gateway_ip_address': 'gateway-ip-address',
+            'ip_address': 'ip-address',
+            'netmask': 'netmask'
+        }
+        self.na_helper.zapi_int_keys = {
+            'prefix_length': 'prefix-length'
+        }
+        self.na_helper.zapi_bool_keys = {
+            'is_enabled': 'is-enabled',
+        }
+        self.na_helper.zapi_required = {
+            'address_type': 'address-type',
+            'node': 'node',
+            'is_enabled': 'is-enabled'
+        }
+
+    def get_sp_network_status(self):
+        """
+        Return the setup status of the service processor network (ZAPI only).
+        :return: value of 'setup-status' (e.g. 'in_progress', 'not_setup'), or None when no record is found.
+        :rtype: str or None
+        """
+        spn_get_iter = netapp_utils.zapi.NaElement('service-processor-network-get-iter')
+        query_info = {
+            'query': {
+                'service-processor-network-info': {
+                    'node': self.parameters['node'],
+                    'address-type': self.parameters['address_type']
+                }
+            }
+        }
+        spn_get_iter.translate_struct(query_info)
+        try:
+            result = self.server.invoke_successfully(spn_get_iter, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching service processor network status for %s: %s' %
+                                  (self.parameters['node'], to_native(error)), exception=traceback.format_exc())
+        if int(result['num-records']) >= 1:
+            sp_attr_info = result['attributes-list']['service-processor-network-info']
+            return sp_attr_info.get_child_content('setup-status')
+        return None
+
+    def get_service_processor_network(self):
+        """
+        Return details about the service processor network for self.parameters['node'].
+        :return: Details about service processor network. None if not found.
+        :rtype: dict or None
+        """
+        if self.use_rest:
+            return self.get_service_processor_network_rest()
+        spn_get_iter = netapp_utils.zapi.NaElement('service-processor-network-get-iter')
+        query_info = {
+            'query': {
+                'service-processor-network-info': {
+                    'node': self.parameters['node']
+                }
+            }
+        }
+        spn_get_iter.translate_struct(query_info)
+        try:
+            result = self.server.invoke_successfully(spn_get_iter, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching service processor network info for %s: %s' %
+                                  (self.parameters['node'], to_native(error)), exception=traceback.format_exc())
+        sp_details = None
+        # check if a record exists
+        if int(result['num-records']) >= 1:
+            sp_details = dict()
+            sp_attr_info = result['attributes-list']['service-processor-network-info']
+            for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+                sp_details[item_key] = sp_attr_info.get_child_content(zapi_key)
+                # set dhcp: 'none' if current dhcp set as None to avoid idempotency issue.
+                if item_key == 'dhcp' and sp_details[item_key] is None:
+                    sp_details[item_key] = 'none'
+            for item_key, zapi_key in self.na_helper.zapi_bool_keys.items():
+                sp_details[item_key] = self.na_helper.get_value_for_bool(from_zapi=True,
+                                                                         value=sp_attr_info.get_child_content(zapi_key))
+            for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
+                sp_details[item_key] = self.na_helper.get_value_for_int(from_zapi=True,
+                                                                        value=sp_attr_info.get_child_content(zapi_key))
+        return sp_details
+
+    def modify_service_processor_network(self, params=None):
+        """
+        Modify a service processor network.
+        :param params: A dict of modified options.
+        When dhcp is not set to v4, ip_address, netmask, and gateway_ip_address must be specified even if they remain the same.
+        """
+        if self.use_rest:
+            return self.modify_service_processor_network_rest(params)
+
+        sp_modify = netapp_utils.zapi.NaElement('service-processor-network-modify')
+        sp_attributes = dict()
+        # every known option (not only the modified ones) is sent: ZAPI requires
+        # address-type, node and is-enabled on each modify call.
+        for item_key in self.parameters:
+            if item_key in self.na_helper.zapi_string_keys:
+                zapi_key = self.na_helper.zapi_string_keys.get(item_key)
+                sp_attributes[zapi_key] = self.parameters[item_key]
+            elif item_key in self.na_helper.zapi_bool_keys:
+                zapi_key = self.na_helper.zapi_bool_keys.get(item_key)
+                sp_attributes[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters[item_key])
+            elif item_key in self.na_helper.zapi_int_keys:
+                zapi_key = self.na_helper.zapi_int_keys.get(item_key)
+                sp_attributes[zapi_key] = self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters[item_key])
+        sp_modify.translate_struct(sp_attributes)
+        try:
+            self.server.invoke_successfully(sp_modify, enable_tunneling=True)
+            if self.parameters.get('wait_for_completion'):
+                # poll for up to 25 * 15 seconds for the SP setup status to settle.
+                retries = 25
+                # when trying to enable and set dhcp:v4 or a manual ip, the status will be 'not_setup' before the change completes.
+                status_key = 'not_setup' if params.get('is_enabled') else 'in_progress'
+                while self.get_sp_network_status() == status_key and retries > 0:
+                    time.sleep(15)
+                    retries -= 1
+                # In ZAPI, once the status is 'succeeded', it takes a few more seconds for ip details to take effect.
+                time.sleep(10)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying service processor network: %s' % (to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def get_service_processor_network_rest(self):
+        """Fetch SP network details via REST; also caches the node uuid and the REST interface field name."""
+        api = 'cluster/nodes'
+        fields = 'uuid,service_processor,service_processor.dhcp_enabled'
+        query = {'name': self.parameters['node']}
+        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+        if error:
+            self.module.fail_json(msg='Error fetching service processor network info for %s: %s' %
+                                  (self.parameters['node'], error))
+        current = None
+        if record:
+            self.uuid = record['uuid']
+            # if the desired address_type is already configured in current, interface details will be returned.
+            # if the desired address_type is not configured in current, None will be set in network interface options
+            # and setting either dhcp (for v4) or (ip_address, gateway_ip_address, netmask) will enable and configure the interface.
+            self.ipv4_or_ipv6 = 'ipv4_interface' if self.parameters['address_type'] == 'ipv4' else 'ipv6_interface'
+            # REST exposes a single 'netmask' field; map it to 'prefix_length' for ipv6.
+            netmask_or_prefix = 'netmask' if self.ipv4_or_ipv6 == 'ipv4_interface' else 'prefix_length'
+            current = {
+                'dhcp': 'v4' if self.na_helper.safe_get(record, ['service_processor', 'dhcp_enabled']) else 'none',
+                'gateway_ip_address': self.na_helper.safe_get(record, ['service_processor', self.ipv4_or_ipv6, 'gateway']),
+                'ip_address': self.na_helper.safe_get(record, ['service_processor', self.ipv4_or_ipv6, 'address']),
+                'is_enabled': True if self.na_helper.safe_get(record, ['service_processor', self.ipv4_or_ipv6]) else False,
+                netmask_or_prefix: self.na_helper.safe_get(record, ['service_processor', self.ipv4_or_ipv6, 'netmask'])
+            }
+        return current
+
+    def modify_service_processor_network_rest(self, modify):
+        """Apply the requested SP network changes with PATCH cluster/nodes/{uuid}."""
+        api = 'cluster/nodes'
+        body = {'service_processor': {}}
+        ipv4_or_ipv6_body = {}
+        if self.parameters.get('gateway_ip_address'):
+            ipv4_or_ipv6_body['gateway'] = self.parameters['gateway_ip_address']
+        if self.parameters.get('netmask'):
+            ipv4_or_ipv6_body['netmask'] = self.parameters['netmask']
+        if self.parameters.get('prefix_length'):
+            # REST uses the 'netmask' field for both a dotted netmask (ipv4) and a prefix length (ipv6).
+            ipv4_or_ipv6_body['netmask'] = self.parameters['prefix_length']
+        if self.parameters.get('ip_address'):
+            ipv4_or_ipv6_body['address'] = self.parameters['ip_address']
+        if ipv4_or_ipv6_body:
+            body['service_processor'][self.ipv4_or_ipv6] = ipv4_or_ipv6_body
+        if 'dhcp' in self.parameters:
+            body['service_processor']['dhcp_enabled'] = True if self.parameters['dhcp'] == 'v4' else False
+        # if dhcp is enabled in REST, setting ip_address details manually requires dhcp: 'none' in params.
+        # if dhcp: 'none' is not in params, set it False to disable dhcp and assign a manual ip address.
+        elif ipv4_or_ipv6_body.get('gateway') and ipv4_or_ipv6_body.get('address') and ipv4_or_ipv6_body.get('netmask'):
+            body['service_processor']['dhcp_enabled'] = False
+        dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
+        if error:
+            self.module.fail_json(msg='Error modifying service processor network: %s' % error)
+        if self.parameters.get('wait_for_completion'):
+            # poll until all requested changes are visible, up to 25 * 15 seconds.
+            retries = 25
+            while self.is_sp_modified_rest(modify) is False and retries > 0:
+                time.sleep(15)
+                retries -= 1
+
+    def is_sp_modified_rest(self, modify):
+        """Return True when every entry in modify matches the current SP network settings."""
+        current = self.get_service_processor_network_rest()
+        if current is None:
+            return False
+        for sp_option in modify:
+            if modify[sp_option] != current[sp_option]:
+                return False
+        return True
+
+    def validate_rest(self, modify):
+        # error if trying to disable service processor network status in REST.
+        if modify.get('is_enabled') is False:
+            error = "Error: disable service processor network status not allowed in REST"
+            self.module.fail_json(msg=error)
+        # error when enabling if modify has neither dhcp nor (ip_address, netmask, gateway) details.
+        if modify.get('is_enabled') and len(modify) == 1:
+            error = "Error: enable service processor network requires dhcp or ip_address,netmask,gateway details in REST."
+            self.module.fail_json(msg=error)
+
+    def validate_zapi(self, modify):
+        # with ZAPI, a disabled SP network cannot have any other option modified at the same time.
+        if self.parameters['is_enabled'] is False:
+            if len(modify) > 1 and 'is_enabled' in modify:
+                self.module.fail_json(msg='Error: Cannot modify any other parameter for a service processor network if option "is_enabled" is set to false.')
+            elif modify and 'is_enabled' not in modify:
+                self.module.fail_json(msg='Error: Cannot modify a service processor network if it is disabled in ZAPI.')
+
+    def apply(self):
+        """
+        Run Module based on play book
+        """
+        current = self.get_service_processor_network()
+        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        if not current:
+            self.module.fail_json(msg='Error No Service Processor for node: %s' % self.parameters['node'])
+        if modify:
+            # disabling dhcp requires configuring one of ip-address, netmask and gateway different from current.
+            if modify.get('dhcp') == 'none' and not any(x in modify for x in ['ip_address', 'gateway_ip_address', 'netmask']):
+                error = "Error: To disable dhcp, configure ip-address, netmask and gateway details manually."
+                self.module.fail_json(msg=error)
+            self.validate_rest(modify) if self.use_rest else self.validate_zapi(modify)
+        if self.na_helper.changed and not self.module.check_mode:
+            self.modify_service_processor_network(modify)
+        result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
+        self.module.exit_json(**result)
+
+
+def main():
+ """
+ Create the NetApp Ontap Service Processor Network Object and modify it
+ """
+
+ obj = NetAppOntapServiceProcessorNetwork()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snaplock_clock.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snaplock_clock.py
new file mode 100644
index 000000000..d7f33dbea
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snaplock_clock.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_snaplock_clock
+
+short_description: NetApp ONTAP Sets the snaplock compliance clock.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Sets the Snaplock compliance clock on NetApp ONTAP.
+
+options:
+ node:
+ description:
+ - Name of the node to set compliance clock on.
+ type: str
+ required: true
+
+'''
+
+EXAMPLES = """
+ - name: Set node compliance clock
+ netapp.ontap.na_ontap_snaplock_clock:
+ node: cluster1-01
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class NetAppOntapSnaplockClock:
+ '''Class with SnapLock clock operations'''
+
+ def __init__(self):
+ '''Initialize module parameters'''
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ node=dict(required=True, type='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp)
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def get_snaplock_node_compliance_clock(self):
+
+ if self.use_rest:
+ """
+ Return snaplock-node-compliance-clock query results
+ :return: dict of clock info
+ """
+ api = "private/cli/snaplock/compliance-clock"
+ query = {
+ 'fields': 'node,time',
+ 'node': self.parameters['node'],
+ }
+ message, error = self.rest_api.get(api, query)
+ records, error = rrh.check_for_0_or_1_records(api, message, error)
+
+ if error is None and records is not None:
+ return_value = {
+ 'node': message['records'][0]['node'],
+ 'compliance_clock_time': message['records'][0]['time']
+ }
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ if not records:
+ error = "REST API did not return snaplock compliance clock for node %s" % (self.parameters['node'])
+ self.module.fail_json(msg=error)
+
+ else:
+ """
+ Return snaplock-node-compliance-clock query results
+ :param node_name: name of the cluster node
+ :return: NaElement
+ """
+
+ node_snaplock_clock = netapp_utils.zapi.NaElement('snaplock-get-node-compliance-clock')
+ node_snaplock_clock.add_new_child('node', self.parameters['node'])
+
+ try:
+ result = self.server.invoke_successfully(node_snaplock_clock, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching snaplock compliance clock for node %s : %s'
+ % (self.parameters['node'], to_native(error)),
+ exception=traceback.format_exc())
+
+ return_value = None
+
+ if result.get_child_by_name('snaplock-node-compliance-clock'):
+ node_snaplock_clock_attributes = result['snaplock-node-compliance-clock']['compliance-clock-info']
+ return_value = {
+ 'compliance_clock_time': node_snaplock_clock_attributes['formatted-snaplock-compliance-clock'],
+ }
+ return return_value
+
+ def set_snaplock_node_compliance_clock(self):
+ '''Set ONTAP snaplock compliance clock for each node'''
+ if self.use_rest:
+ api = "private/cli/snaplock/compliance-clock/initialize"
+ query = {
+ "node": self.parameters['node']
+ }
+
+ body = {}
+ dummy, error = self.rest_api.patch(api, body, query)
+ if error:
+ self.module.fail_json(msg=error)
+ else:
+ node_snaplock_clock_obj = netapp_utils.zapi.NaElement('snaplock-set-node-compliance-clock')
+ node_snaplock_clock_obj.add_new_child('node', self.parameters['node'])
+
+ try:
+ result = self.server.invoke_successfully(node_snaplock_clock_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error setting snaplock compliance clock for node %s : %s'
+ % (self.parameters['node'], to_native(error)),
+ exception=traceback.format_exc())
+ return result
+
+ def apply(self):
+ current = self.get_snaplock_node_compliance_clock()
+
+ if current['compliance_clock_time'] == "ComplianceClock is not configured.":
+ self.na_helper.changed = True
+
+ if self.na_helper.changed and not self.module.check_mode:
+ self.set_snaplock_node_compliance_clock()
+
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ '''Set snaplock compliance clock'''
+ obj = NetAppOntapSnaplockClock()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py
new file mode 100644
index 000000000..26254e03b
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py
@@ -0,0 +1,1749 @@
+#!/usr/bin/python
+
+'''
+na_ontap_snapmirror
+'''
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete/Update/Initialize/Break/Resync/Resume SnapMirror volume/vserver relationships for ONTAP/ONTAP
+ - This includes SVM replication, aka vserver DR
+ - Create/Delete/Update/Initialize SnapMirror volume relationship between ElementSW and ONTAP
+ - Modify schedule for a SnapMirror relationship for ONTAP/ONTAP and ElementSW/ONTAP
+ - Pre-requisite for ElementSW to ONTAP relationship or vice-versa is an established SnapMirror endpoint for ONTAP cluster with ElementSW UI
+ - Pre-requisite for ElementSW to ONTAP relationship or vice-versa is to have SnapMirror enabled in the ElementSW volume
+ - For creating a SnapMirror ElementSW/ONTAP relationship, an existing ONTAP/ElementSW relationship should be present
+ - Performs resync if the C(relationship_state=active) and the current mirror state of the snapmirror relationship is broken-off
+ - Performs resume if the C(relationship_state=active), the current snapmirror relationship status is quiesced and mirror state is snapmirrored
+ - Performs restore if the C(relationship_type=restore) and all other operations will not be performed during this task
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+ - netapp.ontap.netapp.na_ontap_peer
+module: na_ontap_snapmirror
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Whether the specified relationship should exist or not.
+ default: present
+ type: str
+ source_volume:
+ description:
+ - Specifies the name of the source volume for the SnapMirror.
+ - Deprecated as of 21.2.0, use source_endpoint and path.
+ type: str
+ destination_volume:
+ description:
+ - Specifies the name of the destination volume for the SnapMirror.
+ - Deprecated as of 21.2.0, use source_endpoint and path.
+ type: str
+ source_vserver:
+ description:
+ - Name of the source vserver for the SnapMirror.
+ - Deprecated as of 21.2.0, use source_endpoint and path, or svm.
+ type: str
+ destination_vserver:
+ description:
+ - Name of the destination vserver for the SnapMirror.
+ - Deprecated as of 21.2.0, use destination_endpoint and path, or svm.
+ type: str
+ source_path:
+ description:
+ - Specifies the source endpoint of the SnapMirror relationship.
+ - If the source is an ONTAP volume, format should be <[vserver:][volume]> or <[[cluster:]//vserver/]volume>
+ - If the source is an ElementSW volume, format should be <[Element_SVIP]:/lun/[Element_VOLUME_ID]>
+ - If the source is an ElementSW volume, the volume should have SnapMirror enabled.
+ - Deprecated as of 21.2.0, use source_endpoint and path.
+ type: str
+ destination_path:
+ description:
+ - Specifies the destination endpoint of the SnapMirror relationship.
+ - Deprecated as of 21.2.0, use destination_endpoint and path.
+ type: str
+ relationship_type:
+ choices: ['data_protection', 'load_sharing', 'vault', 'restore', 'transition_data_protection',
+ 'extended_data_protection']
+ type: str
+ description:
+ - Specify the type of SnapMirror relationship.
+ - for 'restore' unless 'source_snapshot' is specified the most recent Snapshot copy on the source volume is restored.
+ - restore SnapMirror is not idempotent.
+ - With REST, only 'extended_data_protection' and 'restore' are supported.
+ schedule:
+ description:
+ - Specify the name of the current schedule, which is used to update the SnapMirror relationship.
+ - Optional for create, modifiable.
+ - With REST, this option requires ONTAP 9.11.1 or later.
+ type: str
+ aliases: ['transfer_schedule']
+ version_added: 22.2.0
+ policy:
+ description:
+ - Specify the name of the SnapMirror policy that applies to this relationship.
+ version_added: 2.8.0
+ type: str
+ source_hostname:
+ description:
+ - DEPRECATED - please use C(peer_options).
+ - Source hostname or management IP address for ONTAP or ElementSW cluster.
+ - If present, when state is absent, the relationship is released at the source before being deleted at destination.
+ - It is recommended to always release before deleting, so make sure this parameter is present if the source hostname is known.
+ type: str
+ source_username:
+ description:
+ - DEPRECATED - please use C(peer_options).
+ - Source username for ONTAP or ElementSW cluster.
+ - Optional if this is same as destination username.
+ type: str
+ source_password:
+ description:
+ - DEPRECATED - please use C(peer_options).
+ - Source password for ONTAP or ElementSW cluster.
+ - Optional if this is same as destination password.
+ type: str
+ connection_type:
+ description:
+ - Type of SnapMirror relationship.
+ - Pre-requisite for either elementsw_ontap or ontap_elementsw the ElementSW volume should have enableSnapmirror option set to true.
+ - For using ontap_elementsw, elementsw_ontap snapmirror relationship should exist.
+ choices: ['ontap_ontap', 'elementsw_ontap', 'ontap_elementsw']
+ default: ontap_ontap
+ type: str
+ version_added: 2.9.0
+ max_transfer_rate:
+ description:
+ - Specifies the upper bound, in kilobytes per second, at which data is transferred.
+ - Default is unlimited, it can be explicitly set to 0 as unlimited.
+ type: int
+ version_added: 2.9.0
+ initialize:
+ description:
+ - Specifies whether to initialize SnapMirror relation.
+ - Default is True, it can be explicitly set to False to avoid initializing SnapMirror relation.
+ default: true
+ type: bool
+ version_added: '19.11.0'
+ update:
+ description:
+ - Specifies whether to update the destination endpoint of the SnapMirror relationship only if the relationship is already present and active.
+ - Default is True.
+ default: true
+ type: bool
+ version_added: '20.2.0'
+ relationship_info_only:
+ description:
+ - If relationship-info-only is set to true then only relationship information is removed.
+ default: false
+ type: bool
+ version_added: '20.4.0'
+ relationship_state:
+ description:
+ - Specifies whether to break SnapMirror relation or establish a SnapMirror relationship.
+ - state must be present to use this option.
+ default: active
+ choices: ['active', 'broken']
+ type: str
+ version_added: '20.2.0'
+ source_snapshot:
+ description:
+ - Specifies the Snapshot from the source to be restored.
+ type: str
+ version_added: '20.6.0'
+ identity_preserve:
+ description:
+ - Specifies whether or not the identity of the source Vserver is replicated to the destination Vserver.
+ - If this parameter is set to true, the source Vserver's configuration will additionally be replicated to the destination.
+ - If the parameter is set to false, then only the source Vserver's volumes and RBAC configuration are replicated to the destination.
+ type: bool
+ version_added: 2.9.0
+ create_destination:
+ description:
+ - Requires ONTAP 9.7 or later.
+ - Creates the destination volume if enabled and destination_volume is present or destination_path includes a volume name.
+ - Creates and peers the destination vserver for SVM DR.
+ type: dict
+ version_added: 21.1.0
+ suboptions:
+ enabled:
+ description:
+ - Whether to create the destination volume or vserver.
+ - This is automatically enabled if any other suboption is present.
+ type: bool
+ default: true
+ storage_service:
+ description: storage service associated with the destination endpoint.
+ type: dict
+ suboptions:
+ enabled:
+ description: whether to create the destination endpoint using storage service.
+ type: bool
+ enforce_performance:
+ description: whether to enforce storage service performance on the destination endpoint.
+ type: bool
+ name:
+ description: the performance service level (PSL) for this volume endpoint.
+ type: str
+ choices: ['value', 'performance', 'extreme']
+ tiering:
+ description:
+ - Cloud tiering policy.
+ type: dict
+ suboptions:
+ policy:
+ description:
+ - Cloud tiering policy.
+ choices: ['all', 'auto', 'none', 'snapshot-only']
+ type: str
+ supported:
+ description:
+ - enable provisioning of the destination endpoint volumes on FabricPool aggregates.
+ - only supported for FlexVol volume, FlexGroup volume, and Consistency Group endpoints.
+ type: bool
+ destination_cluster:
+ description:
+ - Requires ONTAP 9.7 or higher.
+ - Required to create the destination vserver for SVM DR or the destination volume.
+ - Deprecated as of 21.2.0, use destination_endpoint and cluster.
+ type: str
+ version_added: 21.1.0
+ source_cluster:
+ description:
+ - Requires ONTAP 9.7 or higher.
+ - Required to create the peering relationship between source and destination SVMs.
+ - Deprecated as of 21.2.0, use source_endpoint and cluster.
+ type: str
+ version_added: 21.1.0
+ source_endpoint:
+ description:
+ - source endpoint of a SnapMirror relationship.
+ type: dict
+ version_added: 21.2.0
+ suboptions:
+ cluster:
+ description:
+ - Requires ONTAP 9.7 or higher.
+ - Required to create the peering relationship between source and destination SVMs.
+ type: str
+ consistency_group_volumes:
+ description:
+ - Requires ONTAP 9.8 or higher.
+ - Mandatory property for a Consistency Group endpoint. Specifies the list of FlexVol volumes for a Consistency Group.
+ type: list
+ elements: str
+ ipspace:
+ description:
+ - Requires ONTAP 9.8 or higher.
+ - Optional property to specify the IPSpace of the SVM.
+ type: str
+ path:
+ description:
+ - The source endpoint for the relationship.
+ - If the source is an ONTAP volume (FlexVol or FlexGroup), format should be <vserver:volume>
+ - For SVM DR, format should be <vserver:>
+ - For a consistency group, format should be <vserver:/cg/cg_name>
+ - If the source is an ElementSW volume, format should be <Element_SVIP:/lun/Element_VOLUME_ID>
+ - If the source is an ElementSW volume, the volume should have SnapMirror enabled.
+ type: str
+ required: true
+ svm:
+ description:
+ - The name of the SVM. Not sure when this is needed.
+ type: str
+ destination_endpoint:
+ description:
+ - destination endpoint of a SnapMirror relationship.
+ type: dict
+ version_added: 21.2.0
+ suboptions:
+ cluster:
+ description:
+ - Requires ONTAP 9.7 or higher.
+ - Required to create the destination vserver for SVM DR or the destination volume.
+ type: str
+ consistency_group_volumes:
+ description:
+ - Requires ONTAP 9.8 or higher.
+ - Mandatory property for a Consistency Group endpoint. Specifies the list of FlexVol volumes for a Consistency Group.
+ type: list
+ elements: str
+ ipspace:
+ description:
+ - Requires ONTAP 9.8 or higher.
+ - Optional property to specify the IPSpace of the SVM.
+ type: str
+ path:
+ description:
+ - The destination endpoint for the relationship.
+ - format is <vserver:volume>, <vserver:>, <vserver:/cg/cg_name>
+ type: str
+ required: true
+ svm:
+ description:
+ - The name of the SVM. Not sure when this is needed.
+ type: str
+ transferring_time_out:
+ description:
+ - How long to wait when a transfer is in progress (after initializing for instance). Unit is seconds.
+ default: 300
+ type: int
+ version_added: 21.20.0
+ clean_up_failure:
+ description:
+ - An optional parameter to recover from an aborted or failed restore operation.
+ - Any temporary RST relationship is removed from the destination Vserver.
+ - Only supported with ZAPI.
+ default: False
+ type: bool
+ version_added: 21.20.0
+ validate_source_path:
+ description:
+ - The relationship is found based on the destination as it is unique.
+ - By default, the source information is verified and an error is reported if there is a mismatch.
+ This would mean the destination is already used by another relationship.
+ - The check accounts for a local vserver name that may be different from the remote vserver name.
      - This may be disabled in case the check is too strict, to unconditionally delete a relationship for instance.
+ default: True
+ type: bool
+ version_added: 21.21.0
+ identity_preservation:
+ description:
+ - Specifies which configuration of the source SVM is replicated to the destination SVM.
+ - This property is applicable only for SVM data protection with "async" policy type.
+ - Only supported with REST and requires ONTAP 9.11.1 or later.
+ type: str
+ choices: ['full', 'exclude_network_config', 'exclude_network_and_protocol_config']
+ version_added: '22.4.0'
+
+short_description: "NetApp ONTAP or ElementSW Manage SnapMirror"
+version_added: 2.7.0
+notes:
+ - supports REST and ZAPI.
+ - supports check_mode.
+ - restore is not idempotent.
+ - snapmirror runs on the destination for most operations, peer_options identify the source cluster.
+ - ONTAP supports either username/password or a SSL certificate for authentication.
+ - ElementSW only supports username/password for authentication.
+'''
+
+EXAMPLES = """
+
+ # creates and initializes the snapmirror
+ - name: Create ONTAP/ONTAP SnapMirror
+ netapp.ontap.na_ontap_snapmirror:
+ state: present
+ source_volume: test_src
+ destination_volume: test_dest
+ source_vserver: ansible_src
+ destination_vserver: ansible_dest
+ schedule: hourly
+ policy: MirrorAllSnapshots
+ max_transfer_rate: 1000
+ initialize: False
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ # creates and initializes the snapmirror between vservers
+ - name: Create ONTAP/ONTAP vserver SnapMirror
+ netapp.ontap.na_ontap_snapmirror:
+ state: present
+ source_vserver: ansible_src
+ destination_vserver: ansible_dest
+ identity_preserve: true
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ # existing snapmirror relation with status 'snapmirrored' will be initialized
  - name: Initialize ONTAP/ONTAP SnapMirror
+ netapp.ontap.na_ontap_snapmirror:
+ state: present
+ source_path: 'ansible:test'
+ destination_path: 'ansible:dest'
+ relationship_state: active
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Delete SnapMirror
+ netapp.ontap.na_ontap_snapmirror:
+ state: absent
+ destination_path: <path>
+ relationship_info_only: True
+ source_hostname: "{{ source_hostname }}"
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Break SnapMirror
+ netapp.ontap.na_ontap_snapmirror:
+ state: present
+ relationship_state: broken
+ destination_path: <path>
+ source_hostname: "{{ source_hostname }}"
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Restore SnapMirror volume using location (Idempotency)
+ netapp.ontap.na_ontap_snapmirror:
+ state: present
+ source_path: <path>
+ destination_path: <path>
+ relationship_type: restore
+ source_snapshot: "{{ snapshot }}"
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Set schedule to NULL
+ netapp.ontap.na_ontap_snapmirror:
+ state: present
+ destination_path: <path>
+ schedule: ""
+ hostname: "{{ destination_cluster_hostname }}"
+ username: "{{ destination_cluster_username }}"
+ password: "{{ destination_cluster_password }}"
+
+ - name: Create SnapMirror from ElementSW to ONTAP
+ netapp.ontap.na_ontap_snapmirror:
+ state: present
+ connection_type: elementsw_ontap
+ source_path: '10.10.10.10:/lun/300'
+ destination_path: 'ansible_test:ansible_dest_vol'
+ schedule: hourly
+ policy: MirrorLatest
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
    source_hostname: "{{ Element_cluster_mvip }}"
+ source_username: "{{ Element_cluster_username }}"
+ source_password: "{{ Element_cluster_password }}"
+
+ - name: Create SnapMirror from ONTAP to ElementSW
+ netapp.ontap.na_ontap_snapmirror:
+ state: present
+ connection_type: ontap_elementsw
+ destination_path: '10.10.10.10:/lun/300'
+ source_path: 'ansible_test:ansible_dest_vol'
+ policy: MirrorLatest
+ hostname: "{{ Element_cluster_mvip }}"
+ username: "{{ Element_cluster_username }}"
+ password: "{{ Element_cluster_password }}"
    source_hostname: "{{ netapp_hostname }}"
+ source_username: "{{ netapp_username }}"
+ source_password: "{{ netapp_password }}"
+
+ - name: Create SnapMirror relationship (create destination volume)
+ tags: create
+ netapp.ontap.na_ontap_snapmirror:
+ state: present
+ source_endpoint:
+ cluster: "{{ _source_cluster }}"
+ path: "{{ source_vserver + ':' + source_volume }}"
+ destination_endpoint:
+ cluster: "{{ _destination_cluster }}"
+ path: "{{ destination_vserver_VOLDP + ':' + destination_volume }}"
+ create_destination:
+ enabled: true
+ hostname: "{{ destination_hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Create SnapMirror relationship - SVM DR (creating and peering destination svm)
+ tags: create_svmdr
+ netapp.ontap.na_ontap_snapmirror:
+ state: present
+ source_endpoint:
+ cluster: "{{ _source_cluster }}"
+ path: "{{ source_vserver + ':' }}"
+ destination_endpoint:
+ cluster: "{{ _destination_cluster }}"
+ path: "{{ destination_vserver_SVMDR + ':' }}"
+ create_destination:
+ enabled: true
+ hostname: "{{ destination_hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+"""
+
+RETURN = """
+"""
+
+import re
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
# Detect the SolidFire (ElementSW) SDK.  The helper checks general availability,
# and the explicit import below additionally guards against a partial installation.
HAS_SF_SDK = netapp_utils.has_sf_sdk()
try:
    import solidfire.common
except ImportError:
    HAS_SF_SDK = False
+
+
+class NetAppONTAPSnapmirror(object):
+ """
+ Class with SnapMirror methods
+ """
+
    def __init__(self):
        """Define the argument spec, validate option combinations, and set up the connection.

        The connection is REST when possible, with fallback to ZAPI
        (see setup_rest / setup_zapi).
        """

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            # new style options (21.2.0 and later): endpoints are described as dicts, mirroring REST
            destination_endpoint=dict(type='dict', options=dict(
                cluster=dict(type='str'),
                consistency_group_volumes=dict(type='list', elements='str'),
                ipspace=dict(type='str'),
                path=dict(required=True, type='str'),
                svm=dict(type='str'),
            )),
            source_endpoint=dict(type='dict', options=dict(
                cluster=dict(type='str'),
                consistency_group_volumes=dict(type='list', elements='str'),
                ipspace=dict(type='str'),
                path=dict(required=True, type='str'),
                svm=dict(type='str'),
            )),
            # old style options, deprecated as of 21.2.0 in favor of the endpoint dicts above
            source_vserver=dict(required=False, type='str'),
            destination_vserver=dict(required=False, type='str'),
            source_volume=dict(required=False, type='str'),
            destination_volume=dict(required=False, type='str'),
            source_path=dict(required=False, type='str'),
            destination_path=dict(required=False, type='str'),
            schedule=dict(required=False, type='str', aliases=['transfer_schedule']),
            policy=dict(required=False, type='str'),
            relationship_type=dict(required=False, type='str',
                                   choices=['data_protection', 'load_sharing',
                                            'vault', 'restore',
                                            'transition_data_protection',
                                            'extended_data_protection']
                                   ),
            connection_type=dict(required=False, type='str',
                                 choices=['ontap_ontap', 'elementsw_ontap', 'ontap_elementsw'],
                                 default='ontap_ontap'),
            # peer_options describes the source system; the source_* credentials are the legacy form
            peer_options=dict(type='dict', options=netapp_utils.na_ontap_host_argument_spec_peer()),
            source_hostname=dict(required=False, type='str'),
            source_username=dict(required=False, type='str'),
            source_password=dict(required=False, type='str', no_log=True),
            max_transfer_rate=dict(required=False, type='int'),
            initialize=dict(required=False, type='bool', default=True),
            update=dict(required=False, type='bool', default=True),
            identity_preserve=dict(required=False, type='bool'),
            identity_preservation=dict(required=False, type="str", choices=['full', 'exclude_network_config', 'exclude_network_and_protocol_config']),
            relationship_state=dict(required=False, type='str', choices=['active', 'broken'], default='active'),
            relationship_info_only=dict(required=False, type='bool', default=False),
            source_snapshot=dict(required=False, type='str'),
            create_destination=dict(required=False, type='dict', options=dict(
                enabled=dict(type='bool', default=True),
                storage_service=dict(type='dict', options=dict(
                    enabled=dict(type='bool'),
                    enforce_performance=dict(type='bool'),
                    name=dict(type='str', choices=['value', 'performance', 'extreme']),
                )),
                tiering=dict(type='dict', options=dict(
                    policy=dict(type='str', choices=['all', 'auto', 'none', 'snapshot-only']),
                    supported=dict(type='bool')
                )),
            )),
            source_cluster=dict(required=False, type='str'),
            destination_cluster=dict(required=False, type='str'),
            transferring_time_out=dict(required=False, type='int', default=300),
            clean_up_failure=dict(required=False, type='bool', default=False),
            validate_source_path=dict(required=False, type='bool', default=True)
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # old style and new style options are mutually exclusive, as are the
            # legacy source_* credentials and peer_options
            mutually_exclusive=[
                ('source_endpoint', 'source_cluster'),
                ('source_endpoint', 'source_path'),
                ('source_endpoint', 'source_volume'),
                ('source_endpoint', 'source_vserver'),
                ('destination_endpoint', 'destination_cluster'),
                ('destination_endpoint', 'destination_path'),
                ('destination_endpoint', 'destination_volume'),
                ('destination_endpoint', 'destination_vserver'),
                ('peer_options', 'source_hostname'),
                ('peer_options', 'source_username'),
                ('peer_options', 'source_password'),
                ('identity_preserve', 'identity_preservation')
            ],
            required_together=(['source_volume', 'destination_volume'],
                               ['source_vserver', 'destination_vserver'],
                               ['source_endpoint', 'destination_endpoint'],
                               ),
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # new_style is presumably flipped later when endpoint-style options are used - TODO confirm
        self.new_style = False
        # when deleting, ignore previous errors, but report them if delete fails
        self.previous_errors = []
        # setup later if required
        self.source_server = None
        # only for ElementSW -> ONTAP snapmirroring, validate if ElementSW SDK is available
        if self.parameters.get('connection_type') in ['elementsw_ontap', 'ontap_elementsw'] and HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")

        self.src_rest_api = None
        self.src_use_rest = None
        # normalize the source/peer options before deciding on REST vs ZAPI
        self.set_source_peer()
        self.rest_api, self.use_rest = self.setup_rest()
        if not self.use_rest:
            self.server = self.setup_zapi()
+
+ def set_source_peer(self):
+ if self.parameters.get('source_hostname') is None and self.parameters.get('peer_options') is None:
+ if self.parameters.get('connection_type') == 'ontap_elementsw':
+ return self.module.fail_json(msg='Error: peer_options are required to identify ONTAP cluster with connection_type: ontap_elementsw')
+ if self.parameters.get('connection_type') == 'elementsw_ontap':
+ return self.module.fail_json(msg='Error: peer_options are required to identify SolidFire cluster with connection_type: elementsw_ontap')
+ if self.parameters.get('source_hostname') is not None:
+ # if source_hostname is present, peer_options is absent
+ self.parameters['peer_options'] = dict(
+ hostname=self.parameters.get('source_hostname'),
+ username=self.parameters.get('source_username'),
+ password=self.parameters.get('source_password'),
+ )
+ elif self.na_helper.safe_get(self.parameters, ['peer_options', 'hostname']):
+ self.parameters['source_hostname'] = self.parameters['peer_options']['hostname']
+ if 'peer_options' in self.parameters:
+ netapp_utils.setup_host_options_from_module_params(
+ self.parameters['peer_options'], self.module,
+ netapp_utils.na_ontap_host_argument_spec_peer().keys())
+
+ def setup_rest(self):
+ unsupported_rest_properties = ['identity_preserve', 'max_transfer_rate']
+ host_options = self.parameters['peer_options'] if self.parameters.get('connection_type') == 'ontap_elementsw' else None
+ rest_api = netapp_utils.OntapRestAPI(self.module, host_options=host_options)
+ rtype = self.parameters.get('relationship_type')
+ if rtype not in (None, 'extended_data_protection', 'restore'):
+ unsupported_rest_properties.append('relationship_type')
+ used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
+ ontap_97_options = ['create_destination', 'source_cluster', 'destination_cluster']
+ partially_supported_rest_properties = [(property, (9, 7)) for property in ontap_97_options]
+ partially_supported_rest_properties.extend([('schedule', (9, 11, 1)), ('identity_preservation', (9, 11, 1))])
+ use_rest, error = rest_api.is_rest_supported_properties(
+ self.parameters, used_unsupported_rest_properties, partially_supported_rest_properties, report_error=True)
+ if error is not None:
+ if 'relationship_type' in error:
+ error = error.replace('relationship_type', 'relationship_type: %s' % rtype)
+ if 'schedule' in error:
+ error += ' - With REST use the policy option to define a schedule.'
+ self.module.fail_json(msg=error)
+
+ if not use_rest and any(x in self.parameters for x in ontap_97_options):
+ self.module.fail_json(msg='Error: %s' % rest_api.options_require_ontap_version(ontap_97_options, version='9.7', use_rest=use_rest))
+ return rest_api, use_rest
+
+ def setup_zapi(self):
+ if self.parameters.get('identity_preservation'):
+ self.module.fail_json(msg="Error: The option identity_preservation is supported only with REST.")
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ host_options = self.parameters['peer_options'] if self.parameters.get('connection_type') == 'ontap_elementsw' else None
+ return netapp_utils.setup_na_ontap_zapi(module=self.module, host_options=host_options)
+
+ def set_element_connection(self, kind):
+ if kind == 'source':
+ elem = netapp_utils.create_sf_connection(module=self.module, host_options=self.parameters['peer_options'])
+ elif kind == 'destination':
+ elem = netapp_utils.create_sf_connection(module=self.module, host_options=self.parameters)
+ elementsw_helper = NaElementSWModule(elem)
+ return elementsw_helper, elem
+
+ def snapmirror_get_iter(self, destination=None):
+ """
+ Compose NaElement object to query current SnapMirror relations using destination-path
+ SnapMirror relation for a destination path is unique
+ :return: NaElement object for SnapMirror-get-iter
+ """
+ snapmirror_get_iter = netapp_utils.zapi.NaElement('snapmirror-get-iter')
+ query = netapp_utils.zapi.NaElement('query')
+ snapmirror_info = netapp_utils.zapi.NaElement('snapmirror-info')
+ if destination is None:
+ destination = self.parameters['destination_path']
+ snapmirror_info.add_new_child('destination-location', destination)
+ query.add_child_elem(snapmirror_info)
+ snapmirror_get_iter.add_child_elem(query)
+ return snapmirror_get_iter
+
    def snapmirror_get(self, destination=None):
        """
        Get the current SnapMirror relationship, keyed by destination path.

        :param destination: destination path, defaults to self.parameters['destination_path'].
        :return: dict of relationship details if the relationship exists, else None.
        """
        if self.use_rest:
            return self.snapmirror_get_rest(destination)

        snapmirror_get_iter = self.snapmirror_get_iter(destination)
        try:
            result = self.server.invoke_successfully(snapmirror_get_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching snapmirror info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) > 0:
            # the destination path is unique, so there is at most one record
            snapmirror_info = result.get_child_by_name('attributes-list').get_child_by_name(
                'snapmirror-info')
            snap_info = {}
            snap_info['mirror_state'] = snapmirror_info.get_child_content('mirror-state')
            snap_info['status'] = snapmirror_info.get_child_content('relationship-status')
            snap_info['schedule'] = snapmirror_info.get_child_content('schedule')
            snap_info['policy'] = snapmirror_info.get_child_content('policy')
            snap_info['relationship_type'] = snapmirror_info.get_child_content('relationship-type')
            snap_info['current_transfer_type'] = snapmirror_info.get_child_content('current-transfer-type')
            snap_info['source_path'] = snapmirror_info.get_child_content('source-location')
            # optional fields, only present when reported by ZAPI
            if snapmirror_info.get_child_by_name('max-transfer-rate'):
                snap_info['max_transfer_rate'] = int(snapmirror_info.get_child_content('max-transfer-rate'))
            if snapmirror_info.get_child_by_name('last-transfer-error'):
                snap_info['last_transfer_error'] = snapmirror_info.get_child_content('last-transfer-error')
            if snapmirror_info.get_child_by_name('is-healthy') is not None:
                snap_info['is_healthy'] = self.na_helper.get_value_for_bool(True, snapmirror_info.get_child_content('is-healthy'))
            if snapmirror_info.get_child_by_name('unhealthy-reason'):
                snap_info['unhealthy_reason'] = snapmirror_info.get_child_content('unhealthy-reason')
            if snap_info['schedule'] is None:
                # normalize to "" so it compares equal to a desired empty schedule
                snap_info['schedule'] = ""
            return snap_info
        return None
+
+ def wait_for_idle_status(self):
+ # sleep for a maximum of X seconds (with a default of 5 minutes), in 30 seconds increments
+ transferring_time_out = self.parameters['transferring_time_out']
+ increment = 30
+ if transferring_time_out <= 0:
+ return self.snapmirror_get()
+ for __ in range(0, transferring_time_out, increment):
+ time.sleep(increment)
+ current = self.snapmirror_get()
+ if current and current['status'] != 'transferring':
+ return current
+ self.module.warn('SnapMirror relationship is still transferring after %d seconds.' % transferring_time_out)
+ return current
+
+ def wait_for_quiesced_status(self):
+ # sleep for a maximum of 25 seconds, in 5 seconds increments
+ for __ in range(5):
+ time.sleep(5)
+ sm_info = self.snapmirror_get()
+ if sm_info['status'] == 'quiesced' or sm_info['mirror_state'] == 'paused':
+ return
+ self.module.fail_json(msg='Taking a long time to quiesce SnapMirror relationship, try again later')
+
    def check_if_remote_volume_exists(self):
        """
        Validate existence of source volume
        :return: True if volume exists, False otherwise
        """
        # connects to the source system (REST or ZAPI) based on peer_options
        self.set_source_cluster_connection()

        if self.src_use_rest:
            return self.check_if_remote_volume_exists_rest()

        # do a get volume to check if volume exists or not
        volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
        volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
        volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
        volume_id_attributes.add_new_child('name', self.parameters['source_volume'])
        # if source_volume is present, then source_vserver is also guaranteed to be present
        volume_id_attributes.add_new_child('vserver-name', self.parameters['source_vserver'])
        volume_attributes.add_child_elem(volume_id_attributes)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(volume_attributes)
        volume_info.add_child_elem(query)
        try:
            # runs against the source cluster
            result = self.source_server.invoke_successfully(volume_info, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching source volume details %s: %s'
                                      % (self.parameters['source_volume'], to_native(error)),
                                  exception=traceback.format_exc())
        # at least one matching record means the volume exists
        return bool(result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0)
+
+ def get_svm_from_destination_vserver_or_path(self):
+ svm_name = self.parameters.get('destination_vserver')
+ if svm_name is None:
+ path = self.parameters.get('destination_path')
+ if path is not None:
+ # if there is no ':' in path, it returns path
+ svm_name = path.split(':', 1)[0]
+ return svm_name
+
+ def set_initialization_state(self):
+ """
+ return:
+ 'snapmirrored' for relationships with a policy of type 'async'
+ 'in_sync' for relationships with a policy of type 'sync'
+ """
+ policy_type = 'async' # REST defaults to Asynchronous
+ if self.na_helper.safe_get(self.parameters, ['destination_endpoint', 'consistency_group_volumes']) is not None:
+ # except for consistency groups
+ policy_type = 'sync'
+ if self.parameters.get('policy') is not None:
+ svm_name = self.get_svm_from_destination_vserver_or_path()
+ policy_type, error = self.snapmirror_policy_rest_get(self.parameters['policy'], svm_name)
+ if error:
+ error = 'Error fetching SnapMirror policy: %s' % error
+ elif policy_type is None:
+ error = 'Error: cannot find policy %s for vserver %s' % (self.parameters['policy'], svm_name)
+ elif policy_type not in ('async', 'sync'):
+ error = 'Error: unexpected type: %s for policy %s for vserver %s' % (policy_type, self.parameters['policy'], svm_name)
+ if error:
+ self.module.fail_json(msg=error)
+ return 'snapmirrored' if policy_type == 'async' else 'in_sync'
+
+ @staticmethod
+ def string_or_none(value):
+ """ REST expect null for "" """
+ return value or None
+
    def get_create_body(self):
        """
        Build the REST body for snapmirror create.

        :return: (body, initialized) tuple - initialized is True when the body
                 already requests initialization through the 'state' field.
        """
        initialized = False
        body = {
            "source": self.na_helper.filter_out_none_entries(self.parameters['source_endpoint']),
            "destination": self.na_helper.filter_out_none_entries(self.parameters['destination_endpoint'])
        }
        if self.na_helper.safe_get(self.parameters, ['create_destination', 'enabled']):     # testing for True
            body['create_destination'] = self.na_helper.filter_out_none_entries(self.parameters['create_destination'])
            if self.parameters['initialize']:
                # creating in the desired state initializes the relationship in one call
                body['state'] = self.set_initialization_state()
                initialized = True
        if self.na_helper.safe_get(self.parameters, ['policy']) is not None:
            body['policy'] = {'name': self.parameters['policy']}
        if self.na_helper.safe_get(self.parameters, ['schedule']) is not None:
            # REST expects null rather than "" to clear the schedule
            body['transfer_schedule'] = {'name': self.string_or_none(self.parameters['schedule'])}
        if self.parameters.get('identity_preservation'):
            body['identity_preservation'] = self.parameters['identity_preservation']
        return body, initialized
+
    def snapmirror_create(self):
        """
        Create a SnapMirror relationship, optionally initializing it.

        With REST, delegates to snapmirror_rest_create.
        """
        # when the source system is known, make sure the source volume exists
        if self.parameters.get('peer_options') and self.parameters.get('source_volume') and not self.check_if_remote_volume_exists():
            self.module.fail_json(msg='Source volume does not exist. Please specify a volume that exists')
        if self.use_rest:
            return self.snapmirror_rest_create()

        options = {'source-location': self.parameters['source_path'],
                   'destination-location': self.parameters['destination_path']}
        snapmirror_create = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-create', **options)
        # optional creation attributes
        if self.parameters.get('relationship_type'):
            snapmirror_create.add_new_child('relationship-type', self.parameters['relationship_type'])
        if self.parameters.get('schedule'):
            snapmirror_create.add_new_child('schedule', self.parameters['schedule'])
        if self.parameters.get('policy'):
            snapmirror_create.add_new_child('policy', self.parameters['policy'])
        if self.parameters.get('max_transfer_rate'):
            snapmirror_create.add_new_child('max-transfer-rate', str(self.parameters['max_transfer_rate']))
        if self.parameters.get('identity_preserve'):
            snapmirror_create.add_new_child('identity-preserve', self.na_helper.get_value_for_bool(False, self.parameters['identity_preserve']))
        try:
            self.server.invoke_successfully(snapmirror_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating SnapMirror %s' % to_native(error),
                                  exception=traceback.format_exc())
        if self.parameters['initialize']:
            self.snapmirror_initialize()
+
    def set_source_cluster_connection(self):
        """
        Setup ontap ZAPI or REST server connection for source hostname
        :return: None
        """
        self.src_rest_api = netapp_utils.OntapRestAPI(self.module, host_options=self.parameters['peer_options'])
        # these options cannot be handled over REST on the source side
        unsupported_rest_properties = ['identity_preserve', 'max_transfer_rate', 'schedule']
        rtype = self.parameters.get('relationship_type')
        if rtype not in (None, 'extended_data_protection', 'restore'):
            # only XDP and restore relationship types are supported with REST
            unsupported_rest_properties.append('relationship_type')
        used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters]
        self.src_use_rest, error = self.src_rest_api.is_rest(used_unsupported_rest_properties)
        if error is not None:
            if 'relationship_type' in error:
                # make the error message more specific
                error = error.replace('relationship_type', 'relationship_type: %s' % rtype)
            self.module.fail_json(msg=error)
        if not self.src_use_rest:
            # fall back to ZAPI, which requires the netapp-lib package
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.source_server = netapp_utils.setup_na_ontap_zapi(module=self.module, host_options=self.parameters['peer_options'])
+
    def delete_snapmirror(self, relationship_type, mirror_state):
        """
        Delete a SnapMirror relationship
        #1. Quiesce the SnapMirror relationship at destination
        #2. Break the SnapMirror relationship at the destination
        #3. Release the SnapMirror at source
        #4. Delete SnapMirror at destination

        :param relationship_type: current relationship type, used to skip the break
                                  for types that do not support it.
        :param mirror_state: current mirror state, used to skip the break when the
                             relationship is uninitialized or already broken.
        """
        # Quiesce and Break at destination
        if relationship_type not in ['load_sharing', 'vault'] and mirror_state not in ['uninitialized', 'broken-off', 'broken_off']:
            self.snapmirror_break(before_delete=True)
        # if source is ONTAP, release the destination at source cluster
        # if the source_hostname is unknown, do not run snapmirror_release
        if self.parameters.get('peer_options') is not None and self.parameters.get('connection_type') != 'elementsw_ontap' and not self.use_rest:
            self.set_source_cluster_connection()
            if self.get_destination():
                # Release at source
                # Note: REST remove the source from destination, so not required to release from source for REST
                self.snapmirror_release()
        # Delete at destination
        self.snapmirror_delete()
+
+ def snapmirror_quiesce(self):
+ """
+ Quiesce SnapMirror relationship - disable all future transfers to this destination
+ """
+ if self.use_rest:
+ return self.snapmirror_quiesce_rest()
+
+ options = {'destination-location': self.parameters['destination_path']}
+
+ snapmirror_quiesce = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-quiesce', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_quiesce, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error quiescing SnapMirror: %s'
+ % (to_native(error)), exception=traceback.format_exc())
+ # checking if quiesce was passed successfully
+ self.wait_for_quiesced_status()
+
+ def snapmirror_delete(self):
+ """
+ Delete SnapMirror relationship at destination cluster
+ """
+ if self.use_rest:
+ return self.snapmirror_delete_rest()
+ options = {'destination-location': self.parameters['destination_path']}
+
+ snapmirror_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-destroy', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ msg = 'Error deleting SnapMirror: %s' % to_native(error)
+ if self.previous_errors:
+ msg += '. Previous error(s): %s' % ' -- '.join(self.previous_errors)
+ self.module.fail_json(msg=msg, exception=traceback.format_exc())
+
    def snapmirror_break(self, destination=None, before_delete=False):
        """
        Break SnapMirror relationship at destination cluster
        #1. Quiesce the SnapMirror relationship at destination
        #2. Break the SnapMirror relationship at the destination

        :param destination: destination path, defaults to self.parameters['destination_path'] (ZAPI only).
        :param before_delete: when True, a ZAPI break failure is recorded in
                              previous_errors and processing continues, so deletion can proceed.
        """
        self.snapmirror_quiesce()

        if self.use_rest:
            if self.parameters['current_mirror_state'] == 'broken_off' or self.parameters['current_transfer_status'] == 'transferring':
                # NOTE(review): this branch also fires when the state is already broken_off,
                # where the 'transferring' message is misleading - confirm intent
                self.na_helper.changed = False
                self.module.fail_json(msg="snapmirror data are transferring")
            return self.snapmirror_mod_init_resync_break_quiesce_resume_rest(state="broken_off", before_delete=before_delete)
        if destination is None:
            destination = self.parameters['destination_path']
        options = {'destination-location': destination}
        snapmirror_break = netapp_utils.zapi.NaElement.create_node_with_children(
            'snapmirror-break', **options)
        try:
            self.server.invoke_successfully(snapmirror_break,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            msg = 'Error breaking SnapMirror relationship: %s' % to_native(error)
            if before_delete:
                # record error but proceed with deletion
                self.previous_errors.append(msg)
            else:
                self.module.fail_json(msg=msg, exception=traceback.format_exc())
+
+ def snapmirror_release(self):
+ """
+ Release SnapMirror relationship from source cluster
+ """
+ # if it's REST call, then not required to run release
+ if self.use_rest:
+ return
+ options = {'destination-location': self.parameters['destination_path'],
+ 'relationship-info-only': self.na_helper.get_value_for_bool(False, self.parameters['relationship_info_only'])}
+ snapmirror_release = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-release', **options)
+ try:
+ self.source_server.invoke_successfully(snapmirror_release,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error releasing SnapMirror relationship: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapmirror_abort(self):
+ """
+ Abort a SnapMirror relationship in progress
+ """
+ if self.use_rest:
+ return self.snapmirror_abort_rest()
+
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_abort = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'snapmirror-abort', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_abort,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error aborting SnapMirror relationship: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
    def snapmirror_initialize(self, current=None):
        """
        Initialize SnapMirror based on relationship state

        :param current: current relationship info; fetched when not provided.
        """
        # precedence: (current and status == transferring) or parameter status == transferring
        if current and current['status'] == 'transferring' or self.parameters.get('current_transfer_status') == 'transferring':
            # Operation already in progress, let's wait for it to end
            current = self.wait_for_idle_status()
        if not current:
            current = self.snapmirror_get()
        if self.use_rest:
            if current['mirror_state'] == 'uninitialized' and current['status'] != 'transferring':
                self.snapmirror_mod_init_resync_break_quiesce_resume_rest(state="snapmirrored")
                self.wait_for_idle_status()
            return
        if current['mirror_state'] != 'snapmirrored':
            initialize_zapi = 'snapmirror-initialize'
            if self.parameters.get('relationship_type') and self.parameters['relationship_type'] == 'load_sharing':
                # load-sharing mirrors use a dedicated ZAPI, keyed by the source location
                initialize_zapi = 'snapmirror-initialize-ls-set'
                options = {'source-location': self.parameters['source_path']}
            else:
                options = {'destination-location': self.parameters['destination_path']}
            snapmirror_init = netapp_utils.zapi.NaElement.create_node_with_children(
                initialize_zapi, **options)
            try:
                self.server.invoke_successfully(snapmirror_init,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error initializing SnapMirror: %s'
                                          % (to_native(error)),
                                      exception=traceback.format_exc())
            self.wait_for_idle_status()
+
+ def snapmirror_resync(self):
+ """
+ resync SnapMirror based on relationship state
+ """
+ if self.use_rest:
+ self.snapmirror_mod_init_resync_break_quiesce_resume_rest(state="snapmirrored")
+ else:
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_resync = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-resync', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_resync, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error resyncing SnapMirror relationship: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+ self.wait_for_idle_status()
+
+ def snapmirror_resume(self):
+ """
+ resume SnapMirror based on relationship state
+ """
+ if self.use_rest:
+ return self.snapmirror_mod_init_resync_break_quiesce_resume_rest(state="snapmirrored")
+
+ options = {'destination-location': self.parameters['destination_path']}
+ snapmirror_resume = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-resume', **options)
+ try:
+ self.server.invoke_successfully(snapmirror_resume, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error resuming SnapMirror relationship: %s' % (to_native(error)), exception=traceback.format_exc())
+
    def snapmirror_restore(self):
        """
        restore SnapMirror based on relationship state

        Delegates to snapmirror_restore_rest() with REST; otherwise issues the
        'snapmirror-restore' ZAPI with optional source snapshot and
        clean-up-failure flag, failing the module on any API error.
        """
        if self.use_rest:
            return self.snapmirror_restore_rest()

        options = {'destination-location': self.parameters['destination_path'],
                   'source-location': self.parameters['source_path']}
        if self.parameters.get('source_snapshot'):
            options['source-snapshot'] = self.parameters['source_snapshot']
        if self.parameters.get('clean_up_failure'):
            # only send it when True
            options['clean-up-failure'] = self.na_helper.get_value_for_bool(False, self.parameters['clean_up_failure'])
        snapmirror_restore = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-restore', **options)
        try:
            self.server.invoke_successfully(snapmirror_restore, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error restoring SnapMirror relationship: %s' % (to_native(error)), exception=traceback.format_exc())
+
    def snapmirror_modify(self, modify):
        """
        Modify SnapMirror schedule or policy

        :param modify: dict of changed attributes; only 'schedule', 'policy'
            and 'max_transfer_rate' are mapped to ZAPI children.
        Delegates to the shared REST PATCH helper with REST.
        """
        if self.use_rest:
            return self.snapmirror_mod_init_resync_break_quiesce_resume_rest(modify=modify)

        options = {'destination-location': self.parameters['destination_path']}
        snapmirror_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'snapmirror-modify', **options)
        # map module option names to their ZAPI element names
        param_to_zapi = {
            'schedule': 'schedule',
            'policy': 'policy',
            'max_transfer_rate': 'max-transfer-rate'
        }
        for param_key, value in modify.items():
            snapmirror_modify.add_new_child(param_to_zapi[param_key], str(value))
        try:
            self.server.invoke_successfully(snapmirror_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying SnapMirror schedule or policy: %s'
                                      % (to_native(error)),
                                  exception=traceback.format_exc())
+
+ def snapmirror_update(self, relationship_type):
+ """
+ Update data in destination endpoint
+ """
+ if self.use_rest:
+ return self.snapmirror_update_rest()
+
+ zapi = 'snapmirror-update'
+ options = {'destination-location': self.parameters['destination_path']}
+ if relationship_type == 'load_sharing':
+ zapi = 'snapmirror-update-ls-set'
+ options = {'source-location': self.parameters['source_path']}
+
+ snapmirror_update = netapp_utils.zapi.NaElement.create_node_with_children(
+ zapi, **options)
+ try:
+ self.server.invoke_successfully(snapmirror_update, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error updating SnapMirror: %s'
+ % (to_native(error)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def new_option(option, prefix):
+ new_option_name = option[len(prefix):]
+ if new_option_name == 'vserver':
+ new_option_name = 'path (or svm)'
+ elif new_option_name == 'volume':
+ new_option_name = 'path'
+ return '%sendpoint:%s' % (prefix, new_option_name)
+
+ def too_old(self, minimum_generation, minimum_major):
+ return not self.rest_api.meets_rest_minimum_version(self.use_rest, minimum_generation, minimum_major, 0)
+
    def set_new_style(self):
        """
        Validate and normalize the endpoint-style options.

        Both source_endpoint and destination_endpoint are required together.
        Strips None entries, enforces the minimum ONTAP version for some
        sub-options, back-fills the old style flat options so the rest of the
        module works from a single representation, and sets self.new_style.
        """
        # if source_endpoint or destination_endpoint if present, both are required
        # then sanitize inputs to support new style
        if not self.parameters.get('destination_endpoint') or not self.parameters.get('source_endpoint'):
            self.module.fail_json(msg='Missing parameters: Source endpoint or Destination endpoint')
        # sanitize inputs
        self.parameters['source_endpoint'] = self.na_helper.filter_out_none_entries(self.parameters['source_endpoint'])
        self.parameters['destination_endpoint'] = self.na_helper.filter_out_none_entries(self.parameters['destination_endpoint'])
        # options requiring 9.7 or better, and REST
        ontap_97_options = ['cluster', 'ipspace']
        if self.too_old(9, 7) and any(x in self.parameters['source_endpoint'] for x in ontap_97_options):
            self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_97_options, version='9.7', use_rest=self.use_rest))
        if self.too_old(9, 7) and any(x in self.parameters['destination_endpoint'] for x in ontap_97_options):
            self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_97_options, version='9.7', use_rest=self.use_rest))
        # options requiring 9.8 or better, and REST
        ontap_98_options = ['consistency_group_volumes']
        if self.too_old(9, 8) and any(x in self.parameters['source_endpoint'] for x in ontap_98_options):
            self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_98_options, version='9.8', use_rest=self.use_rest))
        if self.too_old(9, 8) and any(x in self.parameters['destination_endpoint'] for x in ontap_98_options):
            self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_98_options, version='9.8', use_rest=self.use_rest))
        # fill in old style parameters
        self.parameters['source_cluster'] = self.na_helper.safe_get(self.parameters, ['source_endpoint', 'cluster'])
        self.parameters['source_path'] = self.na_helper.safe_get(self.parameters, ['source_endpoint', 'path'])
        self.parameters['source_vserver'] = self.na_helper.safe_get(self.parameters, ['source_endpoint', 'svm'])
        self.parameters['destination_cluster'] = self.na_helper.safe_get(self.parameters, ['destination_endpoint', 'cluster'])
        self.parameters['destination_path'] = self.na_helper.safe_get(self.parameters, ['destination_endpoint', 'path'])
        self.parameters['destination_vserver'] = self.na_helper.safe_get(self.parameters, ['destination_endpoint', 'svm'])
        self.new_style = True
+
+ def set_endpoints(self):
+ # use new structures for source and destination endpoints
+ for location in ('source', 'destination'):
+ endpoint = '%s_endpoint' % location
+ self.parameters[endpoint] = {}
+ # skipping svm for now, as it is not accepted and not needed with path
+ # for old, new in (('path', 'path'), ('vserver', 'svm'), ('cluster', 'cluster')):
+ for old, new in (('path', 'path'), ('cluster', 'cluster')):
+ value = self.parameters.get('%s_%s' % (location, old))
+ if value is not None:
+ self.parameters[endpoint][new] = value
+
    def check_parameters(self):
        """
        Validate parameters and fail if one or more required params are missing
        Update source and destination path from vserver and volume parameters
        """
        # warn about deprecated flat options in favor of the endpoint style
        for option in ['source_cluster', 'source_path', 'source_volume', 'source_vserver']:
            if option in self.parameters:
                self.module.warn('option: %s is deprecated, please use %s' % (option, self.new_option(option, 'source_')))
        for option in ['destination_cluster', 'destination_path', 'destination_volume', 'destination_vserver']:
            if option in self.parameters:
                self.module.warn('option: %s is deprecated, please use %s' % (option, self.new_option(option, 'destination_')))

        if self.parameters.get('source_endpoint') or self.parameters.get('destination_endpoint'):
            self.set_new_style()
        if self.parameters.get('source_path') or self.parameters.get('destination_path'):
            # with state=absent, only the destination path is required
            if (not self.parameters.get('destination_path') or not self.parameters.get('source_path'))\
                    and (self.parameters['state'] == 'present' or (self.parameters['state'] == 'absent' and not self.parameters.get('destination_path'))):
                self.module.fail_json(msg='Missing parameters: Source path or Destination path')
        elif self.parameters.get('source_volume'):
            # build paths as <vserver>:<volume>
            if not self.parameters.get('source_vserver') or not self.parameters.get('destination_vserver'):
                self.module.fail_json(msg='Missing parameters: source vserver or destination vserver or both')
            self.parameters['source_path'] = self.parameters['source_vserver'] + ":" + self.parameters['source_volume']
            self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":" +\
                self.parameters['destination_volume']
        elif self.parameters.get('source_vserver') and self.parameters.get('source_endpoint') is None:
            # vserver DR style path: <vserver>: with no volume
            self.parameters['source_path'] = self.parameters['source_vserver'] + ":"
            self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":"

        if self.use_rest and not self.new_style:
            self.set_endpoints()
+
    def get_destination(self):
        """
        get the destination info
        Returns True when at least one SnapMirror destination matching
        destination_path is found on the source cluster, None otherwise.
        # Note: REST module to get_destination is not required as it's used in only ZAPI.
        """
        result = None
        get_dest_iter = netapp_utils.zapi.NaElement('snapmirror-get-destination-iter')
        query = netapp_utils.zapi.NaElement('query')
        snapmirror_dest_info = netapp_utils.zapi.NaElement('snapmirror-destination-info')
        snapmirror_dest_info.add_new_child('destination-location', self.parameters['destination_path'])
        query.add_child_elem(snapmirror_dest_info)
        get_dest_iter.add_child_elem(query)
        try:
            result = self.source_server.invoke_successfully(get_dest_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching snapmirror destinations info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) > 0:
            return True
        return None
+
+ @staticmethod
+ def element_source_path_format_matches(value):
+ return re.match(pattern=r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\/lun\/[0-9]+",
+ string=value)
+
    def check_elementsw_parameters(self, kind='source'):
        """
        Validate all ElementSW cluster parameters required for managing the SnapMirror relationship
        Validate if both source and destination paths are present
        Validate if source_path follows the required format
        Validate SVIP
        Validate if ElementSW volume exists
        :param kind: 'source' or 'destination' - which side is the ElementSW cluster
        :return: None
        """
        path = None
        if kind == 'destination':
            path = self.parameters.get('destination_path')
        elif kind == 'source':
            path = self.parameters.get('source_path')
        if path is None:
            self.module.fail_json(msg="Error: Missing required parameter %s_path for "
                                      "connection_type %s" % (kind, self.parameters['connection_type']))
        # the path must look like <SVIP>:/lun/<volume_id>
        if NetAppONTAPSnapmirror.element_source_path_format_matches(path) is None:
            self.module.fail_json(msg="Error: invalid %s_path %s. "
                                      "If the path is a ElementSW cluster, the value should be of the format"
                                      " <Element_SVIP>:/lun/<Element_VOLUME_ID>" % (kind, path))
        # validate source_path
        elementsw_helper, elem = self.set_element_connection(kind)
        self.validate_elementsw_svip(path, elem)
        self.check_if_elementsw_volume_exists(path, elementsw_helper)
+
    def validate_elementsw_svip(self, path, elem):
        """
        Validate ElementSW cluster SVIP

        Compares the IP prefix of *path* with the SVIP reported by the
        ElementSW cluster connection *elem*; fails the module on mismatch or
        on an API error.
        :return: None
        """
        result = None
        try:
            result = elem.get_cluster_info()
        except solidfire.common.ApiServerError as err:
            self.module.fail_json(msg="Error fetching SVIP", exception=to_native(err))
        if result and result.cluster_info.svip:
            cluster_svip = result.cluster_info.svip
            svip = path.split(':')[0]  # split IP address from source_path
            if svip != cluster_svip:
                self.module.fail_json(msg="Error: Invalid SVIP")
+
+ def check_if_elementsw_volume_exists(self, path, elementsw_helper):
+ """
+ Check if remote ElementSW volume exists
+ :return: None
+ """
+ volume_id, vol_id = None, path.split('/')[-1]
+ try:
+ volume_id = elementsw_helper.volume_id_exists(int(vol_id))
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error fetching Volume details", exception=to_native(err))
+
+ if volume_id is None:
+ self.module.fail_json(msg="Error: Source volume does not exist in the ElementSW cluster")
+
    def check_health(self):
        """
        Checking the health of the snapmirror

        Emits a module warning (not a failure) when the relationship exists
        but reports unhealthy, including the unhealthy reason and last
        transfer error when available.  Skipped for ontap_elementsw.
        """
        if self.parameters.get('connection_type') == 'ontap_elementsw':
            return
        current = self.snapmirror_get()
        # default True: absence of is_healthy is not treated as unhealthy
        if current is not None and not current.get('is_healthy', True):
            msg = ['SnapMirror relationship exists but is not healthy.']
            if 'unhealthy_reason' in current:
                msg.append('Unhealthy reason: %s' % current['unhealthy_reason'])
            if 'last_transfer_error' in current:
                msg.append('Last transfer error: %s' % current['last_transfer_error'])
            self.module.warn(' '.join(msg))
+
    def check_if_remote_volume_exists_rest(self):
        """
        Check the remote volume exists using REST

        Looks up source_volume in source_vserver on the source cluster's REST
        connection.  Returns True/False; fails the module when the source
        cluster does not support REST or the lookup errors out.
        """
        if self.src_use_rest:
            if self.parameters.get('source_volume') is not None and self.parameters.get('source_vserver') is not None:
                volume_name = self.parameters['source_volume']
                svm_name = self.parameters['source_vserver']
                options = {'name': volume_name, 'svm.name': svm_name, 'fields': 'name,svm.name'}
                api = 'storage/volumes'
                record, error = rest_generic.get_one_record(self.src_rest_api, api, options)
                if error:
                    self.module.fail_json(msg='Error fetching source volume: %s' % error)
                return record is not None
            # missing volume or vserver name: nothing to look up
            return False
        self.module.fail_json(msg='REST is not supported on Source')
+
    def snapmirror_restore_rest(self):
        ''' snapmirror restore using rest '''
        # Use the POST /api/snapmirror/relationships REST API call with the property "restore=true" to create the SnapMirror restore relationship
        # Use the POST /api/snapmirror/relationships/{relationship.uuid}/transfers REST API call to start the restore transfer on the SnapMirror relationship
        # run this API calls on Source cluster
        # if the source_hostname is unknown, do not run snapmirror_restore
        body = {'destination.path': self.parameters['destination_path'], 'source.path': self.parameters['source_path'], 'restore': 'true'}
        api = 'snapmirror/relationships'
        dummy, error = rest_generic.post_async(self.rest_api, api, body, timeout=120)
        if error:
            self.module.fail_json(msg='Error restoring SnapMirror: %s' % to_native(error), exception=traceback.format_exc())
        relationship_uuid = self.get_relationship_uuid()
        # REST API call to start the restore transfer on the SnapMirror relationship
        if relationship_uuid is None:
            self.module.fail_json(msg="Error restoring SnapMirror: unable to get UUID for the SnapMirror relationship.")

        # only send source_snapshot when the user provided one
        body = {'source_snapshot': self.parameters['source_snapshot']} if self.parameters.get('source_snapshot') else {}
        api = 'snapmirror/relationships/%s/transfers' % relationship_uuid
        dummy, error = rest_generic.post_async(self.rest_api, api, body, timeout=60, job_timeout=120)
        if error:
            self.module.fail_json(msg='Error restoring SnapMirror Transfer: %s' % to_native(error), exception=traceback.format_exc())
+
+ def get_relationship_uuid(self, after_create=True):
+ # this may be called after a create including restore, so we may need to fetch the data
+ if after_create and self.parameters.get('uuid') is None:
+ self.snapmirror_get()
+ return self.parameters.get('uuid')
+
+ def snapmirror_mod_init_resync_break_quiesce_resume_rest(self, state=None, modify=None, before_delete=False):
+ """
+ To perform SnapMirror modify, init, resume, resync and break.
+ 1. Modify only update SnapMirror policy which passes the policy in body.
+ 2. To perform SnapMirror init - state=snapmirrored and mirror_state=uninitialized.
+ 3. To perform SnapMirror resync - state=snapmirrored and mirror_state=broken_off.
+ 4. To perform SnapMirror break - state=broken_off and transfer_state not transferring.
+ 5. To perform SnapMirror quiesce - state=pause and mirror_state not broken_off.
+ 6. To perform SnapMirror resume - state=snapmirrored.
+ """
+ uuid = self.get_relationship_uuid()
+ if uuid is None:
+ self.module.fail_json(msg="Error in updating SnapMirror relationship: unable to get UUID for the SnapMirror relationship.")
+
+ body = {}
+ if state is not None:
+ body["state"] = state
+ elif modify:
+ for key in modify:
+ if key == 'policy':
+ body[key] = {"name": modify[key]}
+ elif key == 'schedule':
+ body['transfer_schedule'] = {"name": self.string_or_none(modify[key])}
+ else:
+ self.module.warn(msg="Unexpected key in modify: %s, value: %s" % (key, modify[key]))
+ else:
+ self.na_helper.changed = False
+ return
+ api = 'snapmirror/relationships'
+ dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
+ if error:
+ msg = 'Error patching SnapMirror: %s: %s' % (body, to_native(error))
+ if before_delete:
+ self.previous_errors.append(msg)
+ else:
+ self.module.fail_json(msg=msg, exception=traceback.format_exc())
+
+ def snapmirror_update_rest(self):
+ """
+ Perform an update on the relationship using POST on /snapmirror/relationships/{relationship.uuid}/transfers
+ """
+ uuid = self.get_relationship_uuid()
+ if uuid is None:
+ self.module.fail_json(msg="Error in updating SnapMirror relationship: unable to get UUID for the SnapMirror relationship.")
+ api = 'snapmirror/relationships/%s/transfers' % uuid
+ body = {}
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg='Error updating SnapMirror relationship: %s:' % to_native(error), exception=traceback.format_exc())
+
    def snapmirror_abort_rest(self):
        """
        Abort a SnapMirror relationship in progress using REST

        PATCHes the current transfer to state 'aborted'; requires both the
        relationship uuid and the transfer uuid cached by snapmirror_get_rest.
        """
        uuid = self.get_relationship_uuid(after_create=False)
        transfer_uuid = self.parameters.get('transfer_uuid')
        if uuid is None or transfer_uuid is None:
            self.module.fail_json(msg="Error in aborting SnapMirror: unable to get either uuid: %s or transfer_uuid: %s." % (uuid, transfer_uuid))
        api = 'snapmirror/relationships/%s/transfers' % uuid
        body = {"state": "aborted"}
        dummy, error = rest_generic.patch_async(self.rest_api, api, transfer_uuid, body)
        if error:
            self.module.fail_json(msg='Error aborting SnapMirror: %s' % to_native(error), exception=traceback.format_exc())
+
    def snapmirror_quiesce_rest(self):
        """
        SnapMirror quiesce using REST

        No-op when already paused/broken_off or while a transfer is running;
        otherwise PATCH state=paused and wait for the quiesced status.
        """
        if (self.parameters['current_mirror_state'] == 'paused'
                or self.parameters['current_mirror_state'] == 'broken_off'
                or self.parameters['current_transfer_status'] == 'transferring'):
            return
        self.snapmirror_mod_init_resync_break_quiesce_resume_rest(state="paused")
        self.wait_for_quiesced_status()
+
    def snapmirror_delete_rest(self):
        """
        Delete SnapMirror relationship at destination cluster using REST

        Any errors recorded earlier (quiesce/break with before_delete=True)
        are appended to the failure message for context.
        """
        uuid = self.get_relationship_uuid(after_create=False)
        if uuid is None:
            self.module.fail_json(msg='Error in deleting SnapMirror: %s, unable to get UUID for the SnapMirror relationship.' % uuid)
        api = 'snapmirror/relationships'
        dummy, error = rest_generic.delete_async(self.rest_api, api, uuid)
        if error:
            msg = 'Error deleting SnapMirror: %s' % to_native(error)
            if self.previous_errors:
                msg += '. Previous error(s): %s' % ' -- '.join(self.previous_errors)
            self.module.fail_json(msg=msg, exception=traceback.format_exc())
+
    def snapmirror_rest_create(self):
        """
        Create a SnapMirror relationship using REST

        get_create_body() reports whether the POST itself already triggered
        initialization; if so we only wait, otherwise we initialize explicitly
        when the 'initialize' option is set.
        """
        body, initialized = self.get_create_body()
        api = 'snapmirror/relationships'
        dummy, error = rest_generic.post_async(self.rest_api, api, body, timeout=120)
        if error:
            self.module.fail_json(msg='Error creating SnapMirror: %s' % to_native(error), exception=traceback.format_exc())
        if self.parameters['initialize']:
            if initialized:
                self.wait_for_idle_status()
            else:
                self.snapmirror_initialize()
+
    def snapmirror_get_rest(self, destination=None):
        """ Get the current snapmirror info

        Returns a dict describing the relationship keyed on destination path,
        or None when no relationship exists.  Also caches uuid, transfer_uuid,
        current_mirror_state and current_transfer_status in self.parameters
        for later PATCH/POST calls.
        """
        if destination is None and "destination_path" in self.parameters:
            # check_param get the value if it's given in other format like destination_endpoint etc..
            destination = self.parameters['destination_path']

        api = 'snapmirror/relationships'
        fields = 'uuid,state,transfer.state,transfer.uuid,policy.name,unhealthy_reason.message,healthy,source'
        if 'schedule' in self.parameters:
            fields += ',transfer_schedule'
        options = {'destination.path': destination, 'fields': fields}
        record, error = rest_generic.get_one_record(self.rest_api, api, options)
        if error:
            self.module.fail_json(msg="Error getting SnapMirror %s: %s" % (destination, to_native(error)),
                                  exception=traceback.format_exc())
        if record is not None:
            snap_info = {}
            self.parameters['uuid'] = self.na_helper.safe_get(record, ['uuid'])
            self.parameters['transfer_uuid'] = self.na_helper.safe_get(record, ['transfer', 'uuid'])
            self.parameters['current_mirror_state'] = self.na_helper.safe_get(record, ['state'])
            snap_info['mirror_state'] = self.na_helper.safe_get(record, ['state'])
            snap_info['status'] = self.na_helper.safe_get(record, ['transfer', 'state'])
            self.parameters['current_transfer_status'] = self.na_helper.safe_get(record, ['transfer', 'state'])
            snap_info['policy'] = self.na_helper.safe_get(record, ['policy', 'name'])
            # REST API supports only Extended Data Protection (XDP) SnapMirror relationship
            snap_info['relationship_type'] = 'extended_data_protection'
            # initialized to avoid name keyerror
            snap_info['current_transfer_type'] = ""
            snap_info['max_transfer_rate'] = ""
            if 'unhealthy_reason' in record:
                snap_info['last_transfer_error'] = self.na_helper.safe_get(record, ['unhealthy_reason'])
                snap_info['unhealthy_reason'] = self.na_helper.safe_get(record, ['unhealthy_reason'])
            snap_info['is_healthy'] = self.na_helper.safe_get(record, ['healthy'])
            snap_info['source_path'] = self.na_helper.safe_get(record, ['source', 'path'])
            # if the field is absent, assume ""
            snap_info['schedule'] = self.na_helper.safe_get(record, ['transfer_schedule', 'name']) or ""
            return snap_info
        return None
+
    def snapmirror_policy_rest_get(self, policy_name, svm_name):
        """
        get policy type
        There is a set of system level policies, and users can create their own for a SVM
        REST does not return a svm entry for system policies
        svm_name may not exist yet as it can be created when creating the snapmirror relationship

        :return: tuple (policy_type or None, error or None); a SVM-scoped
            match takes precedence over a system policy of the same name.
        """
        policy_type = None
        system_policy_type = None   # policies not associated to a SVM
        api = 'snapmirror/policies'
        query = {
            "name": policy_name,
            "fields": "svm.name,type"
        }
        records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query)
        if error is None and records is not None:
            for record in records:
                if 'svm' in record:
                    if record['svm']['name'] == svm_name:
                        policy_type = record['type']
                        break
                else:
                    system_policy_type = record['type']
        # fall back to the system-wide policy when no SVM-scoped match was found
        if policy_type is None:
            policy_type = system_policy_type
        return policy_type, error
+
+ def add_break_action(self, actions, current):
+ # If current is not None, it means the state is present otherwise we would take a delete action
+ if current and self.parameters['relationship_state'] == 'broken':
+ if current['mirror_state'] == 'uninitialized':
+ self.module.fail_json(msg='SnapMirror relationship cannot be broken if mirror state is uninitialized')
+ elif current['relationship_type'] in ['load_sharing', 'vault']:
+ self.module.fail_json(msg='SnapMirror break is not allowed in a load_sharing or vault relationship')
+ elif current['mirror_state'] not in ['broken-off', 'broken_off']:
+ actions.append('break')
+ self.na_helper.changed = True
+
    def add_active_actions(self, actions, current):
        # add initialize or resume action as needed
        # add resync or check_for_update action as needed
        # If current is not None, it means the state is present otherwise we would take a delete action
        if current and self.parameters['relationship_state'] == 'active':
            # check for initialize
            if self.parameters['initialize'] and current['mirror_state'] == 'uninitialized' and current['current_transfer_type'] != 'initialize':
                actions.append('initialize')
                # set changed explicitly for initialize
                self.na_helper.changed = True
            # resume when state is quiesced
            if current['status'] == 'quiesced' or current['mirror_state'] == 'paused':
                actions.append('resume')
                # set changed explicitly for resume
                self.na_helper.changed = True
            # resync when state is broken-off
            if current['mirror_state'] in ['broken-off', 'broken_off']:
                actions.append('resync')
                # set changed explicitly for resync
                self.na_helper.changed = True
            # Update when create is called again, or modify is being called
            # (only when not resyncing: resync already transfers data)
            elif self.parameters['update']:
                actions.append('check_for_update')
+
    def get_svm_peer(self, source_svm, destination_svm):
        """
        Look up the SVM peering entry for (source_svm, destination_svm).

        Returns a (peer_svm_name, peer_cluster_name) tuple, or (None, None)
        when no peering relationship is found.  Used to resolve locally
        aliased source SVM names in validate_source_path().
        """
        if self.use_rest:
            api = 'svm/peers'
            query = {'name': source_svm, 'svm.name': destination_svm}
            record, error = rest_generic.get_one_record(self.rest_api, api, query, fields='peer')
            if error:
                self.module.fail_json(msg='Error retrieving SVM peer: %s' % error)
            if record:
                return self.na_helper.safe_get(record, ['peer', 'svm', 'name']), self.na_helper.safe_get(record, ['peer', 'cluster', 'name'])
        else:
            query = {
                'query': {
                    'vserver-peer-info': {
                        'peer-vserver': source_svm,
                        'vserver': destination_svm
                    }
                }
            }
            get_request = netapp_utils.zapi.NaElement('vserver-peer-get-iter')
            get_request.translate_struct(query)
            try:
                result = self.server.invoke_successfully(get_request, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error fetching vserver peer info: %s' % to_native(error),
                                      exception=traceback.format_exc())
            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
                info = result.get_child_by_name('attributes-list').get_child_by_name('vserver-peer-info')
                return info['remote-vserver-name'], info['peer-cluster']

        return None, None
+
    def validate_source_path(self, current):
        """ There can only be one destination, so we use it as the key
        But we want to make sure another relationship is not already using the destination
        It's a bit complicated as the source SVM name can be aliased to a local name if there are conflicts
        So the source can be ansibleSVM: and show locally as ansibleSVM: if there is no conflict or ansibleSVM.1:
        or any alias the user likes.
        And in the input parameters, it may use the remote name or local alias.
        Note: pops 'source_path' from current as a side effect, so it is not
        compared again in get_modified_attributes.
        """
        if not current:
            return
        source_path = self.na_helper.safe_get(self.parameters, ['source_endpoint', 'path']) or self.parameters.get('source_path')
        destination_path = self.na_helper.safe_get(self.parameters, ['destination_endpoint', 'path']) or self.parameters.get('destination_path')
        source_cluster = self.na_helper.safe_get(self.parameters, ['source_endpoint', 'cluster']) or self.parameters.get('source_cluster')
        current_source_path = current.pop('source_path', None)
        if source_path and current_source_path and self.parameters.get('validate_source_path'):
            if self.parameters['connection_type'] != 'ontap_ontap':
                # take things at face value
                if current_source_path != source_path:
                    self.module.fail_json(msg='Error: another relationship is present for the same destination with source_path:'
                                              ' "%s". Desired: %s on %s'
                                              % (current_source_path, source_path, source_cluster))
                return
            # with ONTAP -> ONTAP, vserver names can be aliased
            current_source_svm, dummy, dummy = current_source_path.rpartition(':')
            if not current_source_svm:
                self.module.warn('Unexpected source path: %s, skipping validation.' % current_source_path)
            destination_svm, dummy, dummy = destination_path.rpartition(':')
            if not destination_svm:
                self.module.warn('Unexpected destination path: %s, skipping validation.' % destination_path)
            if not current_source_svm or not destination_svm:
                return
            # resolve the local alias back to the real remote SVM name
            peer_svm, peer_cluster = self.get_svm_peer(current_source_svm, destination_svm)
            if peer_svm is not None:
                real_source_path = current_source_path.replace(current_source_svm, peer_svm, 1)
                # match either the local name or the remote name
                if (real_source_path != source_path and current_source_path != source_path)\
                        or (peer_cluster is not None and source_cluster is not None and source_cluster != peer_cluster):
                    self.module.fail_json(msg='Error: another relationship is present for the same destination with source_path:'
                                              ' "%s" (%s on cluster %s). Desired: %s on %s'
                                              % (current_source_path, real_source_path, peer_cluster, source_path, source_cluster))
+
    def get_actions(self):
        """
        Compute the list of actions to perform.

        Returns (actions, current, modify) where actions is an ordered list of
        strings consumed by take_actions()/apply(), current is the existing
        relationship info (or None), and modify the changed attributes.
        """
        restore = self.parameters.get('relationship_type', '') == 'restore'
        current = None if restore else self.snapmirror_get()
        # also pops source_path from current (see validate_source_path)
        self.validate_source_path(current)
        # ONTAP automatically convert DP to XDP
        if current and current['relationship_type'] == 'extended_data_protection' and self.parameters.get('relationship_type') == 'data_protection':
            self.parameters['relationship_type'] = 'extended_data_protection'
        cd_action = None if restore else self.na_helper.get_cd_action(current, self.parameters)
        modify = None
        if cd_action is None and self.parameters['state'] == 'present' and not restore:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            if modify and 'relationship_type' in modify:
                self.module.fail_json(msg='Error: cannot modify relationship_type from %s to %s.' %
                                          (current['relationship_type'], modify['relationship_type']))
        actions = []
        if self.parameters['state'] == 'present' and restore:
            actions.append('restore')
            self.na_helper.changed = True
        elif cd_action == 'create':
            actions.append('create')
        elif cd_action == 'delete':
            # abort any running transfer before deleting
            if current['status'] == 'transferring' or self.parameters.get('current_transfer_status') == 'transferring':
                actions.append('abort')
            actions.append('delete')
        else:
            if modify:
                actions.append('modify')
            # If current is not None, it means the state is present otherwise we would take a delete action
            self.add_break_action(actions, current)
            self.add_active_actions(actions, current)
        return actions, current, modify
+
    def take_actions(self, actions, current, modify):
        """
        Execute the actions computed by get_actions().

        The fixed order below matters (e.g. abort must complete and settle
        before delete) and is independent of the order within *actions*.
        """
        if 'restore' in actions:
            self.snapmirror_restore()
        if 'create' in actions:
            self.snapmirror_create()
        if 'abort' in actions:
            self.snapmirror_abort()
            self.wait_for_idle_status()
        if 'delete' in actions:
            self.delete_snapmirror(current['relationship_type'], current['mirror_state'])
        if 'modify' in actions:
            self.snapmirror_modify(modify)
        if 'break' in actions:
            self.snapmirror_break()
        if 'initialize' in actions:
            self.snapmirror_initialize(current)
        if 'resume' in actions:
            self.snapmirror_resume()
        if 'resync' in actions:
            self.snapmirror_resync()
+
    def apply(self):
        """
        Apply action to SnapMirror

        Validates parameters per connection type, computes and executes the
        required actions (honoring check mode), handles the deferred 'update'
        decision, reports health warnings, and exits the module.
        """
        # source is ElementSW
        if self.parameters['state'] == 'present' and self.parameters.get('connection_type') == 'elementsw_ontap':
            self.check_elementsw_parameters()
        elif self.parameters.get('connection_type') == 'ontap_elementsw':
            self.check_elementsw_parameters('destination')
        else:
            self.check_parameters()
        if self.parameters['state'] == 'present' and self.parameters.get('connection_type') == 'ontap_elementsw':
            # ONTAP -> ElementSW requires a pre-existing ElementSW -> ONTAP relationship
            current_elementsw_ontap = self.snapmirror_get(self.parameters['source_path'])
            if current_elementsw_ontap is None:
                self.module.fail_json(msg='Error: creating an ONTAP to ElementSW snapmirror relationship requires an '
                                          'established SnapMirror relation from ElementSW to ONTAP cluster')

        actions, current, modify = self.get_actions()
        if self.na_helper.changed and not self.module.check_mode:
            self.take_actions(actions, current, modify)
        if 'check_for_update' in actions:
            # only update once the relationship is fully mirrored
            current = self.snapmirror_get()
            if current['mirror_state'] == 'snapmirrored':
                actions.append('update')
                if not self.module.check_mode:
                    self.snapmirror_update(current['relationship_type'])
                self.na_helper.changed = True

        self.check_health()
        if self.previous_errors:
            self.module.warn('Ignored error(s): %s' % ' -- '.join(self.previous_errors))

        results = dict(changed=self.na_helper.changed)
        if actions:
            results['actions'] = actions
        self.module.exit_json(**results)
+
+
def main():
    """Entry point: instantiate the SnapMirror module object and apply the requested state."""
    snapmirror = NetAppONTAPSnapmirror()
    snapmirror.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py
new file mode 100644
index 000000000..9c9f371e3
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py
@@ -0,0 +1,1038 @@
+#!/usr/bin/python
+
+# (c) 2019-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_snapmirror_policy
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_snapmirror_policy
+short_description: NetApp ONTAP create, delete or modify SnapMirror policies
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - NetApp ONTAP create, modify, or destroy the SnapMirror policy
+ - Add, modify and remove SnapMirror policy rules
+  - Following parameters are not supported in REST: 'owner', 'restart', 'transfer_priority', 'tries', 'ignore_atime', 'common_snapshot_schedule'
+options:
+ state:
+ description:
+ - Whether the specified SnapMirror policy should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ vserver:
+ description:
+ - Specifies the vserver for the SnapMirror policy.
+ - Required with ZAPI.
+ - Name of a data vserver with REST.
+ - With current versions of ONTAP, when using REST, this must be set to the cluster name for cluster scoped policies (9.12.1 and older).
+ - Current versions of ONTAP fail with "svm.uuid" is required when the vserver field is not set.
+ - With newer versions of ONTAP, omit the value, or omit this option for a cluster scoped policy with REST.
+ type: str
+ policy_name:
+ description:
+ - Specifies the SnapMirror policy name.
+ - C(name) added as an alias in 22.0.0.
+ required: true
+ type: str
+ aliases: ['name']
+ version_added: '22.0.0'
+ policy_type:
+ description:
+ - Specifies the SnapMirror policy type. Modifying the type of an existing SnapMirror policy is not supported.
+ - The Policy types 'sync' and 'async' are only supported in REST.
+ choices: ['vault', 'async_mirror', 'mirror_vault', 'strict_sync_mirror', 'sync_mirror', 'sync', 'async']
+ type: str
+ comment:
+ description:
+ - Specifies the SnapMirror policy comment.
+ type: str
+ tries:
+ description:
+ - Specifies the number of tries.
+ - Not supported with REST.
+ type: str
+ transfer_priority:
+ description:
+ - Specifies the priority at which a SnapMirror transfer runs.
+ - Not supported with REST.
+ choices: ['low', 'normal']
+ type: str
+ transfer_schedule:
+ description:
+ - Specifies the name of the schedule used to update asynchronous SnapMirror relationships.
+ - Not supported with ZAPI.
+ type: str
+ version_added: '22.2.0'
+ common_snapshot_schedule:
+ description:
+ - Specifies the common Snapshot copy schedule associated with the policy, only required for strict_sync_mirror and sync_mirror.
+ - Not supported with REST.
+ type: str
+ owner:
+ description:
+ - Specifies the owner of the SnapMirror policy.
+ - Not supported with REST.
+ choices: ['cluster_admin', 'vserver_admin']
+ type: str
+ is_network_compression_enabled:
+ description:
+ - Specifies whether network compression is enabled for transfers.
+ type: bool
+ ignore_atime:
+ description:
+ - Specifies whether incremental transfers will ignore files which have only their access time changed. Applies to SnapMirror vault relationships only.
+ - Not supported with REST.
+ type: bool
+ restart:
+ description:
+ - Defines the behavior of SnapMirror if an interrupted transfer exists, applies to data protection only.
+ - Not supported with REST.
+ choices: ['always', 'never', 'default']
+ type: str
+ snapmirror_label:
+ description:
+ - SnapMirror policy rule label.
+ - Required when defining policy rules.
+ - Use an empty list to remove all user-defined rules.
+ type: list
+ elements: str
+ version_added: '20.7.0'
+ keep:
+ description:
+ - SnapMirror policy rule retention count for snapshots created.
+ - Required when defining policy rules.
+ type: list
+ elements: int
+ version_added: '20.7.0'
+ prefix:
+ description:
+ - SnapMirror policy rule prefix.
+ - Optional when defining policy rules.
+ - Set to '' to not set or remove an existing custom prefix.
+ - Prefix name should be unique within the policy.
+ - When specifying a custom prefix, schedule must also be specified.
+ type: list
+ elements: str
+ version_added: '20.7.0'
+ schedule:
+ description:
+ - SnapMirror policy rule schedule.
+ - Optional when defining policy rules.
+ - Set to '' to not set or remove a schedule.
+ - When specifying a schedule a custom prefix can be set otherwise the prefix will be set to snapmirror_label.
+ type: list
+ elements: str
+ version_added: '20.7.0'
+ identity_preservation:
+ description:
+ - Specifies which configuration of the source SVM is replicated to the destination SVM.
+ - This property is applicable only for SVM data protection with "async" policy type.
+ - Only supported with REST.
+ type: str
+ choices: ['full', 'exclude_network_config', 'exclude_network_and_protocol_config']
+ version_added: '22.0.0'
+ copy_all_source_snapshots:
+ description:
+ - Specifies whether all source Snapshot copies should be copied to the destination on a transfer rather than specifying specific retentions.
+ - This property is applicable only to async policies.
+ - Property can only be set to 'true'.
+ - Only supported with REST and requires ONTAP 9.10.1 or later.
+ type: bool
+ version_added: '22.1.0'
+ copy_latest_source_snapshot:
+ description:
+ - Specifies that the latest source Snapshot copy (created by SnapMirror before the transfer begins) should be copied to the destination on a transfer.
+ - Retention properties cannot be specified along with this property.
+ - Property can only be set to 'true'.
+ - Only supported with REST and requires ONTAP 9.11.1 or later.
+ type: bool
+ version_added: '22.2.0'
+ create_snapshot_on_source:
+ description:
+ - Specifies whether a new Snapshot copy should be created on the source at the beginning of an update or resync operation.
+ - This property is applicable only to async policies.
+ - Property can only be set to 'false'.
+ - Only supported with REST and requires ONTAP 9.11.1 or later.
+ type: bool
+ version_added: '22.2.0'
+ sync_type:
+ description:
+ - This property is only applicable to sync policy types.
+ - If the "sync_type" is "sync" then a write success is returned to the client
+ after writing the data to the primary endpoint and before writing the data to the secondary endpoint.
+ - If the "sync_type" is "strict_sync" then a write success is returned to the client after writing the data to the both primary and secondary endpoints.
+ - The "sync_type" of "automated_failover" can be associated with a SnapMirror relationship that has Consistency Group as the endpoint and
+ it requires ONTAP 9.7 or later.
+ - Only supported with REST.
+ type: str
+ choices: ['sync', 'strict_sync', 'automated_failover']
+ version_added: '22.2.0'
+
+notes:
+ - In REST, policy types 'mirror_vault', 'vault' and 'async_mirror' are mapped to 'async' policy_type.
+ - In REST, policy types 'sync_mirror' and 'strict_sync_mirror' are mapped to 'sync' policy_type.
+ - In REST, use policy_type 'async' to configure 'mirror-vault' in CLI.
+ - In REST, use policy_type 'async' with 'copy_all_source_snapshots' to configure 'async-mirror' with
+ 'all_source_snapshots' in CLI.
+ - In REST, use policy_type 'async' with 'copy_latest_source_snapshot' to configure 'async-mirror' without
+ 'all_source_snapshots' in CLI.
+ - In REST, use policy_type 'async' with 'create_snapshot_on_source' to configure 'vault' in CLI.
+ - In REST, use policy_type 'sync' with sync_type 'sync' to configure 'sync-mirror' in CLI.
+ - In REST, use policy_type 'sync' with sync_type 'strict_sync' to configure 'strict-sync-mirror' in CLI.
+ - In REST, use policy_type 'sync' with sync_type 'automated_failover' to configure 'automated-failover' in CLI.
+"""
+
+EXAMPLES = """
+ - name: Create SnapMirror policy
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ comment: "created by ansible"
+ transfer_schedule: "daily" # when using REST
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify SnapMirror policy
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "async_mirror"
+ transfer_priority: "low"
+ transfer_schedule: "weekly" # when using REST
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Create SnapMirror policy with basic rules
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "async_mirror"
+ snapmirror_label: ['daily', 'weekly', 'monthly']
+ keep: [7, 5, 12]
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Create SnapMirror policy with rules and schedules (no schedule for daily rule)
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ snapmirror_label: ['daily', 'weekly', 'monthly']
+ keep: [7, 5, 12]
+ schedule: ['','weekly','monthly']
+ prefix: ['','','monthly_mv']
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify SnapMirror policy with rules, remove existing schedules and prefixes
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ snapmirror_label: ['daily', 'weekly', 'monthly']
+ keep: [7, 5, 12]
+ schedule: ['','','']
+ prefix: ['','','']
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify SnapMirror policy, delete all rules (excludes builtin rules)
+ na_ontap_snapmirror_policy:
+ state: present
+ vserver: "SVM1"
+ policy_name: "ansible_policy"
+ policy_type: "mirror_vault"
+ snapmirror_label: []
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Delete SnapMirror policy
+ na_ontap_snapmirror_policy:
+ state: absent
+ vserver: "SVM1"
+ policy_type: "async_mirror"
+ policy_name: "ansible_policy"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver
+
+
+class NetAppOntapSnapMirrorPolicy:
+ """
+ Create, Modifies and Destroys a SnapMirror policy
+ """
def __init__(self):
    """
    Initialize the Ontap SnapMirror policy class.

    Builds the argument spec, parses parameters, chooses REST vs ZAPI based on
    which options are supported by each interface, and fails early when an
    option is not supported by the selected interface.
    """

    self.use_rest = False
    self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
    self.argument_spec.update(dict(
        state=dict(required=False, choices=['present', 'absent'], default='present'),
        vserver=dict(required=False, type='str'),
        policy_name=dict(required=True, type='str', aliases=['name']),
        comment=dict(required=False, type='str'),
        policy_type=dict(required=False, type='str',
                         choices=['vault', 'async_mirror', 'mirror_vault', 'strict_sync_mirror', 'sync_mirror', 'sync', 'async']),
        tries=dict(required=False, type='str'),
        transfer_priority=dict(required=False, type='str', choices=['low', 'normal']),
        transfer_schedule=dict(required=False, type='str'),
        common_snapshot_schedule=dict(required=False, type='str'),
        ignore_atime=dict(required=False, type='bool'),
        is_network_compression_enabled=dict(required=False, type='bool'),
        owner=dict(required=False, type='str', choices=['cluster_admin', 'vserver_admin']),
        restart=dict(required=False, type='str', choices=['always', 'never', 'default']),
        snapmirror_label=dict(required=False, type="list", elements="str"),
        keep=dict(required=False, type="list", elements="int"),
        prefix=dict(required=False, type="list", elements="str"),
        schedule=dict(required=False, type="list", elements="str"),
        identity_preservation=dict(required=False, type="str", choices=['full', 'exclude_network_config', 'exclude_network_and_protocol_config']),
        copy_all_source_snapshots=dict(required=False, type='bool'),
        copy_latest_source_snapshot=dict(required=False, type='bool'),
        create_snapshot_on_source=dict(required=False, type='bool'),
        sync_type=dict(required=False, type="str", choices=['sync', 'strict_sync', 'automated_failover']),
    ))

    self.module = AnsibleModule(
        argument_spec=self.argument_spec,
        supports_check_mode=True,
        # these four options describe mutually incompatible policy flavors
        mutually_exclusive=[('copy_all_source_snapshots', 'copy_latest_source_snapshot', 'create_snapshot_on_source', 'sync_type')]
    )

    # set up variables
    self.na_helper = NetAppModule()
    self.parameters = self.na_helper.set_parameters(self.module.params)

    # API should be used for ONTAP 9.6 or higher, Zapi for lower version
    self.rest_api = netapp_utils.OntapRestAPI(self.module)
    # some attributes are not supported in earlier REST implementation
    unsupported_rest_properties = ['owner', 'restart', 'transfer_priority', 'tries', 'ignore_atime',
                                   'common_snapshot_schedule']
    # (option, minimum ONTAP version) pairs for options only available on newer REST
    partially_supported_rest_properties = [['copy_all_source_snapshots', (9, 10, 1)], ['copy_latest_source_snapshot', (9, 11, 1)],
                                           ['create_snapshot_on_source', (9, 11, 1)]]
    self.unsupported_zapi_properties = ['identity_preservation', 'copy_all_source_snapshots', 'copy_latest_source_snapshot', 'sync_type',
                                        'transfer_schedule']
    self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
    self.validate_policy_type()
    if self.use_rest:
        # 'svm' or 'cluster' - drives how REST queries are built
        self.scope = self.set_scope()
    else:
        if not netapp_utils.has_netapp_lib():
            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
        for unsupported_zapi_property in self.unsupported_zapi_properties:
            if self.parameters.get(unsupported_zapi_property) is not None:
                msg = "Error: %s option is not supported with ZAPI.  It can only be used with REST." % unsupported_zapi_property
                self.module.fail_json(msg=msg)
        if 'vserver' not in self.parameters:
            self.module.fail_json(msg="Error: vserver is a required parameter when using ZAPI.")
        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
def set_scope(self):
    """
    Determine whether the policy is SVM- or cluster-scoped (REST only).

    Returns 'svm' when the vserver option names an existing data vserver,
    and 'cluster' when no vserver option was given or the lookup returns
    no record (a warning is issued in that last case).
    """
    if self.parameters.get('vserver') is None:
        return 'cluster'
    record, error = rest_vserver.get_vserver(self.rest_api, self.parameters['vserver'])
    if error:
        self.module.fail_json(msg='Error getting vserver %s info: %s' % (self.parameters['vserver'], error))
    if record:
        return 'svm'
    # no matching data vserver record - fall back to cluster scope
    self.module.warn("vserver %s is not a data vserver, assuming cluster scope" % self.parameters['vserver'])
    return 'cluster'
+
def get_snapmirror_policy(self):
    """
    Fetch the current SnapMirror policy, dispatching to REST or ZAPI.

    Returns a dict describing the existing policy (with rule attributes
    flattened into parallel lists: snapmirror_label, keep, prefix, schedule),
    or None when the policy does not exist.
    """
    if self.use_rest:
        return self.get_snapmirror_policy_rest()

    snapmirror_policy_get_iter = netapp_utils.zapi.NaElement('snapmirror-policy-get-iter')
    snapmirror_policy_info = netapp_utils.zapi.NaElement('snapmirror-policy-info')
    snapmirror_policy_info.add_new_child('policy-name', self.parameters['policy_name'])
    snapmirror_policy_info.add_new_child('vserver', self.parameters['vserver'])
    query = netapp_utils.zapi.NaElement('query')
    query.add_child_elem(snapmirror_policy_info)
    snapmirror_policy_get_iter.add_child_elem(query)

    try:
        result = self.server.invoke_successfully(snapmirror_policy_get_iter, True)
    except netapp_utils.zapi.NaApiError as error:
        if 'NetApp API failed. Reason - 13001:' in to_native(error):
            # policy does not exist
            return None
        self.module.fail_json(msg='Error getting snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
                              exception=traceback.format_exc())

    return_value = None
    if result and result.get_child_by_name('attributes-list'):
        snapmirror_policy_attributes = result['attributes-list']['snapmirror-policy-info']

        return_value = {
            'policy_name': snapmirror_policy_attributes['policy-name'],
            'tries': snapmirror_policy_attributes['tries'],
            'transfer_priority': snapmirror_policy_attributes['transfer-priority'],
            'is_network_compression_enabled': self.na_helper.get_value_for_bool(True,
                                                                               snapmirror_policy_attributes['is-network-compression-enabled']),
            'restart': snapmirror_policy_attributes['restart'],
            'ignore_atime': self.na_helper.get_value_for_bool(True, snapmirror_policy_attributes['ignore-atime']),
            'vserver': snapmirror_policy_attributes['vserver-name'],
            'comment': '',
            'snapmirror_label': [],
            'keep': [],
            'prefix': [],
            'schedule': [],
        }
        # optional attributes - only present in the ZAPI reply when set
        if snapmirror_policy_attributes.get_child_content('comment') is not None:
            return_value['comment'] = snapmirror_policy_attributes['comment']

        if snapmirror_policy_attributes.get_child_content('type') is not None:
            return_value['policy_type'] = snapmirror_policy_attributes['type']

        if snapmirror_policy_attributes.get_child_content('common-snapshot-schedule') is not None:
            return_value['common_snapshot_schedule'] = snapmirror_policy_attributes['common-snapshot-schedule']

        if snapmirror_policy_attributes.get_child_by_name('snapmirror-policy-rules'):
            for rule in snapmirror_policy_attributes['snapmirror-policy-rules'].get_children():
                # Ignore builtin rules
                if rule.get_child_content('snapmirror-label') in ["sm_created", "all_source_snapshots"]:
                    continue

                return_value['snapmirror_label'].append(rule.get_child_content('snapmirror-label'))
                return_value['keep'].append(int(rule.get_child_content('keep')))

                # '-' is returned by ZAPI when no prefix/schedule is set; map it to ''
                prefix = rule.get_child_content('prefix')
                if prefix is None or prefix == '-':
                    prefix = ''
                return_value['prefix'].append(prefix)

                schedule = rule.get_child_content('schedule')
                if schedule is None or schedule == '-':
                    schedule = ''
                return_value['schedule'].append(schedule)

    return return_value
+
def get_snapmirror_policy_rest(self):
    """
    Fetch the SnapMirror policy through REST (snapmirror/policies).

    Returns the record formatted by format_record(), or None when no
    policy matches the name/scope query.
    """
    query = {'fields': 'uuid,name,svm.name,comment,network_compression_enabled,type,retention,identity_preservation,sync_type,transfer_schedule,',
             'name': self.parameters['policy_name'],
             'scope': self.scope}
    if self.scope == 'svm':
        query['svm.name'] = self.parameters['vserver']
    # these fields only exist on newer ONTAP releases - request them conditionally
    if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
        query['fields'] += 'copy_all_source_snapshots,'
    if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 1):
        query['fields'] += 'copy_latest_source_snapshot,create_snapshot_on_source'
    api = "snapmirror/policies"
    record, error = rest_generic.get_one_record(self.rest_api, api, query)
    if error:
        self.module.fail_json(msg='Error getting snapmirror policy: %s' % error)
    return self.format_record(record) if record else None
+
def format_record(self, record):
    """
    Normalize a REST snapmirror/policies record into the flat dict layout
    used internally by this module, filling defaults for absent fields and
    flattening retention rules into parallel lists.
    """
    formatted = {
        'uuid': record['uuid'],
        'vserver': self.na_helper.safe_get(record, ['svm', 'name']),
        'policy_name': record['name'],
        'comment': record.get('comment', ''),
        'is_network_compression_enabled': record.get('network_compression_enabled', False),
        'snapmirror_label': [],
        'keep': [],
        'prefix': [],
        'schedule': [],
        'identity_preservation': record.get('identity_preservation', ''),
        'copy_all_source_snapshots': record.get('copy_all_source_snapshots', False),
        'copy_latest_source_snapshot': record.get('copy_latest_source_snapshot', False),
        'transfer_schedule': '',
    }
    # these keys are only reported when present in the REST record
    if 'type' in record:
        formatted['policy_type'] = record['type']
    if 'sync_type' in record:
        formatted['sync_type'] = record['sync_type']
    if 'create_snapshot_on_source' in record:
        formatted['create_snapshot_on_source'] = record['create_snapshot_on_source']
    if 'transfer_schedule' in record:
        formatted['transfer_schedule'] = record['transfer_schedule']['name']
    # flatten retention rules; a '-' prefix/schedule is treated as unset
    for rule in record.get('retention', []):
        formatted['snapmirror_label'].append(rule['label'])
        formatted['keep'].append(int(rule['count']))
        prefix = rule['prefix'] if 'prefix' in rule else '-'
        formatted['prefix'].append('' if prefix == '-' else prefix)
        schedule = rule['creation_schedule']['name'] if 'creation_schedule' in rule else '-'
        formatted['schedule'].append('' if schedule == '-' else schedule)
    return formatted
+
def validate_parameters(self):
    """
    Validate snapmirror policy rules.

    Enforces the cross-option constraints between snapmirror_label, keep,
    prefix and schedule; fails the module on the first violation.
    :return: None
    """

    # For snapmirror policy rules, 'snapmirror_label' is required.
    if 'snapmirror_label' in self.parameters:

        # Check size of 'snapmirror_label' list is 0-10. Can have zero rules.
        # Take builtin 'sm_created' rule into account for 'mirror_vault'.
        if (('policy_type' in self.parameters and self.parameters['policy_type'] == 'mirror_vault' and len(self.parameters['snapmirror_label']) > 9)
                or len(self.parameters['snapmirror_label']) > 10):
            self.module.fail_json(msg="Error: A SnapMirror Policy can have up to a maximum of "
                                      "10 rules (including builtin rules), with a 'keep' value "
                                      "representing the maximum number of Snapshot copies for each rule")

        # 'keep' must be supplied as long as there is at least one snapmirror_label
        if len(self.parameters['snapmirror_label']) > 0 and 'keep' not in self.parameters:
            self.module.fail_json(msg="Error: Missing 'keep' parameter. When specifying the "
                                      "'snapmirror_label' parameter, the 'keep' parameter must "
                                      "also be supplied")

        # Make sure other rule values match same number of 'snapmirror_label' values.
        for rule_parameter in ['keep', 'prefix', 'schedule']:
            if rule_parameter in self.parameters:
                if len(self.parameters['snapmirror_label']) > len(self.parameters[rule_parameter]):
                    self.module.fail_json(msg="Error: Each 'snapmirror_label' value must have "
                                              "an accompanying '%s' value" % rule_parameter)
                if len(self.parameters[rule_parameter]) > len(self.parameters['snapmirror_label']):
                    self.module.fail_json(msg="Error: Each '%s' value must have an accompanying "
                                              "'snapmirror_label' value" % rule_parameter)
    else:
        # 'snapmirror_label' not supplied.
        # Bail out if other rule parameters have been supplied.
        for rule_parameter in ['keep', 'prefix', 'schedule']:
            if rule_parameter in self.parameters:
                self.module.fail_json(msg="Error: Missing 'snapmirror_label' parameter. When "
                                          "specifying the '%s' parameter, the 'snapmirror_label' "
                                          "parameter must also be supplied" % rule_parameter)

    # Schedule must be supplied if prefix is supplied.
    if 'prefix' in self.parameters and 'schedule' not in self.parameters:
        self.module.fail_json(msg="Error: Missing 'schedule' parameter. When "
                                  "specifying the 'prefix' parameter, the 'schedule' "
                                  "parameter must also be supplied")
+
def create_snapmirror_policy(self, body=None):
    """
    Create a new SnapMirror policy.

    :param dict body: REST request body built by the caller; ignored on the
        ZAPI path, which builds its own request from self.parameters.
    """
    if self.use_rest:
        api = "snapmirror/policies"
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg='Error creating snapmirror policy: %s' % error)
    else:
        snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-create")
        snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
        if 'policy_type' in self.parameters.keys():
            snapmirror_policy_obj.add_new_child("type", self.parameters['policy_type'])
        # common options shared with modify are added by the helper
        snapmirror_policy_obj = self.create_snapmirror_policy_obj(snapmirror_policy_obj)

        try:
            self.server.invoke_successfully(snapmirror_policy_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
                                  exception=traceback.format_exc())
+
def create_snapmirror_policy_obj(self, snapmirror_policy_obj):
    """
    Add the common ZAPI option children (shared by create and modify) to a
    snapmirror-policy request element.

    :param snapmirror_policy_obj: NaElement to populate.
    :return: the same NaElement, with children added for every supplied option.
    """
    if 'comment' in self.parameters.keys():
        snapmirror_policy_obj.add_new_child("comment", self.parameters['comment'])
    # Bug fix: use .get() so that supplying common_snapshot_schedule without
    # policy_type no longer raises KeyError; the schedule is only meaningful
    # for the sync mirror policy types.
    if 'common_snapshot_schedule' in self.parameters.keys() and self.parameters.get('policy_type') in ('sync_mirror', 'strict_sync_mirror'):
        snapmirror_policy_obj.add_new_child("common-snapshot-schedule", self.parameters['common_snapshot_schedule'])
    if 'ignore_atime' in self.parameters.keys():
        snapmirror_policy_obj.add_new_child("ignore-atime", self.na_helper.get_value_for_bool(False, self.parameters['ignore_atime']))
    if 'is_network_compression_enabled' in self.parameters.keys():
        snapmirror_policy_obj.add_new_child("is-network-compression-enabled",
                                            self.na_helper.get_value_for_bool(False, self.parameters['is_network_compression_enabled']))
    if 'owner' in self.parameters.keys():
        snapmirror_policy_obj.add_new_child("owner", self.parameters['owner'])
    if 'restart' in self.parameters.keys():
        snapmirror_policy_obj.add_new_child("restart", self.parameters['restart'])
    if 'transfer_priority' in self.parameters.keys():
        snapmirror_policy_obj.add_new_child("transfer-priority", self.parameters['transfer_priority'])
    if 'tries' in self.parameters.keys():
        snapmirror_policy_obj.add_new_child("tries", self.parameters['tries'])
    return snapmirror_policy_obj
+
def build_body_for_create(self):
    """
    Build the REST request body for creating a SnapMirror policy.

    Maps the module's policy_type values onto REST's 'async'/'sync' types
    and, when create_snapshot_on_source is requested, builds the required
    retention objects from the snapmirror_label/keep/prefix/schedule lists.
    :return: dict request body (completed by build_body_for_create_or_modify).
    """
    body = {'name': self.parameters['policy_name']}
    if self.parameters.get('vserver') is not None:
        body['svm'] = {'name': self.parameters['vserver']}
    # if policy type is omitted, REST assumes async
    policy_type = 'async'
    if 'policy_type' in self.parameters:
        if 'async' in self.parameters['policy_type']:
            policy_type = 'async'
        elif 'sync' in self.parameters['policy_type']:
            policy_type = 'sync'
            body['sync_type'] = 'sync'
            if 'sync_type' in self.parameters:
                body['sync_type'] = self.parameters['sync_type']
        body['type'] = policy_type
    if 'copy_all_source_snapshots' in self.parameters:
        body["copy_all_source_snapshots"] = self.parameters['copy_all_source_snapshots']
    if 'copy_latest_source_snapshot' in self.parameters:
        body["copy_latest_source_snapshot"] = self.parameters['copy_latest_source_snapshot']
    if 'create_snapshot_on_source' in self.parameters:
        # Setting 'create_snapshot_on_source' to 'false' requires retention objects
        # label (snapmirror_label) and count (keep).
        snapmirror_policy_retention_objs = []
        for index, rule in enumerate(self.parameters['snapmirror_label']):
            retention = {'label': rule, 'count': str(self.parameters['keep'][index])}
            # Bug fix: compare the per-rule element, not the whole list, to ''
            # (the original check always passed and emitted empty prefixes/schedules)
            if 'prefix' in self.parameters and self.parameters['prefix'][index] != '':
                retention['prefix'] = self.parameters['prefix'][index]
            if 'schedule' in self.parameters and self.parameters['schedule'][index] != '':
                retention['creation_schedule'] = {'name': self.parameters['schedule'][index]}
            snapmirror_policy_retention_objs.append(retention)
        body['retention'] = snapmirror_policy_retention_objs
        body["create_snapshot_on_source"] = self.parameters['create_snapshot_on_source']

    return self.build_body_for_create_or_modify(policy_type, body)
+
def build_body_for_create_or_modify(self, policy_type, body=None):
    """
    Add the options shared by REST create and modify to a request body.

    :param str policy_type: effective REST policy type ('async' or 'sync'),
        used to reject options that are only valid for async policies.
    :param dict body: partially built request body, or None to start empty.
    :return: dict request body.
    """
    if body is None:
        body = {}
    if 'comment' in self.parameters.keys():
        body["comment"] = self.parameters['comment']
    if 'is_network_compression_enabled' in self.parameters:
        if policy_type == 'sync':
            # NOTE(review): message uses the REST field name rather than the
            # module option name 'is_network_compression_enabled'
            self.module.fail_json(msg="Error: input parameter network_compression_enabled is not valid for SnapMirror policy type sync")
        body["network_compression_enabled"] = self.parameters['is_network_compression_enabled']
    for option in ('identity_preservation', 'transfer_schedule'):
        if option in self.parameters:
            if policy_type == 'sync':
                # assumes 'policy_type' is present whenever policy_type == 'sync' -- TODO confirm with callers
                self.module.fail_json(msg='Error: %s is only supported with async (async) policy_type, got: %s'
                                          % (option, self.parameters['policy_type']))
            body[option] = self.parameters[option]
    return body
+
def create_snapmirror_policy_retention_obj_for_rest(self, rules=None):
    """
    Convert a list of policy rules into SnapMirror policy retention REST objects.

    :param list rules: e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': 'daily', 'schedule': 'daily'}, ... ]
    :return: List of retention REST objects.
        e.g. [{'label': 'daily', 'count': '7', 'prefix': 'daily', 'creation_schedule': {'name': 'daily'}}, ... ]
        Empty prefix/schedule values are omitted from the REST object.
    """
    retention_objects = []
    for policy_rule in rules or []:
        entry = {'label': policy_rule['snapmirror_label'], 'count': str(policy_rule['keep'])}
        prefix = policy_rule.get('prefix', '')
        if prefix != '':
            entry['prefix'] = prefix
        schedule = policy_rule.get('schedule', '')
        if schedule != '':
            entry['creation_schedule'] = {'name': schedule}
        retention_objects.append(entry)
    return retention_objects
+
def delete_snapmirror_policy(self, uuid=None):
    """
    Delete a snapmirror policy.

    :param str uuid: policy UUID, required on the REST path; the ZAPI path
        identifies the policy by name instead.
    """
    if self.use_rest:
        api = "snapmirror/policies"
        dummy, error = rest_generic.delete_async(self.rest_api, api, uuid)
        if error:
            self.module.fail_json(msg='Error deleting snapmirror policy: %s' % error)
    else:
        snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-delete")
        snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])

        try:
            self.server.invoke_successfully(snapmirror_policy_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
                                  exception=traceback.format_exc())
+
def modify_snapmirror_policy(self, uuid=None, body=None):
    """
    Modify a snapmirror policy.

    :param str uuid: policy UUID, used by the REST path.
    :param dict body: REST patch body; when empty the REST call is skipped.
    """
    if self.use_rest:
        if not body:
            # nothing to change
            return
        api = "snapmirror/policies"
        dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
        if error:
            self.module.fail_json(msg='Error modifying snapmirror policy: %s' % error)
    else:
        snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-modify")
        snapmirror_policy_obj = self.create_snapmirror_policy_obj(snapmirror_policy_obj)
        # Only modify snapmirror policy if a specific snapmirror policy attribute needs
        # modifying. It may be that only snapmirror policy rules are being modified.
        if snapmirror_policy_obj.get_children():
            snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])

            try:
                self.server.invoke_successfully(snapmirror_policy_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
                                      exception=traceback.format_exc())
+
def identify_new_snapmirror_policy_rules(self, current=None):
    """
    Identify new rules that should be added.

    :param dict current: existing policy as returned by get_snapmirror_policy(), or None.
    :return: List of new rules to be added
        e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ]
    """
    new_rules = []
    if 'snapmirror_label' in self.parameters:
        # Bug fix: iterate with enumerate instead of list.index() so that labels
        # with surrounding whitespace (stripped below) no longer raise ValueError,
        # and duplicate labels no longer all resolve to the first occurrence.
        for index, snapmirror_label in enumerate(self.parameters['snapmirror_label']):
            snapmirror_label = snapmirror_label.strip()

            # Construct new rule. prefix and schedule are optional.
            rule = {
                'snapmirror_label': snapmirror_label,
                'keep': self.parameters['keep'][index],
                'prefix': self.parameters['prefix'][index] if 'prefix' in self.parameters else '',
                'schedule': self.parameters['schedule'][index] if 'schedule' in self.parameters else ''
            }

            if current is None or 'snapmirror_label' not in current or snapmirror_label not in current['snapmirror_label']:
                # Rule doesn't exist. Add new rule.
                new_rules.append(rule)
    return new_rules
+
def identify_obsolete_snapmirror_policy_rules(self, current=None):
    """
    Identify existing rules that should be deleted.

    Fix: iterate the current rules with enumerate() instead of looking the
    stripped label back up with list.index(), which raised ValueError for
    labels with surrounding whitespace and picked the first occurrence for
    duplicates.  The stripped view of the requested labels is also built once
    instead of on every iteration.

    :param current: existing policy rules as parallel lists, or None.
    :return: List of rules to be deleted
             e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ]
    """
    obsolete_rules = []
    if 'snapmirror_label' in self.parameters and current is not None and 'snapmirror_label' in current:
        requested_labels = [item.strip() for item in self.parameters['snapmirror_label']]
        # Iterate existing rules.
        for index, snapmirror_label in enumerate(current['snapmirror_label']):
            snapmirror_label = snapmirror_label.strip()
            if snapmirror_label not in requested_labels:
                # Existing rule isn't in parameters.  Delete existing rule.
                obsolete_rules.append({
                    'snapmirror_label': snapmirror_label,
                    'keep': current['keep'][index],
                    'prefix': current['prefix'][index],
                    'schedule': current['schedule'][index],
                })
    return obsolete_rules
+
def set_rule(self, rule, key, current, snapmirror_label_index, current_snapmirror_label_index):
    """
    Copy one rule attribute (*key*) into *rule*, taking the requested value when
    one was supplied, otherwise the current value.  Return True when the
    requested value differs from the current one (i.e. a modification).
    """
    current_value = current[key][current_snapmirror_label_index]
    if key in self.parameters:
        desired_value = self.parameters[key][snapmirror_label_index]
    else:
        desired_value = current_value
    rule[key] = desired_value
    return desired_value != current_value
+
def identify_modified_snapmirror_policy_rules(self, current=None):
    """
    Identify self.parameters rules that will be modified or not.

    Fix: the requested-side index is taken from enumerate() rather than
    list.index() of the *stripped* label, which raised ValueError for labels
    with surrounding whitespace and always matched the first duplicate.

    :param current: existing policy rules as parallel lists, or None.
    :return: List of 'modified' rules and a list of 'unmodified' rules
             e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ]
    """
    modified_rules = []
    unmodified_rules = []
    if 'snapmirror_label' in self.parameters:
        for snapmirror_label_index, snapmirror_label in enumerate(self.parameters['snapmirror_label']):
            snapmirror_label = snapmirror_label.strip()
            if current is None or 'snapmirror_label' not in current or snapmirror_label not in current['snapmirror_label']:
                # new rule - handled by identify_new_snapmirror_policy_rules()
                continue
            # Rule exists.  Identify whether it requires modification or not.
            rule = {'snapmirror_label': snapmirror_label}
            current_snapmirror_label_index = current['snapmirror_label'].index(snapmirror_label)
            # set_rule() mutates 'rule'; evaluate all three calls (list, not
            # short-circuiting booleans) so every attribute is copied.
            modified = any([
                self.set_rule(rule, 'keep', current, snapmirror_label_index, current_snapmirror_label_index),
                self.set_rule(rule, 'prefix', current, snapmirror_label_index, current_snapmirror_label_index),
                self.set_rule(rule, 'schedule', current, snapmirror_label_index, current_snapmirror_label_index),
            ])
            (modified_rules if modified else unmodified_rules).append(rule)
    return modified_rules, unmodified_rules
+
def identify_snapmirror_policy_rules_with_schedule(self, rules=None):
    """
    Identify rules that are using a schedule or not.  At least one
    non-schedule rule must be added to a policy before schedule rules
    are added.
    :return: List of rules with schedules and a list of rules without schedules
             e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': 'daily', 'schedule': 'daily'}, ... ],
                  [{'snapmirror_label': 'weekly', 'keep': 5, 'prefix': '', 'schedule': ''}, ... ]
    """
    schedule_rules, non_schedule_rules = [], []
    # presence of the 'schedule' key (even with an empty value) marks a schedule rule
    for rule in rules or []:
        bucket = schedule_rules if 'schedule' in rule else non_schedule_rules
        bucket.append(rule)
    return schedule_rules, non_schedule_rules
+
def modify_snapmirror_policy_rules(self, current=None, uuid=None):
    """
    Modify existing rules in snapmirror policy.

    Reconciles the requested rules against *current*: computes obsolete, new,
    modified and unmodified rule sets, then either PATCHes the whole retention
    list (REST) or deletes and re-adds rules one by one (ZAPI).
    :param current: existing rules as parallel lists, or None.
    :param uuid: REST only - policy UUID.
    :return: None
    """
    # Need 'snapmirror_label' to add/modify/delete rules
    if 'snapmirror_label' not in self.parameters:
        return

    obsolete_rules = self.identify_obsolete_snapmirror_policy_rules(current)
    new_rules = self.identify_new_snapmirror_policy_rules(current)
    modified_rules, unmodified_rules = self.identify_modified_snapmirror_policy_rules(current)
    self.rest_api.log_debug('OBS', obsolete_rules)
    self.rest_api.log_debug('NEW', new_rules)
    self.rest_api.log_debug('MOD', modified_rules)
    self.rest_api.log_debug('UNM', unmodified_rules)

    if self.use_rest:
        return self.modify_snapmirror_policy_rules_rest(uuid, obsolete_rules, unmodified_rules, modified_rules, new_rules)

    # ZAPI path: modified rules are deleted then re-added with their new values.
    delete_rules = obsolete_rules + modified_rules
    add_schedule_rules, add_non_schedule_rules = self.identify_snapmirror_policy_rules_with_schedule(new_rules + modified_rules)
    # Delete rules no longer required or modified rules that will be re-added.
    for rule in delete_rules:
        options = {'policy-name': self.parameters['policy_name'],
                   'snapmirror-label': rule['snapmirror_label']}
        self.modify_snapmirror_policy_rule(options, 'snapmirror-policy-remove-rule')

    # Add rules. At least one non-schedule rule must exist before
    # a rule with a schedule can be added, otherwise zapi will complain.
    for rule in add_non_schedule_rules + add_schedule_rules:
        options = {'policy-name': self.parameters['policy_name'],
                   'snapmirror-label': rule['snapmirror_label'],
                   'keep': str(rule['keep'])}
        # prefix and schedule are only sent when non-empty
        if 'prefix' in rule and rule['prefix'] != '':
            options['prefix'] = rule['prefix']
        if 'schedule' in rule and rule['schedule'] != '':
            options['schedule'] = rule['schedule']
        self.modify_snapmirror_policy_rule(options, 'snapmirror-policy-add-rule')
+
def modify_snapmirror_policy_rules_rest(self, uuid, obsolete_rules, unmodified_rules, modified_rules, new_rules):
    """
    PATCH the policy's full retention list in one call (REST only).
    No-op when nothing changed; unmodified rules are re-sent because the
    retention property replaces the whole list.
    """
    if not (modified_rules or new_rules or obsolete_rules):
        return
    # This will also delete all existing rules if everything is now obsolete
    retention = self.create_snapmirror_policy_retention_obj_for_rest(unmodified_rules + modified_rules + new_rules)
    dummy, error = rest_generic.patch_async(self.rest_api, "snapmirror/policies", uuid, {'retention': retention})
    if error:
        self.module.fail_json(msg='Error modifying snapmirror policy rules: %s' % error)
+
def modify_snapmirror_policy_rule(self, options, zapi):
    """
    Add, modify or remove a rule to/from a snapmirror policy.

    :param options: ZAPI children (policy-name, snapmirror-label, ...).
    :param zapi: ZAPI name, e.g. 'snapmirror-policy-add-rule'.
    """
    request = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
    try:
        self.server.invoke_successfully(request, enable_tunneling=True)
    except netapp_utils.zapi.NaApiError as error:
        self.module.fail_json(msg='Error modifying snapmirror policy rule %s: %s' %
                                  (self.parameters['policy_name'], to_native(error)),
                              exception=traceback.format_exc())
+
def fail_invalid_option(self, policy_type, option):
    """Abort the module: *option* is not allowed with *policy_type*."""
    message = "Error: option %s is not supported with policy type %s." % (option, policy_type)
    self.module.fail_json(msg=message)
+
def validate_async_options(self):
    """
    Reject copy/snapshot options that are not valid for the requested policy type,
    and (REST only) enforce the fixed values of the tri-state flags.
    """
    if 'policy_type' in self.parameters:
        # per policy type, which options must NOT be present
        disallowed_options = {
            'vault': ['copy_latest_source_snapshot', 'copy_all_source_snapshots'],
            # NOTE(review): key is 'mirror-vault' (hyphen) while validate_policy_type()
            # uses 'mirror_vault' (underscore); a 'mirror_vault' policy_type therefore
            # hits the KeyError fallback below - harmless only because the 'sync' list
            # is identical.  Confirm the intended key spelling.
            'mirror-vault': ['copy_latest_source_snapshot', 'copy_all_source_snapshots', 'create_snapshot_on_source'],
            'async_mirror': ['create_snapshot_on_source'],
            'async': [],
            'sync': ['copy_latest_source_snapshot', 'copy_all_source_snapshots', 'create_snapshot_on_source'],
        }
        try:
            options = disallowed_options[self.parameters['policy_type']]
        except KeyError:
            # unknown policy types get the strictest (sync) restrictions
            options = disallowed_options['sync']
        for option in options:
            if option in self.parameters:
                self.fail_invalid_option(self.parameters['policy_type'], option)

    if self.use_rest:
        # these REST flags only make sense with one fixed value; presence with the
        # other value is a user error
        if 'copy_all_source_snapshots' in self.parameters and self.parameters.get('copy_all_source_snapshots') is not True:
            self.module.fail_json(msg='Error: the property copy_all_source_snapshots can only be set to true when present')
        if 'copy_latest_source_snapshot' in self.parameters and self.parameters.get('copy_latest_source_snapshot') is not True:
            self.module.fail_json(msg='Error: the property copy_latest_source_snapshot can only be set to true when present')
        if 'create_snapshot_on_source' in self.parameters and self.parameters['create_snapshot_on_source'] is not False:
            self.module.fail_json(msg='Error: the property create_snapshot_on_source can only be set to false when present')
+
def validate_policy_type(self):
    """
    Validate policy_type and, for REST, map the legacy ZAPI policy types
    (vault, async_mirror, mirror_vault, sync_mirror, strict_sync_mirror)
    onto the REST 'async'/'sync' types plus their companion flags.

    Fix: the async_mirror default used 'or', so supplying only
    copy_all_source_snapshots also forced copy_latest_source_snapshot=True and
    both mutually exclusive flags were sent.  The default is now applied only
    when *neither* flag was supplied ('and').
    """
    # policy_type is only required for create or modify
    if self.parameters['state'] != 'present':
        return
    self.validate_async_options()
    if 'policy_type' in self.parameters:
        if self.use_rest:
            # Policy types 'mirror_vault', 'vault', 'async_mirror' are mapped to async policy type
            if self.parameters['policy_type'] == 'vault':
                self.parameters['policy_type'] = 'async'
                self.parameters['create_snapshot_on_source'] = False
                self.module.warn("policy type changed to 'async' with 'create_snapshot_on_source' set to False")
            if self.parameters['policy_type'] == 'async_mirror':
                # async_mirror accepts two choices with copy_all_source_snapshots or copy_latest_source_snapshot
                self.parameters['policy_type'] = 'async'
                # default only when the user supplied neither flag
                if 'copy_latest_source_snapshot' not in self.parameters and 'copy_all_source_snapshots' not in self.parameters:
                    self.parameters['copy_latest_source_snapshot'] = True
                    self.module.warn("policy type changed to 'async' with copy_latest_source_snapshot set to True. "
                                     "Use async with copy_latest_source_snapshot or copy_all_source_snapshots for async-mirror")
            if 'copy_all_source_snapshots' in self.parameters or 'copy_latest_source_snapshot' in self.parameters:
                if 'snapmirror_label' in self.parameters or 'keep' in self.parameters or 'prefix' in self.parameters or 'schedule' in self.parameters:
                    self.module.fail_json(msg='Error: Retention properties cannot be specified along with copy_all_source_snapshots or '
                                              'copy_latest_source_snapshot properties')

            if 'create_snapshot_on_source' in self.parameters:
                if 'snapmirror_label' not in self.parameters or 'keep' not in self.parameters:
                    self.module.fail_json(msg="Error: The properties snapmirror_label and keep must be specified with "
                                              "create_snapshot_on_source set to false")
            if self.parameters['policy_type'] == 'mirror_vault':
                self.parameters['policy_type'] = 'async'
            # Policy types 'sync_mirror', 'strict_sync_mirror' are mapped to sync policy type
            if self.parameters['policy_type'] in ('sync_mirror', 'strict_sync_mirror'):
                self.parameters['sync_type'] = 'sync' if self.parameters['policy_type'] == 'sync_mirror' else 'strict_sync'
                self.parameters['policy_type'] = 'sync'
            if self.parameters['policy_type'] != 'sync' and 'sync_type' in self.parameters:
                self.module.fail_json(msg="Error: 'sync_type' is only applicable for sync policy_type")

        elif self.parameters['policy_type'] in ['async', 'sync']:
            self.module.fail_json(msg='Error: The policy types async and sync are not supported in ZAPI.')
+
def get_actions(self):
    """
    Determine what needs to change.

    :return: tuple (cd_action, modify, current, body) where cd_action is
             'create'/'delete'/None, modify is the dict of changed attributes
             (or None), current is the existing policy record, and body is the
             REST request body (or None).
    """
    current, modify = self.get_snapmirror_policy(), None
    cd_action = self.na_helper.get_cd_action(current, self.parameters)
    if current and cd_action is None and self.parameters['state'] == 'present':
        # Inconsistency in REST API - POST requires a vserver, but GET does not return it
        # NOTE(review): pop() without a default assumes get_snapmirror_policy()
        # always includes a 'vserver' key - confirm, otherwise this raises KeyError.
        current.pop('vserver')
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        # these properties cannot be changed in place - fail rather than silently ignore
        for property in ('policy_type', 'copy_all_source_snapshots', 'copy_latest_source_snapshot', 'sync_type', 'create_snapshot_on_source'):
            if property in modify:
                self.module.fail_json(msg='Error: The policy property %s cannot be modified from %s to %s'
                                          % (property, current.get(property), modify[property]))

    body = None
    # True when something other than rule/retention-related attributes changed,
    # i.e. the policy object itself needs a PATCH (rules are patched separately)
    modify_body = any(key not in ('keep', 'prefix', 'schedule', 'snapmirror_label', 'copy_all_source_snapshots', 'copy_latest_source_snapshot',
                                  'sync_type', 'create_snapshot_on_source') for key in modify) if modify else False
    if self.na_helper.changed and (cd_action == 'create' or modify):
        # report any error even in check_mode
        self.validate_parameters()
        if self.use_rest and (cd_action == 'create' or modify_body):
            body = self.build_body_for_create_or_modify(current.get('policy_type')) if modify_body else self.build_body_for_create()
    return cd_action, modify, current, body
+
def apply(self):
    """
    Apply the requested state: create, delete or modify the snapmirror policy,
    then reconcile its rules, and exit with the module result.
    """
    cd_action, modify, current, body = self.get_actions()
    if self.na_helper.changed and not self.module.check_mode:
        uuid = None
        if cd_action == 'create':
            self.create_snapmirror_policy(body)
            if self.use_rest:
                # re-read to obtain the UUID needed for the rules PATCH
                current = self.get_snapmirror_policy()
                if not current:
                    self.module.fail_json(msg="Error: policy %s not present after create." % self.parameters['policy_name'])
                uuid = current['uuid']
            if 'create_snapshot_on_source' not in self.parameters:
                self.modify_snapmirror_policy_rules(current, uuid)
        elif cd_action == 'delete':
            if self.use_rest:
                uuid = current['uuid']
            self.delete_snapmirror_policy(uuid)
        elif modify:
            if self.use_rest:
                uuid = current['uuid']
            self.modify_snapmirror_policy(uuid, body)
            self.modify_snapmirror_policy_rules(current, uuid)

    result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
    self.module.exit_json(**result)
+
+
def main():
    """Entry point: build the SnapMirror policy module object and run it."""
    NetAppOntapSnapMirrorPolicy().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py
new file mode 100644
index 000000000..50d703abb
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_snapshot
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_snapshot
+short_description: NetApp ONTAP manage Snapshots
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Modify/Delete ONTAP snapshots
+options:
+ state:
+ description:
+ - If you want to create/modify a snapshot, or delete it.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ snapshot:
+ description:
+ - Name of the snapshot to be managed.
+ - The maximum string length is 256 characters.
+ required: true
+ type: str
+ from_name:
+ description:
+ - Name of an existing snapshot that will be renamed to the name given in O(snapshot).
+ version_added: 2.8.0
+ type: str
+ volume:
+ description:
+ - Name of the volume on which the snapshot is to be created.
+ required: true
+ type: str
+ async_bool:
+ description:
+ - If true, the snapshot is to be created asynchronously.
+ type: bool
+ comment:
+ description:
+ - A human readable comment attached with the snapshot.
+ - The size of the comment can be at most 255 characters.
+ type: str
+ snapmirror_label:
+ description:
+ - A human readable SnapMirror Label attached with the snapshot.
+ - Size of the label can be at most 31 characters.
+ - Supported with REST on Ontap 9.7 or higher.
+ type: str
+ ignore_owners:
+ description:
+ - if this field is true, snapshot will be deleted even if some other processes are accessing it.
+ type: bool
+ snapshot_instance_uuid:
+ description:
+ - The 128 bit unique snapshot identifier expressed in the form of UUID.
+ type: str
+ vserver:
+ description:
+ - The Vserver name
+ required: true
+ type: str
+ expiry_time:
+ description:
+ - Snapshot expire time, only available with REST.
+ - The format should use the timezone configured on the cluster.
+ type: str
+ version_added: 21.8.0
+'''
+EXAMPLES = """
+ - name: create SnapShot
+ tags:
+ - create
+ netapp.ontap.na_ontap_snapshot:
+ state: present
+ snapshot: "{{ snapshot name }}"
+ volume: "{{ vol name }}"
+ comment: "i am a comment"
+ expiry_time: "2022-02-04T14:00:00-05:00"
+ vserver: "{{ vserver name }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+ - name: delete SnapShot
+ tags:
+ - delete
+ netapp.ontap.na_ontap_snapshot:
+ state: absent
+ snapshot: "{{ snapshot name }}"
+ volume: "{{ vol name }}"
+ vserver: "{{ vserver name }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+ - name: modify SnapShot
+ tags:
+ - modify
+ netapp.ontap.na_ontap_snapshot:
+ state: present
+ snapshot: "{{ snapshot name }}"
+ comment: "New comments are great"
+ volume: "{{ vol name }}"
+ vserver: "{{ vserver name }}"
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_volume
+
+
class NetAppOntapSnapshot:
    """
    Creates, modifies, and deletes a Snapshot.

    Supports both the REST API (storage/volumes/{uuid}/snapshots endpoints) and
    the legacy ZAPI interface; self.use_rest selects the path in each method.
    """

    def __init__(self):
        """Build the argument spec, parse parameters and select REST vs ZAPI."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            from_name=dict(required=False, type='str'),
            snapshot=dict(required=True, type="str"),
            volume=dict(required=True, type="str"),
            async_bool=dict(required=False, type="bool"),
            comment=dict(required=False, type="str"),
            snapmirror_label=dict(required=False, type="str"),
            ignore_owners=dict(required=False, type="bool"),
            snapshot_instance_uuid=dict(required=False, type="str"),
            vserver=dict(required=True, type="str"),
            expiry_time=dict(required=False, type="str")

        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        # these options only exist in ZAPI; using any of them forces the ZAPI path
        unsupported_rest_properties = ['async_bool', 'ignore_owners', 'snapshot_instance_uuid']
        # snapmirror_label requires ONTAP 9.7+ when using REST
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, [['snapmirror_label', (9, 7)]])

        if not self.use_rest:
            if self.parameters.get('expiry_time'):
                self.module.fail_json(msg="expiry_time is currently only supported with REST on Ontap 9.6 or higher")
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_snapshot(self, snapshot_name=None, volume_id=None):
        """
        Checks to see if a snapshot exists or not.

        :param snapshot_name: snapshot to look up; defaults to self.parameters['snapshot'].
        :param volume_id: REST only - UUID of the owning volume.
        :return: dict describing the snapshot, or None when not found.
        """
        if self.use_rest:
            api = ('storage/volumes/%s/snapshots' % volume_id)
            params = {
                'svm.name': self.parameters['vserver'],
                'fields': 'uuid,comment,expiry_time,volume,name',
            }
            # only request snapmirror_label when the user cares about it (9.7+ field)
            if self.parameters.get('snapmirror_label'):
                params['fields'] += ',snapmirror_label'
            params['name'] = snapshot_name or self.parameters['snapshot']
            snapshot, error = rest_generic.get_one_record(self.rest_api, api, params)
            if error:
                self.module.fail_json(msg='Error fetching snapshot %s: %s' %
                                          (params['name'], to_native(error)),
                                      exception=traceback.format_exc())
            if snapshot:
                return {
                    'uuid': snapshot['uuid'],
                    'snapshot': snapshot['name'],
                    'snapmirror_label': snapshot.get('snapmirror_label'),
                    'expiry_time': snapshot.get('expiry_time'),
                    'comment': snapshot.get('comment')
                }
            return None

        else:
            if snapshot_name is None:
                snapshot_name = self.parameters['snapshot']
            snapshot_obj = netapp_utils.zapi.NaElement("snapshot-get-iter")
            # restrict the returned attributes to the modifiable ones we compare on
            desired_attr = netapp_utils.zapi.NaElement("desired-attributes")
            snapshot_info = netapp_utils.zapi.NaElement('snapshot-info')
            comment = netapp_utils.zapi.NaElement('comment')
            snapmirror_label = netapp_utils.zapi.NaElement('snapmirror-label')
            # add more desired attributes that are allowed to be modified
            snapshot_info.add_child_elem(comment)
            snapshot_info.add_child_elem(snapmirror_label)
            desired_attr.add_child_elem(snapshot_info)
            snapshot_obj.add_child_elem(desired_attr)
            # compose query
            query = netapp_utils.zapi.NaElement("query")
            snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
            snapshot_info_obj.add_new_child("name", snapshot_name)
            snapshot_info_obj.add_new_child("volume", self.parameters['volume'])
            snapshot_info_obj.add_new_child("vserver", self.parameters['vserver'])
            query.add_child_elem(snapshot_info_obj)
            snapshot_obj.add_child_elem(query)
            try:
                result = self.server.invoke_successfully(snapshot_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error fetching snapshot %s: %s' %
                                          (snapshot_name, to_native(error)),
                                      exception=traceback.format_exc())
            return_value = None
            # exactly one match means the snapshot exists
            if result.get_child_by_name('num-records') and \
                    int(result.get_child_content('num-records')) == 1:
                attributes_list = result.get_child_by_name('attributes-list')
                snap_info = attributes_list.get_child_by_name('snapshot-info')
                return_value = {
                    'comment': snap_info.get_child_content('comment'),
                    'snapmirror_label': None
                }
                if snap_info.get_child_by_name('snapmirror-label'):
                    return_value['snapmirror_label'] = snap_info.get_child_content('snapmirror-label')
            return return_value

    def create_snapshot(self, volume_id=None):
        """
        Creates a new snapshot.

        :param volume_id: REST only - UUID of the owning volume.
        """

        if self.use_rest:
            api = ('storage/volumes/%s/snapshots' % volume_id)
            body = {
                'name': self.parameters['snapshot'],
                'svm': {
                    'name': self.parameters['vserver']
                }
            }
            # optional attributes are only sent when supplied
            if self.parameters.get('comment'):
                body['comment'] = self.parameters['comment']
            if self.parameters.get('snapmirror_label'):
                body['snapmirror_label'] = self.parameters['snapmirror_label']
            if self.parameters.get('expiry_time'):
                body['expiry_time'] = self.parameters['expiry_time']
            response, error = rest_generic.post_async(self.rest_api, api, body)
            if error:
                self.module.fail_json(msg="Error when creating snapshot: %s" % error)

        else:
            snapshot_obj = netapp_utils.zapi.NaElement("snapshot-create")

            # set up required variables to create a snapshot
            snapshot_obj.add_new_child("snapshot", self.parameters['snapshot'])
            snapshot_obj.add_new_child("volume", self.parameters['volume'])
            # Set up optional variables to create a snapshot
            if self.parameters.get('async_bool'):
                snapshot_obj.add_new_child("async", str(self.parameters['async_bool']))
            if self.parameters.get('comment'):
                snapshot_obj.add_new_child("comment", self.parameters['comment'])
            if self.parameters.get('snapmirror_label'):
                snapshot_obj.add_new_child(
                    "snapmirror-label", self.parameters['snapmirror_label'])
            try:
                self.server.invoke_successfully(snapshot_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error creating snapshot %s: %s' %
                                          (self.parameters['snapshot'], to_native(error)),
                                      exception=traceback.format_exc())

    def delete_snapshot(self, volume_id=None, uuid=None):
        """
        Deletes an existing snapshot.

        :param volume_id: REST only - UUID of the owning volume.
        :param uuid: REST only - UUID of the snapshot itself.
        """
        if self.use_rest:
            api = ('storage/volumes/%s/snapshots/%s' % (volume_id, uuid))
            response, error = rest_generic.delete_async(self.rest_api, api, None)
            if error:
                self.module.fail_json(msg="Error when deleting snapshot: %s" % error)

        else:
            snapshot_obj = netapp_utils.zapi.NaElement("snapshot-delete")

            # Set up required variables to delete a snapshot
            snapshot_obj.add_new_child("snapshot", self.parameters['snapshot'])
            snapshot_obj.add_new_child("volume", self.parameters['volume'])
            # set up optional variables to delete a snapshot
            if self.parameters.get('ignore_owners'):
                snapshot_obj.add_new_child("ignore-owners", str(self.parameters['ignore_owners']))
            if self.parameters.get('snapshot_instance_uuid'):
                snapshot_obj.add_new_child("snapshot-instance-uuid", self.parameters['snapshot_instance_uuid'])
            try:
                self.server.invoke_successfully(snapshot_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error deleting snapshot %s: %s' %
                                          (self.parameters['snapshot'], to_native(error)),
                                      exception=traceback.format_exc())

    def modify_snapshot(self, volume_id=None, uuid=None, rename=False):
        """
        Modify an existing snapshot.

        :param volume_id: REST only - UUID of the owning volume.
        :param uuid: REST only - UUID of the snapshot.
        :param rename: REST only - when True, also set the new name in the PATCH body.
        :return:
        """
        if self.use_rest:
            api = 'storage/volumes/%s/snapshots/%s' % (volume_id, uuid)
            body = {'name': self.parameters['snapshot']} if rename else {}
            if self.parameters.get('comment'):
                body['comment'] = self.parameters['comment']
            if self.parameters.get('snapmirror_label'):
                body['snapmirror_label'] = self.parameters['snapmirror_label']
            if self.parameters.get('expiry_time'):
                body['expiry_time'] = self.parameters['expiry_time']
            response, error = rest_generic.patch_async(self.rest_api, api, None, body)
            if error:
                self.module.fail_json(msg="Error when modifying snapshot: %s" % error)

        else:
            snapshot_obj = netapp_utils.zapi.NaElement("snapshot-modify-iter")
            # Create query object, this is the existing object
            query = netapp_utils.zapi.NaElement("query")
            snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
            snapshot_info_obj.add_new_child("name", self.parameters['snapshot'])
            snapshot_info_obj.add_new_child("vserver", self.parameters['vserver'])
            query.add_child_elem(snapshot_info_obj)
            snapshot_obj.add_child_elem(query)

            # this is what we want to modify in the snapshot object
            attributes = netapp_utils.zapi.NaElement("attributes")
            snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
            snapshot_info_obj.add_new_child("name", self.parameters['snapshot'])
            if self.parameters.get('comment'):
                snapshot_info_obj.add_new_child("comment", self.parameters['comment'])
            if self.parameters.get('snapmirror_label'):
                snapshot_info_obj.add_new_child("snapmirror-label", self.parameters['snapmirror_label'])
            attributes.add_child_elem(snapshot_info_obj)
            snapshot_obj.add_child_elem(attributes)
            try:
                self.server.invoke_successfully(snapshot_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying snapshot %s: %s' %
                                          (self.parameters['snapshot'], to_native(error)),
                                      exception=traceback.format_exc())

    def rename_snapshot(self):
        """
        Rename the snapshot (ZAPI only; with REST the rename is part of modify).
        """
        snapshot_obj = netapp_utils.zapi.NaElement("snapshot-rename")

        # set up required variables to rename a snapshot
        snapshot_obj.add_new_child("current-name", self.parameters['from_name'])
        snapshot_obj.add_new_child("new-name", self.parameters['snapshot'])
        snapshot_obj.add_new_child("volume", self.parameters['volume'])
        try:
            self.server.invoke_successfully(snapshot_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error renaming snapshot %s to %s: %s' %
                                      (self.parameters['from_name'], self.parameters['snapshot'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_volume_uuid(self):
        """
        Get a volume's UUID (REST only).
        :return: uuid of the volume, or None when the volume is not found.
        """
        response, error = rest_volume.get_volume(self.rest_api, self.parameters['vserver'], self.parameters['volume'])
        if error is not None:
            self.module.fail_json(msg="Error getting volume info: %s" % error)
        return response['uuid'] if response else None

    def apply(self):
        """
        Check to see which play we should run: create, delete, rename or modify
        the snapshot, then exit with the module result.
        """
        volume_id = None
        uuid = None
        current = None
        if not self.use_rest:
            current = self.get_snapshot()
        else:
            # REST addresses snapshots through the owning volume's UUID
            volume_id = self.get_volume_uuid()
            if volume_id is None:
                self.module.fail_json(msg="Error: volume %s not found for vserver %s." % (self.parameters['volume'], self.parameters['vserver']))
            current = self.get_snapshot(volume_id=volume_id)

        rename = False
        modify = {}
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action == 'create' and self.parameters.get('from_name'):
            # target name absent: check whether this is a rename of from_name
            current = self.get_snapshot(self.parameters['from_name'], volume_id=volume_id)
            if current is None:
                self.module.fail_json(msg='Error renaming snapshot: %s - no snapshot with from_name: %s.'
                                          % (self.parameters['snapshot'], self.parameters['from_name']))
            rename = True
            cd_action = None
        if cd_action is None:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            uuid = current['uuid'] if current and self.use_rest else None
            if rename and not self.use_rest:
                # with REST, rename forces a change in modify for 'name'
                self.rename_snapshot()
            if cd_action == 'create':
                self.create_snapshot(volume_id=volume_id)
            elif cd_action == 'delete':
                self.delete_snapshot(volume_id=volume_id, uuid=uuid)
            elif modify:
                self.modify_snapshot(volume_id=volume_id, uuid=uuid, rename=rename)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Entry point: build the Snapshot module object and run it."""
    NetAppOntapSnapshot().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py
new file mode 100644
index 000000000..1d271657a
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py
@@ -0,0 +1,742 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_snapshot_policy
+short_description: NetApp ONTAP manage Snapshot Policy
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Modify/Delete ONTAP snapshot policies
+options:
+ state:
+ description:
+ - If you want to create, modify or delete a snapshot policy.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ name:
+ description:
+ Name of the snapshot policy to be managed.
+ The maximum string length is 256 characters.
+ required: true
+ type: str
+ enabled:
+ description:
+ - Status of the snapshot policy indicating whether the policy will be enabled or disabled.
+ type: bool
+ comment:
+ description:
+ A human readable comment attached with the snapshot.
+ The size of the comment can be at most 255 characters.
+ type: str
+ count:
+ description:
+ Retention count for the snapshots created by the schedule.
+ type: list
+ elements: int
+ schedule:
+ description:
+ - Schedule to be added inside the policy.
+ type: list
+ elements: str
+ prefix:
+ description:
+ - Snapshot name prefix for the schedule.
+ - Prefix name should be unique within the policy.
+ - Cannot set a different prefix to a schedule that has already been assigned to a snapshot policy.
+ - Prefix cannot be modified after schedule has been added.
+ type: list
+ elements: str
+ required: false
+ version_added: '19.11.0'
+ snapmirror_label:
+ description:
+ - SnapMirror label assigned to each schedule inside the policy. Use an empty
+ string ('') for no label.
+ type: list
+ elements: str
+ required: false
+ version_added: 2.9.0
+ vserver:
+ description:
+ - The name of the vserver to use. In a multi-tenanted environment, assigning a
+ Snapshot Policy to a vserver will restrict its use to that vserver.
+ required: false
+ type: str
+ version_added: 2.9.0
+'''
+EXAMPLES = """
+ - name: Create Snapshot policy
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible2
+ schedule: hourly
+ prefix: hourly
+ count: 150
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Create Snapshot policy with multiple schedules
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible2
+ schedule: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ prefix: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ count: [1, 2, 3, 4, 5]
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Create Snapshot policy owned by a vserver
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible3
+ vserver: ansible
+ schedule: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ prefix: ['hourly', 'daily', 'weekly', 'monthly', '5min']
+ count: [1, 2, 3, 4, 5]
+ snapmirror_label: ['hourly', 'daily', 'weekly', 'monthly', '']
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Modify Snapshot policy with multiple schedules
+ na_ontap_snapshot_policy:
+ state: present
+ name: ansible2
+ schedule: ['daily', 'weekly']
+ count: [20, 30]
+ snapmirror_label: ['daily', 'weekly']
+ enabled: True
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+
+ - name: Delete Snapshot policy
+ na_ontap_snapshot_policy:
+ state: absent
+ name: ansible2
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ hostname: "{{ netapp_hostname }}"
+ https: False
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapSnapshotPolicy(object):
+ """
+ Creates and deletes a Snapshot Policy
+ """
+
    def __init__(self):
        """Declare module arguments, pick REST vs ZAPI transport, and connect."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type="str"),
            enabled=dict(required=False, type="bool"),
            # count is a list of integers
            count=dict(required=False, type="list", elements="int"),
            comment=dict(required=False, type="str"),
            schedule=dict(required=False, type="list", elements="str"),
            prefix=dict(required=False, type="list", elements="str"),
            snapmirror_label=dict(required=False, type="list", elements="str"),
            vserver=dict(required=False, type="str")
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # creating/keeping a policy requires enabled, count and schedule
            required_if=[
                ('state', 'present', ['enabled', 'count', 'schedule']),
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Set up Rest API
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # REST support needs ONTAP 9.8+; otherwise fall back to ZAPI (or fail,
        # depending on the user's use_rest preference handled by the helper)
        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0):
            msg = 'REST requires ONTAP 9.8 or later for snapshot schedules.'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg="the python NetApp-Lib module is required")
            # scope the ZAPI connection to the vserver when one is supplied
            if 'vserver' in self.parameters:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def safe_strip(self, option):
+ """ strip the given string """
+ return option.strip() if option is not None else None
+
    def get_snapshot_policy(self):
        """
        Checks to see if a snapshot policy exists or not (ZAPI path).
        :return: Return policy details if a snapshot policy exists, None if it doesn't
        """
        snapshot_obj = netapp_utils.zapi.NaElement("snapshot-policy-get-iter")
        # compose query
        query = netapp_utils.zapi.NaElement("query")
        snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-policy-info")
        snapshot_info_obj.add_new_child("policy", self.parameters['name'])
        if 'vserver' in self.parameters:
            snapshot_info_obj.add_new_child("vserver-name", self.parameters['vserver'])
        query.add_child_elem(snapshot_info_obj)
        snapshot_obj.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(snapshot_obj, True)
            # only a single exact match is treated as "policy exists"
            if result.get_child_by_name('num-records') and \
                    int(result.get_child_content('num-records')) == 1:
                snapshot_policy = result.get_child_by_name('attributes-list').get_child_by_name('snapshot-policy-info')
                current = {'name': snapshot_policy.get_child_content('policy')}
                current['vserver'] = snapshot_policy.get_child_content('vserver-name')
                current['enabled'] = snapshot_policy.get_child_content('enabled').lower() != 'false'
                current['comment'] = snapshot_policy.get_child_content('comment') or ''
                # schedules are flattened into four parallel lists, one entry per schedule
                current['schedule'], current['count'], current['snapmirror_label'], current['prefix'] = [], [], [], []
                if snapshot_policy.get_child_by_name('snapshot-policy-schedules'):
                    for schedule in snapshot_policy['snapshot-policy-schedules'].get_children():
                        current['schedule'].append(schedule.get_child_content('schedule'))
                        current['count'].append(int(schedule.get_child_content('count')))

                        # ZAPI reports '-' (or omits the field) when no label is set;
                        # normalize to '' so comparisons against user input work
                        snapmirror_label = schedule.get_child_content('snapmirror-label')
                        if snapmirror_label is None or snapmirror_label == '-':
                            snapmirror_label = ''
                        current['snapmirror_label'].append(snapmirror_label)

                        prefix = schedule.get_child_content('prefix')
                        if prefix is None or prefix == '-':
                            prefix = ''
                        current['prefix'].append(prefix)
                return current
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
        # no record (or more than one) found
        return None
+
+ def validate_parameters(self):
+ """
+ Validate if each schedule has a count associated
+ :return: None
+ """
+ if 'count' not in self.parameters or 'schedule' not in self.parameters or \
+ len(self.parameters['count']) > 5 or len(self.parameters['schedule']) > 5 or \
+ len(self.parameters['count']) < 1 or len(self.parameters['schedule']) < 1 or \
+ len(self.parameters['count']) != len(self.parameters['schedule']):
+ self.module.fail_json(msg="Error: A Snapshot policy must have at least 1 "
+ "schedule and can have up to a maximum of 5 schedules, with a count "
+ "representing the maximum number of Snapshot copies for each schedule")
+
+ if 'snapmirror_label' in self.parameters and len(self.parameters['snapmirror_label']) != len(self.parameters['schedule']):
+ self.module.fail_json(msg="Error: Each Snapshot Policy schedule must have an accompanying SnapMirror Label")
+
+ if 'prefix' in self.parameters and len(self.parameters['prefix']) != len(self.parameters['schedule']):
+ self.module.fail_json(msg="Error: Each Snapshot Policy schedule must have an accompanying prefix")
+
+ def modify_snapshot_policy(self, current):
+ """
+ Modifies an existing snapshot policy
+ """
+ # Set up required variables to modify snapshot policy
+ options = {'policy': self.parameters['name']}
+ modify = False
+
+ # Set up optional variables to modify snapshot policy
+ if 'enabled' in self.parameters and self.parameters['enabled'] != current['enabled']:
+ options['enabled'] = str(self.parameters['enabled'])
+ modify = True
+ if 'comment' in self.parameters and self.parameters['comment'] != current['comment']:
+ options['comment'] = self.parameters['comment']
+ modify = True
+
+ if modify:
+ snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-policy-modify', **options)
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying snapshot policy %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
    def modify_snapshot_policy_schedules(self, current):
        """
        Modify existing schedules in snapshot policy (ZAPI path).
        Builds delete/modify/add work lists, then applies them in an order that
        always leaves at least one schedule attached to the policy.
        :param current: policy details as returned by get_snapshot_policy()
        :return: None
        """
        self.validate_parameters()
        delete_schedules, modify_schedules, add_schedules = [], [], []

        if 'snapmirror_label' in self.parameters:
            snapmirror_labels = self.parameters['snapmirror_label']
        else:
            # User hasn't supplied any snapmirror labels.
            snapmirror_labels = [None] * len(self.parameters['schedule'])

        # Identify schedules for deletion
        for schedule in current['schedule']:
            schedule = self.safe_strip(schedule)
            if schedule not in [item.strip() for item in self.parameters['schedule']]:
                options = {'policy': current['name'],
                           'schedule': schedule}
                delete_schedules.append(options)

        # Identify schedules to be modified or added
        for schedule, count, snapmirror_label in zip(self.parameters['schedule'], self.parameters['count'], snapmirror_labels):
            schedule = self.safe_strip(schedule)
            snapmirror_label = self.safe_strip(snapmirror_label)

            options = {'policy': current['name'],
                       'schedule': schedule}

            if schedule in current['schedule']:
                # Schedule exists. Only modify if it has changed.
                modify = False
                schedule_index = current['schedule'].index(schedule)

                if count != current['count'][schedule_index]:
                    options['new-count'] = str(count)
                    modify = True

                if snapmirror_label is not None and snapmirror_label != current['snapmirror_label'][schedule_index]:
                    options['new-snapmirror-label'] = snapmirror_label
                    modify = True

                if modify:
                    modify_schedules.append(options)
            else:
                # New schedule
                options['count'] = str(count)
                if snapmirror_label is not None and snapmirror_label != '':
                    options['snapmirror-label'] = snapmirror_label
                add_schedules.append(options)

        # Delete N-1 schedules no longer required. Must leave 1 schedule in policy
        # at any one time. Delete last one afterwards.
        while len(delete_schedules) > 1:
            options = delete_schedules.pop()
            self.modify_snapshot_policy_schedule(options, 'snapshot-policy-remove-schedule')

        # Modify schedules.
        while modify_schedules:
            options = modify_schedules.pop()
            self.modify_snapshot_policy_schedule(options, 'snapshot-policy-modify-schedule')

        # Add 1 new schedule. Add other ones after last schedule has been deleted.
        # (interleaving keeps the policy non-empty at every step)
        if add_schedules:
            options = add_schedules.pop()
            self.modify_snapshot_policy_schedule(options, 'snapshot-policy-add-schedule')

        # Delete last schedule no longer required.
        while delete_schedules:
            options = delete_schedules.pop()
            self.modify_snapshot_policy_schedule(options, 'snapshot-policy-remove-schedule')

        # Add remaining new schedules.
        while add_schedules:
            options = add_schedules.pop()
            self.modify_snapshot_policy_schedule(options, 'snapshot-policy-add-schedule')
+
+ def modify_snapshot_policy_schedule(self, options, zapi):
+ """
+ Add, modify or remove a schedule to/from a snapshot policy
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
+ try:
+ self.server.invoke_successfully(snapshot_obj, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying snapshot policy schedule %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
    def create_snapshot_policy(self):
        """
        Creates a new snapshot policy (ZAPI path).
        """
        # set up required variables to create a snapshot policy
        self.validate_parameters()
        options = {'policy': self.parameters['name'],
                   'enabled': str(self.parameters['enabled']),
                   }

        if 'snapmirror_label' in self.parameters:
            snapmirror_labels = self.parameters['snapmirror_label']
        else:
            # User hasn't supplied any snapmirror labels.
            snapmirror_labels = [None] * len(self.parameters['schedule'])

        if 'prefix' in self.parameters:
            prefixes = self.parameters['prefix']
        else:
            # User hasn't supplied any prefixes.
            prefixes = [None] * len(self.parameters['schedule'])

        # zapi attribute for first schedule is schedule1, second is schedule2 and so on
        positions = [str(i) for i in range(1, len(self.parameters['schedule']) + 1)]
        for schedule, prefix, count, snapmirror_label, position in \
                zip(self.parameters['schedule'], prefixes,
                    self.parameters['count'], snapmirror_labels, positions):
            schedule = self.safe_strip(schedule)
            options['count' + position] = str(count)
            options['schedule' + position] = schedule
            snapmirror_label = self.safe_strip(snapmirror_label)
            # empty or absent labels/prefixes are simply omitted from the request
            if snapmirror_label:
                options['snapmirror-label' + position] = snapmirror_label
            prefix = self.safe_strip(prefix)
            if prefix:
                options['prefix' + position] = prefix

        snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-policy-create', **options)

        # Set up optional variables to create a snapshot policy
        if self.parameters.get('comment'):
            snapshot_obj.add_new_child("comment", self.parameters['comment'])
        try:
            self.server.invoke_successfully(snapshot_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating snapshot policy %s: %s' %
                                      (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
+
+ def delete_snapshot_policy(self):
+ """
+ Deletes an existing snapshot policy
+ """
+ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-policy-delete")
+
+ # Set up required variables to delete a snapshot policy
+ snapshot_obj.add_new_child("policy", self.parameters['name'])
+ try:
+ self.server.invoke_successfully(snapshot_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting snapshot policy %s: %s' %
+ (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
    def asup_log_for_cserver(self, event_name):
        """
        Fetch admin vserver for the given cluster
        Create and Autosupport log event with the given module name
        :param event_name: Name of the event log
        :return: None
        NOTE(review): no caller of this method is visible in this module --
        it may be legacy EMS logging; confirm before removing.
        """
        if 'vserver' in self.parameters:
            # connection is already scoped to a vserver; log through it directly
            netapp_utils.ems_log_event(event_name, self.server)
        else:
            # otherwise look up the cluster admin vserver and log through that
            results = netapp_utils.get_cserver(self.server)
            cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
            netapp_utils.ems_log_event(event_name, cserver)
+
+# REST API support for create, delete and modify snapshot policy
+ def get_snapshot_schedule_rest(self, current):
+ """
+ get details of the snapshot schedule with rest API.
+ """
+ query = {'snapshot_policy.name': current['name']}
+ api = 'storage/snapshot-policies/%s/schedules' % current['uuid']
+ fields = 'schedule.name,schedule.uuid,snapmirror_label,count,prefix'
+ records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query, fields)
+ if error:
+ self.module.fail_json(msg="Error on fetching snapshot schedule: %s" % error)
+ if records:
+ scheduleRecords = {
+ 'counts': [],
+ 'prefixes': [],
+ 'schedule_names': [],
+ 'schedule_uuids': [],
+ 'snapmirror_labels': []
+ }
+ for item in records:
+ scheduleRecords['counts'].append(item['count'])
+ scheduleRecords['prefixes'].append(item['prefix'])
+ scheduleRecords['schedule_names'].append(item['schedule']['name'])
+ scheduleRecords['schedule_uuids'].append(item['schedule']['uuid'])
+ scheduleRecords['snapmirror_labels'].append(item['snapmirror_label'])
+ return scheduleRecords
+ return None
+
    def get_snapshot_policy_rest(self):
        """
        get details of the snapshot policy with rest API.
        Falls back to the ZAPI lookup when REST is not in use.
        :return: dict of policy attributes, or None when not found
        """
        if not self.use_rest:
            return self.get_snapshot_policy()
        query = {'name': self.parameters['name']}
        # a policy owned by a vserver is looked up with svm scope, otherwise cluster scope
        if self.parameters.get('vserver'):
            query['svm.name'] = self.parameters['vserver']
            query['scope'] = 'svm'
        else:
            query['scope'] = 'cluster'
        api = 'storage/snapshot-policies'
        fields = 'enabled,svm.uuid,comment,copies.snapmirror_label,copies.count,copies.prefix,copies.schedule.name,scope'
        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
        if error:
            self.module.fail_json(msg="Error on fetching snapshot policy: %s" % error)
        if record:
            current = {
                'enabled': record['enabled'],
                'name': record['name'],
                'uuid': record['uuid'],
                'comment': record.get('comment', ''),
                'count': [],
                'prefix': [],
                'schedule': [],
                'snapmirror_label': []
            }
            if query['scope'] == 'svm':
                current['svm_name'] = record['svm']['name']
                current['svm_uuid'] = record['svm']['uuid']
            # flatten the REST 'copies' records into parallel lists, matching the ZAPI layout
            if record['copies']:
                for item in record['copies']:
                    current['count'].append(item['count'])
                    current['prefix'].append(item['prefix'])
                    current['schedule'].append(item['schedule']['name'])
                    current['snapmirror_label'].append(item['snapmirror_label'])
            return current
        # record is None here: policy does not exist
        return record
+
    def create_snapshot_policy_rest(self):
        """
        create snapshot policy with rest API.
        Falls back to the ZAPI create when REST is not in use.
        """
        if not self.use_rest:
            return self.create_snapshot_policy()

        body = {
            'name': self.parameters.get('name'),
            'enabled': self.parameters.get('enabled'),
            'copies': []
        }
        # supplying svm.name makes the policy owned by (scoped to) that vserver
        if self.parameters.get('vserver'):
            body['svm.name'] = self.parameters['vserver']
        if 'comment' in self.parameters:
            body['comment'] = self.parameters['comment']
        if 'snapmirror_label' in self.parameters:
            snapmirror_labels = self.parameters['snapmirror_label']
        else:
            # User hasn't supplied any snapmirror labels.
            snapmirror_labels = [None] * len(self.parameters['schedule'])

        if 'prefix' in self.parameters:
            prefixes = self.parameters['prefix']
        else:
            # User hasn't supplied any prefixes.
            prefixes = [None] * len(self.parameters['schedule'])
        # build one 'copies' entry per schedule; empty labels/prefixes are omitted
        for schedule, prefix, count, snapmirror_label in \
                zip(self.parameters['schedule'], prefixes,
                    self.parameters['count'], snapmirror_labels):
            copy = {
                'schedule': {'name': self.safe_strip(schedule)},
                'count': count
            }
            snapmirror_label = self.safe_strip(snapmirror_label)
            if snapmirror_label:
                copy['snapmirror_label'] = snapmirror_label
            prefix = self.safe_strip(prefix)
            if prefix:
                copy['prefix'] = prefix
            body['copies'].append(copy)
        api = 'storage/snapshot-policies'
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error is not None:
            self.module.fail_json(msg="Error on creating snapshot policy: %s" % error)
+
+ def delete_snapshot_policy_rest(self, current):
+ """
+ delete snapshot policy with rest API.
+ """
+ if not self.use_rest:
+ return self.delete_snapshot_policy()
+ api = 'storage/snapshot-policies'
+ dummy, error = rest_generic.delete_async(self.rest_api, api, current['uuid'])
+ if error is not None:
+ self.module.fail_json(msg="Error on deleting snapshot policy: %s" % error)
+
+ def modify_snapshot_policy_rest(self, modify, current=None):
+ """
+ Modify snapshot policy with rest API.
+ """
+ if not self.use_rest:
+ return self.modify_snapshot_policy(current)
+ api = 'storage/snapshot-policies'
+ body = {}
+ if 'enabled' in modify:
+ body['enabled'] = modify['enabled']
+ if 'comment' in modify:
+ body['comment'] = modify['comment']
+ if body:
+ dummy, error = rest_generic.patch_async(self.rest_api, api, current['uuid'], body)
+ if error is not None:
+ self.module.fail_json(msg="Error on modifying snapshot policy: %s" % error)
+
    def modify_snapshot_policy_schedule_rest(self, modify, current):
        """
        Modify snapshot schedule with rest API.
        Builds delete/modify/add work lists against the schedules currently
        attached to the policy, then applies them in an order that always leaves
        at least one schedule attached.
        NOTE(review): the 'modify' parameter is shadowed by a local flag below
        and never read -- confirm it is intentionally unused.
        """
        if not self.use_rest:
            return self.modify_snapshot_policy_schedules(current)

        schedule_info = None
        api = 'storage/snapshot-policies/%s/schedules' % current['uuid']
        schedule_info = self.get_snapshot_schedule_rest(current)
        delete_schedules, modify_schedules, add_schedules = [], [], []

        if 'snapmirror_label' in self.parameters:
            snapmirror_labels = self.parameters['snapmirror_label']
        else:
            # User hasn't supplied any snapmirror labels.
            snapmirror_labels = [None] * len(self.parameters['schedule'])

        if 'prefix' in self.parameters:
            prefixes = self.parameters['prefix']
        else:
            # User hasn't supplied any prefix.
            prefixes = [None] * len(self.parameters['schedule'])

        # Identify schedules to be deleted
        for schedule_name, schedule_uuid in zip(schedule_info['schedule_names'], schedule_info['schedule_uuids']):
            schedule_name = self.safe_strip(schedule_name)
            if schedule_name not in [item.strip() for item in self.parameters['schedule']]:
                delete_schedules.append(schedule_uuid)

        # Identify schedules to be modified or added
        for schedule_name, count, snapmirror_label, prefix in zip(self.parameters['schedule'], self.parameters['count'], snapmirror_labels, prefixes):
            schedule_name = self.safe_strip(schedule_name)
            if snapmirror_label:
                snapmirror_label = self.safe_strip(snapmirror_label)
            if prefix:
                prefix = self.safe_strip(prefix)
            body = {}
            if schedule_name in schedule_info['schedule_names']:
                # Schedule exists. Only modify if it has changed.
                modify = False
                schedule_index = schedule_info['schedule_names'].index(schedule_name)
                schedule_uuid = schedule_info['schedule_uuids'][schedule_index]
                if count != schedule_info['counts'][schedule_index]:
                    body['count'] = str(count)
                    modify = True

                if snapmirror_label is not None and snapmirror_label != schedule_info['snapmirror_labels'][schedule_index]:
                    body['snapmirror_label'] = snapmirror_label
                    modify = True

                if prefix is not None and prefix != schedule_info['prefixes'][schedule_index]:
                    body['prefix'] = prefix
                    modify = True

                if modify:
                    # stash the uuid in the body; it is popped back out before PATCH
                    body['schedule_uuid'] = schedule_uuid
                    modify_schedules.append(body)
            else:
                # New schedule
                body['schedule.name'] = schedule_name
                body['count'] = str(count)
                if snapmirror_label is not None and snapmirror_label != '':
                    body['snapmirror_label'] = snapmirror_label
                if prefix is not None and prefix != '':
                    body['prefix'] = prefix
                add_schedules.append(body)

        # Delete N-1 schedules no longer required. Must leave 1 schedule in policy
        # at any one time. Delete last one afterwards.
        while len(delete_schedules) > 1:
            schedule_uuid = delete_schedules.pop()
            record, error = rest_generic.delete_async(self.rest_api, api, schedule_uuid)
            if error is not None:
                self.module.fail_json(msg="Error on deleting snapshot policy schedule: %s" % error)

        # Modify schedules.
        while modify_schedules:
            body = modify_schedules.pop()
            schedule_id = body.pop('schedule_uuid')
            record, error = rest_generic.patch_async(self.rest_api, api, schedule_id, body)
            if error is not None:
                self.module.fail_json(msg="Error on modifying snapshot policy schedule: %s" % error)

        # Add 1 new schedule. At least one schedule must be present, before we can delete the last old one.
        if add_schedules:
            body = add_schedules.pop()
            record, error = rest_generic.post_async(self.rest_api, api, body)
            if error is not None:
                self.module.fail_json(msg="Error on adding snapshot policy schedule: %s" % error)

        # Delete the last schedule that is no longer required.
        while delete_schedules:
            schedule_uuid = delete_schedules.pop()
            record, error = rest_generic.delete_async(self.rest_api, api, schedule_uuid)
            if error is not None:
                self.module.fail_json(msg="Error on deleting snapshot policy schedule: %s" % error)

        # Add the remaining new schedules.
        while add_schedules:
            body = add_schedules.pop()
            record, error = rest_generic.post_async(self.rest_api, api, body)
            if error is not None:
                self.module.fail_json(msg="Error on adding snapshot policy schedule: %s" % error)
+
    def apply(self):
        """
        Check to see which play we should run
        """
        current = self.get_snapshot_policy_rest()
        modify = None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.parameters['state'] == 'present':
            # fail early if schedule/count/prefix/snapmirror_label lists are inconsistent
            self.validate_parameters()
        if cd_action is None and self.parameters['state'] == 'present':
            # Don't sort schedule/prefix/count/snapmirror_label lists as it can
            # mess up the intended parameter order.
            modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_snapshot_policy_rest()
            elif cd_action == 'delete':
                self.delete_snapshot_policy_rest(current)

            if modify:
                # policy-level attributes and per-schedule attributes are
                # updated through separate calls/endpoints
                self.modify_snapshot_policy_rest(modify, current)
                self.modify_snapshot_policy_schedule_rest(modify, current)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """
    Creates and deletes a Snapshot Policy
    """
    NetAppOntapSnapshotPolicy().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py
new file mode 100644
index 000000000..c1f278e0d
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python
+"""
+create SNMP module to add/delete SNMP community
+"""
+
+# (c) 2018-2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Create/Delete SNMP community"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_snmp
+options:
+ access_control:
+ choices: ['ro']
+ description:
+ - "Access control for the community. The only supported value is 'ro' (read-only). Ignored with REST"
+ default: 'ro'
+ type: str
+ community_name:
+ description:
+ - "The name of the SNMP community to manage."
+ required: true
+ type: str
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether the specified SNMP community should exist or not."
+ default: 'present'
+ type: str
+short_description: NetApp ONTAP SNMP community
+version_added: 2.6.0
+'''
+
+EXAMPLES = """
+ - name: Create SNMP community (ZAPI only)
+ netapp.ontap.na_ontap_snmp:
+ state: present
+ community_name: communityName
+ access_control: 'ro'
+ use_rest: never
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Create SNMP community (snmpv1 or snmpv2) (REST only)
+ netapp.ontap.na_ontap_snmp:
+ state: present
+ community_name: communityName
+ use_rest: always
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete SNMP community (ZAPI only)
+ netapp.ontap.na_ontap_snmp:
+ state: absent
+ community_name: communityName
+ access_control: 'ro'
+ use_rest: never
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ - name: Delete SNMP community (snmpv1 or snmpv2) (REST only)
+ netapp.ontap.na_ontap_snmp:
+ state: absent
+ community_name: communityName
+ use_rest: always
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPSnmp(object):
+ '''Class with SNMP methods, supports check mode'''
+
    def __init__(self):
        """Declare module arguments, choose REST vs ZAPI, and connect to ONTAP."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            community_name=dict(required=True, type='str'),
            # only read-only communities are supported; ignored with REST
            access_control=dict(required=False, type='str', choices=['ro'], default='ro'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # Set up Rest API
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            # the ZAPI path requires the netapp-lib python package
            if HAS_NETAPP_LIB is False:
                self.module.fail_json(msg="the python NetApp-Lib module is required")
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def invoke_snmp_community(self, zapi):
+ """
+ Invoke zapi - add/delete take the same NaElement structure
+ """
+ snmp_community = netapp_utils.zapi.NaElement.create_node_with_children(
+ zapi, **{'community': self.parameters['community_name'],
+ 'access-control': self.parameters['access_control']})
+ try:
+ self.server.invoke_successfully(snmp_community, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ if zapi == 'snmp-community-add':
+ action = 'adding'
+ elif zapi == 'snmp-community-delete':
+ action = 'deleting'
+ else:
+ action = 'unexpected'
+ self.module.fail_json(msg='Error %s community %s: %s' % (action, self.parameters['community_name'], to_native(error)),
+ exception=traceback.format_exc())
+
    def get_snmp(self):
        """
        Check if SNMP community exists
        :return: dict with community_name/access_control when found, else None
        """
        if self.use_rest:
            return self.get_snmp_rest()
        snmp_obj = netapp_utils.zapi.NaElement('snmp-status')
        try:
            result = self.server.invoke_successfully(snmp_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
        # scan every configured community for an exact name match
        if result.get_child_by_name('communities') is not None:
            for snmp_entry in result.get_child_by_name('communities').get_children():
                community_name = snmp_entry.get_child_content('community')
                if community_name == self.parameters['community_name']:
                    return {
                        'community_name': snmp_entry.get_child_content('community'),
                        'access_control': snmp_entry.get_child_content('access-control'),
                    }
        return None
+
+ def get_snmp_rest(self):
+ # There can be SNMPv1, SNMPv2 (called community) or
+ # SNMPv3 local or SNMPv3 remote (called users)
+ api = 'support/snmp/users'
+ params = {'name': self.parameters['community_name'],
+ 'fields': 'name,engine_id'}
+ message, error = self.rest_api.get(api, params)
+ record, error = rrh.check_for_0_or_1_records(api, message, error)
+ if error:
+ self.module.fail_json(msg=error)
+ if record:
+ # access control does not exist in rest
+ return dict(community_name=record['name'], engine_id=record['engine_id'], access_control='ro')
+ return None
+
+ def add_snmp_community(self):
+ """
+ Adds a SNMP community
+ """
+ if self.use_rest:
+ self.add_snmp_community_rest()
+ else:
+ self.invoke_snmp_community('snmp-community-add')
+
+ def add_snmp_community_rest(self):
+ api = 'support/snmp/users'
+ params = {'name': self.parameters['community_name'],
+ 'authentication_method': 'community'}
+ message, error = self.rest_api.post(api, params)
+ if error:
+ self.module.fail_json(msg=error)
+
+ def delete_snmp_community(self, current=None):
+ """
+ Delete a SNMP community
+ """
+ if self.use_rest:
+ self.delete_snmp_community_rest(current)
+ else:
+ self.invoke_snmp_community('snmp-community-delete')
+
+ def delete_snmp_community_rest(self, current):
+ api = 'support/snmp/users/' + current['engine_id'] + '/' + self.parameters["community_name"]
+ dummy, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg=error)
+
    def apply(self):
        """
        Apply action to SNMP community
        This module is not idempotent:
        Add doesn't fail the playbook if user is trying
        to add an already existing snmp community
        """
        # TODO: This module should have been called snmp_community as it only deals with community and not snmp
        current = self.get_snmp()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.add_snmp_community()
            elif cd_action == 'delete':
                # current is needed by the REST delete (engine_id + name key)
                self.delete_snmp_community(current)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
        self.module.exit_json(**result)
+
+
def main():
    '''Execute action'''
    NetAppONTAPSnmp().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py
new file mode 100644
index 000000000..e27e8e7e5
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+"""
+create SNMP traphosts module to add/delete SNMP traphosts
+"""
+
+# (c) 2020-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_snmp_traphosts
+short_description: NetApp ONTAP SNMP traphosts.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Whether the specified SNMP traphost should exist or not. Requires REST with 9.7 or higher
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - "Whether the specified SNMP traphost should exist or not."
+ default: 'present'
+ type: str
+ host:
+ description:
+ - "Fully qualified domain name (FQDN), IPv4 address or IPv6 address of SNMP traphost."
+ aliases: ['ip_address']
+ required: true
+ type: str
+ version_added: 21.24.0
+'''
+
+EXAMPLES = """
+ - name: Create SNMP traphost
+ netapp.ontap.na_ontap_snmp_traphosts:
+ state: present
+ host: example1.com
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ - name: Delete SNMP traphost
+ netapp.ontap.na_ontap_snmp_traphosts:
+ state: absent
+ host: example1.com
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+"""
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppONTAPSnmpTraphosts:
    """Create or delete SNMP traphosts, using the ONTAP REST API only."""

    def __init__(self):
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            host=dict(required=True, type='str', aliases=['ip_address']),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # REST only module: abort early unless ONTAP 9.7 or later with REST.
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_snmp_traphosts', 9, 7)

    def get_snmp_traphosts(self):
        """Return the traphost record matching 'host', or None if absent."""
        query = {'host': self.parameters.get('host'),
                 'fields': 'host'}
        api = 'support/snmp/traphosts'
        record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            self.module.fail_json(msg="Error on fetching snmp traphosts info: %s" % error)
        return record

    def create_snmp_traphost(self):
        """Create a traphost for 'host'.  Calls fail_json on API error."""
        api = 'support/snmp/traphosts'
        params = {'host': self.parameters.get('host')}
        dummy, error = rest_generic.post_async(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg="Error creating traphost: %s" % error)

    def delete_snmp_traphost(self):
        """Delete the traphost identified by 'host'.  Calls fail_json on API error."""
        api = 'support/snmp/traphosts'
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters.get('host'))
        if error is not None:
            self.module.fail_json(msg="Error deleting traphost: %s" % error)

    def apply(self):
        """
        Apply action to SNMP traphost
        """
        current = self.get_snmp_traphosts()
        # cd_action is 'create', 'delete' or None depending on current vs desired state
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_snmp_traphost()
            elif cd_action == 'delete':
                self.delete_snmp_traphost()
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
        self.module.exit_json(**result)
+
+
def main():
    """Entry point: build the traphosts module object and run the requested action."""
    traphost_module = NetAppONTAPSnmpTraphosts()
    traphost_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py
new file mode 100644
index 000000000..941d23eac
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py
@@ -0,0 +1,722 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_software_update
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Update ONTAP software
+ - Requires an https connection and is not supported over http
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_software_update
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - This module downloads and optionally installs ONTAP software on a cluster.
+ - The software package is deleted after a successful installation.
+ - If the software package is already present, it is not downloaded and not replaced.
+ - When state is absent, the package is deleted from disk.
+ default: present
+ type: str
+ nodes:
+ description:
+ - List of nodes to be updated, the nodes have to be a part of a HA Pair.
+ - Requires ONTAP 9.9 with REST.
+ aliases:
+ - node
+ - nodes_to_update
+ type: list
+ elements: str
+ package_version:
+ required: true
+ description:
+ - Specifies the package version to update ONTAP software to, or to be deleted.
+ type: str
+ package_url:
+ type: str
+ description:
+ - Specifies the package URL to download the package.
+ - Required when state is present unless the package is already present on disk.
+ ignore_validation_warning:
+ description:
+ - Allows the update to continue if warnings are encountered during the validation phase.
+ default: False
+ type: bool
+ aliases:
+ - skip_warnings
+ download_only:
+ description:
+ - Allows to download image without update.
+ default: False
+ type: bool
+ version_added: 20.4.0
+ validate_after_download:
+ description:
+ - By default validation is not run after download, as it is already done in the update step.
+ - This option is useful when using C(download_only), for instance when updating a MetroCluster system.
+ default: False
+ type: bool
+ version_added: 21.11.0
+ stabilize_minutes:
+ description:
+ - Number of minutes that the update should wait after a takeover or giveback is completed.
+ - Requires ONTAP 9.8 with REST.
+ type: int
+ version_added: 20.6.0
+ timeout:
+ description:
+ - how long to wait for the update to complete, in seconds.
+ default: 1800
+ type: int
+ force_update:
+ description:
+ - force an update, even if package_version matches what is reported as installed.
+ default: false
+ type: bool
+ version_added: 20.11.0
+short_description: NetApp ONTAP Update Software
+version_added: 2.7.0
+notes:
+ - ONTAP expects the nodes to be in HA pairs to perform non disruptive updates.
+ - In a single node setup, the node is updated, and rebooted.
+ - Supports ZAPI and REST.
+ - Support check_mode.
+'''
+
+EXAMPLES = """
+
+ - name: ONTAP software update
+ netapp.ontap.na_ontap_software_update:
+ state: present
+ nodes: vsim1
+ package_url: "{{ url }}"
+ package_version: "{{ version_name }}"
+ ignore_validation_warning: True
+ download_only: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+validation_reports:
+ description: C(validation_reports_after_update) as a string, for backward compatibility.
+ returned: always
+ type: str
+validation_reports_after_download:
+ description:
+ - List of validation reports, after downloading the software package.
+ - Note that it is different from the validation checks reported after attempting an update.
+ returned: always
+ type: list
+validation_reports_after_update:
+ description:
+ - List of validation reports, after attempting to update the software package.
+ returned: always
+ type: list
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppONTAPSoftwareUpdate:
    """
    Class with ONTAP software update methods
    """

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            nodes=dict(required=False, type='list', elements='str', aliases=["node", "nodes_to_update"]),
            package_version=dict(required=True, type='str'),
            package_url=dict(required=False, type='str'),
            ignore_validation_warning=dict(required=False, type='bool', default=False, aliases=["skip_warnings"]),
            download_only=dict(required=False, type='bool', default=False),
            stabilize_minutes=dict(required=False, type='int'),
            timeout=dict(required=False, type='int', default=1800),
            force_update=dict(required=False, type='bool', default=False),
            validate_after_download=dict(required=False, type='bool', default=False),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # software images can only be transferred over https
        if self.parameters.get('https') is not True:
            self.module.fail_json(msg='Error: https parameter must be True')
        self.validation_reports_after_download = ['only available if validate_after_download is true']
        self.versions = ['not available with force_update']
        self.rest_api = OntapRestAPI(self.module)
        # stabilize_minutes requires ONTAP 9.8, nodes requires 9.9 with REST
        partially_supported_rest_properties = [['stabilize_minutes', (9, 8)], ['nodes', (9, 9)]]
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties)
        if not self.use_rest:
            if netapp_utils.has_netapp_lib() is False:
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    @staticmethod
    def cluster_image_get_iter():
        """
        Compose NaElement object to query current version
        :return: NaElement object for cluster-image-get-iter with query
        """
        cluster_image_get = netapp_utils.zapi.NaElement('cluster-image-get-iter')
        query = netapp_utils.zapi.NaElement('query')
        cluster_image_info = netapp_utils.zapi.NaElement('cluster-image-info')
        query.add_child_elem(cluster_image_info)
        cluster_image_get.add_child_elem(query)
        return cluster_image_get

    def cluster_image_get_versions(self):
        """
        Get current cluster image versions for each node
        :return: list of tuples (node_id, node_version) or empty list
        """
        if self.use_rest:
            return self.cluster_image_get_rest('versions')
        cluster_image_get_iter = self.cluster_image_get_iter()
        try:
            result = self.server.invoke_successfully(cluster_image_get_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching cluster image details: %s: %s'
                                      % (self.parameters['package_version'], to_native(error)),
                                  exception=traceback.format_exc())
        return ([(image_info.get_child_content('node-id'), image_info.get_child_content('current-version'))
                 for image_info in result.get_child_by_name('attributes-list').get_children()]
                if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0 else [])

    def cluster_image_get_for_node(self, node_name):
        """
        Get current cluster image info for given node
        :return: tuple (node_id, current_version), or (None, None) when not found
        """
        cluster_image_get = netapp_utils.zapi.NaElement('cluster-image-get')
        cluster_image_get.add_new_child('node-id', node_name)
        try:
            result = self.server.invoke_successfully(cluster_image_get, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching cluster image details for %s: %s'
                                      % (node_name, to_native(error)),
                                  exception=traceback.format_exc())
        # return cluster image version
        image_info = self.na_helper.safe_get(result, ['attributes', 'cluster-image-info'])
        if image_info:
            return image_info.get_child_content('node-id'), image_info.get_child_content('current-version')
        return None, None

    @staticmethod
    def get_localname(tag):
        """Strip the XML namespace from a tag name."""
        return netapp_utils.zapi.etree.QName(tag).localname

    def cluster_image_update_progress_get(self, ignore_connection_error=True):
        """
        Get current cluster image update progress info
        :return: Dictionary of cluster image update progress if query successful, else return None
        """
        cluster_update_progress_get = netapp_utils.zapi.NaElement('cluster-image-update-progress-info')
        cluster_update_progress_info = {}
        try:
            result = self.server.invoke_successfully(cluster_update_progress_get, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # return empty dict on error to satisfy package delete upon image update
            if ignore_connection_error:
                return cluster_update_progress_info
            self.module.fail_json(msg='Error fetching cluster image update progress details: %s' % (to_native(error)),
                                  exception=traceback.format_exc())
        # return cluster image update progress details
        if result.get_child_by_name('attributes').get_child_by_name('ndu-progress-info'):
            update_progress_info = result.get_child_by_name('attributes').get_child_by_name('ndu-progress-info')
            cluster_update_progress_info['overall_status'] = update_progress_info.get_child_content('overall-status')
            cluster_update_progress_info['completed_node_count'] = update_progress_info.\
                get_child_content('completed-node-count')
            reports = update_progress_info.get_child_by_name('validation-reports')
            if reports:
                cluster_update_progress_info['validation_reports'] = []
                for report in reports.get_children():
                    checks = {}
                    for check in report.get_children():
                        checks[self.get_localname(check.get_name())] = check.get_content()
                    cluster_update_progress_info['validation_reports'].append(checks)
        return cluster_update_progress_info

    def cluster_image_update(self):
        """
        Update current cluster image
        """
        cluster_update_info = netapp_utils.zapi.NaElement('cluster-image-update')
        cluster_update_info.add_new_child('package-version', self.parameters['package_version'])
        cluster_update_info.add_new_child('ignore-validation-warning',
                                          str(self.parameters['ignore_validation_warning']))
        if self.parameters.get('stabilize_minutes'):
            cluster_update_info.add_new_child('stabilize-minutes',
                                              self.na_helper.get_value_for_int(False, self.parameters['stabilize_minutes']))
        if self.parameters.get('nodes'):
            cluster_nodes = netapp_utils.zapi.NaElement('nodes')
            for node in self.parameters['nodes']:
                cluster_nodes.add_new_child('node-name', node)
            cluster_update_info.add_child_elem(cluster_nodes)
        try:
            self.server.invoke_successfully(cluster_update_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            msg = 'Error updating cluster image for %s: %s' % (self.parameters['package_version'], to_native(error))
            # try to surface the validation reports to help diagnose the failure
            cluster_update_progress_info = self.cluster_image_update_progress_get(ignore_connection_error=True)
            validation_reports = cluster_update_progress_info.get('validation_reports')
            if validation_reports is None:
                validation_reports = self.cluster_image_validate()
            self.module.fail_json(
                msg=msg,
                validation_reports=str(validation_reports),
                validation_reports_after_download=self.validation_reports_after_download,
                validation_reports_after_update=validation_reports,
                exception=traceback.format_exc())

    def cluster_image_package_download(self):
        """
        Get current cluster image package download
        :return: True if package already exists, else return False
        """
        cluster_image_package_download_info = netapp_utils.zapi.NaElement('cluster-image-package-download')
        cluster_image_package_download_info.add_new_child('package-url', self.parameters['package_url'])
        try:
            self.server.invoke_successfully(cluster_image_package_download_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # Error 18408 denotes Package image with the same name already exists
            if to_native(error.code) == "18408":
                return self.check_for_existing_package(error)
            else:
                self.module.fail_json(msg='Error downloading cluster image package for %s: %s'
                                          % (self.parameters['package_url'], to_native(error)),
                                      exception=traceback.format_exc())
        return False

    def cluster_image_package_delete(self):
        """
        Delete current cluster image package
        """
        if self.use_rest:
            return self.cluster_image_package_delete_rest()
        cluster_image_package_delete_info = netapp_utils.zapi.NaElement('cluster-image-package-delete')
        cluster_image_package_delete_info.add_new_child('package-version', self.parameters['package_version'])
        try:
            self.server.invoke_successfully(cluster_image_package_delete_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting cluster image package for %s: %s'
                                      % (self.parameters['package_version'], to_native(error)),
                                  exception=traceback.format_exc())

    def cluster_image_package_download_progress(self):
        """
        Get current cluster image package download progress
        :return: Dictionary of cluster image download progress if query successful, else return None
        """
        cluster_image_package_download_progress_info = netapp_utils.zapi.\
            NaElement('cluster-image-get-download-progress')
        try:
            result = self.server.invoke_successfully(
                cluster_image_package_download_progress_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching cluster image package download progress for %s: %s'
                                      % (self.parameters['package_url'], to_native(error)),
                                  exception=traceback.format_exc())
        # return cluster image download progress details
        cluster_download_progress_info = {}
        if result.get_child_by_name('progress-status'):
            cluster_download_progress_info['progress_status'] = result.get_child_content('progress-status')
            cluster_download_progress_info['progress_details'] = result.get_child_content('progress-details')
            cluster_download_progress_info['failure_reason'] = result.get_child_content('failure-reason')
            return cluster_download_progress_info
        return None

    def cluster_image_validate(self):
        """
        Validate that NDU is feasible.
        :return: List of dictionaries
        """
        if self.use_rest:
            return self.cluster_image_validate_rest()
        cluster_image_validation_info = netapp_utils.zapi.NaElement('cluster-image-validate')
        cluster_image_validation_info.add_new_child('package-version', self.parameters['package_version'])
        try:
            result = self.server.invoke_successfully(
                cluster_image_validation_info, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # validation errors are reported, not fatal
            return 'Error running cluster image validate: %s' % to_native(error)
        # return cluster validation report
        cluster_report_info = []
        if result.get_child_by_name('cluster-image-validation-report-list'):
            for report in result.get_child_by_name('cluster-image-validation-report-list').get_children():
                info = self.na_helper.safe_get(report, ['required-action', 'required-action-info'])
                required_action = {}
                if info:
                    for action in info.get_children():
                        if action.get_content():
                            required_action[self.get_localname(action.get_name())] = action.get_content()
                cluster_report_info.append(dict(
                    ndu_check=report.get_child_content('ndu-check'),
                    ndu_status=report.get_child_content('ndu-status'),
                    required_action=required_action
                ))
        return cluster_report_info

    def is_update_required(self):
        ''' return True if at least one node is not at the correct version '''
        if self.parameters.get('nodes') and not self.use_rest:
            self.versions = [self.cluster_image_get_for_node(node) for node in self.parameters['nodes']]
        else:
            self.versions = self.cluster_image_get_versions()
        # set comprehension not supported on 2.6
        current_versions = set([x[1] for x in self.versions])
        if len(current_versions) != 1:
            # mixed set, need to update
            return True
        # only update if versions differ
        return current_versions.pop() != self.parameters['package_version']

    def download_software(self):
        """Download the software package, polling until the transfer completes (ZAPI path)."""
        if self.use_rest:
            return self.download_software_rest()
        package_exists = self.cluster_image_package_download()
        if package_exists is False:
            cluster_download_progress = self.cluster_image_package_download_progress()
            while cluster_download_progress is None or cluster_download_progress.get('progress_status') == 'async_pkg_get_phase_running':
                time.sleep(10)
                cluster_download_progress = self.cluster_image_package_download_progress()
            if cluster_download_progress.get('progress_status') != 'async_pkg_get_phase_complete':
                self.module.fail_json(msg='Error downloading package: %s - installed versions: %s'
                                          % (cluster_download_progress['failure_reason'], self.versions))
        # NOTE(review): the ZAPI path returns None even when the package pre-existed,
        # unlike the REST path which returns True - TODO confirm this is intended.

    def update_software(self):
        """Install the software, poll for completion, and delete the package on success (ZAPI path)."""
        if self.use_rest:
            return self.update_software_rest()
        self.cluster_image_update()
        # delete package once update is completed
        cluster_update_progress = {}
        time_left = self.parameters['timeout']
        polling_interval = 25
        # assume in_progress if dict is empty
        while time_left > 0 and cluster_update_progress.get('overall_status', 'in_progress') == 'in_progress':
            time.sleep(polling_interval)
            time_left -= polling_interval
            cluster_update_progress = self.cluster_image_update_progress_get(ignore_connection_error=True)

        if cluster_update_progress.get('overall_status') != 'completed':
            cluster_update_progress = self.cluster_image_update_progress_get(ignore_connection_error=False)

        validation_reports = cluster_update_progress.get('validation_reports')

        if cluster_update_progress.get('overall_status') == 'completed':
            self.cluster_image_package_delete()
            return validation_reports

        if cluster_update_progress.get('overall_status') == 'in_progress':
            msg = 'Timeout error'
            action = ' Should the timeout value be increased? Current value is %d seconds.' % self.parameters['timeout']
            action += ' The software update continues in background.'
        else:
            msg = 'Error'
            action = ''
        msg += ' updating image using ZAPI: overall_status: %s.' % (cluster_update_progress.get('overall_status', 'cannot get status'))
        msg += action
        self.module.fail_json(
            msg=msg,
            validation_reports=str(validation_reports),
            validation_reports_after_download=self.validation_reports_after_download,
            validation_reports_after_update=validation_reports)

    def cluster_image_get_rest(self, what, fail_on_error=True):
        """return field information for:
            - nodes if what == versions
            - validation_results if what == validation_results
            - state if what == state
            - any other field if what is a valid field name
           call fail_json when there is an error and fail_on_error is True
           return a tuple (info, error) when fail_on_error is False
           return info when fail_on_error is True
        """
        api = 'cluster/software'
        field = 'nodes' if what == 'versions' else what
        record, error = rest_generic.get_one_record(self.rest_api, api, fields=field)
        # record can be empty or these keys may not be present when validation is still in progress
        optional_fields = ['validation_results']
        info, error_msg = None, None
        if error or not record:
            if error or field not in optional_fields:
                error_msg = "Error fetching software information for %s: %s" % (field, error or 'no record calling %s' % api)
        elif what == 'versions' and 'nodes' in record:
            nodes = self.parameters.get('nodes')
            if nodes:
                known_nodes = [node['name'] for node in record['nodes']]
                unknown_nodes = [node for node in nodes if node not in known_nodes]
                if unknown_nodes:
                    error_msg = 'Error: node%s not found in cluster: %s.' % ('s' if len(unknown_nodes) > 1 else '', ', '.join(unknown_nodes))
            info = [(node['name'], node['version']) for node in record['nodes'] if nodes is None or node['name'] in nodes]
        elif field in record:
            info = record[field]
        elif field not in optional_fields:
            error_msg = "Unexpected results for what: %s, record: %s" % (what, record)
        if fail_on_error and error_msg:
            self.module.fail_json(msg=error_msg)
        return info if fail_on_error else (info, error_msg)

    def check_for_existing_package(self, error):
        ''' ONTAP returns 'Package image with the same name already exists'
            if a file with the same name already exists.
            We need to confirm the version: if the version matches, we're good,
            otherwise we need to error out.
        '''
        versions, error2 = self.cluster_image_packages_get_rest()
        if self.parameters['package_version'] in versions:
            return True
        if versions:
            self.module.fail_json(msg='Error: another package with the same file name exists: found: %s' % ', '.join(versions))
        self.module.fail_json(msg='Error: ONTAP reported package already exists, but no package found: %s, getting versions: %s' % (error, error2))

    def cluster_image_download_get_rest(self):
        """Return (state, message, error) for the current download operation."""
        api = 'cluster/software/download'
        field = 'message,state'
        record, error = rest_generic.get_one_record(self.rest_api, api, fields=field)
        if record:
            return record.get('state'), record.get('message'), error
        return None, None, error

    def download_software_rest(self):
        """Download the software package using REST.
           :return: True if the package already exists, else False;
                    when resuming an in-progress download, the final download state.
        """
        api = 'cluster/software/download'
        body = {
            'url': self.parameters['package_url']
        }
        dummy, error = rest_generic.post_async(self.rest_api, api, body, timeout=0, job_timeout=self.parameters['timeout'])
        if error:
            if 'Package image with the same name already exists' in error:
                return self.check_for_existing_package(error)
            if 'Software get operation already in progress' in error:
                self.module.warn("A download is already in progress.  Resuming existing download.")
                return self.wait_for_condition(self.is_download_complete_rest, 'image download state')
            self.module.fail_json(msg="Error downloading software: %s - current versions: %s" % (error, self.versions))
        return False

    def is_download_complete_rest(self):
        """Condition callback for wait_for_condition: (is_complete, state, error)."""
        state, dummy, error = self.cluster_image_download_get_rest()
        if error:
            return None, None, error
        return state in ['success', 'failure'], state, error

    def cluster_image_validate_rest(self):
        """Run update validation only (validate_only=true) and collect the reports."""
        api = 'cluster/software'
        body = {
            'version': self.parameters['package_version']
        }
        query = {
            'validate_only': 'true'
        }
        dummy, error = rest_generic.patch_async(self.rest_api, api, None, body, query, timeout=0, job_timeout=self.parameters['timeout'])
        if error:
            return "Error validating software: %s" % error

        # poll as validation results may not be available immediately
        validation_results = None
        for __ in range(30):
            time.sleep(10)
            validation_results = self.cluster_image_get_rest('validation_results')
            if validation_results is not None:
                break
        return validation_results

    def update_software_rest(self):
        """install the software and invoke clean up and reporting function
        """
        state = self.cluster_image_update_rest()
        self.post_update_tasks_rest(state)

    def post_update_tasks_rest(self, state):
        """delete software package when installation is successful
           report validation_results whether update succeeded or failed
        """
        # fetch validation results
        (validation_reports, error) = self.cluster_image_get_rest('validation_results', fail_on_error=False)

        # success: delete and return
        if state == 'completed':
            self.cluster_image_package_delete()
            return error or validation_reports

        # report error
        if state == 'in_progress':
            msg = 'Timeout error'
            action = ' Should the timeout value be increased? Current value is %d seconds.' % self.parameters['timeout']
            action += ' The software update continues in background.'
        else:
            msg = 'Error'
            action = ''
        msg += ' updating image using REST: state: %s.' % state
        msg += action
        self.module.fail_json(
            msg=msg,
            validation_reports_after_download=self.validation_reports_after_download,
            validation_reports_after_update=(error or validation_reports))

    def error_is_fatal(self, error):
        ''' a node may not be available during reboot, or the job may be lost '''
        if not error:
            return False
        self.rest_api.log_debug('transient_error', error)
        error_messages = [
            "entry doesn't exist",                                  # job not found
            "Max retries exceeded with url: /api/cluster/jobs"      # connection errors
        ]
        return all(error_message not in error for error_message in error_messages)

    def cluster_image_update_rest(self):
        """Start the software update using REST and wait for a final state."""
        api = 'cluster/software'
        body = {
            'version': self.parameters['package_version']
        }
        query = {}
        params_to_rest = {
            # module keys to REST keys
            'ignore_validation_warning': 'skip_warnings',
            'nodes': 'nodes_to_update',
            'stabilize_minutes': 'stabilize_minutes',
        }
        for (param_key, rest_key) in params_to_rest.items():
            value = self.parameters.get(param_key)
            if value is not None:
                query[rest_key] = ','.join(value) if rest_key == 'nodes_to_update' else value
        # With ONTAP 9.8, the job persists until the node is rebooted
        # With ONTAP 9.9, the job returns quickly
        dummy, error = rest_generic.patch_async(self.rest_api, api, None, body, query=query or None, timeout=0, job_timeout=self.parameters['timeout'])
        if self.error_is_fatal(error):
            validation_results, v_error = self.cluster_image_get_rest('validation_results', fail_on_error=False)
            self.module.fail_json(msg="Error updating software: %s - validation results: %s" % (error, v_error or validation_results))

        return self.wait_for_condition(self.is_update_complete_rest, 'image update state')

    def is_update_complete_rest(self):
        """Condition callback for wait_for_condition: (is_complete, state, error)."""
        state, error = self.cluster_image_get_rest('state', fail_on_error=False)
        if error:
            return None, None, error
        return state in ['paused_by_user', 'paused_on_error', 'completed', 'canceled', 'failed'], state, error

    def wait_for_condition(self, is_task_complete, description):
        ''' loop until a condition is met
            is_task_complete is a function that returns (is_complete, state, error)
            if is_complete is True, the condition is met and state is returned
            if is complete is False, the task is called until a timeout is reached
            errors are ignored unless there are too many or a timeout is reached
        '''
        errors = []
        for __ in range(1 + self.parameters['timeout'] // 60):   # floor division
            time.sleep(60)
            is_complete, state, error = is_task_complete()
            if error:
                self.rest_api.log_debug('transient_error', error)
                errors.append(error)
                # tolerate up to 20 consecutive transient errors before giving up
                if len(errors) < 20:
                    continue
                break
            errors = []
            if is_complete:
                break
        if errors:
            msg = "Error: unable to read %s, using timeout %s." % (description, self.parameters['timeout'])
            msg += "  Last error: %s" % error
            msg += "  All errors: %s" % ' -- '.join(errors)
            self.module.fail_json(msg=msg)
        return state

    def cluster_image_packages_get_zapi(self):
        """Return (versions, None) for the locally stored software packages (ZAPI)."""
        versions = []
        packages_obj = netapp_utils.zapi.NaElement('cluster-image-package-local-get-iter')
        try:
            result = self.server.invoke_successfully(packages_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting list of local packages: %s' % to_native(error), exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
            packages_info = result.get_child_by_name('attributes-list')
            versions = [packages_details.get_child_content('package-version') for packages_details in packages_info.get_children()]
        return versions, None

    def cluster_image_packages_get_rest(self):
        """Return (versions, error) for the locally stored software packages."""
        if not self.use_rest:
            return self.cluster_image_packages_get_zapi()
        api = 'cluster/software/packages'
        records, error = rest_generic.get_0_or_more_records(self.rest_api, api, fields='version')
        return [record.get('version') for record in records] if records else [], error

    def cluster_image_package_delete_rest(self):
        """Delete the software package identified by package_version using REST."""
        api = 'cluster/software/packages'
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['package_version'])
        if error:
            self.module.fail_json(msg='Error deleting cluster software package for %s: %s' % (self.parameters['package_version'], error))

    def apply(self):
        """
        Apply action to update ONTAP software
        """
        # TODO: cluster image update only works for HA configurations.
        # check if node image update can be used for other cases.
        versions, error = self.cluster_image_packages_get_rest()
        already_downloaded = not error and self.parameters['package_version'] in versions
        if self.parameters['state'] == 'absent':
            if error:
                self.module.fail_json(msg='Error: unable to fetch local package list: %s' % error)
            changed = already_downloaded
        else:
            if already_downloaded:
                self.module.warn('Package %s is already present, skipping download.' % self.parameters['package_version'])
            elif not self.parameters.get('package_url'):
                self.module.fail_json(msg='Error: package_url is a required parameter to download the software package.')
            changed = self.parameters['force_update'] or self.is_update_required()
        validation_reports_after_update = ['only available after update']

        results = {}
        if not self.module.check_mode and changed:
            if self.parameters['state'] == 'absent':
                self.cluster_image_package_delete()
            else:
                if not already_downloaded:
                    already_downloaded = self.download_software()
                if self.parameters['validate_after_download']:
                    self.validation_reports_after_download = self.cluster_image_validate()
                if self.parameters['download_only']:
                    changed = not already_downloaded
                else:
                    validation_reports_after_update = self.update_software()
                    results = {
                        'validation_reports': str(validation_reports_after_update),
                        'validation_reports_after_download': self.validation_reports_after_download,
                        'validation_reports_after_update': validation_reports_after_update
                    }

        self.module.exit_json(changed=changed, **results)
+
+
def main():
    """Entry point: build the software-update module object and run the requested action."""
    update_module = NetAppONTAPSoftwareUpdate()
    update_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py
new file mode 100644
index 000000000..bf6ca9031
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+'''
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Run cli commands on ONTAP over SSH using paramiko.
+ - Output is returned in C(stdout) and C(stderr), and also as C(stdout_lines), C(stdout_lines_filtered), C(stderr_lines).
+ - Note that the module can succeed even though the command failed. You need to analyze stdout and check the results.
+ - If the SSH host key is unknown and accepted, C(warnings) is updated.
+ - Options related to ZAPI or REST APIs are ignored.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_ssh_command
+short_description: NetApp ONTAP Run any cli command over plain SSH using paramiko.
+version_added: 20.8.0
+options:
+ command:
+ description:
+ - a string containing the command and arguments.
+ required: true
+ type: str
+ privilege:
+ description:
+ - privilege level at which to run the command, eg admin, advanced.
+ - if set, the command is prefixed with C(set -privilege <privilege>;).
+ type: str
+ accept_unknown_host_keys:
+ description:
+ - When false, reject the connection if the host key is not in known_hosts file.
+ - When true, if the host key is unknown, accept it, but report a warning.
+ - Note that the key is not added to the file. You could add the key by manually using SSH.
+ type: bool
+ default: false
+ include_lines:
+ description:
+ - return only lines containing string pattern in C(stdout_lines_filtered)
+ default: ''
+ type: str
+  exclude_lines:
+    description:
+      - exclude lines containing string pattern from C(stdout_lines_filtered)
+    default: ''
+    type: str
+ service_processor:
+ description:
+ - whether the target system is ONTAP or the service processor (SP)
+      - only meaningful when privilege is set
+ aliases: [sp]
+ default: false
+ type: bool
+'''
+
+EXAMPLES = """
+ - name: run ontap cli command using SSH
+ na_ontap_ssh_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ command: version
+
+ # Same as above, with parameters
+ - name: run ontap cli command
+ na_ontap_ssh_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ command: node show -fields node,health,uptime,model
+ privilege: admin
+
+ # Same as above, but with lines filtering
+ - name: run ontap cli command
+ na_ontap_ssh_command:
+ hostname: "{{ hostname }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ command: node show -fields node,health,uptime,model
+ exclude_lines: 'ode ' # Exclude lines with 'Node ' or 'node'
+ # use with caution!
+ accept_unknown_host_keys: true
+ privilege: admin
+
+ - name: run ontap SSH command on SP
+ na_ontap_ssh_command:
+ # <<: *sp_login
+ command: sp switch-version
+ privilege: diag
+ sp: true
+ register: result
+ - debug: var=result
+"""
+
+RETURN = """
+stdout_lines_filtered:
+ description:
+ - In addition to stdout and stdout_lines, a list of non-white lines, excluding last and failed login information.
+ - The list can be further refined using the include_lines and exclude_lines filters.
+ returned: always
+ type: list
+"""
+
+import traceback
+import warnings
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+try:
+ import paramiko
+ HAS_PARAMIKO = True
+except ImportError:
+ HAS_PARAMIKO = False
+
+
+class NetAppONTAPSSHCommand(object):
+    ''' Runs a CLI command on ONTAP (or its service processor) over SSH using paramiko.
+
+    The SSH connection is established in __init__; apply() runs the command and
+    exits the module with stdout/stderr and a filtered list of stdout lines.
+    '''
+
+    def __init__(self):
+        # Build the argument spec from the common ONTAP options plus module-specific ones.
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            command=dict(required=True, type='str'),
+            privilege=dict(required=False, type='str'),
+            accept_unknown_host_keys=dict(required=False, type='bool', default=False),
+            include_lines=dict(required=False, type='str', default=''),
+            exclude_lines=dict(required=False, type='str', default=''),
+            service_processor=dict(required=False, type='bool', default=False, aliases=['sp']),
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        parameters = self.module.params
+        # set up state variables
+        self.command = parameters['command']
+        self.privilege = parameters['privilege']
+        self.include_lines = parameters['include_lines']
+        self.exclude_lines = parameters['exclude_lines']
+        self.accept_unknown_host_keys = parameters['accept_unknown_host_keys']
+        self.service_processor = parameters['service_processor']
+        self.warnings = list()      # warning strings reported back in the module result
+        self.failed = False         # set to True in apply() when stderr is not empty
+
+        if not HAS_PARAMIKO:
+            self.module.fail_json(msg="the python paramiko module is required")
+
+        client = paramiko.SSHClient()
+        client.load_system_host_keys()    # load ~/.ssh/known_hosts if it exists
+        if self.accept_unknown_host_keys:
+            # accept unknown key, but raise a python warning
+            client.set_missing_host_key_policy(paramiko.WarningPolicy())
+
+        # Capture paramiko warnings (e.g. unknown host key accepted) so they can be
+        # surfaced in the module result rather than lost on stderr.
+        with warnings.catch_warnings(record=True) as wngs:
+            try:
+                client.connect(hostname=parameters['hostname'], username=parameters['username'], password=parameters['password'])
+                if len(wngs) > 0:
+                    self.warnings.extend([str(warning.message) for warning in wngs])
+            except paramiko.SSHException as exc:
+                self.module.fail_json(msg="SSH connection failed: %s" % repr(exc))
+
+        self.client = client
+
+    def parse_output(self, out):
+        ''' Read a paramiko channel file and normalize ONTAP line endings.
+        Returns the raw output as bytes with CR sequences collapsed to LF. '''
+        out_string = out.read()
+        # ONTAP makes copious use of \r
+        out_string = out_string.replace(b'\r\r\n', b'\n')
+        out_string = out_string.replace(b'\r\n', b'\n')
+        return out_string
+
+    def run_ssh_command(self, command):
+        ''' Execute command over the established SSH connection.
+        Returns (stdout, stderr) channel files; fails the module on SSH errors. '''
+        try:
+            stdin, stdout, stderr = self.client.exec_command(command)
+        except paramiko.SSHException as exc:
+            self.module.fail_json(msg='Error running command %s: %s' %
+                                  (command, to_native(exc)),
+                                  exception=traceback.format_exc())
+        stdin.close()    # if we don't close, we may see a TypeError
+        return stdout, stderr
+
+    def filter_output(self, output):
+        ''' Generate stdout_lines_filtered list
+        Remove login information if found in the first non white lines
+        '''
+        result = list()
+        find_banner = True
+        for line in output.splitlines():
+            try:
+                stripped_line = line.strip().decode()
+            except Exception as exc:
+                # Undecodable output: return what was collected so far plus an ERROR marker.
+                # NOTE(review): failed is left False here, so the module still reports
+                # success on truncated output -- confirm this is intended.
+                self.warnings.append("Unable to decode ONTAP output.  Skipping filtering.  Error: %s" % repr(exc))
+                result.append('ERROR: truncated, cannot decode: %s' % line)
+                self.failed = False
+                return result
+
+            if not stripped_line:
+                continue
+            # Skip the login banner lines ONTAP prints before the command output.
+            if find_banner and stripped_line.startswith(('Last login time:', 'Unsuccessful login attempts since last login:')):
+                continue
+            find_banner = False
+            # Apply include/exclude substring filters.  An empty include_lines matches
+            # every line ('' is a substring of any string).
+            if self.exclude_lines:
+                if self.include_lines in stripped_line and self.exclude_lines not in stripped_line:
+                    result.append(stripped_line)
+            elif self.include_lines:
+                if self.include_lines in stripped_line:
+                    result.append(stripped_line)
+            else:
+                result.append(stripped_line)
+
+        return result
+
+    def run_command(self):
+        ''' Build the final CLI command (with optional privilege prefix) and run it.
+        Returns (stdout_bytes, stdout_lines_filtered, stderr_bytes). '''
+        command = self.command
+        if self.privilege is not None:
+            # SP and ONTAP use different syntaxes to change the privilege level.
+            if self.service_processor:
+                command = "priv set %s;%s" % (self.privilege, command)
+            else:
+                command = "set -privilege %s;%s" % (self.privilege, command)
+        stdout, stderr = self.run_ssh_command(command)
+        stdout_string = self.parse_output(stdout)
+        stdout_filtered = self.filter_output(stdout_string)
+        return stdout_string, stdout_filtered, self.parse_output(stderr)
+
+    def apply(self):
+        ''' calls the command and returns raw output '''
+        # changed is always reported as True: we cannot tell whether an arbitrary
+        # CLI command modified the system.  In check mode the command is not run.
+        changed = True
+        stdout, filtered, stderr = '', '', ''
+        if not self.module.check_mode:
+            stdout, filtered, stderr = self.run_command()
+            if stderr:
+                self.failed = True
+        self.module.exit_json(changed=changed, failed=self.failed, stdout=stdout, stdout_lines_filtered=filtered, stderr=stderr, warnings=self.warnings)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppONTAPSSHCommand()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_auto_giveback.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_auto_giveback.py
new file mode 100644
index 000000000..4446371d1
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_auto_giveback.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = """
+module: na_ontap_storage_auto_giveback
+short_description: Enables or disables NetApp ONTAP storage auto giveback for a specified node
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Enable or disable storage auto giveback
+options:
+ name:
+ description:
+ - Specifies the node name to enable or disable storage auto giveback on.
+ required: true
+ type: str
+
+ auto_giveback_enabled:
+ description:
+ - specifies whether auto give back should be enabled or disabled
+ required: true
+ type: bool
+
+ auto_giveback_after_panic_enabled:
+ description:
+ - specifies whether auto give back on panic should be enabled or disabled
+ type: bool
+
+"""
+
+EXAMPLES = """
+ - name: Enable storage auto giveback
+ na_ontap_storage_auto_giveback:
+ name: node1
+ auto_giveback_enabled: true
+ auto_giveback_after_panic_enabled: true
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Disable storage auto giveback
+ na_ontap_storage_auto_giveback:
+ name: node1
+ auto_giveback_enabled: false
+ auto_giveback_after_panic_enabled: false
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
+class NetAppOntapStorageAutoGiveback(object):
+    """
+    Enable or disable storage auto giveback options for a specified node.
+
+    Uses REST (private CLI passthrough) when available, and falls back to
+    ZAPI (cf-get-iter / cf-modify-iter) otherwise.
+    """
+    def __init__(self):
+        """
+        Initialize the ONTAP Storage auto giveback class
+        """
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            name=dict(required=True, type='str'),
+            auto_giveback_enabled=dict(required=True, type='bool'),
+            auto_giveback_after_panic_enabled=dict(required=False, type='bool')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        # Prefer REST when the cluster supports it; otherwise require netapp-lib for ZAPI.
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg='The python NetApp-Lib module is required')
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def get_storage_auto_giveback(self):
+        """
+        get the storage failover giveback options for a given node
+        :return: dict with name, auto_giveback_enabled and
+                 auto_giveback_after_panic_enabled; fails the module if the
+                 node cannot be found
+        """
+        return_value = None
+
+        if self.use_rest:
+
+            # private CLI passthrough: the per-node giveback options are not
+            # exposed through a public REST endpoint.
+            api = "private/cli/storage/failover"
+            query = {
+                'fields': 'node,auto_giveback,auto_giveback_after_panic',
+                'node': self.parameters['name'],
+            }
+            message, error = self.rest_api.get(api, query)
+            records, error = rrh.check_for_0_or_1_records(api, message, error)
+
+            if error is None and records is not None:
+                return_value = {
+                    'name': message['records'][0]['node'],
+                    'auto_giveback_enabled': message['records'][0]['auto_giveback'],
+                    'auto_giveback_after_panic_enabled': message['records'][0]['auto_giveback_after_panic']
+                }
+
+            if error:
+                self.module.fail_json(msg=error)
+
+            if not records:
+                # the node is expected to exist: not finding it is an error
+                error = "REST API did not return failover options for node %s" % (self.parameters['name'])
+                self.module.fail_json(msg=error)
+
+        else:
+
+            storage_auto_giveback_get_iter = netapp_utils.zapi.NaElement('cf-get-iter')
+
+            try:
+                result = self.server.invoke_successfully(storage_auto_giveback_get_iter, True)
+
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error getting auto giveback info for node %s: %s' % (self.parameters['name'], to_native(error)),
+                                      exception=traceback.format_exc())
+
+            if result.get_child_by_name('attributes-list'):
+                attributes_list = result.get_child_by_name('attributes-list')
+                # cf-get-iter returns one entry per node: scan for the requested node
+                for storage_failover_info_attributes in attributes_list.get_children():
+
+                    sfo_node_info = storage_failover_info_attributes.get_child_by_name('sfo-node-info')
+                    node_related_info = sfo_node_info.get_child_by_name('node-related-info')
+
+                    if node_related_info.get_child_content('node') == self.parameters['name']:
+
+                        sfo_options_info = storage_failover_info_attributes.get_child_by_name('sfo-options-info')
+                        options_related_info = sfo_options_info.get_child_by_name('options-related-info')
+                        sfo_giveback_options_info = options_related_info.get_child_by_name('sfo-giveback-options-info')
+                        giveback_options = sfo_giveback_options_info.get_child_by_name('giveback-options')
+
+                        # ZAPI returns booleans as strings: convert to python bool
+                        return_value = {
+                            'name': node_related_info.get_child_content('node'),
+                            'auto_giveback_enabled': self.na_helper.get_value_for_bool(
+                                True, options_related_info.get_child_content('auto-giveback-enabled')),
+                            'auto_giveback_after_panic_enabled': self.na_helper.get_value_for_bool(
+                                True, giveback_options.get_child_content('auto-giveback-after-panic-enabled')),
+                        }
+                        break
+
+        return return_value
+
+    def modify_storage_auto_giveback(self):
+        """
+        Modifies storage failover giveback options for a specified node
+        """
+        if self.use_rest:
+            api = "private/cli/storage/failover"
+            body = dict()
+            query = {
+                'node': self.parameters['name']
+            }
+
+            body['auto_giveback'] = self.parameters['auto_giveback_enabled']
+            # auto_giveback_after_panic is optional: only send it when the user set it
+            if 'auto_giveback_after_panic_enabled' in self.parameters:
+                body['auto_giveback_after_panic'] = self.parameters['auto_giveback_after_panic_enabled']
+
+            dummy, error = self.rest_api.patch(api, body, query)
+            if error:
+                self.module.fail_json(msg=error)
+
+        else:
+
+            storage_auto_giveback_enable = netapp_utils.zapi.NaElement('cf-modify-iter')
+            attributes_info = netapp_utils.zapi.NaElement('options-related-info-modify')
+            query_info = netapp_utils.zapi.NaElement('options-related-info-modify')
+
+            attributes_info.add_new_child('node', self.parameters['name'])
+            # ZAPI expects booleans as strings: convert from python bool
+            attributes_info.add_new_child('auto-giveback-enabled', self.na_helper.get_value_for_bool(
+                from_zapi=False, value=self.parameters['auto_giveback_enabled']))
+
+            if 'auto_giveback_after_panic_enabled' in self.parameters:
+                sfo_give_back_options_info_modify = netapp_utils.zapi.NaElement('sfo-giveback-options-info-modify')
+                give_back_options_modify = netapp_utils.zapi.NaElement('giveback-options-modify')
+                give_back_options_modify.add_new_child('auto-giveback-after-panic-enabled', self.na_helper.get_value_for_bool(
+                    from_zapi=False, value=self.parameters['auto_giveback_after_panic_enabled']))
+                sfo_give_back_options_info_modify.add_child_elem(give_back_options_modify)
+                attributes_info.add_child_elem(sfo_give_back_options_info_modify)
+
+            # NOTE(review): query_info is an empty options-related-info-modify element,
+            # so the query matches broadly; the target node is carried in attributes_info.
+            # Confirm this matches the cf-modify-iter contract.
+            query = netapp_utils.zapi.NaElement('query')
+            attributes = netapp_utils.zapi.NaElement("attributes")
+            query.add_child_elem(query_info)
+            attributes.add_child_elem(attributes_info)
+
+            storage_auto_giveback_enable.add_child_elem(query)
+            storage_auto_giveback_enable.add_child_elem(attributes)
+
+            try:
+                self.server.invoke_successfully(storage_auto_giveback_enable, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error modifying auto giveback for node %s: %s' % (self.parameters['name'], to_native(error)),
+                                      exception=traceback.format_exc())
+
+    def apply(self):
+        """Idempotently apply the requested giveback options and exit the module."""
+        current = self.get_storage_auto_giveback()
+        # compare desired parameters against the current state; sets na_helper.changed
+        self.na_helper.get_modified_attributes(current, self.parameters)
+
+        if self.na_helper.changed:
+            if not self.module.check_mode:
+                self.modify_storage_auto_giveback()
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Enables or disables NetApp ONTAP storage auto giveback for a specified node
+ """
+ obj = NetAppOntapStorageAutoGiveback()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_failover.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_failover.py
new file mode 100644
index 000000000..ff9306ac6
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_failover.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_storage_failover
+short_description: Enables or disables NetApp Ontap storage failover for a specified node
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Enable or disable storage failover
+
+options:
+
+ state:
+ description:
+ - Whether storage failover should be enabled (present) or disabled (absent).
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ node_name:
+ description:
+ - Specifies the node name to enable or disable storage failover.
+ required: true
+ type: str
+
+"""
+
+EXAMPLES = """
+- name: Enable storage failover
+ netapp.ontap.na_ontap_storage_failover:
+ state: present
+ node_name: node1
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+- name: Disable storage failover
+ netapp.ontap.na_ontap_storage_failover:
+ state: absent
+ node_name: node1
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapStorageFailover:
+    """
+    Enable or disable storage failover for a specified node.
+
+    Uses REST (cluster/nodes ha.enabled) when available, and falls back to
+    ZAPI (cf-status / cf-service-enable / cf-service-disable) otherwise.
+    """
+    def __init__(self):
+        """
+        Initialize the Ontap Storage failover class
+        """
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            node_name=dict(required=True, type='str'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        # map state onto the boolean used by the ONTAP APIs:
+        # present -> failover enabled, absent -> failover disabled
+        self.parameters['is_enabled'] = self.parameters['state'] == 'present'
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def get_node_names(self):
+        """Return ([node names], error) for all cluster nodes (REST only)."""
+        api = "cluster/nodes"
+        records, error = rest_generic.get_0_or_more_records(self.rest_api, api, fields='name')
+        if records and not error:
+            records = [record['name'] for record in records]
+        return records, error
+
+    def get_node_names_as_str(self):
+        """Return a human-readable list of node names, used in error messages."""
+        names, error = self.get_node_names()
+        if error:
+            return 'failed to get list of nodes: %s' % error
+        if names:
+            return 'current nodes: %s' % ', '.join(names)
+        return 'could not get node names'
+
+    def get_storage_failover(self):
+        """
+        get the storage failover for a given node
+        :return: dict of is-enabled: true if enabled is true None if not
+        """
+
+        if self.use_rest:
+            return_value = None
+            api = "cluster/nodes"
+            query = {
+                'fields': 'uuid,ha.enabled',
+                'name': self.parameters['node_name']
+            }
+            record, error = rest_generic.get_one_record(self.rest_api, api, query)
+
+            if error:
+                self.module.fail_json(msg=error)
+
+            if not record:
+                # include the list of known nodes to help diagnose a typo in node_name
+                msg = self.get_node_names_as_str()
+                error = "REST API did not return failover details for node %s, %s" % (self.parameters['node_name'], msg)
+                self.module.fail_json(msg=error)
+
+            # uuid is needed for the PATCH in modify_storage_failover.
+            # 'ha' is absent from the record when HA is not available on the node.
+            return_value = {'uuid': record['uuid']}
+            if 'ha' in record:
+                return_value['is_enabled'] = record['ha']['enabled']
+
+        else:
+            storage_failover_get_iter = netapp_utils.zapi.NaElement('cf-status')
+            storage_failover_get_iter.add_new_child('node', self.parameters['node_name'])
+
+            try:
+                result = self.server.invoke_successfully(storage_failover_get_iter, True)
+                # ZAPI returns booleans as strings: convert to python bool
+                return_value = {'is_enabled': self.na_helper.get_value_for_bool(True, result.get_child_content('is-enabled'))}
+
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error getting storage failover info for node %s: %s' % (
+                    self.parameters['node_name'], to_native(error)), exception=traceback.format_exc())
+
+        return return_value
+
+    def modify_storage_failover(self, current):
+        """
+        Modifies storage failover for a specified node
+        """
+
+        if self.use_rest:
+            api = "cluster/nodes"
+            body = {'ha': {'enabled': self.parameters['is_enabled']}}
+            dummy, error = rest_generic.patch_async(self.rest_api, api, current['uuid'], body)
+            if error:
+                self.module.fail_json(msg=error)
+
+        else:
+
+            # ZAPI uses two distinct calls rather than a boolean attribute
+            if self.parameters['state'] == 'present':
+                cf_service = 'cf-service-enable'
+            else:
+                cf_service = 'cf-service-disable'
+
+            storage_failover_modify = netapp_utils.zapi.NaElement(cf_service)
+            storage_failover_modify.add_new_child('node', self.parameters['node_name'])
+
+            try:
+                self.server.invoke_successfully(storage_failover_modify, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error modifying storage failover for node %s: %s' % (
+                    self.parameters['node_name'], to_native(error)), exception=traceback.format_exc())
+
+    def apply(self):
+        """Idempotently apply the desired failover state and exit the module."""
+        current = self.get_storage_failover()
+        # compare desired parameters against the current state; sets na_helper.changed
+        self.na_helper.get_modified_attributes(current, self.parameters)
+        # a missing 'is_enabled' key means HA is not reported for this node (REST path),
+        # so enabling failover is impossible
+        if self.parameters['is_enabled'] and 'is_enabled' not in current:
+            self.module.fail_json(msg='HA is not available on node: %s.' % self.parameters['node_name'])
+
+        if self.na_helper.changed and not self.module.check_mode:
+            self.modify_storage_failover(current)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """
+ Enables or disables NetApp Ontap storage failover for a specified node
+ """
+
+ obj = NetAppOntapStorageFailover()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py
new file mode 100644
index 000000000..9d5fc6c66
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py
@@ -0,0 +1,939 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_svm
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_svm
+
+short_description: NetApp ONTAP SVM
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, modify or delete SVM on NetApp ONTAP
+
+options:
+
+ state:
+ description:
+ - Whether the specified SVM should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ description:
+ - The name of the SVM to manage.
+ - vserver is a convenient alias when using module_defaults.
+ type: str
+ required: true
+ aliases:
+ - vserver
+
+ from_name:
+ description:
+ - Name of the SVM to be renamed
+ type: str
+ version_added: 2.7.0
+
+ admin_state:
+ description:
+ - when the SVM is created, it will be in the running state, unless specified otherwise.
+ - This is ignored with ZAPI.
+ choices: ['running', 'stopped']
+ type: str
+ version_added: 21.15.0
+
+ root_volume:
+ description:
+ - Root volume of the SVM.
+ - Cannot be modified after creation.
+ type: str
+
+ root_volume_aggregate:
+ description:
+ - The aggregate on which the root volume will be created.
+ - Cannot be modified after creation.
+ type: str
+
+ root_volume_security_style:
+ description:
+ - Security Style of the root volume.
+ - When specified as part of the vserver-create,
+ this field represents the security style for the Vserver root volume.
+ - When specified as part of vserver-get-iter call,
+ this will return the list of matching Vservers.
+ - The 'unified' security style, which applies only to Infinite Volumes,
+ cannot be applied to a Vserver's root volume.
+ - Cannot be modified after creation.
+ choices: ['unix', 'ntfs', 'mixed', 'unified']
+ type: str
+
+ allowed_protocols:
+ description:
+ - Allowed Protocols.
+ - This field represent the list of protocols allowed on the Vserver.
+ - When part of modify,
+ this field should include the existing list
+ along with new protocol list to be added to prevent data disruptions.
+ - Possible values
+ - nfs NFS protocol,
+ - cifs CIFS protocol,
+ - fcp FCP protocol,
+ - iscsi iSCSI protocol,
+ - ndmp NDMP protocol,
+ - http HTTP protocol - ZAPI only,
+ - nvme NVMe protocol
+ type: list
+ elements: str
+
+ services:
+ description:
+ - Enabled Protocols, only available with REST.
+ - The service will be started if needed. A valid license may be required.
+ - C(enabled) is not supported for CIFS, to enable it use na_ontap_cifs_server.
+ - If a service is not present, it is left unchanged.
+ type: dict
+ version_added: 21.10.0
+ suboptions:
+ cifs:
+ description:
+ - CIFS protocol service
+ type: dict
+ suboptions:
+ allowed:
+ description: If true, an SVM administrator can manage the CIFS service. If false, only the cluster administrator can manage the service.
+ type: bool
+ iscsi:
+ description:
+ - iSCSI protocol service
+ type: dict
+ suboptions:
+ allowed:
+ description: If true, an SVM administrator can manage the iSCSI service. If false, only the cluster administrator can manage the service.
+ type: bool
+ enabled:
+ description: If allowed, setting to true enables the iSCSI service.
+ type: bool
+ fcp:
+ description:
+ - FCP protocol service
+ type: dict
+ suboptions:
+ allowed:
+ description: If true, an SVM administrator can manage the FCP service. If false, only the cluster administrator can manage the service.
+ type: bool
+ enabled:
+ description: If allowed, setting to true enables the FCP service.
+ type: bool
+ nfs:
+ description:
+ - NFS protocol service
+ type: dict
+ suboptions:
+ allowed:
+ description: If true, an SVM administrator can manage the NFS service. If false, only the cluster administrator can manage the service.
+ type: bool
+ enabled:
+ description: If allowed, setting to true enables the NFS service.
+ type: bool
+ nvme:
+ description:
+ - nvme protocol service
+ type: dict
+ suboptions:
+ allowed:
+ description: If true, an SVM administrator can manage the NVMe service. If false, only the cluster administrator can manage the service.
+ type: bool
+ enabled:
+ description: If allowed, setting to true enables the NVMe service.
+ type: bool
+ ndmp:
+ description:
+ - Network Data Management Protocol service
+ type: dict
+ suboptions:
+ allowed:
+ description:
+ - If this is set to true, an SVM administrator can manage the NDMP service
+ - If it is false, only the cluster administrator can manage the service.
+ - Requires ONTAP 9.7 or later.
+ type: bool
+ version_added: 21.24.0
+ aggr_list:
+ description:
+ - List of aggregates assigned for volume operations.
+ - These aggregates could be shared for use with other Vservers.
+ - When specified as part of a vserver-create,
+ this field represents the list of aggregates
+ that are assigned to the Vserver for volume operations.
+ - When part of vserver-get-iter call,
+ this will return the list of Vservers
+ which have any of the aggregates specified as part of the aggr list.
+ type: list
+ elements: str
+
+ ipspace:
+ description:
+ - IPSpace name
+ - Cannot be modified after creation.
+ type: str
+ version_added: 2.7.0
+
+ snapshot_policy:
+ description:
+ - Default snapshot policy setting for all volumes of the Vserver.
+ This policy will be assigned to all volumes created in this
+ Vserver unless the volume create request explicitly provides a
+ snapshot policy or volume is modified later with a specific
+ snapshot policy. A volume-level snapshot policy always overrides
+ the default Vserver-wide snapshot policy.
+ version_added: 2.7.0
+ type: str
+
+ language:
+ description:
+ - Language to use for the SVM
+ - Default to C.UTF-8
+ - Possible values Language
+ - c POSIX
+ - ar Arabic
+ - cs Czech
+ - da Danish
+ - de German
+ - en English
+ - en_us English (US)
+ - es Spanish
+ - fi Finnish
+ - fr French
+ - he Hebrew
+ - hr Croatian
+ - hu Hungarian
+ - it Italian
+ - ja Japanese euc-j
+ - ja_v1 Japanese euc-j
+ - ja_jp.pck Japanese PCK (sjis)
+ - ja_jp.932 Japanese cp932
+ - ja_jp.pck_v2 Japanese PCK (sjis)
+ - ko Korean
+ - no Norwegian
+ - nl Dutch
+ - pl Polish
+ - pt Portuguese
+ - ro Romanian
+ - ru Russian
+ - sk Slovak
+ - sl Slovenian
+ - sv Swedish
+ - tr Turkish
+ - zh Simplified Chinese
+ - zh.gbk Simplified Chinese (GBK)
+ - zh_tw Traditional Chinese euc-tw
+ - zh_tw.big5 Traditional Chinese Big 5
+ - utf8mb4
+ - Most of the values accept a .utf_8 suffix, e.g. fr.utf_8
+ type: str
+ version_added: 2.7.0
+
+ subtype:
+ description:
+ - The subtype for vserver to be created.
+ - Cannot be modified after creation.
+ choices: ['default', 'dp_destination', 'sync_source', 'sync_destination']
+ type: str
+ version_added: 2.7.0
+
+ comment:
+ description:
+ - When specified as part of a vserver-create, this field represents the comment associated with the Vserver.
+ - When part of vserver-get-iter call, this will return the list of matching Vservers.
+ type: str
+ version_added: 2.8.0
+
+ ignore_rest_unsupported_options:
+ description:
+ - When true, ignore C(root_volume), C(root_volume_aggregate), C(root_volume_security_style) options if target supports REST.
+ - Ignored when C(use_rest) is set to never.
+ type: bool
+ default: false
+ version_added: 21.10.0
+
+ max_volumes:
+ description:
+ - Maximum number of volumes that can be created on the vserver.
+ - Expects an integer or C(unlimited).
+ type: str
+ version_added: 21.12.0
+
+ web:
+ description:
+ - web services security configuration.
+ - requires ONTAP 9.8 or later for certificate name.
+ - requires ONTAP 9.10.1 or later for the other options.
+ type: dict
+ suboptions:
+ certificate:
+ description:
+ - name of certificate used by cluster and node management interfaces for TLS connection requests.
+ - The certificate must be of type "server".
+ type: str
+ client_enabled:
+ description: whether client authentication is enabled.
+ type: bool
+ ocsp_enabled:
+ description: whether online certificate status protocol verification is enabled.
+ type: bool
+'''
+
+EXAMPLES = """
+
+ - name: Create SVM
+ netapp.ontap.na_ontap_svm:
+ state: present
+ name: ansibleVServer
+ root_volume: vol1
+ root_volume_aggregate: aggr1
+ root_volume_security_style: mixed
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+  - name: Create SVM
+    netapp.ontap.na_ontap_svm:
+      state: present
+      name: ansibleVServer
+      services:
+ cifs:
+ allowed: true
+ fcp:
+ allowed: true
+ nfs:
+ allowed: true
+ enabled: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+
+ - name: Stop SVM REST
+ netapp.ontap.na_ontap_svm:
+ state: present
+ name: ansibleVServer
+ admin_state: stopped
+ use_rest: always
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+import copy
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver, zapis_svm
+
+
+class NetAppOntapSVM():
+ ''' create, delete, modify, rename SVM (aka vserver) '''
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str', aliases=['vserver']),
+ from_name=dict(required=False, type='str'),
+ admin_state=dict(required=False, type='str', choices=['running', 'stopped']),
+ root_volume=dict(type='str'),
+ root_volume_aggregate=dict(type='str'),
+ root_volume_security_style=dict(type='str', choices=['unix',
+ 'ntfs',
+ 'mixed',
+ 'unified'
+ ]),
+ allowed_protocols=dict(type='list', elements='str'),
+ aggr_list=dict(type='list', elements='str'),
+ ipspace=dict(type='str', required=False),
+ snapshot_policy=dict(type='str', required=False),
+ language=dict(type='str', required=False),
+ subtype=dict(type='str', choices=['default', 'dp_destination', 'sync_source', 'sync_destination']),
+ comment=dict(type='str', required=False),
+ ignore_rest_unsupported_options=dict(type='bool', default=False),
+ max_volumes=dict(type='str'),
+ # TODO: add CIFS options, and S3
+ services=dict(type='dict', options=dict(
+ cifs=dict(type='dict', options=dict(allowed=dict(type='bool'))),
+ iscsi=dict(type='dict', options=dict(allowed=dict(type='bool'), enabled=dict(type='bool'))),
+ fcp=dict(type='dict', options=dict(allowed=dict(type='bool'), enabled=dict(type='bool'))),
+ nfs=dict(type='dict', options=dict(allowed=dict(type='bool'), enabled=dict(type='bool'))),
+ nvme=dict(type='dict', options=dict(allowed=dict(type='bool'), enabled=dict(type='bool'))),
+ ndmp=dict(type='dict', options=dict(allowed=dict(type='bool'))),
+ )),
+ web=dict(type='dict', options=dict(
+ certificate=dict(type='str'),
+ client_enabled=dict(type='bool'),
+ ocsp_enabled=dict(type='bool'),
+ ))
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[('allowed_protocols', 'services'),
+ ('services', 'root_volume'),
+ ('services', 'root_volume_aggregate'),
+ ('services', 'root_volume_security_style')]
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # Ontap documentation uses C.UTF-8, but actually stores as c.utf_8.
+ if 'language' in self.parameters and self.parameters['language'].lower() == 'c.utf-8':
+ self.parameters['language'] = 'c.utf_8'
+
+ self.rest_api = OntapRestAPI(self.module)
+ # with REST, to force synchronous operations
+ self.timeout = self.rest_api.timeout
+ # with REST, to know which protocols to look for
+ self.allowable_protocols_rest = netapp_utils.get_feature(self.module, 'svm_allowable_protocols_rest')
+ self.allowable_protocols_zapi = netapp_utils.get_feature(self.module, 'svm_allowable_protocols_zapi')
+ self.use_rest = self.validate_options()
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+ if self.parameters.get('admin_state') is not None:
+ self.parameters.pop('admin_state')
+ self.module.warn('admin_state is ignored when ZAPI is used.')
+
+ def validate_int_or_string(self, value, astring):
+ if value is None or value == astring:
+ return
+ try:
+ int_value = int(value)
+ except ValueError:
+ int_value = None
+ if int_value is None or str(int_value) != value:
+ self.module.fail_json(msg="Error: expecting int value or '%s', got: %s - %s" % (astring, value, int_value))
+
+ def validate_options(self):
+
+ # root volume not supported with rest api
+ unsupported_rest_properties = ['root_volume', 'root_volume_aggregate', 'root_volume_security_style']
+ required_unsupported_rest_properties = [] if self.parameters['ignore_rest_unsupported_options'] else unsupported_rest_properties
+ ignored_unsupported_rest_properties = unsupported_rest_properties if self.parameters['ignore_rest_unsupported_options'] else []
+ used_required_unsupported_rest_properties = [x for x in required_unsupported_rest_properties if x in self.parameters]
+ used_ignored_unsupported_rest_properties = [x for x in ignored_unsupported_rest_properties if x in self.parameters]
+ use_rest, error = self.rest_api.is_rest(used_required_unsupported_rest_properties)
+ if error is not None:
+ self.module.fail_json(msg=error)
+ if use_rest and used_ignored_unsupported_rest_properties:
+ self.module.warn('Using REST and ignoring: %s' % used_ignored_unsupported_rest_properties)
+ for attr in used_ignored_unsupported_rest_properties:
+ del self.parameters[attr]
+ if use_rest and 'aggr_list' in self.parameters and self.parameters['aggr_list'] == ['*']:
+ self.module.warn("Using REST and ignoring aggr_list: '*'")
+ del self.parameters['aggr_list']
+ if use_rest and self.parameters.get('allowed_protocols') is not None:
+ # python 2.6 does not support dict comprehension with k: v
+ self.parameters['services'] = dict(
+ # using old semantics, anything not present is disallowed
+ (protocol, {'allowed': protocol in self.parameters['allowed_protocols']})
+ for protocol in self.allowable_protocols_rest
+ )
+
+ if self.parameters.get('allowed_protocols'):
+ allowable = self.allowable_protocols_rest if use_rest else self.allowable_protocols_zapi
+ errors = [
+ 'Unexpected value %s in allowed_protocols.' % protocol
+ for protocol in self.parameters['allowed_protocols']
+ if protocol not in allowable
+ ]
+ if errors:
+ self.module.fail_json(msg='Error - %s' % ' '.join(errors))
+ if use_rest and self.parameters.get('services') and not self.parameters.get('allowed_protocols') and self.parameters['services'].get('ndmp')\
+ and not self.rest_api.meets_rest_minimum_version(use_rest, 9, 7):
+ self.module.fail_json(msg=self.rest_api.options_require_ontap_version('ndmp', '9.7', use_rest=use_rest))
+ if self.parameters.get('services') and not use_rest:
+ self.module.fail_json(msg=self.rest_api.options_require_ontap_version('services', use_rest=use_rest))
+ if self.parameters.get('web'):
+ if not use_rest or not self.rest_api.meets_rest_minimum_version(use_rest, 9, 8, 0):
+ self.module.fail_json(msg=self.rest_api.options_require_ontap_version('web', '9.8', use_rest=use_rest))
+ if not self.rest_api.meets_rest_minimum_version(use_rest, 9, 10, 1):
+ suboptions = ('client_enabled', 'ocsp_enabled')
+ for suboption in suboptions:
+ if self.parameters['web'].get(suboption) is not None:
+ self.module.fail_json(msg=self.rest_api.options_require_ontap_version(suboptions, '9.10.1', use_rest=use_rest))
+ if self.parameters['web'].get('certificate'):
+ # so that we can compare UUIDs while using a more friendly name in the user interface
+ self.parameters['web']['certificate'] = {'name': self.parameters['web']['certificate']}
+ self.set_certificate_uuid()
+
+ self.validate_int_or_string(self.parameters.get('max_volumes'), 'unlimited')
+ return use_rest
+
+ def clean_up_output(self, vserver_details):
+ vserver_details['root_volume'] = None
+ vserver_details['root_volume_aggregate'] = None
+ vserver_details['root_volume_security_style'] = None
+ vserver_details['aggr_list'] = [aggr['name'] for aggr in vserver_details['aggregates']]
+ vserver_details.pop('aggregates')
+ vserver_details['ipspace'] = vserver_details['ipspace']['name']
+ vserver_details['snapshot_policy'] = vserver_details['snapshot_policy']['name']
+ vserver_details['admin_state'] = vserver_details.pop('state')
+ if 'max_volumes' in vserver_details:
+ vserver_details['max_volumes'] = str(vserver_details['max_volumes'])
+ if vserver_details.get('web') is None and self.parameters.get('web'):
+ # force an entry to enable modify
+ vserver_details['web'] = {
+ 'certificate': {
+ # ignore name, as only certificate UUID is supported in svm/svms/uuid/web
+ 'uuid': vserver_details['certificate']['uuid'] if 'certificate' in vserver_details else None,
+ },
+ 'client_enabled': None,
+ 'ocsp_enabled': None
+ }
+
+ services = {}
+ # REST returns allowed: True/False with recent versions, and a list of protocols in allowed_protocols for older versions
+ allowed_protocols = (None if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1)
+ else vserver_details.get('allowed_protocols'))
+
+ for protocol in self.allowable_protocols_rest:
+ # protocols are not present when the vserver is stopped
+ allowed = self.na_helper.safe_get(vserver_details, [protocol, 'allowed'])
+ if allowed is None and allowed_protocols is not None:
+ # earlier ONTAP versions
+ allowed = protocol in allowed_protocols
+ enabled = self.na_helper.safe_get(vserver_details, [protocol, 'enabled'])
+ if allowed is not None or enabled is not None:
+ services[protocol] = {}
+ if allowed is not None:
+ services[protocol]['allowed'] = allowed
+ if enabled is not None:
+ services[protocol]['enabled'] = enabled
+
+ if services:
+ vserver_details['services'] = services
+
+ return vserver_details
+
+ def get_certificates(self, cert_type):
+ """Retrieve list of certificates"""
+ api = 'security/certificates'
+ query = {
+ 'svm.name': self.parameters['name'],
+ 'type': cert_type
+ }
+ records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg='Error retrieving certificates: %s' % error)
+ return [record['name'] for record in records] if records else []
+
+ def set_certificate_uuid(self):
+ """Retrieve certicate uuid for 9.8 or later"""
+ api = 'security/certificates'
+ query = {
+ 'name': self.parameters['web']['certificate']['name'],
+ 'svm.name': self.parameters['name'],
+ 'type': 'server'
+ }
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg='Error retrieving certificate %s: %s' % (self.parameters['web']['certificate'], error))
+ if not record:
+ self.module.fail_json(msg='Error certificate not found: %s. Current certificates with type=server: %s'
+ % (self.parameters['web']['certificate'], self.get_certificates('server')))
+ self.parameters['web']['certificate']['uuid'] = record['uuid']
+
+ def get_web_service(self, uuid):
+ """Retrieve web service info for 9.10.1 or later"""
+ api = 'svm/svms/%s/web' % uuid
+ record, error = rest_generic.get_one_record(self.rest_api, api)
+ if error:
+ self.module.fail_json(msg='Error retrieving web info: %s' % error)
+ return record
+
+ def get_vserver(self, vserver_name=None):
+ """
+ Checks if vserver exists.
+
+ :return:
+ vserver object if vserver found
+ None if vserver is not found
+ :rtype: object/None
+ """
+ if vserver_name is None:
+ vserver_name = self.parameters['name']
+
+ if self.use_rest:
+ fields = 'subtype,aggregates,language,snapshot_policy,ipspace,comment,nfs,cifs,fcp,iscsi,nvme,state'
+ if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+ fields += ',max_volumes'
+ if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0) and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
+ # certificate is available starting with 9.7 and is deprecated with 9.10.1.
+ # we don't use certificate with 9.7 as name is only supported with 9.8 in /security/certificates
+ fields += ',certificate'
+ if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
+ fields += ',ndmp'
+
+ record, error = rest_vserver.get_vserver(self.rest_api, vserver_name, fields)
+ if error:
+ self.module.fail_json(msg=error)
+ if record:
+ if self.parameters.get('web') and self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+ # only collect the info if the user wants to configure the web service, and ONTAP supports it
+ record['web'] = self.get_web_service(record['uuid'])
+ if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+ # 9.6 to 9.8 do not support max_volumes for svm/svms, using private/cli
+ record['allowed_protocols'], max_volumes = self.get_allowed_protocols_and_max_volumes()
+ if self.parameters.get('max_volumes') is not None:
+ record['max_volumes'] = max_volumes
+ return self.clean_up_output(copy.deepcopy(record))
+ return None
+
+ return zapis_svm.get_vserver(self.server, vserver_name)
+
+ def create_vserver(self):
+ if self.use_rest:
+ self.create_vserver_rest()
+ else:
+ options = {'vserver-name': self.parameters['name']}
+ self.add_parameter_to_dict(options, 'root_volume', 'root-volume')
+ self.add_parameter_to_dict(options, 'root_volume_aggregate', 'root-volume-aggregate')
+ self.add_parameter_to_dict(options, 'root_volume_security_style', 'root-volume-security-style')
+ self.add_parameter_to_dict(options, 'language', 'language')
+ self.add_parameter_to_dict(options, 'ipspace', 'ipspace')
+ self.add_parameter_to_dict(options, 'snapshot_policy', 'snapshot-policy')
+ self.add_parameter_to_dict(options, 'subtype', 'vserver-subtype')
+ self.add_parameter_to_dict(options, 'comment', 'comment')
+ vserver_create = netapp_utils.zapi.NaElement.create_node_with_children('vserver-create', **options)
+ try:
+ self.server.invoke_successfully(vserver_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error provisioning SVM %s: %s'
+ % (self.parameters['name'], to_native(exc)),
+ exception=traceback.format_exc())
+ # add allowed-protocols, aggr-list, max_volume after creation
+ # since vserver-create doesn't allow these attributes during creation
+ # python 2.6 does not support dict comprehension {k: v for ...}
+ options = dict(
+ (key, self.parameters[key])
+ for key in ('allowed_protocols', 'aggr_list', 'max_volumes')
+ if self.parameters.get(key)
+ )
+ if options:
+ self.modify_vserver(options)
+
+ def create_body_contents(self, modify=None):
+ keys_to_modify = self.parameters.keys() if modify is None else modify.keys()
+ protocols_to_modify = self.parameters.get('services', {}) if modify is None else modify.get('services', {})
+ simple_keys = ['name', 'language', 'ipspace', 'snapshot_policy', 'subtype', 'comment']
+ if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+ simple_keys.append('max_volumes')
+ body = dict(
+ (key, self.parameters[key])
+ for key in simple_keys
+ if self.parameters.get(key) and key in keys_to_modify
+ )
+ # admin_state is only supported in modify
+ if modify and 'admin_state' in keys_to_modify:
+ body['state'] = self.parameters['admin_state']
+ if 'aggr_list' in keys_to_modify:
+ body['aggregates'] = [{'name': aggr} for aggr in self.parameters['aggr_list']]
+ if 'certificate' in keys_to_modify:
+ body['certificate'] = modify['certificate']
+ allowed_protocols = {}
+ for protocol, config in protocols_to_modify.items():
+ # Ansible sets unset suboptions to None
+ if not config:
+ continue
+ # Ansible sets unset suboptions to None
+ acopy = self.na_helper.filter_out_none_entries(config)
+ if modify is not None:
+ # REST does not allow to modify this directly
+ acopy.pop('enabled', None)
+ if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+ # allowed is not supported in earlier REST versions
+ allowed = acopy.pop('allowed', None)
+ # if allowed is not set, retrieve current value
+ if allowed is not None:
+ allowed_protocols[protocol] = allowed
+ if acopy:
+ body[protocol] = acopy
+ return body, allowed_protocols
+
+ def get_allowed_protocols_and_max_volumes(self):
+ # use REST CLI for older versions of ONTAP
+ query = {'vserver': self.parameters['name']}
+ fields = 'allowed_protocols'
+ if self.parameters.get('max_volumes') is not None:
+ fields += ',max_volumes'
+ response, error = rest_generic.get_one_record(self.rest_api, 'private/cli/vserver', query, fields)
+ if error:
+ self.module.fail_json(msg='Error getting vserver info: %s - %s' % (error, response))
+ if response and 'max_volumes' in response:
+ max_volumes = str(response['max_volumes'])
+ allowed_protocols, max_volumes = [], None
+ if response and 'allowed_protocols' in response:
+ allowed_protocols = response['allowed_protocols']
+ if response and 'max_volumes' in response:
+ max_volumes = str(response['max_volumes'])
+ return allowed_protocols, max_volumes
+
+ def rest_cli_set_max_volumes(self):
+ # use REST CLI for older versions of ONTAP
+ query = {'vserver': self.parameters['name']}
+ body = {'max_volumes': self.parameters['max_volumes']}
+ response, error = rest_generic.patch_async(self.rest_api, 'private/cli/vserver', None, body, query)
+ if error:
+ self.module.fail_json(msg='Error updating max_volumes: %s - %s' % (error, response))
+
+ def rest_cli_add_remove_protocols(self, protocols):
+ protocols_to_add = [protocol for protocol, value in protocols.items() if value]
+ if protocols_to_add:
+ self.rest_cli_add_protocols(protocols_to_add)
+ protocols_to_delete = [protocol for protocol, value in protocols.items() if not value]
+ if protocols_to_delete:
+ self.rest_cli_remove_protocols(protocols_to_delete)
+
+ def rest_cli_add_protocols(self, protocols):
+ # use REST CLI for older versions of ONTAP
+ query = {'vserver': self.parameters['name']}
+ body = {'protocols': protocols}
+ response, error = rest_generic.patch_async(self.rest_api, 'private/cli/vserver/add-protocols', None, body, query)
+ if error:
+ self.module.fail_json(msg='Error adding protocols: %s - %s' % (error, response))
+
+ def rest_cli_remove_protocols(self, protocols):
+ # use REST CLI for older versions of ONTAP
+ query = {'vserver': self.parameters['name']}
+ body = {'protocols': protocols}
+ response, error = rest_generic.patch_async(self.rest_api, 'private/cli/vserver/remove-protocols', None, body, query)
+ if error:
+ self.module.fail_json(msg='Error removing protocols: %s - %s' % (error, response))
+
+ def create_vserver_rest(self):
+ # python 2.6 does not support dict comprehension {k: v for ...}
+ body, allowed_protocols = self.create_body_contents()
+ dummy, error = rest_generic.post_async(self.rest_api, 'svm/svms', body, timeout=self.timeout)
+ if error:
+ self.module.fail_json(msg='Error in create: %s' % error)
+ # add max_volumes and update allowed protocols after creation for older ONTAP versions
+ if self.parameters.get('max_volumes') is not None and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+ self.rest_cli_set_max_volumes()
+ if allowed_protocols:
+ self.rest_cli_add_remove_protocols(allowed_protocols)
+
+ def delete_vserver(self, current=None):
+ if self.use_rest:
+ if current is None:
+ self.module.fail_json(msg='Internal error, expecting SVM object in delete')
+ dummy, error = rest_generic.delete_async(self.rest_api, 'svm/svms', current['uuid'], timeout=self.timeout)
+ if error:
+ self.module.fail_json(msg='Error in delete: %s' % error)
+ else:
+ vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-destroy', **{'vserver-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(vserver_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error deleting SVM %s: %s'
+ % (self.parameters['name'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def rename_vserver(self):
+ ''' ZAPI only, for REST it is handled as a modify'''
+ vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-rename', **{'vserver-name': self.parameters['from_name'],
+ 'new-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(vserver_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as exc:
+ self.module.fail_json(msg='Error renaming SVM %s: %s'
+ % (self.parameters['from_name'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def modify_vserver(self, modify, current=None):
+ '''
+ Modify vserver.
+ :param modify: list of modify attributes
+ :param current: with rest, SVM object to modify
+ '''
+ if self.use_rest:
+ if current is None:
+ self.module.fail_json(msg='Internal error, expecting SVM object in modify.')
+ if not modify:
+ self.module.fail_json(msg='Internal error, expecting something to modify in modify.')
+ # REST reports an error if we modify the name and something else at the same time
+ if 'name' in modify:
+ body = {'name': modify['name']}
+ dummy, error = rest_generic.patch_async(self.rest_api, 'svm/svms', current['uuid'], body, timeout=self.timeout)
+ if error:
+ self.module.fail_json(msg='Error in rename: %s' % error, modify=modify)
+ del modify['name']
+ if 'web' in modify and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
+ # certificate is a deprecated field for 9.10.1, only use it for 9.8 and 9.9
+ uuid = self.na_helper.safe_get(modify, ['web', 'certificate', 'uuid'])
+ if uuid:
+ modify['certificate'] = {'uuid': uuid}
+ modify.pop('web')
+ body, allowed_protocols = self.create_body_contents(modify)
+ if body:
+ dummy, error = rest_generic.patch_async(self.rest_api, 'svm/svms', current['uuid'], body, timeout=self.timeout)
+ if error:
+ self.module.fail_json(msg='Error in modify: %s' % error, modify=modify)
+ # use REST CLI for max_volumes and allowed protocols with older ONTAP versions
+ if 'max_volumes' in modify and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+ self.rest_cli_set_max_volumes()
+ if allowed_protocols:
+ self.rest_cli_add_remove_protocols(allowed_protocols)
+ if 'services' in modify:
+ self.modify_services(modify, current)
+ if 'web' in modify and self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
+ self.modify_web_services(modify['web'], current)
+ else:
+ zapis_svm.modify_vserver(self.server, self.module, self.parameters['name'], modify, self.parameters)
+
+ def modify_services(self, modify, current):
+ apis = {
+ 'fcp': 'protocols/san/fcp/services',
+ 'iscsi': 'protocols/san/iscsi/services',
+ 'nfs': 'protocols/nfs/services',
+ 'nvme': 'protocols/nvme/services',
+ 'ndmp': 'protocols/ndmp/svms'
+ }
+ for protocol, config in modify['services'].items():
+ enabled = config.get('enabled')
+ if enabled is None:
+ # nothing to do
+ continue
+ api = apis.get(protocol)
+ if not api:
+ self.module.fail_json(msg='Internal error, unexpecting service: %s.' % protocol)
+ if enabled:
+ # we don't know if the service is already started or not, link will tell us
+ link = self.na_helper.safe_get(current, [protocol, '_links', 'self', 'href'])
+ body = {'enabled': enabled}
+ if enabled and not link:
+ body['svm.name'] = self.parameters['name']
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ else:
+ dummy, error = rest_generic.patch_async(self.rest_api, api, current['uuid'], body)
+ if error:
+ self.module.fail_json(msg='Error in modify service for %s: %s' % (protocol, error))
+
+ def modify_web_services(self, record, current):
+ """Patch web service for 9.10.1 or later"""
+ api = 'svm/svms/%s/web' % current['uuid']
+ if 'certificate' in record:
+ # API only accepts a UUID
+ record['certificate'].pop('name', None)
+ body = self.na_helper.filter_out_none_entries(copy.deepcopy(record))
+ if not body:
+ self.module.warn('Nothing to change: %s' % record)
+ return
+ dummy, error = rest_generic.patch_async(self.rest_api, api, None, body)
+ if error:
+ self.module.fail_json(msg='Error in modify web service for %s: %s' % (body, error))
+
+ def add_parameter_to_dict(self, adict, name, key=None, tostr=False):
+ '''
+ add defined parameter (not None) to adict using key.
+ :param adict: a dictionary.
+ :param name: name in self.parameters.
+ :param key: key in adict.
+ :param tostr: boolean.
+ '''
+ if key is None:
+ key = name
+ if self.parameters.get(name) is not None:
+ if tostr:
+ adict[key] = str(self.parameters.get(name))
+ else:
+ adict[key] = self.parameters.get(name)
+
+ def warn_when_possible_language_match(self, desired, current):
+ transformed = desired.lower().replace('-', '_')
+ if transformed == current:
+ self.module.warn("Attempting to change language from ONTAP value %s to %s. Use %s to suppress this warning and maintain idempotency."
+ % (current, desired, current))
+
+ def apply(self):
+ '''Call create/modify/delete operations.'''
+ current = self.get_vserver()
+ cd_action, rename = None, None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action == 'create' and self.parameters.get('from_name'):
+ # create by renaming existing SVM
+ old_svm = self.get_vserver(self.parameters['from_name'])
+ rename = self.na_helper.is_rename_action(old_svm, current)
+ if rename is None:
+ self.module.fail_json(msg='Error renaming SVM %s: no SVM with from_name %s.' % (self.parameters['name'], self.parameters['from_name']))
+ if rename:
+ current = old_svm
+ cd_action = None
+ modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else {}
+ if 'language' in modify:
+ self.warn_when_possible_language_match(modify['language'], current['language'])
+ fixed_attributes = ['root_volume', 'root_volume_aggregate', 'root_volume_security_style', 'subtype', 'ipspace']
+ msgs = ['%s - current: %s - desired: %s' % (attribute, current[attribute], self.parameters[attribute])
+ for attribute in fixed_attributes
+ if attribute in modify]
+ if msgs:
+ self.module.fail_json(msg='Error modifying SVM %s: cannot modify %s.' % (self.parameters['name'], ', '.join(msgs)))
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if rename:
+ if self.use_rest:
+ modify['name'] = self.parameters['name']
+ else:
+ self.rename_vserver()
+ modify.pop('name', None)
+ # If rename is True, cd_action is None, but modify could be true or false.
+ if cd_action == 'create':
+ self.create_vserver()
+ if self.parameters.get('admin_state') == 'stopped':
+ current = self.get_vserver()
+ modify = {'admin_state': 'stopped'}
+ elif cd_action == 'delete':
+ self.delete_vserver(current)
+ if modify:
+ self.modify_vserver(modify, current)
+ if modify and 'aggr_list' in modify and '*' in modify['aggr_list']:
+ self.module.warn("na_ontap_svm: changed always 'True' when aggr_list is '*'.")
+ results = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**results)
+
+
+def main():
+ '''Apply vserver operations from playbook'''
+ svm = NetAppOntapSVM()
+ svm.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py
new file mode 100644
index 000000000..c018a7c21
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+short_description: NetApp ONTAP Modify SVM Options
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Modify ONTAP SVM Options
+ - Only Options that appear on "vserver options show" can be set
+ - This module only supports ZAPI and is deprecated.
+ - The final version of ONTAP to support ZAPI is 9.12.1.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+module: na_ontap_svm_options
+version_added: 2.7.0
+options:
+ name:
+ description:
+ - Name of the option.
+ type: str
+ value:
+ description:
+ - Value of the option.
+      - Value must be enclosed in quotes.
+ type: str
+ vserver:
+ description:
+ - The name of the vserver to which this option belongs to.
+ required: True
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Set SVM Options
+ na_ontap_svm_options:
+ vserver: "{{ netapp_vserver_name }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ name: snmp.enable
+ value: 'on'
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPSvnOptions(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=False, type="str", default=None),
+ value=dict(required=False, type='str', default=None),
+ vserver=dict(required=True, type='str')
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.na_helper.module_deprecated(self.module)
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ return
+
+ def set_options(self):
+ """
+ Set a specific option
+ :return: None
+ """
+ option_obj = netapp_utils.zapi.NaElement("options-set")
+ option_obj.add_new_child('name', self.parameters['name'])
+ option_obj.add_new_child('value', self.parameters['value'])
+ try:
+ self.server.invoke_successfully(option_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error setting options: %s" % to_native(error), exception=traceback.format_exc())
+
+ def list_options(self):
+ """
+ List all Options on the Vserver
+ :return: None
+ """
+ option_obj = netapp_utils.zapi.NaElement("options-list-info")
+ try:
+ self.server.invoke_successfully(option_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error getting options: %s" % to_native(error), exception=traceback.format_exc())
+
+ def is_option_set(self):
+ """
+ Checks to see if an option is set or not
+ :return: If option is set return True, else return False
+ """
+ option_obj = netapp_utils.zapi.NaElement("options-get-iter")
+ options_info = netapp_utils.zapi.NaElement("option-info")
+ if self.parameters.get('name') is not None:
+ options_info.add_new_child("name", self.parameters['name'])
+ if self.parameters.get('value') is not None:
+ options_info.add_new_child("value", self.parameters['value'])
+ if "vserver" in self.parameters.keys():
+ if self.parameters['vserver'] is not None:
+ options_info.add_new_child("vserver", self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement("query")
+ query.add_child_elem(options_info)
+ option_obj.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(option_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error finding option: %s" % to_native(error), exception=traceback.format_exc())
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return True
+ return False
+
+ def apply(self):
+ changed = False
+ is_set = self.is_option_set()
+ if not is_set:
+ if self.module.check_mode:
+ pass
+ else:
+ self.set_options()
+ changed = True
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Execute action from playbook
+ :return: none
+ """
+ cg_obj = NetAppONTAPSvnOptions()
+ cg_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py
new file mode 100644
index 000000000..46344e381
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+
+module: na_ontap_ucadapter
+short_description: NetApp ONTAP UC adapter configuration
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - modify the UC adapter mode and type taking pending type and mode into account.
+
+options:
+ state:
+ description:
+ - Whether the specified adapter should exist.
+ required: false
+ choices: ['present']
+ default: 'present'
+ type: str
+
+ adapter_name:
+ description:
+ - Specifies the adapter name.
+ required: true
+ type: str
+
+ node_name:
+ description:
+ - Specifies the adapter home node.
+ required: true
+ type: str
+
+ mode:
+ description:
+ - Specifies the mode of the adapter.
+ type: str
+
+ type:
+ description:
+ - Specifies the fc4 type of the adapter.
+ type: str
+
+ pair_adapters:
+ description:
+ - Specifies the list of adapters which also need to be offline along with the current adapter during modifying.
+ - If specified adapter works in a group or pair, the other adapters might also need to offline before modify the specified adapter.
+ - The mode of pair_adapters are modified along with the adapter, the type of the pair_adapters are not modified.
+ type: list
+ elements: str
+ version_added: '20.6.0'
+
+'''
+
+EXAMPLES = '''
+ - name: Modify adapter
+    netapp.ontap.na_ontap_ucadapter:
+ state: present
+ adapter_name: 0e
+ pair_adapters: 0f
+ node_name: laurentn-vsim1
+ mode: fc
+ type: target
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapadapter:
    ''' object to describe adapter info '''

    def __init__(self):
        """Define module arguments and select the REST or ZAPI transport."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present'], default='present', type='str'),
            adapter_name=dict(required=True, type='str'),
            node_name=dict(required=True, type='str'),
            mode=dict(required=False, type='str'),
            type=dict(required=False, type='str'),
            pair_adapters=dict(required=False, type='list', elements='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        # map of adapter name -> REST uuid, populated by get_adapters_uuids()
        self.adapters_uuids = {}
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Set up Rest API
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_adapter(self):
        """
        Return details about the adapter identified by adapter_name/node_name.

        :return: dict with 'mode', 'pending-mode', 'type', 'pending-type' and
                 'status', or None if the adapter is not found.
        :rtype: dict or None
        """
        if self.use_rest:
            return self.get_adapter_rest()
        adapter_info = netapp_utils.zapi.NaElement('ucm-adapter-get')
        adapter_info.add_new_child('adapter-name', self.parameters['adapter_name'])
        adapter_info.add_new_child('node-name', self.parameters['node_name'])
        try:
            result = self.server.invoke_successfully(adapter_info, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching ucadapter details: %s: %s'
                                      % (self.parameters['node_name'], to_native(error)),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('attributes'):
            adapter_attributes = result.get_child_by_name('attributes').\
                get_child_by_name('uc-adapter-info')
            return_value = {
                'mode': adapter_attributes.get_child_content('mode'),
                'pending-mode': adapter_attributes.get_child_content('pending-mode'),
                'type': adapter_attributes.get_child_content('fc4-type'),
                'pending-type': adapter_attributes.get_child_content('pending-fc4-type'),
                'status': adapter_attributes.get_child_content('status'),
            }
            return return_value
        return None

    def modify_adapter(self):
        """
        Modify the adapter's mode and/or fc4 type.
        """
        if self.use_rest:
            return self.modify_adapter_rest()
        params = {'adapter-name': self.parameters['adapter_name'],
                  'node-name': self.parameters['node_name']}
        if self.parameters.get('type') is not None:
            params['fc4-type'] = self.parameters['type']
        if self.parameters.get('mode') is not None:
            params['mode'] = self.parameters['mode']
        adapter_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'ucm-adapter-modify', ** params)
        try:
            self.server.invoke_successfully(adapter_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error modifying adapter %s: %s' % (self.parameters['adapter_name'], to_native(e)),
                                  exception=traceback.format_exc())

    def online_or_offline_adapter(self, status, adapter_name):
        """
        Bring a Fibre Channel target adapter offline/online.

        :param status: 'down' to offline the adapter, 'up' to online it.
        :param adapter_name: name of the adapter to act on.
        """
        if self.use_rest:
            return self.online_or_offline_adapter_rest(status, adapter_name)
        if status == 'down':
            adapter = netapp_utils.zapi.NaElement('fcp-adapter-config-down')
        elif status == 'up':
            adapter = netapp_utils.zapi.NaElement('fcp-adapter-config-up')
        else:
            # Fix: any other status previously fell through and raised a
            # NameError on the unbound local 'adapter'; fail explicitly instead.
            self.module.fail_json(msg='Error: unexpected status %s for fc-adapter %s' % (status, adapter_name))
        adapter.add_new_child('fcp-adapter', adapter_name)
        adapter.add_new_child('node', self.parameters['node_name'])
        try:
            self.server.invoke_successfully(adapter,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error trying to %s fc-adapter %s: %s' % (status, adapter_name, to_native(e)),
                                  exception=traceback.format_exc())

    def get_adapters_uuids(self):
        """Resolve and cache REST uuids for the adapter and its pair adapters;
        fail listing every adapter that could not be found."""
        missing_adapters = []
        adapters = [self.parameters['adapter_name']] + self.parameters.get('pair_adapters', [])
        for adapter in adapters:
            adapter_uuid = self.get_adapter_uuid(adapter)
            if adapter_uuid is None:
                missing_adapters.append(adapter)
            else:
                self.adapters_uuids[adapter] = adapter_uuid
        if missing_adapters:
            self.module.fail_json(msg="Error: Adapter(s) %s not exist" % (', ').join(missing_adapters))

    def get_adapter_uuid(self, adapter):
        """Return the REST uuid of the named FC port on node_name, or None."""
        api = 'network/fc/ports'
        params = {
            'name': adapter,
            'node.name': self.parameters['node_name'],
            'fields': 'uuid'
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg="Error fetching adapter %s uuid" % adapter)
        return record['uuid'] if record else None

    def get_adapter_rest(self):
        """REST equivalent of get_adapter(), via the private CLI passthrough."""
        api = 'private/cli/ucadmin'
        params = {
            'node': self.parameters['node_name'],
            'adapter': self.parameters['adapter_name'],
            'fields': 'pending_mode,pending_type,current_mode,current_type,status_admin'
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg='Error fetching ucadapter details: %s: %s'
                                      % (self.parameters['node_name'], to_native(error)))
        if record:
            return {
                'mode': self.na_helper.safe_get(record, ['current_mode']),
                'pending-mode': self.na_helper.safe_get(record, ['pending_mode']),
                'type': self.na_helper.safe_get(record, ['current_type']),
                'pending-type': self.na_helper.safe_get(record, ['pending_type']),
                'status': self.na_helper.safe_get(record, ['status_admin'])
            }
        return None

    def modify_adapter_rest(self):
        """REST equivalent of modify_adapter(), via the private CLI passthrough."""
        api = 'private/cli/ucadmin'
        query = {
            'node': self.parameters['node_name'],
            'adapter': self.parameters['adapter_name']
        }
        body = {}
        if self.parameters.get('type') is not None:
            body['type'] = self.parameters['type']
        if self.parameters.get('mode') is not None:
            body['mode'] = self.parameters['mode']
        dummy, error = rest_generic.patch_async(self.rest_api, api, None, body, query)
        if error:
            self.module.fail_json(msg='Error modifying adapter %s: %s' % (self.parameters['adapter_name'], to_native(error)))

    def online_or_offline_adapter_rest(self, status, adapter_name):
        """REST equivalent of online_or_offline_adapter(); requires the uuid
        to have been cached by get_adapters_uuids()."""
        api = 'network/fc/ports'
        body = {'enabled': True if status == 'up' else False}
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.adapters_uuids[adapter_name], body)
        if error:
            self.module.fail_json(msg='Error trying to %s fc-adapter %s: %s' % (status, adapter_name, to_native(error)))

    def apply(self):
        ''' Determine whether the adapter needs a change and, if so, offline
        the adapter(s), modify, then bring them back online. '''
        changed = False
        current = self.get_adapter()

        def need_to_change(expected, pending, current):
            # A pending value takes precedence over the current one: it is
            # what the adapter will become, so compare against it first.
            if expected is None:
                return False
            elif pending is not None:
                return pending != expected
            elif current is not None:
                return current != expected
            return False

        if current:
            if self.parameters.get('type') is not None:
                changed = need_to_change(self.parameters['type'], current['pending-type'], current['type'])
            changed = changed or need_to_change(self.parameters.get('mode'), current['pending-mode'], current['mode'])
        if changed and self.use_rest:
            self.get_adapters_uuids()
        if changed and not self.module.check_mode:
            self.online_or_offline_adapter('down', self.parameters['adapter_name'])
            if self.parameters.get('pair_adapters') is not None:
                for adapter in self.parameters['pair_adapters']:
                    self.online_or_offline_adapter('down', adapter)
            self.modify_adapter()
            self.online_or_offline_adapter('up', self.parameters['adapter_name'])
            if self.parameters.get('pair_adapters') is not None:
                for adapter in self.parameters['pair_adapters']:
                    self.online_or_offline_adapter('up', adapter)

        self.module.exit_json(changed=changed)
+
+
def main():
    """Entry point: build the adapter module object and apply the state."""
    ucadapter = NetAppOntapadapter()
    ucadapter.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py
new file mode 100644
index 000000000..76f28ad96
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py
@@ -0,0 +1,459 @@
+#!/usr/bin/python
+"""
+na_ontap_unix_group
+"""
+
+# (c) 2019-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - "Create/Delete Unix user group"
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_unix_group
+options:
+ state:
+ description:
+ - Whether the specified group should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - Specifies UNIX group's name, unique for each group.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ id:
+ description:
+ - Specifies an identification number for the UNIX group.
+ - Group ID is unique for each UNIX group.
+ - Required for create, modifiable.
+ type: int
+
+ vserver:
+ description:
+ - Specifies the Vserver for the UNIX group.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ skip_name_validation:
+ description:
+ - Specifies if group name validation is skipped.
+ type: bool
+
+ users:
+ description:
+ - Specifies the users associated with this group. Should be comma separated.
+ - It represents the expected state of a list of users at any time.
+ - Add a user into group if it is specified in expected state but not in current state.
+ - Delete a user from group if it is specified in current state but not in expected state.
+ - To delete all current users, use '' as value.
+ type: list
+ elements: str
+ version_added: 2.9.0
+
+short_description: NetApp ONTAP UNIX Group
+version_added: 2.8.0
+
+"""
+
+EXAMPLES = """
+ - name: Create UNIX group
+ na_ontap_unix_group:
+ state: present
+ name: SampleGroup
+ vserver: ansibleVServer
+ id: 2
+ users: user1,user2
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete all users in UNIX group
+ na_ontap_unix_group:
+ state: present
+ name: SampleGroup
+ vserver: ansibleVServer
+ users: ''
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete UNIX group
+ na_ontap_unix_group:
+ state: absent
+ name: SampleGroup
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapUnixGroup:
    """
    Common operations to manage UNIX groups.

    Supports both the REST API (ONTAP 9.9.1+) and the legacy ZAPI; most
    *_rest methods fall back to their ZAPI counterpart when use_rest is False.
    """

    def __init__(self):
        """Define module arguments and select the REST or ZAPI transport."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            id=dict(required=False, type='int'),
            skip_name_validation=dict(required=False, type='bool'),
            vserver=dict(required=True, type='str'),
            users=dict(required=False, type='list', elements='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Set up Rest API
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        if self.use_rest:
            # normalize the user list for REST: trim whitespace, drop empties
            self.parameters['users'] = self.safe_strip(self.parameters.get('users')) if self.parameters.get('users') is not None else None

        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
            msg = 'REST requires ONTAP 9.9.1 or later for UNIX group APIs.'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)

        if not self.use_rest:
            if netapp_utils.has_netapp_lib() is False:
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.set_playbook_zapi_key_map()
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def safe_strip(self, users):
        """Strip whitespace from each user name and drop empty entries."""
        return [user.strip() for user in users if len(user.strip())]

    def set_playbook_zapi_key_map(self):
        """Map playbook parameter names to their ZAPI element names, by type."""
        self.na_helper.zapi_string_keys = {
            'name': 'group-name'
        }
        self.na_helper.zapi_int_keys = {
            'id': 'group-id'
        }
        self.na_helper.zapi_bool_keys = {
            'skip_name_validation': 'skip-name-validation'
        }

    def get_unix_group(self):
        """
        Checks if the UNIX group exists (ZAPI).

        :return:
            dict() if group found
            None if group is not found
        """

        get_unix_group = netapp_utils.zapi.NaElement('name-mapping-unix-group-get-iter')
        attributes = {
            'query': {
                'unix-group-info': {
                    'group-name': self.parameters['name'],
                    'vserver': self.parameters['vserver'],
                }
            }
        }
        get_unix_group.translate_struct(attributes)
        try:
            result = self.server.invoke_successfully(get_unix_group, enable_tunneling=True)
            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
                group_info = result['attributes-list']['unix-group-info']
                group_details = dict()
            else:
                return None
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        # translate ZAPI elements back into playbook-keyed values
        for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
            group_details[item_key] = group_info[zapi_key]
        for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
            group_details[item_key] = self.na_helper.get_value_for_int(from_zapi=True,
                                                                       value=group_info[zapi_key])
        if group_info.get_child_by_name('users') is not None:
            group_details['users'] = [user.get_child_content('user-name')
                                      for user in group_info.get_child_by_name('users').get_children()]
        else:
            group_details['users'] = None
        return group_details

    def create_unix_group(self):
        """
        Creates an UNIX group in the specified Vserver (ZAPI).

        :return: None
        """
        if self.parameters.get('id') is None:
            self.module.fail_json(msg='Error: Missing a required parameter for create: (id)')

        group_create = netapp_utils.zapi.NaElement('name-mapping-unix-group-create')
        group_details = {}
        for item in self.parameters:
            if item in self.na_helper.zapi_string_keys:
                zapi_key = self.na_helper.zapi_string_keys.get(item)
                group_details[zapi_key] = self.parameters[item]
            elif item in self.na_helper.zapi_bool_keys:
                zapi_key = self.na_helper.zapi_bool_keys.get(item)
                group_details[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False,
                                                                            value=self.parameters[item])
            elif item in self.na_helper.zapi_int_keys:
                zapi_key = self.na_helper.zapi_int_keys.get(item)
                # NOTE(review): from_zapi=True yields an int here although the
                # value is being SENT to ZAPI (from_zapi=False would yield a
                # str) — confirm translate_struct stringifies int children.
                group_details[zapi_key] = self.na_helper.get_value_for_int(from_zapi=True,
                                                                           value=self.parameters[item])
        group_create.translate_struct(group_details)
        try:
            self.server.invoke_successfully(group_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        # group membership requires separate add-user calls after creation
        if self.parameters.get('users') is not None:
            self.modify_users_in_group()

    def delete_unix_group(self):
        """
        Deletes an UNIX group from a vserver (ZAPI).

        :return: None
        """
        group_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'name-mapping-unix-group-destroy', **{'group-name': self.parameters['name']})

        try:
            self.server.invoke_successfully(group_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error removing UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_unix_group(self, params):
        """
        Modify an UNIX group from a vserver (ZAPI).
        :param params: modify parameters
        :return: None
        """
        # modify users requires separate zapi.
        if 'users' in params:
            self.modify_users_in_group()
            # nothing else to modify: 'users' was the only change
            if len(params) == 1:
                return

        group_modify = netapp_utils.zapi.NaElement('name-mapping-unix-group-modify')
        group_details = {'group-name': self.parameters['name']}
        for key in params:
            if key in self.na_helper.zapi_int_keys:
                zapi_key = self.na_helper.zapi_int_keys.get(key)
                # NOTE(review): same from_zapi=True oddity as in create — the
                # value is sent to ZAPI as an int; confirm this is intended.
                group_details[zapi_key] = self.na_helper.get_value_for_int(from_zapi=True,
                                                                           value=params[key])
        group_modify.translate_struct(group_details)

        try:
            self.server.invoke_successfully(group_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_users_in_group(self):
        """
        Add/delete one or many users in a UNIX group (ZAPI).

        Compares the current membership with the desired list and issues one
        add-user or delete-user call per difference.
        :return: None
        """
        current_users = self.get_unix_group().get('users')
        expect_users = self.parameters.get('users')

        if current_users is None:
            current_users = []
        # a single empty string means "remove all users"
        if expect_users[0] == '' and len(expect_users) == 1:
            expect_users = []
        users_to_remove = list(set(current_users) - set(expect_users))
        users_to_add = list(set(expect_users) - set(current_users))
        if len(users_to_add) > 0:
            for user in users_to_add:
                add_user = netapp_utils.zapi.NaElement('name-mapping-unix-group-add-user')
                group_details = {'group-name': self.parameters['name'], 'user-name': user}
                add_user.translate_struct(group_details)
                try:
                    self.server.invoke_successfully(add_user, enable_tunneling=True)
                except netapp_utils.zapi.NaApiError as error:
                    self.module.fail_json(
                        msg='Error adding user %s to UNIX group %s: %s' % (user, self.parameters['name'], to_native(error)),
                        exception=traceback.format_exc())

        if len(users_to_remove) > 0:
            for user in users_to_remove:
                delete_user = netapp_utils.zapi.NaElement('name-mapping-unix-group-delete-user')
                group_details = {'group-name': self.parameters['name'], 'user-name': user}
                delete_user.translate_struct(group_details)
                try:
                    self.server.invoke_successfully(delete_user, enable_tunneling=True)
                except netapp_utils.zapi.NaApiError as error:
                    self.module.fail_json(
                        msg='Error deleting user %s from UNIX group %s: %s' % (user, self.parameters['name'], to_native(error)),
                        exception=traceback.format_exc())

    def get_unix_group_rest(self):
        """
        Retrieve the UNIX group for the given SVM and name via REST,
        including its member users. Falls back to ZAPI when REST is disabled.

        :return: dict with svm.uuid, name, id and users, or None if not found.
        """
        if not self.use_rest:
            return self.get_unix_group()
        query = {'svm.name': self.parameters.get('vserver'),
                 'name': self.parameters.get('name')}
        api = 'name-services/unix-groups'
        fields = 'svm.uuid,id,name,users.name'
        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
        if error:
            self.module.fail_json(msg="Error getting UNIX group: %s" % error)
        if record:
            # flatten [{'name': ...}, ...] into a plain list of names
            if 'users' in record:
                record['users'] = [user['name'] for user in record['users']]
            return {
                'svm': {'uuid': self.na_helper.safe_get(record, ['svm', 'uuid'])},
                'name': self.na_helper.safe_get(record, ['name']),
                'id': self.na_helper.safe_get(record, ['id']),
                'users': self.na_helper.safe_get(record, ['users'])
            }
        return None

    def create_unix_group_rest(self):
        """
        Create the local UNIX group configuration for the specified SVM (REST).
        Group name and group ID are mandatory parameters.
        Falls back to ZAPI when REST is disabled.
        """
        if not self.use_rest:
            return self.create_unix_group()

        body = {'svm.name': self.parameters.get('vserver')}
        if 'name' in self.parameters:
            body['name'] = self.parameters['name']
        if 'id' in self.parameters:
            body['id'] = self.parameters['id']
        if 'skip_name_validation' in self.parameters:
            body['skip_name_validation'] = self.parameters['skip_name_validation']
        api = 'name-services/unix-groups'
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error is not None:
            self.module.fail_json(msg="Error creating UNIX group: %s" % error)
        # membership is managed through a separate users sub-endpoint
        if self.parameters.get('users') is not None:
            self.modify_users_in_group_rest()

    def modify_users_in_group_rest(self, current=None):
        """
        Add/delete one or many users in a UNIX group (REST).

        :param current: existing group record; fetched when not supplied
                        (e.g. right after create).
        """
        body = {'records': []}
        # current is to add user when creating a group
        if not current:
            current = self.get_unix_group_rest()
        current_users = current['users'] or []
        expect_users = self.parameters.get('users')
        users_to_remove = list(set(current_users) - set(expect_users))
        users_to_add = list(set(expect_users) - set(current_users))
        if len(users_to_add) > 0:
            # additions can be batched in a single POST of records
            body['records'] = [{'name': user} for user in users_to_add]
            if 'skip_name_validation' in self.parameters:
                body['skip_name_validation'] = self.parameters['skip_name_validation']
            api = 'name-services/unix-groups/%s/%s/users' % (current['svm']['uuid'], current['name'])
            dummy, error = rest_generic.post_async(self.rest_api, api, body)
            if error is not None:
                self.module.fail_json(msg="Error Adding user to UNIX group: %s" % error)

        if len(users_to_remove) > 0:
            # removals must be issued one DELETE per user
            for user in users_to_remove:
                api = 'name-services/unix-groups/%s/%s/users' % (current['svm']['uuid'], current['name'])
                dummy, error = rest_generic.delete_async(self.rest_api, api, user, body=None)
                if error is not None:
                    self.module.fail_json(msg="Error removing user from UNIX group: %s" % error)

    def delete_unix_group_rest(self, current):
        """
        Delete the UNIX group configuration for the specified SVM (REST).
        Falls back to ZAPI when REST is disabled.

        :param current: existing group record (provides the SVM uuid).
        """
        if not self.use_rest:
            return self.delete_unix_group()

        api = 'name-services/unix-groups/%s' % current['svm']['uuid']
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['name'])
        if error is not None:
            self.module.fail_json(msg="Error deleting UNIX group: %s" % error)

    def modify_unix_group_rest(self, modify, current=None):
        """
        Update UNIX group information for the specified group and SVM (REST).
        Falls back to ZAPI when REST is disabled.

        :param modify: dict of attributes to change.
        :param current: existing group record (provides the SVM uuid).
        """
        if not self.use_rest:
            return self.modify_unix_group(modify)

        # users are managed through a dedicated sub-endpoint
        if 'users' in modify:
            self.modify_users_in_group_rest(current)
            # nothing else to modify: 'users' was the only change
            if len(modify) == 1:
                return

        api = 'name-services/unix-groups/%s' % current['svm']['uuid']
        body = {}
        if 'id' in modify:
            body['id'] = modify['id']
        if body:
            dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body)
            if error is not None:
                self.module.fail_json(msg="Error on modifying UNIX group: %s" % error)

    def apply(self):
        """
        Invoke appropriate action based on playbook parameters.

        :return: None
        """
        cd_action = None
        current = self.get_unix_group_rest()
        # normalize None membership to [] so get_modified_attributes can
        # compare it against the desired user list
        if current and current['users'] is None:
            current['users'] = []
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_unix_group_rest()
            elif cd_action == 'delete':
                self.delete_unix_group_rest(current)
            else:
                self.modify_unix_group_rest(modify, current)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Entry point: build the UNIX group module object and apply the state."""
    group_module = NetAppOntapUnixGroup()
    group_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py
new file mode 100644
index 000000000..708bb48d0
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_unix_user
+
+short_description: NetApp ONTAP UNIX users
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete or modify UNIX users local to ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - Specifies user's UNIX account name.
+ - REST support requires ONTAP version 9.9.0 or later.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ primary_gid:
+ description:
+ - Specifies the primary group identification number for the UNIX user.
+ - REST support requires ONTAP version 9.9.0 or later.
+ - Required for create, modifiable.
+ aliases: ['group_id']
+ type: int
+ version_added: 21.21.0
+
+ vserver:
+ description:
+ - Specifies the Vserver for the UNIX user.
+ - REST support requires ONTAP version 9.9.0 or later.
+ - Non-modifiable.
+ required: true
+ type: str
+
+ id:
+ description:
+ - Specifies an identification number for the UNIX user.
+ - REST support requires ONTAP version 9.9.0 or later.
+ - Required for create, modifiable.
+ type: int
+
+ full_name:
+ description:
+ - Specifies the full name of the UNIX user
+ - REST support requires ONTAP version 9.9.0 or later.
+ - Optional for create, modifiable.
+ type: str
+'''
+
+EXAMPLES = """
+
+ - name: Create UNIX User
+ netapp.ontap.na_ontap_unix_user:
+ state: present
+ name: SampleUser
+ vserver: ansibleVServer
+ group_id: 1
+ id: 2
+ full_name: Test User
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete UNIX User
+ netapp.ontap.na_ontap_unix_user:
+ state: absent
+ name: SampleUser
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapUnixUser:
+ """
+ Common operations to manage users and roles.
+ """
+
    def __init__(self):
        """Define module arguments and select the REST or ZAPI transport."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            primary_gid=dict(required=False, type='int', aliases=['group_id']),
            id=dict(required=False, type='int'),
            full_name=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Set up Rest API
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        # unix-users REST endpoints exist only on 9.9.0+; otherwise fall back
        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 0):
            msg = 'REST requires ONTAP 9.9.0 or later for unix-users APIs.'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)

        if not self.use_rest:
            if netapp_utils.has_netapp_lib() is False:
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def get_unix_user(self):
+ """
+ Checks if the UNIX user exists.
+
+ :return:
+ dict() if user found
+ None if user is not found
+ """
+ get_unix_user = netapp_utils.zapi.NaElement('name-mapping-unix-user-get-iter')
+ attributes = {
+ 'query': {
+ 'unix-user-info': {
+ 'user-name': self.parameters['name'],
+ 'vserver': self.parameters['vserver'],
+ }
+ }
+ }
+ get_unix_user.translate_struct(attributes)
+ try:
+ result = self.server.invoke_successfully(get_unix_user, enable_tunneling=True)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ user_info = result['attributes-list']['unix-user-info']
+ return {'primary_gid': int(user_info['group-id']),
+ 'id': int(user_info['user-id']),
+ 'full_name': user_info['full-name']}
+ return None
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error getting UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_unix_user(self):
+ """
+ Creates an UNIX user in the specified Vserver
+
+ :return: None
+ """
+ if self.parameters.get('primary_gid') is None or self.parameters.get('id') is None:
+ self.module.fail_json(msg='Error: Missing one or more required parameters for create: (primary_gid, id)')
+
+ user_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'name-mapping-unix-user-create', **{'user-name': self.parameters['name'],
+ 'group-id': str(self.parameters['primary_gid']),
+ 'user-id': str(self.parameters['id'])})
+ if self.parameters.get('full_name') is not None:
+ user_create.add_new_child('full-name', self.parameters['full_name'])
+
+ try:
+ self.server.invoke_successfully(user_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_unix_user(self):
+ """
+ Deletes an UNIX user from a vserver
+
+ :return: None
+ """
+ user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'name-mapping-unix-user-destroy', **{'user-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(user_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_unix_user(self, params):
+ user_modify = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'name-mapping-unix-user-modify', **{'user-name': self.parameters['name']})
+ for key in params:
+ if key == 'primary_gid':
+ user_modify.add_new_child('group-id', str(params['primary_gid']))
+ if key == 'id':
+ user_modify.add_new_child('user-id', str(params['id']))
+ if key == 'full_name':
+ user_modify.add_new_child('full-name', params['full_name'])
+
+ try:
+ self.server.invoke_successfully(user_modify, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
    def get_unix_user_rest(self):
        """
        Retrieve UNIX user information for the specified user and SVM.

        Uses REST when available, otherwise falls back to the ZAPI equivalent.

        :return: dict of user attributes (including the owning SVM uuid, which the
            REST modify/delete calls need for the URL path) if found, None otherwise.
        """
        if not self.use_rest:
            return self.get_unix_user()
        query = {'svm.name': self.parameters.get('vserver'),
                 'name': self.parameters.get('name')}
        api = 'name-services/unix-users'
        # svm.uuid is requested because PATCH/DELETE address the record as
        # name-services/unix-users/<svm.uuid>/<name>
        fields = 'svm.uuid,id,primary_gid,name,full_name'
        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
        if error:
            self.module.fail_json(msg="Error on getting unix-user info: %s" % error)
        if record:
            # safe_get returns None when a key is absent from the REST response
            return {
                'svm': {'uuid': self.na_helper.safe_get(record, ['svm', 'uuid'])},
                'name': self.na_helper.safe_get(record, ['name']),
                'full_name': self.na_helper.safe_get(record, ['full_name']),
                'id': self.na_helper.safe_get(record, ['id']),
                'primary_gid': self.na_helper.safe_get(record, ['primary_gid']),
            }
        return None
+
+ def create_unix_user_rest(self):
+ """
+ Creates the local UNIX user configuration for an SVM with rest API.
+ """
+ if not self.use_rest:
+ return self.create_unix_user()
+
+ body = {'svm.name': self.parameters.get('vserver')}
+ for key in ('name', 'full_name', 'id', 'primary_gid'):
+ if key in self.parameters:
+ body[key] = self.parameters.get(key)
+ api = 'name-services/unix-users'
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error is not None:
+ self.module.fail_json(msg="Error on creating unix-user: %s" % error)
+
+ def delete_unix_user_rest(self, current):
+ """
+ Deletes a UNIX user configuration for the specified SVM with rest API.
+ """
+ if not self.use_rest:
+ return self.delete_unix_user()
+
+ api = 'name-services/unix-users/%s' % current['svm']['uuid']
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['name'])
+ if error is not None:
+ self.module.fail_json(msg="Error on deleting unix-user: %s" % error)
+
    def modify_unix_user_rest(self, modify, current=None):
        """
        Update UNIX user attributes for the specified user and SVM.

        Uses REST when available, otherwise falls back to the ZAPI equivalent.

        :param modify: dict of attributes to change (subset of full_name, id, primary_gid).
        :param current: current record as returned by get_unix_user_rest; required with
            REST to supply the owning SVM uuid (unused with ZAPI).
        """
        if not self.use_rest:
            return self.modify_unix_user(modify)

        query = {'svm.name': self.parameters.get('vserver')}
        body = {}
        # 'name' is the record key and is not modifiable here
        for key in ('full_name', 'id', 'primary_gid'):
            if key in modify:
                body[key] = modify[key]
        api = 'name-services/unix-users/%s' % current['svm']['uuid']
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body, query)
        if error is not None:
            self.module.fail_json(msg="Error on modifying unix-user: %s" % error)
+
    def apply(self):
        """
        Invoke the appropriate create/modify/delete action based on playbook parameters.

        :return: None (exits the module via exit_json).
        """
        cd_action = None
        current = self.get_unix_user_rest()
        # cd_action is 'create', 'delete', or None when the user already exists
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # only compute attribute changes when no create/delete is pending
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_unix_user_rest()
            elif cd_action == 'delete':
                self.delete_unix_user_rest(current)
            else:
                self.modify_unix_user_rest(modify, current)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Instantiate the module object and apply the requested state."""
    unix_user_module = NetAppOntapUnixUser()
    unix_user_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py
new file mode 100644
index 000000000..7fada8ac6
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py
@@ -0,0 +1,854 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_user
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_user
+
+short_description: NetApp ONTAP user configuration and management
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or destroy users.
+
+options:
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+ name:
+ description:
+ - The name of the user to manage.
+ required: true
+ type: str
+ application_strs:
+ version_added: 21.6.0
+ description:
+ - List of applications to grant access to.
+ - This option maintains backward compatibility with the existing C(applications) option, but is limited.
+ - It is recommended to use the new C(application_dicts) option which provides more flexibility.
+ - Creating a login with application console, telnet, rsh, and service-processor for a data vserver is not supported.
+ - Module supports both service-processor and service_processor choices.
+ - ZAPI requires service-processor, while REST requires service_processor, except for an issue with ONTAP 9.6 and 9.7.
+ - snmp is not supported in REST.
+ - Either C(application_dicts) or C(application_strs) is required.
+ type: list
+ elements: str
+ choices: ['console', 'http','ontapi','rsh','snmp','service_processor','service-processor','sp','ssh','telnet']
+ aliases:
+ - application
+ - applications
+ application_dicts:
+ version_added: 21.6.0
+ description:
+ - List of applications to grant access to. Provides better control on applications and authentication methods.
+ - Creating a login with application console, telnet, rsh, and service-processor for a data vserver is not supported.
+ - Module supports both service-processor and service_processor choices.
+ - ZAPI requires service-processor, while REST requires service_processor, except for an issue with ONTAP 9.6 and 9.7.
+ - snmp is not supported in REST.
+ - Either C(application_dicts) or C(application_strs) is required.
+ type: list
+ elements: dict
+ suboptions:
+ application:
+ description: name of the application.
+ type: str
+ choices: ['console', 'http','ontapi','rsh','snmp','service_processor','service-processor','sp','ssh','telnet']
+ required: true
+ authentication_methods:
+ description: list of authentication methods for the application (see C(authentication_method)).
+ type: list
+ elements: str
+ choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm', 'cert', 'saml']
+ required: true
+ second_authentication_method:
+ description: when using ssh, optional additional authentication method for MFA.
+ type: str
+ choices: ['none', 'password', 'publickey', 'nsswitch']
+ authentication_method:
+ description:
+ - Authentication method for the application. If you need more than one method, use C(application_dicts).
+ - Not all authentication methods are valid for an application.
+ - Valid authentication methods for each application are as denoted in I(authentication_choices_description).
+ - Password for console application
+ - Password, domain, nsswitch, cert, saml for http application.
+ - Password, domain, nsswitch, cert, saml for ontapi application.
+ - SAML is only supported with REST, but seems to work with ZAPI as well.
+ - Community for snmp application (when creating SNMPv1 and SNMPv2 users).
+ - The usm and community for snmp application (when creating SNMPv3 users).
+ - Password for sp application.
+ - Password for rsh application.
+ - Password for telnet application.
+ - Password, publickey, domain, nsswitch for ssh application.
+ - Required when C(application_strs) is present.
+ type: str
+ choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm', 'cert', 'saml']
+ set_password:
+ description:
+ - Password for the user account.
+ - It is ignored for creating snmp users, but is required for creating non-snmp users.
+ - For an existing user, this value will be used as the new password.
+ type: str
+ role_name:
+ description:
+ - The name of the role. Required when C(state=present)
+ type: str
+ lock_user:
+ description:
+ - Whether the specified user account is locked.
+ type: bool
+ vserver:
+ description:
+ - The name of the vserver to use.
+ - Required with ZAPI.
+ - With REST, ignore this option for creating cluster scoped interface.
+ aliases:
+ - svm
+ type: str
+ authentication_protocol:
+ description:
+ - Authentication protocol for the snmp user.
+ - When cluster FIPS mode is on, 'sha' and 'sha2-256' are the only possible and valid values.
+ - When cluster FIPS mode is off, the default value is 'none'.
+ - When cluster FIPS mode is on, the default value is 'sha'.
+ - Only available for 'usm' authentication method and non modifiable.
+ choices: ['none', 'md5', 'sha', 'sha2-256']
+ type: str
+ version_added: '20.6.0'
+ authentication_password:
+ description:
+ - Password for the authentication protocol. This should be minimum 8 characters long.
+ - This is required for 'md5', 'sha' and 'sha2-256' authentication protocols and not required for 'none'.
+ - Only available for 'usm' authentication method and non modifiable.
+ type: str
+ version_added: '20.6.0'
+ engine_id:
+ description:
+ - Authoritative entity's EngineID for the SNMPv3 user.
+ - This should be specified as a hexadecimal string.
+ - Engine ID with first bit set to 1 in first octet should have a minimum of 5 or maximum of 32 octets.
+ - Engine Id with first bit set to 0 in the first octet should be 12 octets in length.
+ - Engine Id cannot have all zeros in its address.
+ - Only available for 'usm' authentication method and non modifiable.
+ type: str
+ version_added: '20.6.0'
+ privacy_protocol:
+ description:
+ - Privacy protocol for the snmp user.
+ - When cluster FIPS mode is on, 'aes128' is the only possible and valid value.
+ - When cluster FIPS mode is off, the default value is 'none'. When cluster FIPS mode is on, the default value is 'aes128'.
+ - Only available for 'usm' authentication method and non modifiable.
+ choices: ['none', 'des', 'aes128']
+ type: str
+ version_added: '20.6.0'
+ privacy_password:
+ description:
+ - Password for the privacy protocol. This should be minimum 8 characters long.
+ - This is required for 'des' and 'aes128' privacy protocols and not required for 'none'.
+ - Only available for 'usm' authentication method and non modifiable.
+ type: str
+ version_added: '20.6.0'
+ remote_switch_ipaddress:
+ description:
+ - This optionally specifies the IP Address of the remote switch.
+ - The remote switch could be a cluster switch monitored by Cluster Switch Health Monitor (CSHM)
+ or a Fiber Channel (FC) switch monitored by Metro Cluster Health Monitor (MCC-HM).
+ - This is applicable only for a remote SNMPv3 user i.e. only if user is a remote (non-local) user,
+ application is snmp and authentication method is usm.
+ type: str
+ version_added: '20.6.0'
+ replace_existing_apps_and_methods:
+ description:
+ - If the user already exists, the current applications and authentication methods are replaced when state=present.
+ - If the user already exists, the current applications and authentication methods are removed when state=absent.
+ - When using application_dicts or REST, this is the only supported behavior.
+ - When using application_strs and ZAPI, this is the behavior when this option is set to always.
+ - When using application_strs and ZAPI, if the option is set to auto, applications that are not listed are not removed.
+ - When using application_strs and ZAPI, if the option is set to auto, authentication methods that are not listed are not removed.
+ - C(auto) preserves the existing behavior for backward compatibility, but note that REST and ZAPI have inconsistent behavior.
+ - This is another reason to recommend to use C(application_dicts).
+ type: str
+ choices: ['always', 'auto']
+ default: 'auto'
+ version_added: '20.6.0'
+'''
+
+EXAMPLES = """
+
+ - name: Create User
+ netapp.ontap.na_ontap_user:
+ state: present
+ name: SampleUser
+ applications: ssh,console
+ authentication_method: password
+ set_password: apn1242183u1298u41
+ lock_user: True
+ role_name: vsadmin
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create cluster scoped user in REST.
+ netapp.ontap.na_ontap_user:
+ state: present
+ name: SampleUser
+ applications: ssh,console
+ authentication_method: password
+ set_password: apn1242183u1298u41
+ lock_user: True
+ role_name: admin
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete User
+ netapp.ontap.na_ontap_user:
+ state: absent
+ name: SampleUser
+ applications: ssh
+ authentication_method: password
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create user with snmp application (ZAPI)
+ netapp.ontap.na_ontap_user:
+ state: present
+ name: test_cert_snmp
+ applications: snmp
+ authentication_method: usm
+ role_name: admin
+ authentication_protocol: md5
+ authentication_password: '12345678'
+ privacy_protocol: 'aes128'
+ privacy_password: '12345678'
+ engine_id: '7063514941000000000000'
+ remote_switch_ipaddress: 10.0.0.0
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Create user
+ netapp.ontap.na_ontap_user:
+ state: present
+ name: test123
+ application_dicts:
+ - application: http
+ authentication_methods: password
+ - application: ssh
+ authentication_methods: password,publickey
+ role_name: vsadmin
+ set_password: bobdole1234566
+ vserver: "{{ vserver }}"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
+class NetAppOntapUser:
+ """
+ Common operations to manage users and roles.
+ """
+
    def __init__(self):
        """
        Build the argument spec, parse module parameters, and select REST vs ZAPI.

        Fails the module early when ZAPI is selected but vserver or the NetApp
        library is missing, then validates the requested applications.
        """
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),

            application_strs=dict(type='list', elements='str', aliases=['application', 'applications'],
                                  choices=['console', 'http', 'ontapi', 'rsh', 'snmp',
                                           'sp', 'service-processor', 'service_processor', 'ssh', 'telnet'],),
            application_dicts=dict(type='list', elements='dict',
                                   options=dict(
                                       application=dict(required=True, type='str',
                                                        choices=['console', 'http', 'ontapi', 'rsh', 'snmp',
                                                                 'sp', 'service-processor', 'service_processor', 'ssh', 'telnet'],),
                                       authentication_methods=dict(required=True, type='list', elements='str',
                                                                   choices=['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm', 'cert', 'saml']),
                                       second_authentication_method=dict(type='str', choices=['none', 'password', 'publickey', 'nsswitch']))),
            authentication_method=dict(type='str',
                                       choices=['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm', 'cert', 'saml']),
            set_password=dict(type='str', no_log=True),
            role_name=dict(type='str'),
            lock_user=dict(type='bool'),
            vserver=dict(type='str', aliases=['svm']),
            authentication_protocol=dict(type='str', choices=['none', 'md5', 'sha', 'sha2-256']),
            authentication_password=dict(type='str', no_log=True),
            engine_id=dict(type='str'),
            privacy_protocol=dict(type='str', choices=['none', 'des', 'aes128']),
            privacy_password=dict(type='str', no_log=True),
            remote_switch_ipaddress=dict(type='str'),
            replace_existing_apps_and_methods=dict(type='str', choices=['always', 'auto'], default='auto')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            mutually_exclusive=[
                ('application_strs', 'application_dicts')
            ],
            required_together=[
                ('application_strs', 'authentication_method')
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # normalize application_strs / application_dicts into self.parameters['applications']
        self.strs_to_dicts()

        # REST API should be used for ONTAP 9.6 or higher
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        # some attributes are not supported in earlier REST implementation
        unsupported_rest_properties = ['authentication_password', 'authentication_protocol', 'engine_id',
                                       'privacy_password', 'privacy_protocol']
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
        if not self.use_rest:
            if self.parameters.get('vserver') is None:
                self.module.fail_json(msg="Error: vserver is required with ZAPI")
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
        self.validate_applications()
+
    def validate_applications(self):
        """
        Validate and normalize self.parameters['applications'] in place.

        Fails on duplicated application entries, translates naming differences
        between ZAPI and REST, and rejects applications REST does not support.
        """
        if not self.use_rest:
            if self.parameters['applications'] is None:
                self.module.fail_json(msg="application_dicts or application_strs is a required parameter with ZAPI")
            # ZAPI only accepts the dashed spelling of the application name
            for application in self.parameters['applications']:
                if application['application'] == 'service_processor':
                    application['application'] = 'service-processor'
        if self.parameters['applications'] is None:
            return
        application_keys = []
        for application in self.parameters['applications']:
            # make sure app entries are not duplicated
            application_name = application['application']
            if application_name in application_keys:
                self.module.fail_json(msg='Error: repeated application name: %s. Group all authentication methods under a single entry.' % application_name)
            application_keys.append(application_name)
            if self.use_rest:
                if application_name == 'snmp':
                    self.module.fail_json(msg="snmp as application is not supported in REST.")
                # REST prefers certificate to cert
                application['authentication_methods'] = ['certificate' if x == 'cert' else x for x in application['authentication_methods']]
                # REST get always returns 'second_authentication_method'
                if 'second_authentication_method' not in application:
                    application['second_authentication_method'] = None
+
+ def strs_to_dicts(self):
+ """transform applications list of strs to a list of dicts if application_strs in use"""
+ if 'application_dicts' in self.parameters:
+ for application in self.parameters['application_dicts']:
+ # keep them sorted for comparison with current
+ application['authentication_methods'].sort()
+ self.parameters['applications'] = self.parameters['application_dicts']
+ self.parameters['replace_existing_apps_and_methods'] = 'always'
+ elif 'application_strs' in self.parameters:
+ # actual conversion
+ self.parameters['applications'] = [
+ dict(application=application,
+ authentication_methods=[self.parameters['authentication_method']],
+ second_authentication_method=None
+ ) for application in self.parameters['application_strs']]
+ else:
+ self.parameters['applications'] = None
+
+ def get_user_rest(self):
+ api = 'security/accounts'
+ query = {
+ 'name': self.parameters['name']
+ }
+ if self.parameters.get('vserver') is None:
+ # vserser is empty for cluster
+ query['scope'] = 'cluster'
+ else:
+ query['owner.name'] = self.parameters['vserver']
+
+ message, error = self.rest_api.get(api, query)
+ if error:
+ self.module.fail_json(msg='Error while fetching user info: %s' % error)
+ if message['num_records'] == 1:
+ return message['records'][0]['owner']['uuid'], message['records'][0]['name']
+ if message['num_records'] > 1:
+ self.module.fail_json(msg='Error while fetching user info, found multiple entries: %s' % repr(message))
+
+ return None
+
    def get_user_details_rest(self, name, owner_uuid):
        """
        Fetch role, applications and locked state for an existing account (REST).

        :param name: account user name.
        :param owner_uuid: uuid of the owning SVM or cluster.
        :return: dict with 'role_name', 'applications' and, when reported by ONTAP,
            'lock_user'; implicitly None when the GET returns an empty response.
        """
        query = {
            'fields': 'role,applications,locked'
        }
        api = "security/accounts/%s/%s" % (owner_uuid, name)
        response, error = self.rest_api.get(api, query)
        if error:
            self.module.fail_json(msg='Error while fetching user details: %s' % error)
        if response:
            # replace "none" values with None for comparison
            for application in response['applications']:
                if application.get('second_authentication_method') == 'none':
                    application['second_authentication_method'] = None
                # new read-only attribute in 9.11, breaks idempotency when present
                application.pop('is_ldap_fastbind', None)
            return_value = {
                'role_name': response['role']['name'],
                'applications': response['applications']
            }
            if "locked" in response:
                return_value['lock_user'] = response['locked']
            return return_value
+
    def get_user(self):
        """
        Check if the user exists (ZAPI).

        :return:
            dict with 'lock_user', 'role_name' and 'applications' if the user is found
            None if the user is not found
        """
        desired_applications = [application['application'] for application in self.parameters['applications']]
        desired_method = self.parameters.get('authentication_method')
        security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-account-info', **{'vserver': self.parameters['vserver'],
                                              'user-name': self.parameters['name']})

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        security_login_get_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(security_login_get_iter,
                                                     enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) in ['16034', '16043']:
                # Error 16034 denotes a user not being found.
                # Error 16043 denotes the user existing, but the application missing.
                return None
            self.module.fail_json(msg='Error getting user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

        if not result.get_child_by_name('num-records') or not int(result.get_child_content('num-records')):
            return None

        applications = {}
        attr = result.get_child_by_name('attributes-list')
        locks = []
        # ZAPI returns one record per (application, authentication method) pair;
        # aggregate them into a single entry per application
        for info in attr.get_children():
            lock_user = self.na_helper.get_value_for_bool(True, info.get_child_content('is-locked'))
            locks.append(lock_user)
            role_name = info.get_child_content('role-name')
            application = info.get_child_content('application')
            auth_method = info.get_child_content('authentication-method')
            sec_method = info.get_child_content('second-authentication-method')
            if self.parameters['replace_existing_apps_and_methods'] == 'always' and application in applications:
                applications[application][0].append(auth_method)
                if sec_method != 'none':
                    # we can't change sec_method in place, a tuple is not mutable
                    applications[application] = (applications[application][0], sec_method)
            elif (self.parameters['replace_existing_apps_and_methods'] == 'always'
                  or (application in desired_applications and auth_method == desired_method)):
                # with 'auto' we ignore existing apps that were not asked for
                # with auto, only a single method is supported
                applications[application] = ([auth_method], sec_method if sec_method != 'none' else None)
        # methods are sorted to match the normalization done in strs_to_dicts
        apps = [dict(application=application, authentication_methods=sorted(methods), second_authentication_method=sec_method)
                for application, (methods, sec_method) in applications.items()]
        return dict(
            lock_user=any(locks),
            role_name=role_name,
            applications=apps
        )
+
    def create_user_rest(self, apps):
        """
        Create the user account with REST.

        When ONTAP rejects the service processor application spelling
        (service-processor vs service_processor differs across ONTAP versions),
        the spelling is flipped and the POST is retried once; on a second
        failure the original error is reported.

        :param apps: list of application dicts to grant access to.
        """
        api = 'security/accounts'
        body = {
            'name': self.parameters['name'],
            'role.name': self.parameters['role_name'],
            'applications': self.na_helper.filter_out_none_entries(apps)
        }
        if self.parameters.get('vserver') is not None:
            # vserver is empty for a cluster-scoped account
            body['owner.name'] = self.parameters['vserver']
        if 'set_password' in self.parameters:
            body['password'] = self.parameters['set_password']
        if 'lock_user' in self.parameters:
            body['locked'] = self.parameters['lock_user']
        dummy, error = self.rest_api.post(api, body)
        if (
            error
            and 'invalid value' in error['message']
            and any(x in error['message'] for x in ['service-processor', 'service_processor'])
        ):
            # find if there is an error for service processor application value
            # update value as per ONTAP version support
            app_list_sp = body['applications']
            for app_item in app_list_sp:
                if app_item['application'] == 'service-processor':
                    app_item['application'] = 'service_processor'
                elif app_item['application'] == 'service_processor':
                    app_item['application'] = 'service-processor'
            body['applications'] = app_list_sp
            # post again and throw first error in case of an error
            dummy, error_sp = self.rest_api.post(api, body)
            if not error_sp:
                return

        # non-sp errors thrown or initial sp errors
        if error:
            self.module.fail_json(msg='Error while creating user: %s' % error)
+
+ def create_user(self, application):
+ for index in range(len(application['authentication_methods'])):
+ self.create_user_with_auth(application, index)
+
    def create_user_with_auth(self, application, index):
        """
        Create the login for the given application and one authentication method (ZAPI).

        :param application: application dict to grant access to.
        :param index: index into application['authentication_methods'] to create.
        """
        user_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-create', **{'vserver': self.parameters['vserver'],
                                        'user-name': self.parameters['name'],
                                        'application': application['application'],
                                        'authentication-method': application['authentication_methods'][index],
                                        'role-name': self.parameters.get('role_name')})
        if application.get('second_authentication_method') is not None:
            user_create.add_new_child('second-authentication-method', application['second_authentication_method'])
        if self.parameters.get('set_password') is not None:
            user_create.add_new_child('password', self.parameters.get('set_password'))
        # SNMPv3 (usm) logins carry their extra attributes in a nested element
        if application['authentication_methods'][0] == 'usm':
            if self.parameters.get('remote_switch_ipaddress') is not None:
                user_create.add_new_child('remote-switch-ipaddress', self.parameters.get('remote_switch_ipaddress'))
            snmpv3_login_info = netapp_utils.zapi.NaElement('snmpv3-login-info')
            if self.parameters.get('authentication_password') is not None:
                snmpv3_login_info.add_new_child('authentication-password', self.parameters['authentication_password'])
            if self.parameters.get('authentication_protocol') is not None:
                snmpv3_login_info.add_new_child('authentication-protocol', self.parameters['authentication_protocol'])
            if self.parameters.get('engine_id') is not None:
                snmpv3_login_info.add_new_child('engine-id', self.parameters['engine_id'])
            if self.parameters.get('privacy_password') is not None:
                snmpv3_login_info.add_new_child('privacy-password', self.parameters['privacy_password'])
            if self.parameters.get('privacy_protocol') is not None:
                snmpv3_login_info.add_new_child('privacy-protocol', self.parameters['privacy_protocol'])
            user_create.add_child_elem(snmpv3_login_info)

        try:
            self.server.invoke_successfully(user_create,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
+
+ def lock_unlock_user_rest(self, owner_uuid, username, value=None):
+ body = {
+ 'locked': value
+ }
+ error = self.patch_account(owner_uuid, username, body)
+ if error:
+ self.module.fail_json(msg='Error while locking/unlocking user: %s' % error)
+
+ def lock_given_user(self):
+ """
+ locks the user
+ """
+ user_lock = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-lock', **{'vserver': self.parameters['vserver'],
+ 'user-name': self.parameters['name']})
+
+ try:
+ self.server.invoke_successfully(user_lock,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error locking user %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
    def unlock_given_user(self):
        """
        Unlock the user account (ZAPI).

        Error 13114 is tolerated so the call is idempotent — presumably it means
        the account is already unlocked; confirm against the ZAPI error catalog.
        """
        user_unlock = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-unlock', **{'vserver': self.parameters['vserver'],
                                        'user-name': self.parameters['name']})

        try:
            self.server.invoke_successfully(user_unlock,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) != '13114':
                self.module.fail_json(msg='Error unlocking user %s: %s' % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())
        return
+
+ def delete_user_rest(self, owner_uuid, username):
+ api = "security/accounts/%s/%s" % (owner_uuid, username)
+ dummy, error = self.rest_api.delete(api)
+ if error:
+ self.module.fail_json(msg='Error while deleting user: %s' % error)
+
+ def delete_user(self, application, methods_to_keep=None):
+ for index, method in enumerate(application['authentication_methods']):
+ if methods_to_keep is None or method not in methods_to_keep:
+ self.delete_user_with_auth(application, index)
+
    def delete_user_with_auth(self, application, index):
        """
        Delete the login for the given application and one authentication method (ZAPI).

        :param application: application dict whose login is being removed.
        :param index: index into application['authentication_methods'] to delete.
        """
        user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-delete', **{'vserver': self.parameters['vserver'],
                                        'user-name': self.parameters['name'],
                                        'application': application['application'],
                                        'authentication-method': application['authentication_methods'][index]})

        try:
            self.server.invoke_successfully(user_delete,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error removing user %s: %s - application: %s'
                                      % (self.parameters['name'], to_native(error), application),
                                  exception=traceback.format_exc())
+
+ @staticmethod
+ def is_repeated_password(message):
+ return message.startswith('New password must be different than last 6 passwords.') \
+ or message.startswith('New password must be different from last 6 passwords.') \
+ or message.startswith('New password must be different than the old password.') \
+ or message.startswith('New password must be different from the old password.')
+
+ def change_password_rest(self, owner_uuid, username):
+ body = {
+ 'password': self.parameters['set_password'],
+ }
+ error = self.patch_account(owner_uuid, username, body)
+ if error:
+ if 'message' in error and self.is_repeated_password(error['message']):
+ # if the password is reused, assume idempotency
+ return False
+ self.module.fail_json(msg='Error while updating user password: %s' % error)
+ return True
+
    def change_password(self):
        """
        Change the user's password via ZAPI security-login-modify-password.

        :return:
            True if password updated
            False if password is not updated (already set to the same value)
        :rtype: bool
        """
        # self.server.set_vserver(self.parameters['vserver'])
        modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-modify-password', **{
                'new-password': str(self.parameters.get('set_password')),
                'user-name': self.parameters['name']})
        try:
            self.server.invoke_successfully(modify_password,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # error 13114 is treated as "nothing to change" — presumably the operation
            # is not applicable in the current state; TODO confirm exact semantics
            if to_native(error.code) == '13114':
                return False
            # if the user give the same password, instead of returning an error, return ok
            if to_native(error.code) == '13214' and self.is_repeated_password(error.message):
                return False
            self.module.fail_json(msg='Error setting password for user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

        self.server.set_vserver(None)
        return True
+
+ def modify_apps_rest(self, owner_uuid, username, apps=None):
+ body = {
+ 'role.name': self.parameters['role_name'],
+ 'applications': self.na_helper.filter_out_none_entries(apps)
+ }
+ error = self.patch_account(owner_uuid, username, body)
+ if error:
+ self.module.fail_json(msg='Error while modifying user details: %s' % error)
+
+ def patch_account(self, owner_uuid, username, body):
+ query = {'name': self.parameters['name'], 'owner.uuid': owner_uuid}
+ api = "security/accounts/%s/%s" % (owner_uuid, username)
+ dummy, result = self.rest_api.patch(api, body, query)
+ return result
+
+ def modify_user(self, application, current_methods):
+ for index, method in enumerate(application['authentication_methods']):
+ if method in current_methods:
+ self.modify_user_with_auth(application, index)
+ else:
+ self.create_user_with_auth(application, index)
+
    def modify_user_with_auth(self, application, index):
        """
        Modify the user's role for one (application, authentication method) pair via ZAPI.

        :param application: dict holding 'application' and 'authentication_methods'.
        :param index: index into application['authentication_methods'] selecting the method to modify.
        """
        user_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-modify', **{'vserver': self.parameters['vserver'],
                                        'user-name': self.parameters['name'],
                                        'application': application['application'],
                                        'authentication-method': application['authentication_methods'][index],
                                        'role-name': self.parameters.get('role_name')})

        try:
            # tunneling disabled: security-login commands are issued at cluster level
            self.server.invoke_successfully(user_modify,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
+
+ def change_sp_application(self, current_apps):
+ """Adjust requested app name to match ONTAP convention"""
+ if not self.parameters['applications']:
+ return
+ app_list = [app['application'] for app in current_apps]
+ for application in self.parameters['applications']:
+ if application['application'] == 'service_processor' and 'service-processor' in app_list:
+ application['application'] = 'service-processor'
+ elif application['application'] == 'service-processor' and 'service_processor' in app_list:
+ application['application'] = 'service_processor'
+
+ def validate_action(self, action):
+ errors = []
+ if action == 'create':
+ if not self.parameters.get('role_name'):
+ errors.append('role_name')
+ if not self.parameters.get('applications'):
+ errors.append('application_dicts or application_strs')
+ if errors:
+ plural = 's' if len(errors) > 1 else ''
+ self.module.fail_json(msg='Error: missing required parameter%s for %s: %s.' %
+ (plural, action, ' and: '.join(errors)))
+
    def modify_apps_zapi(self, current, modify_decision):
        """
        Reconcile the user's applications with the desired state using ZAPI.

        Modifies or creates each desired application, then deletes current applications
        (or individual authentication methods) that are no longer desired.
        :param current: current user record with an 'applications' list.
        :param modify_decision: dict of attributes to change, may or may not contain 'applications'.
        """
        if 'applications' not in modify_decision:
            # to change roles, we need at least one app
            modify_decision['applications'] = self.parameters['applications']
        current_apps = dict((application['application'], application['authentication_methods']) for application in current['applications'])
        for application in modify_decision['applications']:
            if application['application'] in current_apps:
                self.modify_user(application, current_apps[application['application']])
            else:
                self.create_user(application)
        desired_apps = dict((application['application'], application['authentication_methods'])
                            for application in self.parameters['applications'])
        for application in current['applications']:
            if application['application'] not in desired_apps:
                # application no longer desired: remove all its authentication methods
                self.delete_user(application)
            else:
                # application still desired: remove only the methods not in the desired list
                self.delete_user(application, desired_apps[application['application']])
+
+ def get_current(self):
+ owner_uuid, name = None, None
+ if self.use_rest:
+ current = self.get_user_rest()
+ if current is not None:
+ owner_uuid, name = current
+ current = self.get_user_details_rest(name, owner_uuid)
+ self.change_sp_application(current['applications'])
+ else:
+ current = self.get_user()
+ return current, owner_uuid, name
+
    def define_actions(self, current):
        """
        Decide which action is needed: create, delete, or modify.

        :param current: current user record or None.
        :return: tuple (cd_action, modify) where cd_action is 'create'/'delete'/None
                 and modify is a dict of attributes to change (None when cd_action is set).
        """
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
        if self.use_rest and cd_action is None and current and 'lock_user' not in current and self.parameters.get('lock_user') is not None:
            # REST does not return locked if password is not set
            if self.parameters.get('set_password') is None:
                self.module.fail_json(msg='Error: cannot modify lock state if password is not set.')
            modify['lock_user'] = self.parameters['lock_user']
            self.na_helper.changed = True
        self.validate_action(cd_action)
        return cd_action, modify
+
    def take_action(self, cd_action, modify, current, owner_uuid, name):
        """
        Execute the decided create/delete/modify action over REST or ZAPI.

        :return: truthy when a lock/unlock change still needs to be applied later
                 (the caller defers it because lock state requires the password to be set first).
        """
        if cd_action == 'create':
            if self.use_rest:
                self.create_user_rest(self.parameters['applications'])
            else:
                # ZAPI creates one entry per application
                for application in self.parameters['applications']:
                    self.create_user(application)
        elif cd_action == 'delete':
            if self.use_rest:
                self.delete_user_rest(owner_uuid, name)
            else:
                # ZAPI deletes one entry per application
                for application in current['applications']:
                    self.delete_user(application)
        elif modify:
            if 'role_name' in modify or 'applications' in modify:
                if self.use_rest:
                    self.modify_apps_rest(owner_uuid, name, self.parameters['applications'])
                else:
                    self.modify_apps_zapi(current, modify)
        return modify and 'lock_user' in modify
+
    def apply(self):
        """
        Main entry point: gather current state, decide and execute actions,
        change the password if requested, then apply any deferred lock/unlock,
        and exit with the module result.
        """
        current, owner_uuid, name = self.get_current()
        cd_action, modify = self.define_actions(current)
        deferred_lock = False

        if self.na_helper.changed and not self.module.check_mode:
            # lock/unlock actions require password to be set
            deferred_lock = self.take_action(cd_action, modify, current, owner_uuid, name)

        password_changed = False
        if cd_action is None and self.parameters.get('set_password') is not None and self.parameters['state'] == 'present':
            # if check_mode, don't attempt to change the password, but assume it would be changed
            if self.use_rest:
                password_changed = self.module.check_mode or self.change_password_rest(owner_uuid, name)
            else:
                password_changed = self.module.check_mode or self.change_password()
            if self.module.check_mode:
                self.module.warn('Module is not idempotent with check_mode when set_password is present.')

        if deferred_lock:
            if self.use_rest:
                self.lock_unlock_user_rest(owner_uuid, name, self.parameters['lock_user'])
            elif self.parameters.get('lock_user'):
                self.lock_given_user()
            else:
                self.unlock_given_user()

        # bitwise | on bools acts as logical OR here
        self.module.exit_json(changed=self.na_helper.changed | password_changed, current=current, modify=modify)
+
+
def main():
    """Instantiate the user module object and apply the requested state."""
    NetAppOntapUser().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py
new file mode 100644
index 000000000..75c5d0993
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py
@@ -0,0 +1,522 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_user_role
+
+short_description: NetApp ONTAP user role configuration and management
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create or destroy user roles
+
+options:
+
+ state:
+ description:
+ - Whether the specified user role should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ name:
+ description:
+ - The name of the role to manage.
+ required: true
+ type: str
+
+ command_directory_name:
+ description:
+ - The command or command directory to which the role has an access.
+ - Required with ZAPI.
+ - Supported with REST from ONTAP 9.11.1 or later.
+ type: str
+
+ access_level:
+ description:
+ - The access level of the role.
+ - Use C(privileges) for rest-role access choices.
+ choices: ['none', 'readonly', 'all']
+ type: str
+ default: all
+
+ query:
+ description:
+ - A query for the role. The query must apply to the specified command or directory name.
+      - Use double quotes "" for modifying an existing query to none.
+ - Supported with REST from ONTAP 9.11.1 or later.
+ type: str
+ version_added: 2.8.0
+
+ privileges:
+ description:
+ - Privileges to give the user roles
+ - REST only
+ type: list
+ elements: dict
+ version_added: 21.23.0
+ suboptions:
+ query:
+ description:
+ - A query for the role. The query must apply to the specified command or directory name.
+ - Query is only supported on 9.11.1+
+ type: str
+ access:
+ description:
+ - The access level of the role.
+ - For command/command directory path, the only supported enum values are 'none','readonly' and 'all'.
+ - Options 'read_create', 'read_modify' and 'read_create_modify' are supported only with REST and requires ONTAP 9.11.1 or later versions.
+ choices: ['none', 'readonly', 'all', 'read_create', 'read_modify', 'read_create_modify']
+ default: all
+ type: str
+ path:
+ description:
+ - The api or command to which the role has an access.
+ - command or command directory path is supported from ONTAP 9.11.1 or later versions.
+ - Only rest roles are supported for earlier versions.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ - Required with ZAPI.
+ type: str
+
+notes:
+ - supports ZAPI and REST. REST requires ONTAP 9.7 or later.
+ - supports check mode.
+ - when trying to add a command to a role, ONTAP will affect other related commands too.
+ - for example, 'volume modify' will affect 'volume create' and 'volume show', always provide all the related commands.
+ - REST supports both role and rest-role from ONTAP 9.11.1 or later versions and only rest-role for earlier versions.
+'''
+
+EXAMPLES = """
+
+ - name: Create User Role Zapi
+ netapp.ontap.na_ontap_user_role:
+ state: present
+ name: ansibleRole
+ command_directory_name: volume
+ access_level: none
+ query: show
+ vserver: ansibleVServer
+ use_rest: never
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify User Role Zapi
+ netapp.ontap.na_ontap_user_role:
+ state: present
+ name: ansibleRole
+ command_directory_name: volume
+ access_level: none
+ query: ""
+ vserver: ansibleVServer
+ use_rest: never
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create user role REST in ONTAP 9.11.1.
+ netapp.ontap.na_ontap_user_role:
+ state: present
+ privileges:
+ - path: /api/cluster/jobs
+ vserver: ansibleSVM
+ name: carchi-test-role
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify user role REST in ONTAP 9.11.1.
+ netapp.ontap.na_ontap_user_role:
+ state: present
+ privileges:
+ - path: /api/cluster/jobs
+ access: readonly
+ - path: /api/storage/volumes
+ access: readonly
+ vserver: ansibleSVM
+ name: carchi-test-role
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapUserRole:
+
    def __init__(self):
        """
        Build the argument spec, select REST vs ZAPI transport,
        and validate transport-specific required parameters.
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=False, type='str'),
            command_directory_name=dict(required=False, type='str'),
            access_level=dict(required=False, type='str', default='all',
                              choices=['none', 'readonly', 'all']),
            vserver=dict(required=False, type='str'),
            query=dict(required=False, type='str'),
            privileges=dict(required=False, type='list', elements='dict', options=dict(
                query=dict(required=False, type='str'),
                access=dict(required=False, type='str', default='all',
                            choices=['none', 'readonly', 'all', 'read_create', 'read_modify', 'read_create_modify']),
                path=dict(required=True, type='str')
            ))
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            mutually_exclusive=[('command_directory_name', 'privileges'),
                                ('access_level', 'privileges'),
                                ('query', 'privileges')]
        )
        # owner_uuid is filled in by format_record() once the role is fetched over REST
        self.owner_uuid = None
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        if self.parameters.get('privileges') is not None:
            self.parameters['privileges'] = self.na_helper.filter_out_none_entries(self.parameters['privileges'])
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        # these options only work over REST on ONTAP 9.11.1 or later
        partially_supported_rest_properties = [
            ['query', (9, 11, 1)],
            ['privileges.query', (9, 11, 1)],
            ['command_directory_name', (9, 11, 1)]
        ]
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties)
        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 7, 0):
            msg = 'REST requires ONTAP 9.7 or later for security/roles APIs.'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
        if not self.use_rest:
            if netapp_utils.has_netapp_lib() is False:
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            # ZAPI requires a vserver context and a command directory
            if not self.parameters.get('vserver'):
                self.module.fail_json(msg="Error: vserver is required field with ZAPI.")
            if not self.parameters.get('command_directory_name'):
                self.module.fail_json(msg="Error: command_directory_name is required field with ZAPI")
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
        elif not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 1) and self.parameters['state'] == 'present':
            # pre-9.11.1 REST only accepts REST API paths as privileges
            self.validate_rest_path()
+
+ def validate_rest_path(self):
+ """
+ REST does not support command or command directory path in ONTAP < 9.11.1 versions.
+ """
+ invalid_uri = []
+ for privilege in self.parameters.get('privileges', []):
+ # an api path have '/' in it, validate it present for ONTAP earlier versions.
+ if '/' not in privilege['path']:
+ invalid_uri.append(privilege['path'])
+ if invalid_uri:
+ self.module.fail_json(msg="Error: Invalid URI %s, please set valid REST API path" % invalid_uri)
+
    def get_role(self):
        """
        Fetch the current role for the specific command-directory-name.

        :return: dict with name, access_level, command_directory_name and query
                 when the role is found, None otherwise.
                 With REST, delegates to get_role_rest().
        :rtype: dict or None
        """
        if self.use_rest:
            return self.get_role_rest()
        options = {'vserver': self.parameters['vserver'],
                   'role-name': self.parameters['name'],
                   'command-directory-name': self.parameters['command_directory_name']}
        security_login_role_get_iter = netapp_utils.zapi.NaElement(
            'security-login-role-get-iter')
        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-role-info', **options)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        security_login_role_get_iter.add_child_elem(query)

        try:
            result = self.server.invoke_successfully(
                security_login_role_get_iter, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            # Error 16031 denotes a role not being found.
            if to_native(e.code) == "16031":
                return None
            # Error 16039 denotes command directory not found.
            elif to_native(e.code) == "16039":
                return None
            else:
                self.module.fail_json(msg='Error getting role %s: %s' % (self.parameters['name'], to_native(e)),
                                      exception=traceback.format_exc())
        if (result.get_child_by_name('num-records') and
                int(result.get_child_content('num-records')) >= 1):
            role_info = result.get_child_by_name('attributes-list').get_child_by_name('security-login-role-info')
            result = {
                'name': role_info['role-name'],
                'access_level': role_info['access-level'],
                'command_directory_name': role_info['command-directory-name'],
                'query': role_info['role-query']
            }
            return result
        return None
+
+ def create_role(self):
+ if self.use_rest:
+ return self.create_role_rest()
+ options = {'vserver': self.parameters['vserver'],
+ 'role-name': self.parameters['name'],
+ 'command-directory-name': self.parameters['command_directory_name'],
+ 'access-level': self.parameters['access_level']}
+ if self.parameters.get('query'):
+ options['role-query'] = self.parameters['query']
+ role_create = netapp_utils.zapi.NaElement.create_node_with_children('security-login-role-create', **options)
+
+ try:
+ self.server.invoke_successfully(role_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating role %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_role(self):
+ if self.use_rest:
+ return self.delete_role_rest()
+ role_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-delete', **{'vserver': self.parameters['vserver'],
+ 'role-name': self.parameters['name'],
+ 'command-directory-name':
+ self.parameters['command_directory_name']})
+
+ try:
+ self.server.invoke_successfully(role_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error removing role %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_role(self, modify):
+ if self.use_rest:
+ return self.modify_role_rest(modify)
+ options = {'vserver': self.parameters['vserver'],
+ 'role-name': self.parameters['name'],
+ 'command-directory-name': self.parameters['command_directory_name']}
+ if 'access_level' in modify.keys():
+ options['access-level'] = self.parameters['access_level']
+ if 'query' in modify.keys():
+ options['role-query'] = self.parameters['query']
+
+ role_modify = netapp_utils.zapi.NaElement.create_node_with_children('security-login-role-modify', **options)
+
+ try:
+ self.server.invoke_successfully(role_modify,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying role %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
    def get_role_rest(self):
        """
        Fetch the role record over REST.

        :return: normalized dict from format_record(), or None when the role does not exist.
        """
        api = 'security/roles'
        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 1):
            # privileges.query is only reported by ONTAP 9.11.1 and later
            fields = 'name,owner,privileges.path,privileges.access,privileges.query'
        else:
            fields = 'name,owner,privileges.path,privileges.access'
        params = {'name': self.parameters['name'],
                  'fields': fields}
        if self.parameters.get('vserver'):
            params['owner.name'] = self.parameters['vserver']
        else:
            # no vserver given: look up a cluster-scoped role
            params['scope'] = 'cluster'
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg="Error getting role %s: %s" % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        return self.format_record(record)
+
+ def format_record(self, record):
+ if not record:
+ return None
+ for each in self.na_helper.safe_get(record, ['privileges']):
+ if each['path'] == 'DEFAULT':
+ record['privileges'].remove(each)
+ for each in self.na_helper.safe_get(record, ['privileges']):
+ if each.get('_links'):
+ each.pop('_links')
+ return_record = {
+ 'name': self.na_helper.safe_get(record, ['name']),
+ 'privileges': self.na_helper.safe_get(record, ['privileges']),
+ }
+ self.owner_uuid = self.na_helper.safe_get(record, ['owner', 'uuid'])
+ return return_record
+
+ def create_role_rest(self):
+ api = 'security/roles'
+ body = {'name': self.parameters['name']}
+ if self.parameters.get('vserver'):
+ body['owner.name'] = self.parameters['vserver']
+ body['privileges'] = self.parameters['privileges']
+ dummy, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error creating role %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_role_rest(self):
+ api = 'security/roles'
+ uuids = '%s/%s' % (self.owner_uuid, self.parameters['name'])
+ dummy, error = rest_generic.delete_async(self.rest_api, api, uuids, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error deleting role %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
    def modify_role_rest(self, modify):
        """
        Reconcile the role's privileges over REST.

        There is no direct modify for a role: each desired privilege is created,
        patched (when access or query differ), or left alone; privileges no longer
        desired are deleted.
        :param modify: dict of changed attributes, must contain 'privileges'.
        """
        privileges = self.get_role_privileges_rest()
        modify_privilege = []
        for privilege in modify['privileges']:
            path = privilege['path']
            modify_privilege.append(path)
            # if the path is not in privilege then it need to be added
            if path not in privileges:
                self.create_role_privilege(privilege)
            elif privilege.get('query'):
                # patch when the current entry has no query, or a different one
                if not privileges[path].get('query'):
                    self.modify_role_privilege(privilege, path)
                elif privilege['query'] != privileges[path]['query']:
                    self.modify_role_privilege(privilege, path)
            elif privilege.get('access') and privilege['access'] != privileges[path]['access']:
                self.modify_role_privilege(privilege, path)
        for privilege_path in privileges:
            if privilege_path not in modify_privilege:
                # privilege exists on ONTAP but is no longer desired
                self.delete_role_privilege(privilege_path)
+
+ def get_role_privileges_rest(self):
+ api = 'security/roles/%s/%s/privileges' % (self.owner_uuid, self.parameters['name'])
+ records, error = rest_generic.get_0_or_more_records(self.rest_api, api, {})
+ if error:
+ self.module.fail_json(msg="Error getting role privileges for role %s: %s" % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ return self.format_privileges(records)
+
+ def format_privileges(self, records):
+ return_dict = {}
+ for record in records:
+ return_dict[record['path']] = record
+ return return_dict
+
+ def create_role_privilege(self, privilege):
+ api = 'security/roles/%s/%s/privileges' % (self.owner_uuid, self.parameters['name'])
+ body = {'path': privilege['path'], 'access': privilege['access']}
+ dummy, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=120)
+ if error:
+ self.module.fail_json(msg='Error creating role privilege %s: %s' % (privilege['path'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def modify_role_privilege(self, privilege, path):
+ path = path.replace('/', '%2F')
+ api = 'security/roles/%s/%s/privileges' % (self.owner_uuid, self.parameters['name'])
+ body = {}
+ if privilege.get('access'):
+ body['access'] = privilege['access']
+ if privilege.get('query'):
+ body['query'] = privilege['query']
+ dummy, error = rest_generic.patch_async(self.rest_api, api, path, body)
+ if error:
+ self.module.fail_json(msg='Error modifying privileges for path %s: %s' % (path, to_native(error)),
+ exception=traceback.format_exc())
+
+ def delete_role_privilege(self, path):
+ path = path.replace('/', '%2F')
+ api = 'security/roles/%s/%s/privileges' % (self.owner_uuid, self.parameters['name'])
+ dummy, error = rest_generic.delete_async(self.rest_api, api, path, job_timeout=120)
+ if error:
+ # removing one of relevant commands will also remove all other commands in group.
+ # skip if entry does not exist error occurs.
+ if "entry doesn't exist" in error and "'target': 'path'" in error:
+ return
+ self.module.fail_json(msg='Error deleting role privileges %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def convert_parameters(self):
+ if self.parameters.get('privileges') is not None:
+ return
+ if not self.parameters.get('command_directory_name'):
+ self.module.fail_json(msg="Error: either path or command_directory_name is required in REST.")
+ self.parameters['privileges'] = []
+ temp_dict = {
+ 'path': self.parameters['command_directory_name'],
+ 'access': self.parameters['access_level']
+ }
+ self.parameters.pop('command_directory_name')
+ self.parameters.pop('access_level')
+ if self.parameters.get('query'):
+ temp_dict['query'] = self.parameters['query']
+ self.parameters.pop('query')
+ self.parameters['privileges'] = [temp_dict]
+
    def validate_create_modify_required(self, current, modify):
        """
        Warn when ONTAP silently changed more than requested.

        When a command privilege is added or removed, ONTAP also affects related
        commands in the same group; re-read the role after create/modify and warn
        when it still differs from the desired parameters.
        :param current: role record before the action (None when it was created).
        :param modify: dict of attributes that were modified, or None.
        """
        new_current = self.get_role()
        new_cd_action = self.na_helper.get_cd_action(new_current, self.parameters)
        new_modify = None if new_cd_action else self.na_helper.get_modified_attributes(new_current, self.parameters)
        msg = ''
        if current is None and new_modify:
            msg = "Create operation also affected additional related commands: %s" % new_current['privileges']
        elif modify and new_cd_action == 'create':
            msg = """Create role is required, desired is: %s but it's a subset of relevant commands/command directory configured in current: %s,
                  deleting one of the commands will remove all the commands in the relevant group""" % (self.parameters['privileges'], current['privileges'])
        elif modify and new_modify:
            msg = "modify is required, desired: %s and new current: %s" % (self.parameters['privileges'], new_current['privileges'])
        if msg:
            self.module.warn(msg)
+
    def apply(self):
        """
        Main entry point: normalize parameters, compare current vs desired state,
        create/delete/modify the role as needed, and exit with the module result.
        """
        if self.use_rest:
            # if rest convert parameters to rest format if zapi format is used
            self.convert_parameters()
        current = self.get_role()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # if desired state specify empty quote query and current query is None, set desired query to None.
        # otherwise na_helper.get_modified_attributes will detect a change.
        # for REST, query is part of a tuple in privileges list.
        if not self.use_rest and self.parameters.get('query') == '' and current is not None and current['query'] is None:
            self.parameters['query'] = None

        modify = None if cd_action else self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_role()
            elif cd_action == 'delete':
                self.delete_role()
            elif modify:
                self.modify_role(modify)
            if self.use_rest:
                # ONTAP may adjust related commands as a side effect; warn if the result differs
                self.validate_create_modify_required(current, modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """Instantiate the user-role module object and apply the requested state."""
    NetAppOntapUserRole().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py
new file mode 100644
index 000000000..7ca007c29
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py
@@ -0,0 +1,2902 @@
+#!/usr/bin/python
+
+# (c) 2018-2023, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_volume
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_volume
+
+short_description: NetApp ONTAP manage volumes.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create or destroy or modify volumes on NetApp ONTAP.
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ choices: ['present', 'absent']
+ type: str
+ default: 'present'
+
+ name:
+ description:
+ - The name of the volume to manage.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ from_name:
+ description:
+ - Name of the existing volume to be renamed to name.
+ type: str
+ version_added: 2.7.0
+
+ is_infinite:
+ type: bool
+ description:
+ - Set True if the volume is an Infinite Volume.
+ - Deleting an infinite volume is asynchronous.
+ default: false
+
+ is_online:
+ type: bool
+ description:
+ - Whether the specified volume is online, or not.
+ default: True
+
+ aggregate_name:
+ description:
+ - The name of the aggregate the flexvol should exist on.
+ - Cannot be set when using the na_application_template option.
+ type: str
+
+ tags:
+ description:
+ - Tags are an optional way to track the uses of a resource.
+ - Tag values must be formatted as key:value strings, example ["team:csi", "environment:test"]
+ type: list
+ elements: str
+ version_added: 22.6.0
+
+ nas_application_template:
+ description:
+ - additional options when using the application/applications REST API to create a volume.
+ - the module is using ZAPI by default, and switches to REST if any suboption is present.
+ - create a FlexVol by default.
+ - create a FlexGroup if C(auto_provision_as) is set and C(FlexCache) option is not present.
+ - create a FlexCache if C(flexcache) option is present.
+ type: dict
+ version_added: 20.12.0
+ suboptions:
+ flexcache:
+ description: whether to create a flexcache. If absent, a FlexVol or FlexGroup is created.
+ type: dict
+ suboptions:
+ dr_cache:
+ description:
+ - whether to use the same flexgroup msid as the origin.
+ - requires ONTAP 9.9 and REST.
+ - create only option, ignored if the flexcache already exists.
+ type: bool
+ version_added: 21.3.0
+ origin_svm_name:
+ description: the remote SVM for the flexcache.
+ type: str
+ required: true
+ origin_component_name:
+ description: the remote component for the flexcache.
+ type: str
+ required: true
+ cifs_access:
+ description:
+ - The list of CIFS access controls. You must provide I(user_or_group) or I(access) to enable CIFS access.
+ type: list
+ elements: dict
+ suboptions:
+ access:
+ description: The CIFS access granted to the user or group. Default is full_control.
+ type: str
+ choices: [change, full_control, no_access, read]
+ user_or_group:
+ description: The name of the CIFS user or group that will be granted access. Default is Everyone.
+ type: str
+ nfs_access:
+ description:
+ - The list of NFS access controls. You must provide I(host) or I(access) to enable NFS access.
+ - Mutually exclusive with export_policy option.
+ type: list
+ elements: dict
+ suboptions:
+ access:
+ description: The NFS access granted. Default is rw.
+ type: str
+ choices: [none, ro, rw]
+ host:
+ description: The name of the NFS entity granted access. Default is 0.0.0.0/0.
+ type: str
+ storage_service:
+ description:
+ - The performance service level (PSL) for this volume
+ type: str
+ choices: ['value', 'performance', 'extreme']
+ tiering:
+ description:
+ - Cloud tiering policy (see C(tiering_policy) for a more complete description).
+ type: dict
+ suboptions:
+ control:
+ description: Storage tiering placement rules for the container.
+ choices: ['required', 'best_effort', 'disallowed']
+ type: str
+ policy:
+ description:
+ - Cloud tiering policy (see C(tiering_policy)).
+ - Must match C(tiering_policy) if both are present.
+ choices: ['all', 'auto', 'none', 'snapshot-only']
+ type: str
+ object_stores:
+ description: list of object store names for tiering.
+ type: list
+ elements: str
+ exclude_aggregates:
+ description:
+ - The list of aggregate names to exclude when creating a volume.
+ - Requires ONTAP 9.9.1 GA or later.
+ type: list
+ elements: str
+ version_added: 21.7.0
+ use_nas_application:
+ description:
+ - Whether to use the application/applications REST/API to create a volume.
+ - This will default to true if any other suboption is present.
+ type: bool
+ default: true
+
+ size:
+ description:
+ - The size of the volume in (size_unit). Required when C(state=present).
+ type: int
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ default: 'gb'
+
+ size_change_threshold:
+ description:
+ - Percentage in size change to trigger a resize.
+ - When this parameter is greater than 0, a difference in size between what is expected and what is configured is ignored if it is below the threshold.
+ - For instance, the nas application allocates a larger size than specified to account for overhead.
+ - Set this to 0 for an exact match.
+ type: int
+ default: 10
+ version_added: 20.12.0
+
+ sizing_method:
+ description:
+ - Represents the method to modify the size of a FlexGroup.
+ - use_existing_resources - Increases or decreases the size of the FlexGroup by increasing or decreasing the size of the current FlexGroup resources.
+ - add_new_resources - Increases the size of the FlexGroup by adding new resources. This is limited to two new resources per available aggregate.
+ - This is only supported if REST is enabled (ONTAP 9.6 or later) and only for FlexGroups. ONTAP defaults to use_existing_resources.
+ type: str
+ choices: ['add_new_resources', 'use_existing_resources']
+ version_added: 20.12.0
+
+ type:
+ description:
+ - The volume type, either read-write (RW) or data-protection (DP).
+ type: str
+
+ export_policy:
+ description:
+ - Name of the export policy.
+ - Mutually exclusive with nfs_access suboption in nas_application_template.
+ type: str
+ aliases: ['policy']
+
+ junction_path:
+ description:
+ - Junction path of the volume.
+ - To unmount, use junction path C('').
+ type: str
+
+ space_guarantee:
+ description:
+ - Space guarantee style for the volume.
+ - The file setting is no longer supported.
+ choices: ['none', 'file', 'volume']
+ type: str
+
+ percent_snapshot_space:
+ description:
+ - Amount of space reserved for snapshot copies of the volume.
+ type: int
+
+ volume_security_style:
+ description:
+ - The security style associated with this volume.
+ choices: ['mixed', 'ntfs', 'unified', 'unix']
+ type: str
+
+ encrypt:
+ type: bool
+ description:
+ - Whether or not to enable Volume Encryption.
+ - If not present, ONTAP defaults to false at volume creation.
+ - Changing encrypt value after creation requires ONTAP 9.3 or later.
+ version_added: 2.7.0
+
+ efficiency_policy:
+ description:
+ - Allows a storage efficiency policy to be set on volume creation.
+ type: str
+ version_added: 2.7.0
+
+ unix_permissions:
+ description:
+ - Unix permission bits in octal or symbolic format.
+      - For example, 0 is equivalent to ------------, 777 is equivalent to ---rwxrwxrwx, both formats are accepted.
+ - The valid octal value ranges between 0 and 777 inclusive.
+ type: str
+ version_added: 2.8.0
+
+ group_id:
+ description:
+ - The UNIX group ID for the volume. The default value is 0 ('root').
+ type: int
+ version_added: '20.1.0'
+
+ user_id:
+ description:
+ - The UNIX user ID for the volume. The default value is 0 ('root').
+ type: int
+ version_added: '20.1.0'
+
+ snapshot_policy:
+ description:
+ - The name of the snapshot policy.
+ - The default policy name is 'default'.
+ - If present, this will set the protection_type when using C(nas_application_template).
+ type: str
+ version_added: 2.8.0
+
+ aggr_list:
+ description:
+ - an array of names of aggregates to be used for FlexGroup constituents.
+ type: list
+ elements: str
+ version_added: 2.8.0
+
+ aggr_list_multiplier:
+ description:
+ - The number of times to iterate over the aggregates listed with the aggr_list parameter when creating a FlexGroup.
+ type: int
+ version_added: 2.8.0
+
+ auto_provision_as:
+ description:
+ - Automatically provision a FlexGroup volume.
+ version_added: 2.8.0
+ choices: ['flexgroup']
+ type: str
+
+ snapdir_access:
+ description:
+ - This is an advanced option, the default is False.
+ - Enable the visible '.snapshot' directory that is normally present at system internal mount points.
+ - This value also turns on access to all other '.snapshot' directories in the volume.
+ type: bool
+ version_added: 2.8.0
+
+ atime_update:
+ description:
+ - This is an advanced option, the default is True.
+ - If false, prevent the update of inode access times when a file is read.
+ - This value is useful for volumes with extremely high read traffic,
+ since it prevents writes to the inode file for the volume from contending with reads from other files.
+ - This field should be used carefully.
+ - That is, use this field when you know in advance that the correct access time for inodes will not be needed for files on that volume.
+ type: bool
+ version_added: 2.8.0
+
+ wait_for_completion:
+ description:
+ - Set this parameter to 'true' for synchronous execution during create (wait until volume status is online)
+ - Set this parameter to 'false' for asynchronous execution
+ - For asynchronous, execution exits as soon as the request is sent, without checking volume status
+ type: bool
+ default: false
+ version_added: 2.8.0
+
+ time_out:
+ description:
+ - With ZAPI - time to wait for Flexgroup creation, modification, or deletion in seconds.
+ - With REST - time to wait for any volume creation, modification, or deletion in seconds.
+ - Error out if task is not completed in defined time.
+ - With ZAPI - if 0, the request is asynchronous.
+ - Default is set to 3 minutes.
+ - Use C(max_wait_time) and C(wait_for_completion) for volume move and encryption operations.
+ default: 180
+ type: int
+ version_added: 2.8.0
+
+ max_wait_time:
+ description:
+ - Volume move and encryption operations might take longer time to complete.
+ - With C(wait_for_completion) set, module will wait for time set in this option for volume move and encryption to complete.
+      - If time expires, the module exits and the operation may still be running.
+ - Default is set to 10 minutes.
+ default: 600
+ type: int
+ version_added: 22.0.0
+
+ language:
+ description:
+ - Language to use for Volume
+ - Default uses SVM language
+ - Possible values Language
+ - c POSIX
+ - ar Arabic
+ - cs Czech
+ - da Danish
+ - de German
+ - en English
+ - en_us English (US)
+ - es Spanish
+ - fi Finnish
+ - fr French
+ - he Hebrew
+ - hr Croatian
+ - hu Hungarian
+ - it Italian
+ - ja Japanese euc-j
+ - ja_v1 Japanese euc-j
+ - ja_jp.pck Japanese PCK (sjis)
+ - ja_jp.932 Japanese cp932
+ - ja_jp.pck_v2 Japanese PCK (sjis)
+ - ko Korean
+ - no Norwegian
+ - nl Dutch
+ - pl Polish
+ - pt Portuguese
+ - ro Romanian
+ - ru Russian
+ - sk Slovak
+ - sl Slovenian
+ - sv Swedish
+ - tr Turkish
+ - zh Simplified Chinese
+ - zh.gbk Simplified Chinese (GBK)
+ - zh_tw Traditional Chinese euc-tw
+ - zh_tw.big5 Traditional Chinese Big 5
+ - To use UTF-8 as the NFS character set, append '.UTF-8' to the language code
+ type: str
+ version_added: 2.8.0
+
+ qos_policy_group:
+ description:
+ - Specifies a QoS policy group to be set on volume.
+ type: str
+ version_added: 2.9.0
+
+ qos_adaptive_policy_group:
+ description:
+ - Specifies a QoS adaptive policy group to be set on volume.
+ type: str
+ version_added: 2.9.0
+
+ tiering_policy:
+ description:
+ - The tiering policy that is to be associated with the volume.
+ - This policy decides whether the blocks of a volume will be tiered to the capacity tier.
+ - snapshot-only policy allows tiering of only the volume snapshot copies not associated with the active file system.
+ - auto policy allows tiering of both snapshot and active file system user data to the capacity tier.
+ - backup policy on DP volumes allows all transferred user data blocks to start in the capacity tier.
+ - all is the REST equivalent for backup.
+ - When set to none, the Volume blocks will not be tiered to the capacity tier.
+ - If no value specified, the volume is assigned snapshot only by default.
+ - Requires ONTAP 9.4 or later.
+ choices: ['snapshot-only', 'auto', 'backup', 'none', 'all']
+ type: str
+ version_added: 2.9.0
+
+ space_slo:
+ description:
+ - Specifies the space SLO type for the volume. The space SLO type is the Service Level Objective for space management for the volume.
+ - The space SLO value is used to enforce existing volume settings so that sufficient space is set aside on the aggregate to meet the space SLO.
+ - This parameter is not supported on Infinite Volumes.
+ choices: ['none', 'thick', 'semi-thick']
+ type: str
+ version_added: 2.9.0
+
+ nvfail_enabled:
+ description:
+ - If true, the controller performs additional work at boot and takeover times if it finds that there has been any potential data loss in the volume's
+ constituents due to an NVRAM failure.
+ - The volume's constituents would be put in a special state called 'in-nvfailed-state' such that protocol access is blocked.
+ - This will cause the client applications to crash and thus prevent access to stale data.
+ - To get out of this situation, the admin needs to manually clear the 'in-nvfailed-state' on the volume's constituents.
+ type: bool
+ version_added: 2.9.0
+
+ vserver_dr_protection:
+ description:
+ - Specifies the protection type for the volume in a Vserver DR setup.
+ choices: ['protected', 'unprotected']
+ type: str
+ version_added: 2.9.0
+
+ comment:
+ description:
+ - Sets a comment associated with the volume.
+ type: str
+ version_added: 2.9.0
+
+ snapshot_auto_delete:
+ description:
+ - A dictionary for the auto delete options and values.
+ - Supported options include 'state', 'commitment', 'trigger', 'target_free_space', 'delete_order', 'defer_delete',
+ 'prefix', 'destroy_list'.
+ - Option 'state' determines if the snapshot autodelete is currently enabled for the volume. Possible values are 'on' and 'off'.
+ - Option 'commitment' determines the snapshots which snapshot autodelete is allowed to delete to get back space.
+ Possible values are 'try', 'disrupt' and 'destroy'.
+ - Option 'trigger' determines the condition which starts the automatic deletion of snapshots.
+ Possible values are 'volume', 'snap_reserve' and DEPRECATED 'space_reserve'.
+ - Option 'target_free_space' determines when snapshot autodelete should stop deleting snapshots. Depending on the trigger,
+ snapshots are deleted till we reach the target free space percentage. Accepts int type.
+ - Option 'delete_order' determines if the oldest or newest snapshot is deleted first. Possible values are 'newest_first' and 'oldest_first'.
+ - Option 'defer_delete' determines which kind of snapshots to delete in the end. Possible values are 'scheduled', 'user_created',
+ 'prefix' and 'none'.
+ - Option 'prefix' can be set to provide the prefix string for the 'prefix' value of the 'defer_delete' option.
+ The prefix string length can be 15 char long.
+      - Option 'destroy_list' is a comma separated list of services which can be destroyed if the snapshot backing that service is deleted.
+ For 7-mode, the possible values for this option are a combination of 'lun_clone', 'vol_clone', 'cifs_share', 'file_clone' or 'none'.
+ For cluster-mode, the possible values for this option are a combination of 'lun_clone,file_clone' (for LUN clone and/or file clone),
+ 'lun_clone,sfsr' (for LUN clone and/or sfsr), 'vol_clone', 'cifs_share', or 'none'.
+ type: dict
+ version_added: '20.4.0'
+
+ cutover_action:
+ description:
+ - Specifies the action to be taken for cutover.
+ - Possible values are 'abort_on_failure', 'defer_on_failure', 'force' and 'wait'. Default is 'defer_on_failure'.
+ choices: ['abort_on_failure', 'defer_on_failure', 'force', 'wait']
+ type: str
+ version_added: '20.5.0'
+
+ check_interval:
+ description:
+ - The amount of time in seconds to wait between checks of a volume to see if it has moved successfully.
+ default: 30
+ type: int
+ version_added: '20.6.0'
+
+ from_vserver:
+ description:
+ - The source vserver of the volume is rehosted.
+ type: str
+ version_added: '20.6.0'
+
+ auto_remap_luns:
+ description:
+ - Flag to control automatic map of LUNs.
+ type: bool
+ version_added: '20.6.0'
+
+ force_unmap_luns:
+ description:
+ - Flag to control automatic unmap of LUNs.
+ type: bool
+ version_added: '20.6.0'
+
+ force_restore:
+ description:
+ - If this field is set to "true", the Snapshot copy is restored even if the volume has one or more newer Snapshot
+ copies which are currently used as reference Snapshot copy by SnapMirror. If a restore is done in this
+ situation, this will cause future SnapMirror transfers to fail.
+ - Option should only be used along with snapshot_restore.
+ type: bool
+ version_added: '20.6.0'
+
+ preserve_lun_ids:
+ description:
+ - If this field is set to "true", LUNs in the volume being restored will remain mapped and their identities
+ preserved such that host connectivity will not be disrupted during the restore operation. I/O's to the LUN will
+ be fenced during the restore operation by placing the LUNs in an unavailable state. Once the restore operation
+ has completed, hosts will be able to resume I/O access to the LUNs.
+ - Option should only be used along with snapshot_restore.
+ type: bool
+ version_added: '20.6.0'
+
+ snapshot_restore:
+ description:
+ - Name of snapshot to restore from.
+ - Not supported on Infinite Volume.
+ type: str
+ version_added: '20.6.0'
+
+ compression:
+ description:
+ - Whether to enable compression for the volume (HDD and Flash Pool aggregates).
+ - If this option is not present, it is automatically set to true if inline_compression is true.
+ type: bool
+ version_added: '20.12.0'
+
+ inline_compression:
+ description:
+ - Whether to enable inline compression for the volume (HDD and Flash Pool aggregates, AFF platforms).
+ type: bool
+ version_added: '20.12.0'
+
+ tiering_minimum_cooling_days:
+ description:
+ - Determines how many days must pass before inactive data in a volume using the Auto or Snapshot-Only policy is
+ considered cold and eligible for tiering.
+ - This option is only supported in REST 9.8 or later.
+ type: int
+ version_added: '20.16.0'
+
+ logical_space_enforcement:
+ description:
+ - This optionally specifies whether to perform logical space accounting on the volume. When space is enforced
+ logically, ONTAP enforces volume settings such that all the physical space saved by the storage efficiency
+ features will be calculated as used.
+ - This is only supported with REST.
+ type: bool
+ version_added: '20.16.0'
+
+ logical_space_reporting:
+ description:
+ - This optionally specifies whether to report space logically on the volume. When space is reported logically,
+ ONTAP reports the volume space such that all the physical space saved by the storage efficiency features are also
+ reported as used.
+ - This is only supported with REST.
+ type: bool
+ version_added: '20.16.0'
+
+ snaplock:
+ description:
+ - Starting with ONTAP 9.10.1, snaplock.type is set at the volume level.
+ - The other suboptions can be set or modified when using REST on earlier versions of ONTAP.
+ - This option and suboptions are only supported with REST.
+ type: dict
+ version_added: 21.18.0
+ suboptions:
+ append_mode_enabled:
+ description:
+ - when enabled, all the files created with write permissions on the volume are, by default,
+ WORM appendable files. The user can append the data to a WORM appendable file but cannot modify
+ the existing contents of the file nor delete the file until it expires.
+ type: bool
+ autocommit_period:
+ description:
+ - autocommit period for SnapLock volume. All files which are not modified for a period greater than
+ the autocommit period of the volume are committed to the WORM state.
+ - duration is in the ISO-8601 duration format (eg PY, PM, PD, PTH, PTM).
+ - examples P30M, P10Y, PT1H, "none". A duration that combines different periods is not supported.
+ type: str
+ privileged_delete:
+ description:
+ - privileged-delete attribute of a SnapLock volume.
+ - On a SnapLock Enterprise (SLE) volume, a designated privileged user can selectively delete files irrespective of the retention time of the file.
+ - On a SnapLock Compliance (SLC) volume, it is always permanently_disabled.
+ type: str
+ choices: [disabled, enabled, permanently_disabled]
+ retention:
+ description:
+          - default, maximum, and minimum retention periods for files committed to the WORM state on the volume.
+ - durations are in the ISO-8601 duration format, see autocommit_period.
+ type: dict
+ suboptions:
+ default:
+ description:
+ - default retention period that is applied to files while committing them to the WORM state without an associated retention period.
+ type: str
+ maximum:
+ description:
+ - maximum allowed retention period for files committed to the WORM state on the volume.
+ type: str
+ minimum:
+ description:
+ - minimum allowed retention period for files committed to the WORM state on the volume.
+ type: str
+ type:
+ description:
+ - The SnapLock type of the volume.
+ - compliance - A SnapLock Compliance (SLC) volume provides the highest level of WORM protection and
+ an administrator cannot destroy a SLC volume if it contains unexpired WORM files.
+ - enterprise - An administrator can delete a SnapLock Enterprise (SLE) volume.
+ - non_snaplock - Indicates the volume is non-snaplock.
+ type: str
+ choices: [compliance, enterprise, non_snaplock]
+
+ max_files:
+ description:
+ - The maximum number of files (inodes) for user-visible data allowed on the volume.
+ - Note - ONTAP allocates a slightly different value, for instance 3990 when asking for 4000.
+        To preserve idempotency, small variations in size are ignored.
+ type: int
+ version_added: '20.18.0'
+
+ analytics:
+ description:
+ - Set file system analytics state of the volume.
+ - Only supported with REST and requires ONTAP 9.8 or later version.
+ - Cannot enable analytics for volume that contains luns.
+ type: str
+ version_added: '22.0.0'
+ choices: ['on', 'off']
+
+notes:
+ - supports REST and ZAPI. REST requires ONTAP 9.6 or later. Efficiency with REST requires ONTAP 9.7 or later.
+ - REST is enabled when C(use_rest) is set to always.
+ - The feature_flag C(warn_or_fail_on_fabricpool_backend_change) controls whether an error is reported when
+ tiering control would require or disallow FabricPool for an existing volume with a different backend.
+ Allowed values are fail, warn, and ignore, and the default is set to fail.
+ - snapshot_restore is not idempotent, it always restores.
+
+'''
+
+EXAMPLES = """
+
+ - name: Create FlexVol
+ netapp.ontap.na_ontap_volume:
+ state: present
+ name: ansibleVolume12
+ is_infinite: False
+ aggregate_name: ansible_aggr
+ size: 100
+ size_unit: mb
+ user_id: 1001
+ group_id: 2002
+ space_guarantee: none
+ tiering_policy: auto
+ export_policy: default
+ percent_snapshot_space: 60
+ qos_policy_group: max_performance_gold
+ vserver: ansibleVServer
+ wait_for_completion: True
+ space_slo: none
+ nvfail_enabled: False
+ comment: ansible created volume
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Volume Delete
+ netapp.ontap.na_ontap_volume:
+ state: absent
+ name: ansibleVolume12
+ aggregate_name: ansible_aggr
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Make FlexVol offline
+ netapp.ontap.na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ is_infinite: False
+ is_online: False
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Create Flexgroup volume manually
+ netapp.ontap.na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ is_infinite: False
+ aggr_list: "{{ aggr_list }}"
+ aggr_list_multiplier: 2
+ size: 200
+ size_unit: mb
+ space_guarantee: none
+ export_policy: default
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+ unix_permissions: 777
+ snapshot_policy: default
+ time_out: 0
+
+  - name: Create Flexgroup volume auto provision as flex group
+ netapp.ontap.na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ is_infinite: False
+ auto_provision_as: flexgroup
+ size: 200
+ size_unit: mb
+ space_guarantee: none
+ export_policy: default
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+ unix_permissions: 777
+ snapshot_policy: default
+ time_out: 0
+
+ - name: Create FlexVol with QoS adaptive
+ netapp.ontap.na_ontap_volume:
+ state: present
+ name: ansibleVolume15
+ is_infinite: False
+ aggregate_name: ansible_aggr
+ size: 100
+ size_unit: gb
+ space_guarantee: none
+ export_policy: default
+ percent_snapshot_space: 10
+ qos_adaptive_policy_group: extreme
+ vserver: ansibleVServer
+ wait_for_completion: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify volume dr protection (vserver of the volume must be in a snapmirror relationship)
+ netapp.ontap.na_ontap_volume:
+ state: present
+ name: ansibleVolume
+ vserver_dr_protection: protected
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+
+ - name: Modify volume with snapshot auto delete options
+ netapp.ontap.na_ontap_volume:
+ state: present
+ name: vol_auto_delete
+ snapshot_auto_delete:
+ state: "on"
+ commitment: try
+ defer_delete: scheduled
+ target_free_space: 30
+ destroy_list: lun_clone,vol_clone
+ delete_order: newest_first
+ aggregate_name: "{{ aggr }}"
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: False
+
+ - name: Move volume with force cutover action
+ netapp.ontap.na_ontap_volume:
+ name: ansible_vol
+ aggregate_name: aggr_ansible
+ cutover_action: force
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: false
+
+ - name: Rehost volume to another vserver auto remap luns
+ netapp.ontap.na_ontap_volume:
+ name: ansible_vol
+ from_vserver: ansible
+ auto_remap_luns: true
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: false
+
+ - name: Rehost volume to another vserver force unmap luns
+ netapp.ontap.na_ontap_volume:
+ name: ansible_vol
+ from_vserver: ansible
+ force_unmap_luns: true
+ vserver: "{{ vserver }}"
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: false
+
+ - name: Snapshot restore volume
+ netapp.ontap.na_ontap_volume:
+ name: ansible_vol
+ vserver: ansible
+ snapshot_restore: 2020-05-24-weekly
+ force_restore: true
+ preserve_lun_ids: true
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+
+ - name: Volume create using application/applications nas template
+ netapp.ontap.na_ontap_volume:
+ state: present
+ name: ansibleVolume12
+ vserver: ansibleSVM
+ size: 100000000
+ size_unit: b
+ space_guarantee: none
+ language: es
+ percent_snapshot_space: 60
+ unix_permissions: ---rwxrwxrwx
+ snapshot_policy: default
+ efficiency_policy: default
+ comment: testing
+ nas_application_template:
+ nfs_access: # the mere presence of a suboption is enough to enable this new feature
+ - access: ro
+ - access: rw
+ host: 10.0.0.0/8
+ exclude_aggregates: aggr0
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ https: true
+ validate_certs: false
+
+ # requires Ontap collection version - 21.24.0 to use iso filter plugin.
+ - name: volume create with snaplock set.
+ netapp.ontap.na_ontap_volume:
+ state: present
+ name: "{{ snaplock_volume }}"
+ aggregate_name: "{{ aggregate }}"
+ size: 20
+ size_unit: mb
+ space_guarantee: none
+ policy: default
+ type: rw
+ snaplock:
+ type: enterprise
+ retention:
+ default: "{{ 60 | netapp.ontap.iso8601_duration_from_seconds }}"
+
+"""
+
+RETURN = """
+"""
+
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.rest_application import RestApplication
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver
+
+
+class NetAppOntapVolume:
+ '''Class with volume operations'''
+
+ def __init__(self):
+ '''Initialize module parameters'''
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ vserver=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ is_infinite=dict(required=False, type='bool', default=False),
+ is_online=dict(required=False, type='bool', default=True),
+ size=dict(type='int', default=None),
+ size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], type='str'),
+ sizing_method=dict(choices=['add_new_resources', 'use_existing_resources'], type='str'),
+ aggregate_name=dict(type='str', default=None),
+ type=dict(type='str', default=None),
+ export_policy=dict(type='str', default=None, aliases=['policy']),
+ junction_path=dict(type='str', default=None),
+ space_guarantee=dict(choices=['none', 'file', 'volume'], default=None),
+ percent_snapshot_space=dict(type='int', default=None),
+ volume_security_style=dict(choices=['mixed', 'ntfs', 'unified', 'unix']),
+ encrypt=dict(required=False, type='bool'),
+ efficiency_policy=dict(required=False, type='str'),
+ unix_permissions=dict(required=False, type='str'),
+ group_id=dict(required=False, type='int'),
+ user_id=dict(required=False, type='int'),
+ snapshot_policy=dict(required=False, type='str'),
+ aggr_list=dict(required=False, type='list', elements='str'),
+ aggr_list_multiplier=dict(required=False, type='int'),
+ snapdir_access=dict(required=False, type='bool'),
+ atime_update=dict(required=False, type='bool'),
+ auto_provision_as=dict(choices=['flexgroup'], required=False, type='str'),
+ wait_for_completion=dict(required=False, type='bool', default=False),
+ time_out=dict(required=False, type='int', default=180),
+ max_wait_time=dict(required=False, type='int', default=600),
+ language=dict(type='str', required=False),
+ qos_policy_group=dict(required=False, type='str'),
+ qos_adaptive_policy_group=dict(required=False, type='str'),
+ nvfail_enabled=dict(type='bool', required=False),
+ space_slo=dict(type='str', required=False, choices=['none', 'thick', 'semi-thick']),
+ tiering_policy=dict(type='str', required=False, choices=['snapshot-only', 'auto', 'backup', 'none', 'all']),
+ vserver_dr_protection=dict(type='str', required=False, choices=['protected', 'unprotected']),
+ comment=dict(type='str', required=False),
+ snapshot_auto_delete=dict(type='dict', required=False),
+ cutover_action=dict(required=False, type='str', choices=['abort_on_failure', 'defer_on_failure', 'force', 'wait']),
+ check_interval=dict(required=False, type='int', default=30),
+ from_vserver=dict(required=False, type='str'),
+ auto_remap_luns=dict(required=False, type='bool'),
+ force_unmap_luns=dict(required=False, type='bool'),
+ force_restore=dict(required=False, type='bool'),
+ compression=dict(required=False, type='bool'),
+ inline_compression=dict(required=False, type='bool'),
+ preserve_lun_ids=dict(required=False, type='bool'),
+ snapshot_restore=dict(required=False, type='str'),
+ nas_application_template=dict(type='dict', options=dict(
+ use_nas_application=dict(type='bool', default=True),
+ exclude_aggregates=dict(type='list', elements='str'),
+ flexcache=dict(type='dict', options=dict(
+ dr_cache=dict(type='bool'),
+ origin_svm_name=dict(required=True, type='str'),
+ origin_component_name=dict(required=True, type='str')
+ )),
+ cifs_access=dict(type='list', elements='dict', options=dict(
+ access=dict(type='str', choices=['change', 'full_control', 'no_access', 'read']),
+ user_or_group=dict(type='str')
+ )),
+ nfs_access=dict(type='list', elements='dict', options=dict(
+ access=dict(type='str', choices=['none', 'ro', 'rw']),
+ host=dict(type='str')
+ )),
+ storage_service=dict(type='str', choices=['value', 'performance', 'extreme']),
+ tiering=dict(type='dict', options=dict(
+ control=dict(type='str', choices=['required', 'best_effort', 'disallowed']),
+ policy=dict(type='str', choices=['all', 'auto', 'none', 'snapshot-only']),
+ object_stores=dict(type='list', elements='str') # create only
+ ))
+ )),
+ size_change_threshold=dict(type='int', default=10),
+ tiering_minimum_cooling_days=dict(required=False, type='int'),
+ logical_space_enforcement=dict(required=False, type='bool'),
+ logical_space_reporting=dict(required=False, type='bool'),
+ snaplock=dict(type='dict', options=dict(
+ append_mode_enabled=dict(required=False, type='bool'),
+ autocommit_period=dict(required=False, type='str'),
+ privileged_delete=dict(required=False, type='str', choices=['disabled', 'enabled', 'permanently_disabled']),
+ retention=dict(type='dict', options=dict(
+ default=dict(required=False, type='str'),
+ maximum=dict(required=False, type='str'),
+ minimum=dict(required=False, type='str')
+ )),
+ type=dict(required=False, type='str', choices=['compliance', 'enterprise', 'non_snaplock'])
+ )),
+ max_files=dict(required=False, type='int'),
+ analytics=dict(required=False, type='str', choices=['on', 'off']),
+ tags=dict(required=False, type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ mutually_exclusive=[
+ ['space_guarantee', 'space_slo'], ['auto_remap_luns', 'force_unmap_luns']
+ ],
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule(self)
+ self.parameters = self.na_helper.check_and_set_parameters(self.module)
+ self.volume_style = None
+ self.volume_created = False
+ self.issues = []
+ self.sis_keys2zapi_get = dict(
+ efficiency_policy='policy',
+ compression='is-compression-enabled',
+ inline_compression='is-inline-compression-enabled')
+ self.sis_keys2zapi_set = dict(
+ efficiency_policy='policy-name',
+ compression='enable-compression',
+ inline_compression='enable-inline-compression')
+
+ if self.parameters.get('size'):
+ self.parameters['size'] = self.parameters['size'] * \
+ netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']]
+ self.validate_snapshot_auto_delete()
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ unsupported_rest_properties = ['atime_update',
+ 'cutover_action',
+ 'encrypt-destination',
+ 'force_restore',
+ 'nvfail_enabled',
+ 'preserve_lun_ids',
+ 'snapdir_access',
+ 'snapshot_auto_delete',
+ 'space_slo',
+ 'vserver_dr_protection']
+ partially_supported_rest_properties = [['efficiency_policy', (9, 7)], ['tiering_minimum_cooling_days', (9, 8)], ['analytics', (9, 8)],
+ ['tags', (9, 13, 1)]]
+ self.unsupported_zapi_properties = ['sizing_method', 'logical_space_enforcement', 'logical_space_reporting', 'snaplock', 'analytics', 'tags']
+ self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
+
+ if not self.use_rest:
+ self.setup_zapi()
+ if self.use_rest:
+ self.rest_errors()
+
+ # REST API for application/applications if needed - will report an error when REST is not supported
+ self.rest_app = self.setup_rest_application()
+
+ def setup_zapi(self):
+ if netapp_utils.has_netapp_lib() is False:
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+
+ for unsupported_zapi_property in self.unsupported_zapi_properties:
+ if self.parameters.get(unsupported_zapi_property) is not None:
+ msg = "Error: %s option is not supported with ZAPI. It can only be used with REST." % unsupported_zapi_property
+ msg += ' use_rest: %s.' % self.parameters['use_rest']
+ if self.rest_api.fallback_to_zapi_reason:
+ msg += ' Conflict %s.' % self.rest_api.fallback_to_zapi_reason
+ self.module.fail_json(msg=msg)
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def validate_snapshot_auto_delete(self):
+ if 'snapshot_auto_delete' in self.parameters:
+ for key in self.parameters['snapshot_auto_delete']:
+ if key not in ['commitment', 'trigger', 'target_free_space', 'delete_order', 'defer_delete',
+ 'prefix', 'destroy_list', 'state']:
+ self.module.fail_json(msg="snapshot_auto_delete option '%s' is not valid." % key)
+
    def setup_rest_application(self):
        """Validate nas_application_template options and build the REST application helper.

        :return: a RestApplication instance when use_nas_application is set, None otherwise.
        Fails the module on inconsistent option combinations.
        """
        rest_app = None
        if self.na_helper.safe_get(self.parameters, ['nas_application_template', 'use_nas_application']):
            # the application template only exists in REST; a ZAPI fallback is a hard error
            if not self.use_rest:
                msg = 'Error: nas_application_template requires REST support.'
                msg += ' use_rest: %s.' % self.parameters['use_rest']
                if self.rest_api.fallback_to_zapi_reason:
                    msg += ' Conflict %s.' % self.rest_api.fallback_to_zapi_reason
                self.module.fail_json(msg=msg)
            # consistency checks
            # tiering policy is duplicated, make sure values are matching
            tiering_policy_nas = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering', 'policy'])
            tiering_policy = self.na_helper.safe_get(self.parameters, ['tiering_policy'])
            if tiering_policy_nas is not None and tiering_policy is not None and tiering_policy_nas != tiering_policy:
                msg = 'Conflict: if tiering_policy and nas_application_template tiering policy are both set, they must match.'
                msg += ' Found "%s" and "%s".' % (tiering_policy, tiering_policy_nas)
                self.module.fail_json(msg=msg)
            # aggregate_name will force a move if present
            if self.parameters.get('aggregate_name') is not None:
                msg = 'Conflict: aggregate_name is not supported when application template is enabled.'\
                      ' Found: aggregate_name: %s' % self.parameters['aggregate_name']
                self.module.fail_json(msg=msg)
            # nfs_access generates an export policy, so an explicit export_policy is ambiguous
            nfs_access = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'nfs_access'])
            if nfs_access is not None and self.na_helper.safe_get(self.parameters, ['export_policy']) is not None:
                msg = 'Conflict: export_policy option and nfs_access suboption in nas_application_template are mutually exclusive.'
                self.module.fail_json(msg=msg)
            rest_app = RestApplication(self.rest_api, self.parameters['vserver'], self.parameters['name'])
        return rest_app
+
+ def volume_get_iter(self, vol_name=None):
+ """
+ Return volume-get-iter query results
+ :param vol_name: name of the volume
+ :return: NaElement
+ """
+ volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
+ volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
+ volume_id_attributes.add_new_child('name', vol_name)
+ volume_id_attributes.add_new_child('vserver', self.parameters['vserver'])
+ volume_attributes.add_child_elem(volume_id_attributes)
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(volume_attributes)
+ volume_info.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(volume_info, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error fetching volume %s : %s'
+ % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ return result
+
    def get_application(self):
        """Return the 'nas' section of the volume's REST application, or None.

        The first application component is flattened into the 'nas' dictionary so
        callers see the component attributes at the top level.
        """
        if self.rest_app:
            app, error = self.rest_app.get_application_details('nas')
            self.na_helper.fail_on_error(error)
            # flatten component list
            comps = self.na_helper.safe_get(app, ['nas', 'application_components'])
            if comps:
                comp = comps[0]
                app['nas'].pop('application_components')
                app['nas'].update(comp)
                return app['nas']
        # NOTE(review): an application record without components falls through to None
        # - confirm this is the intended "not found" semantics.
        return None
+
    def get_volume_attributes(self, volume_attributes, result):
        """Extract supported volume attributes from a ZAPI record into result.

        :param volume_attributes: NaElement for one volume-attributes record.
        :param result: dict updated in place, keyed by module option names.
        """
        # extract values from volume record
        attrs = dict(
            # The keys are used to index a result dictionary, values are read from a ZAPI object indexed by key_list.
            # If required is True, an error is reported if a key in key_list is not found.
            # We may have observed cases where the record is incomplete as the volume is being created, so it may be better to ignore missing keys
            # I'm not sure there is much value in omitnone, but it preserves backward compatibility
            # If omitnone is absent or False, a None value is recorded, if True, the key is not set
            encrypt=dict(key_list=['encrypt'], convert_to=bool, omitnone=True),
            tiering_policy=dict(key_list=['volume-comp-aggr-attributes', 'tiering-policy'], omitnone=True),
            export_policy=dict(key_list=['volume-export-attributes', 'policy']),
            aggregate_name=dict(key_list=['volume-id-attributes', 'containing-aggregate-name']),
            flexgroup_uuid=dict(key_list=['volume-id-attributes', 'flexgroup-uuid']),
            instance_uuid=dict(key_list=['volume-id-attributes', 'instance-uuid']),
            junction_path=dict(key_list=['volume-id-attributes', 'junction-path'], default=''),
            style_extended=dict(key_list=['volume-id-attributes', 'style-extended']),
            type=dict(key_list=['volume-id-attributes', 'type'], omitnone=True),
            comment=dict(key_list=['volume-id-attributes', 'comment']),
            max_files=dict(key_list=['volume-inode-attributes', 'files-total'], convert_to=int),
            atime_update=dict(key_list=['volume-performance-attributes', 'is-atime-update-enabled'], convert_to=bool),
            qos_policy_group=dict(key_list=['volume-qos-attributes', 'policy-group-name']),
            qos_adaptive_policy_group=dict(key_list=['volume-qos-attributes', 'adaptive-policy-group-name']),
            # style is not present if the volume is still offline or of type: dp
            volume_security_style=dict(key_list=['volume-security-attributes', 'style'], omitnone=True),
            group_id=dict(key_list=['volume-security-attributes', 'volume-security-unix-attributes', 'group-id'], convert_to=int, omitnone=True),
            unix_permissions=dict(key_list=['volume-security-attributes', 'volume-security-unix-attributes', 'permissions'], required=True),
            user_id=dict(key_list=['volume-security-attributes', 'volume-security-unix-attributes', 'user-id'], convert_to=int, omitnone=True),
            snapdir_access=dict(key_list=['volume-snapshot-attributes', 'snapdir-access-enabled'], convert_to=bool),
            snapshot_policy=dict(key_list=['volume-snapshot-attributes', 'snapshot-policy'], omitnone=True),
            percent_snapshot_space=dict(key_list=['volume-space-attributes', 'percentage-snapshot-reserve'], convert_to=int, omitnone=True),
            size=dict(key_list=['volume-space-attributes', 'size'], convert_to=int),
            space_guarantee=dict(key_list=['volume-space-attributes', 'space-guarantee']),
            space_slo=dict(key_list=['volume-space-attributes', 'space-slo']),
            nvfail_enabled=dict(key_list=['volume-state-attributes', 'is-nvfail-enabled'], convert_to=bool),
            is_online=dict(key_list=['volume-state-attributes', 'state'], convert_to='bool_online', omitnone=True),
            vserver_dr_protection=dict(key_list=['volume-vserver-dr-protection-attributes', 'vserver-dr-protection']),
        )

        self.na_helper.zapi_get_attrs(volume_attributes, attrs, result)
+
+ def get_snapshot_auto_delete_attributes(self, volume_attributes, result):
+ attrs = dict(
+ commitment=dict(key_list=['volume-snapshot-autodelete-attributes', 'commitment']),
+ defer_delete=dict(key_list=['volume-snapshot-autodelete-attributes', 'defer-delete']),
+ delete_order=dict(key_list=['volume-snapshot-autodelete-attributes', 'delete-order']),
+ destroy_list=dict(key_list=['volume-snapshot-autodelete-attributes', 'destroy-list']),
+ is_autodelete_enabled=dict(key_list=['volume-snapshot-autodelete-attributes', 'is-autodelete-enabled'], convert_to=bool),
+ prefix=dict(key_list=['volume-snapshot-autodelete-attributes', 'prefix']),
+ target_free_space=dict(key_list=['volume-snapshot-autodelete-attributes', 'target-free-space'], convert_to=int),
+ trigger=dict(key_list=['volume-snapshot-autodelete-attributes', 'trigger']),
+ )
+ self.na_helper.zapi_get_attrs(volume_attributes, attrs, result)
+ if result['is_autodelete_enabled'] is not None:
+ result['state'] = 'on' if result['is_autodelete_enabled'] else 'off'
+ del result['is_autodelete_enabled']
+
+ def get_volume(self, vol_name=None):
+ """
+ Return details about the volume
+ :param:
+ name : Name of the volume
+ :return: Details about the volume. None if not found.
+ :rtype: dict
+ """
+ result = None
+ if vol_name is None:
+ vol_name = self.parameters['name']
+ if self.use_rest:
+ return self.get_volume_rest(vol_name)
+ volume_info = self.volume_get_iter(vol_name)
+ if self.na_helper.zapi_get_value(volume_info, ['num-records'], convert_to=int, default=0) > 0:
+ result = self.get_volume_record_from_zapi(volume_info, vol_name)
+ return result
+
+ def get_volume_record_from_zapi(self, volume_info, vol_name):
+ volume_attributes = self.na_helper.zapi_get_value(volume_info, ['attributes-list', 'volume-attributes'], required=True)
+ result = dict(name=vol_name)
+ self.get_volume_attributes(volume_attributes, result)
+ result['uuid'] = (result['instance_uuid'] if result['style_extended'] == 'flexvol'
+ else result['flexgroup_uuid'] if result['style_extended'] is not None and result['style_extended'].startswith('flexgroup')
+ else None)
+
+ # snapshot_auto_delete options
+ auto_delete = {}
+ self. get_snapshot_auto_delete_attributes(volume_attributes, auto_delete)
+ result['snapshot_auto_delete'] = auto_delete
+
+ self.get_efficiency_info(result)
+
+ return result
+
+ def wrap_fail_json(self, msg, exception=None):
+ for issue in self.issues:
+ self.module.warn(issue)
+ if self.volume_created:
+ msg = 'Volume created with success, with missing attributes: %s' % msg
+ self.module.fail_json(msg=msg, exception=exception)
+
    def create_nas_application_component(self):
        '''Create application component for nas template.

        Builds the application_components entry of the REST nas application body
        from the module options; fails if name or size is missing.
        '''
        required_options = ('name', 'size')
        for option in required_options:
            if self.parameters.get(option) is None:
                self.module.fail_json(msg='Error: "%s" is required to create nas application.' % option)

        application_component = dict(
            name=self.parameters['name'],
            total_size=self.parameters['size'],
            share_count=1,  # 1 is the maximum value for nas
            scale_out=(self.volume_style == 'flexgroup'),
        )
        name = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'storage_service'])
        if name is not None:
            application_component['storage_service'] = dict(name=name)

        flexcache = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'flexcache'])
        if flexcache is not None:
            application_component['flexcache'] = dict(
                origin=dict(
                    svm=dict(name=flexcache['origin_svm_name']),
                    component=dict(name=flexcache['origin_component_name'])
                )
            )
            # scale_out should be absent or set to True for FlexCache
            del application_component['scale_out']
            dr_cache = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'flexcache', 'dr_cache'])
            if dr_cache is not None:
                application_component['flexcache']['dr_cache'] = dr_cache

        # tiering can come from the template or the top-level tiering_policy option
        tiering = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering'])
        if tiering is not None or self.parameters.get('tiering_policy') is not None:
            application_component['tiering'] = {}
            if tiering is None:
                tiering = {}
            if 'policy' not in tiering:
                tiering['policy'] = self.parameters.get('tiering_policy')
            for attr in ('control', 'policy', 'object_stores'):
                value = tiering.get(attr)
                if attr == 'object_stores' and value is not None:
                    # REST expects a list of {'name': ...} dicts
                    value = [dict(name=x) for x in value]
                if value is not None:
                    application_component['tiering'][attr] = value

        if self.get_qos_policy_group() is not None:
            application_component['qos'] = {
                "policy": {
                    "name": self.get_qos_policy_group(),
                }
            }
        if self.parameters.get('export_policy') is not None:
            application_component['export_policy'] = {
                "name": self.parameters['export_policy'],
            }
        return application_component
+
+ def create_volume_body(self):
+ '''Create body for nas template'''
+ nas = dict(application_components=[self.create_nas_application_component()])
+ value = self.na_helper.safe_get(self.parameters, ['snapshot_policy'])
+ if value is not None:
+ nas['protection_type'] = {'local_policy': value}
+ for attr in ('nfs_access', 'cifs_access'):
+ value = self.na_helper.safe_get(self.parameters, ['nas_application_template', attr])
+ if value is not None:
+ # we expect value to be a list of dicts, with maybe some empty entries
+ value = self.na_helper.filter_out_none_entries(value)
+ if value:
+ nas[attr] = value
+ for attr in ('exclude_aggregates',):
+ values = self.na_helper.safe_get(self.parameters, ['nas_application_template', attr])
+ if values:
+ nas[attr] = [dict(name=name) for name in values]
+ return self.rest_app.create_application_body("nas", nas, smart_container=True)
+
+ def create_nas_application(self):
+ '''Use REST application/applications nas template to create a volume'''
+ body, error = self.create_volume_body()
+ self.na_helper.fail_on_error(error)
+ response, error = self.rest_app.create_application(body)
+ self.na_helper.fail_on_error(error)
+ return response
+
    def create_volume(self):
        '''Create ONTAP volume.

        Dispatches to the nas application template, plain REST, or async ZAPI
        (flexgroup) paths; otherwise creates a flexvol with volume-create and
        optionally polls until the volume reports online.
        '''
        if self.rest_app:
            return self.create_nas_application()
        if self.use_rest:
            return self.create_volume_rest()
        if self.volume_style == 'flexgroup':
            return self.create_volume_async()

        options = self.create_volume_options()
        volume_create = netapp_utils.zapi.NaElement.create_node_with_children('volume-create', **options)
        try:
            self.server.invoke_successfully(volume_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            size_msg = ' of size %s' % self.parameters['size'] if self.parameters.get('size') is not None else ''
            self.module.fail_json(msg='Error provisioning volume %s%s: %s'
                                  % (self.parameters['name'], size_msg, to_native(error)),
                                  exception=traceback.format_exc())

        if self.parameters.get('wait_for_completion'):
            # round off time_out
            # poll every 10 seconds until the volume reports online or time_out expires
            retries = (self.parameters['time_out'] + 5) // 10
            is_online = None
            errors = []
            while not is_online and retries > 0:
                try:
                    current = self.get_volume()
                    is_online = None if current is None else current['is_online']
                except KeyError as err:
                    # get_volume may receive incomplete data as the volume is being created
                    errors.append(repr(err))
                if not is_online:
                    time.sleep(10)
                retries -= 1
            if not is_online:
                errors.append("Timeout after %s seconds" % self.parameters['time_out'])
                self.module.fail_json(msg='Error waiting for volume %s to come online: %s'
                                      % (self.parameters['name'], str(errors)))
        return None
+
+ def create_volume_async(self):
+ '''
+ create volume async.
+ '''
+ options = self.create_volume_options()
+ volume_create = netapp_utils.zapi.NaElement.create_node_with_children('volume-create-async', **options)
+ if self.parameters.get('aggr_list'):
+ aggr_list_obj = netapp_utils.zapi.NaElement('aggr-list')
+ volume_create.add_child_elem(aggr_list_obj)
+ for aggr in self.parameters['aggr_list']:
+ aggr_list_obj.add_new_child('aggr-name', aggr)
+ try:
+ result = self.server.invoke_successfully(volume_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ size_msg = ' of size %s' % self.parameters['size'] if self.parameters.get('size') is not None else ''
+ self.module.fail_json(msg='Error provisioning volume %s%s: %s'
+ % (self.parameters['name'], size_msg, to_native(error)),
+ exception=traceback.format_exc())
+ self.check_invoke_result(result, 'create')
+ return None
+
+ def create_volume_options(self):
+ '''Set volume options for create operation'''
+ options = {}
+ if self.volume_style == 'flexgroup':
+ options['volume-name'] = self.parameters['name']
+ if self.parameters.get('aggr_list_multiplier') is not None:
+ options['aggr-list-multiplier'] = str(self.parameters['aggr_list_multiplier'])
+ if self.parameters.get('auto_provision_as') is not None:
+ options['auto-provision-as'] = self.parameters['auto_provision_as']
+ if self.parameters.get('space_guarantee') is not None:
+ options['space-guarantee'] = self.parameters['space_guarantee']
+ else:
+ options['volume'] = self.parameters['name']
+ if self.parameters.get('aggregate_name') is None:
+ self.module.fail_json(msg='Error provisioning volume %s: aggregate_name is required'
+ % self.parameters['name'])
+ options['containing-aggr-name'] = self.parameters['aggregate_name']
+ if self.parameters.get('space_guarantee') is not None:
+ options['space-reserve'] = self.parameters['space_guarantee']
+
+ if self.parameters.get('size') is not None:
+ options['size'] = str(self.parameters['size'])
+ if self.parameters.get('snapshot_policy') is not None:
+ options['snapshot-policy'] = self.parameters['snapshot_policy']
+ if self.parameters.get('unix_permissions') is not None:
+ options['unix-permissions'] = self.parameters['unix_permissions']
+ if self.parameters.get('group_id') is not None:
+ options['group-id'] = str(self.parameters['group_id'])
+ if self.parameters.get('user_id') is not None:
+ options['user-id'] = str(self.parameters['user_id'])
+ if self.parameters.get('volume_security_style') is not None:
+ options['volume-security-style'] = self.parameters['volume_security_style']
+ if self.parameters.get('export_policy') is not None:
+ options['export-policy'] = self.parameters['export_policy']
+ if self.parameters.get('junction_path') is not None:
+ options['junction-path'] = self.parameters['junction_path']
+ if self.parameters.get('comment') is not None:
+ options['volume-comment'] = self.parameters['comment']
+ if self.parameters.get('type') is not None:
+ options['volume-type'] = self.parameters['type']
+ if self.parameters.get('percent_snapshot_space') is not None:
+ options['percentage-snapshot-reserve'] = str(self.parameters['percent_snapshot_space'])
+ if self.parameters.get('language') is not None:
+ options['language-code'] = self.parameters['language']
+ if self.parameters.get('qos_policy_group') is not None:
+ options['qos-policy-group-name'] = self.parameters['qos_policy_group']
+ if self.parameters.get('qos_adaptive_policy_group') is not None:
+ options['qos-adaptive-policy-group-name'] = self.parameters['qos_adaptive_policy_group']
+ if self.parameters.get('nvfail_enabled') is not None:
+ options['is-nvfail-enabled'] = str(self.parameters['nvfail_enabled'])
+ if self.parameters.get('space_slo') is not None:
+ options['space-slo'] = self.parameters['space_slo']
+ if self.parameters.get('tiering_policy') is not None:
+ options['tiering-policy'] = self.parameters['tiering_policy']
+ if self.parameters.get('encrypt') is not None:
+ options['encrypt'] = self.na_helper.get_value_for_bool(False, self.parameters['encrypt'], 'encrypt')
+ if self.parameters.get('vserver_dr_protection') is not None:
+ options['vserver-dr-protection'] = self.parameters['vserver_dr_protection']
+ if self.parameters['is_online']:
+ options['volume-state'] = 'online'
+ else:
+ options['volume-state'] = 'offline'
+ return options
+
+ def rest_delete_volume(self, current):
+ """
+ Delete the volume using REST DELETE method (it scrubs better than ZAPI).
+ """
+ uuid = self.parameters['uuid']
+ if uuid is None:
+ self.module.fail_json(msg='Could not read UUID for volume %s in delete.' % self.parameters['name'])
+ unmount_error = self.volume_unmount_rest(fail_on_error=False) if current.get('junction_path') else None
+ dummy, error = rest_generic.delete_async(self.rest_api, 'storage/volumes', uuid, job_timeout=self.parameters['time_out'])
+ self.na_helper.fail_on_error(error, previous_errors=(['Error unmounting volume: %s' % unmount_error] if unmount_error else None))
+ if unmount_error:
+ self.module.warn('Volume was successfully deleted though unmount failed with: %s' % unmount_error)
+
+ def delete_volume_async(self, current):
+ '''Delete ONTAP volume for infinite or flexgroup types '''
+ errors = None
+ if current['is_online']:
+ dummy, errors = self.change_volume_state(call_from_delete_vol=True)
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy-async', **{'volume-name': self.parameters['name']})
+ try:
+ result = self.server.invoke_successfully(volume_delete, enable_tunneling=True)
+ self.check_invoke_result(result, 'delete')
+ except netapp_utils.zapi.NaApiError as error:
+ msg = 'Error deleting volume %s: %s.' % (self.parameters['name'], to_native(error))
+ if errors:
+ msg += ' Previous errors when offlining/unmounting volume: %s' % ' - '.join(errors)
+ self.module.fail_json(msg=msg)
+
+ def delete_volume_sync(self, current, unmount_offline):
+ '''Delete ONTAP volume for flexvol types '''
+ options = {'name': self.parameters['name']}
+ if unmount_offline:
+ options['unmount-and-offline'] = 'true'
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy', **options)
+ try:
+ self.server.invoke_successfully(volume_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ return error
+ return None
+
    def delete_volume(self, current):
        '''Delete ONTAP volume, dispatching on REST support and volume style.'''
        if self.use_rest and self.parameters['uuid'] is not None:
            return self.rest_delete_volume(current)
        if self.parameters.get('is_infinite') or self.volume_style == 'flexgroup':
            return self.delete_volume_async(current)
        errors = []
        # first attempt uses unmount-and-offline; retry without it on failure
        error = self.delete_volume_sync(current, True)
        if error:
            errors.append('volume delete failed with unmount-and-offline option: %s' % to_native(error))
            error = self.delete_volume_sync(current, False)
        if error:
            errors.append('volume delete failed without unmount-and-offline option: %s' % to_native(error))
        if errors:
            # NOTE(review): if the first attempt fails but the retry succeeds, errors is
            # still non-empty and the module fails - confirm this is intended.
            self.module.fail_json(msg='Error deleting volume %s: %s'
                                  % (self.parameters['name'], ' - '.join(errors)),
                                  exception=traceback.format_exc())
+
    def move_volume(self, encrypt_destination=None):
        '''Move volume from source aggregate to destination aggregate.

        :param encrypt_destination: when set, requests encryption (or not) on the target.
        '''
        if self.use_rest:
            return self.move_volume_rest(encrypt_destination)
        volume_move = netapp_utils.zapi.NaElement.create_node_with_children(
            'volume-move-start', **{'source-volume': self.parameters['name'],
                                    'vserver': self.parameters['vserver'],
                                    'dest-aggr': self.parameters['aggregate_name']})
        if self.parameters.get('cutover_action'):
            volume_move.add_new_child('cutover-action', self.parameters['cutover_action'])
        if encrypt_destination is not None:
            volume_move.add_new_child('encrypt-destination', self.na_helper.get_value_for_bool(False, encrypt_destination))
        try:
            # volume move is a cluster-scoped operation
            self.cluster.invoke_successfully(volume_move,
                                             enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # some volumes (e.g. MDV) reject the ZAPI move; retry through the REST CLI passthrough.
            # NOTE(review): when REST is unavailable the helper returns False (not None),
            # so this fail_json fires and reports the original ZAPI error - confirm intended.
            rest_error = self.move_volume_with_rest_passthrough(encrypt_destination)
            if rest_error is not None:
                self.module.fail_json(msg='Error moving volume %s: %s - Retry failed with REST error: %s'
                                      % (self.parameters['name'], to_native(error), rest_error),
                                      exception=traceback.format_exc())
        if self.parameters.get('wait_for_completion'):
            self.wait_for_volume_move()
+
    def move_volume_with_rest_passthrough(self, encrypt_destination=None):
        """Retry a failed ZAPI volume move through the REST CLI passthrough.

        :return: the REST error on failure, None on success, False when REST is unavailable.
        """
        # MDV volume will fail on a move, but will work using the REST CLI pass through
        # vol move start -volume MDV_CRS_d6b0b313ff5611e9837100a098544e51_A -destination-aggregate data_a3 -vserver wmc66-a
        # if REST isn't available fail with the original error
        if not self.use_rest:
            # NOTE(review): False (not None) makes the caller's 'is not None' check fire,
            # so the original ZAPI error is reported - confirm this is intended.
            return False
        # if REST exists let's try moving using the passthrough CLI
        api = 'private/cli/volume/move/start'
        body = {'destination-aggregate': self.parameters['aggregate_name'],
                }
        if encrypt_destination is not None:
            body['encrypt-destination'] = encrypt_destination
        query = {'volume': self.parameters['name'],
                 'vserver': self.parameters['vserver']
                 }
        dummy, error = self.rest_api.patch(api, body, query)
        return error
+
+ def check_volume_move_state(self, result):
+ if self.use_rest:
+ volume_move_status = self.na_helper.safe_get(result, ['movement', 'state'])
+ else:
+ volume_move_status = result.get_child_by_name('attributes-list').get_child_by_name('volume-move-info').get_child_content('state')
+ # We have 5 states that can be returned.
+ # warning and healthy are state where the move is still going so we don't need to do anything for thouse.
+ # success - volume move is completed in REST.
+ if volume_move_status in ['success', 'done']:
+ return False
+ # ZAPI returns failed or alert, REST returns failed or aborted.
+ if volume_move_status in ['failed', 'alert', 'aborted']:
+ self.module.fail_json(msg='Error moving volume %s: %s' %
+ (self.parameters['name'], result.get_child_by_name('attributes-list').get_child_by_name('volume-move-info')
+ .get_child_content('details')))
+ return True
+
+ def wait_for_volume_move(self):
+ volume_move_iter = netapp_utils.zapi.NaElement('volume-move-get-iter')
+ volume_move_info = netapp_utils.zapi.NaElement('volume-move-info')
+ volume_move_info.add_new_child('volume', self.parameters['name'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(volume_move_info)
+ volume_move_iter.add_child_elem(query)
+ error = self.wait_for_task_completion(volume_move_iter, self.check_volume_move_state)
+ if error:
+ self.module.fail_json(msg='Error getting volume move status: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def wait_for_volume_move_rest(self):
+ api = "storage/volumes"
+ query = {
+ 'name': self.parameters['name'],
+ 'movement.destination_aggregate.name': self.parameters['aggregate_name'],
+ 'fields': 'movement.state'
+ }
+ error = self.wait_for_task_completion_rest(api, query, self.check_volume_move_state)
+ if error:
+ self.module.fail_json(msg='Error getting volume move status: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def check_volume_encryption_conversion_state(self, result):
+ if self.use_rest:
+ volume_encryption_conversion_status = self.na_helper.safe_get(result, ['encryption', 'status', 'message'])
+ else:
+ volume_encryption_conversion_status = result.get_child_by_name('attributes-list').get_child_by_name('volume-encryption-conversion-info')\
+ .get_child_content('status')
+ # REST returns running or initializing, ZAPI returns running if encryption in progress.
+ if volume_encryption_conversion_status in ['running', 'initializing']:
+ return True
+ # If encryprion is completed, REST do have encryption status message.
+ if volume_encryption_conversion_status in ['Not currently going on.', None]:
+ return False
+ self.module.fail_json(msg='Error converting encryption for volume %s: %s' %
+ (self.parameters['name'], volume_encryption_conversion_status))
+
    def wait_for_volume_encryption_conversion(self):
        """Poll until the volume encryption conversion completes (ZAPI), or delegate to REST.

        Fails the module if the conversion status cannot be retrieved.
        """
        if self.use_rest:
            return self.wait_for_volume_encryption_conversion_rest()
        # build volume-encryption-conversion-get-iter with a query on volume + vserver
        volume_encryption_conversion_iter = netapp_utils.zapi.NaElement('volume-encryption-conversion-get-iter')
        volume_encryption_conversion_info = netapp_utils.zapi.NaElement('volume-encryption-conversion-info')
        volume_encryption_conversion_info.add_new_child('volume', self.parameters['name'])
        volume_encryption_conversion_info.add_new_child('vserver', self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(volume_encryption_conversion_info)
        volume_encryption_conversion_iter.add_child_elem(query)
        error = self.wait_for_task_completion(volume_encryption_conversion_iter, self.check_volume_encryption_conversion_state)
        if error:
            self.module.fail_json(msg='Error getting volume encryption_conversion status: %s' % (to_native(error)),
                                  exception=traceback.format_exc())
+
    def wait_for_volume_encryption_conversion_rest(self):
        """REST flavor: poll storage/volumes for the encryption status until conversion ends."""
        api = "storage/volumes"
        query = {
            'name': self.parameters['name'],
            'fields': 'encryption'
        }
        error = self.wait_for_task_completion_rest(api, query, self.check_volume_encryption_conversion_state)
        if error:
            self.module.fail_json(msg='Error getting volume encryption_conversion status: %s' % (to_native(error)),
                                  exception=traceback.format_exc())
+
    def wait_for_task_completion(self, zapi_iter, check_state):
        """Poll a ZAPI iterator until check_state reports completion or retries run out.

        Up to 3 consecutive API failures are tolerated (the counter resets after a
        successful poll); a 4th consecutive failure returns the error.
        Returns None on completion, when no matching record exists, or when the
        retry budget is exhausted.
        NOTE(review): exhausting the retry budget (timeout) also returns None, so
        callers cannot distinguish timeout from completion — confirm intended.
        """
        # rough retry budget; the +1 also guards against a zero check_interval
        retries = self.parameters['max_wait_time'] // (self.parameters['check_interval'] + 1)
        fail_count = 0
        while retries > 0:
            try:
                result = self.cluster.invoke_successfully(zapi_iter, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                if fail_count < 3:
                    fail_count += 1
                    retries -= 1
                    time.sleep(self.parameters['check_interval'])
                    continue
                return error
            if int(result.get_child_content('num-records')) == 0:
                # no record left to track - the operation is no longer reported
                return None
            # reset fail count to 0
            fail_count = 0
            retry_required = check_state(result)
            if not retry_required:
                return None
            time.sleep(self.parameters['check_interval'])
            retries -= 1
+
    def wait_for_task_completion_rest(self, api, query, check_state):
        """REST counterpart of wait_for_task_completion.

        Polls get_one_record until check_state reports completion, the record
        disappears, more than 3 consecutive errors occur, or the retry budget
        is exhausted.  Returns the error on persistent failure, None otherwise
        (timeout is not reported as an error, matching the ZAPI flavor).
        """
        # rough retry budget; the +1 also guards against a zero check_interval
        retries = self.parameters['max_wait_time'] // (self.parameters['check_interval'] + 1)
        fail_count = 0
        while retries > 0:
            record, error = rest_generic.get_one_record(self.rest_api, api, query)
            if error:
                if fail_count < 3:
                    fail_count += 1
                    retries -= 1
                    time.sleep(self.parameters['check_interval'])
                    continue
                return error
            if record is None:
                # no record left to track - the operation is no longer reported
                return None
            # reset fail count to 0
            fail_count = 0
            retry_required = check_state(record)
            if not retry_required:
                return None
            time.sleep(self.parameters['check_interval'])
            retries -= 1
+
    def rename_volume(self):
        """
        Rename the volume.

        Note: 'is_infinite' needs to be set to True in order to rename an
        Infinite Volume. Use time_out parameter to set wait time for rename completion.
        """
        if self.use_rest:
            return self.rename_volume_rest()
        # Infinite Volumes need the async ZAPI flavor (and a different volume-name key)
        vol_rename_zapi, vol_name_zapi = ['volume-rename-async', 'volume-name'] if self.parameters['is_infinite']\
            else ['volume-rename', 'volume']
        volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
            vol_rename_zapi, **{vol_name_zapi: self.parameters['from_name'],
                                'new-volume-name': str(self.parameters['name'])})
        try:
            result = self.server.invoke_successfully(volume_rename, enable_tunneling=True)
            if vol_rename_zapi == 'volume-rename-async':
                # the async call returns a job; wait for it (honors time_out)
                self.check_invoke_result(result, 'rename')
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error renaming volume %s: %s'
                                  % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
+
    def resize_volume(self):
        """
        Re-size the volume.

        Note: 'is_infinite' needs to be set to True in order to resize an
        Infinite Volume.
        """
        if self.use_rest:
            return self.resize_volume_rest()

        # flexgroups and Infinite Volumes require the async ZAPI flavor
        vol_size_zapi, vol_name_zapi = ['volume-size-async', 'volume-name']\
            if (self.parameters['is_infinite'] or self.volume_style == 'flexgroup')\
            else ['volume-size', 'volume']
        volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
            vol_size_zapi, **{vol_name_zapi: self.parameters['name'],
                              'new-size': str(self.parameters['size'])})
        try:
            result = self.server.invoke_successfully(volume_resize, enable_tunneling=True)
            if vol_size_zapi == 'volume-size-async':
                # the async call returns a job; wait for it (honors time_out)
                self.check_invoke_result(result, 'resize')
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error re-sizing volume %s: %s'
                                  % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        return None
+
    def start_encryption_conversion(self, encrypt_destination):
        """Enable or disable volume encryption.

        Enabling starts an in-place conversion (REST or ZAPI), optionally waiting
        for completion.  Disabling is implemented as a volume move with an
        unencrypted destination.
        :param encrypt_destination: True to encrypt in place, False to decrypt via move.
        """
        if encrypt_destination:
            if self.use_rest:
                return self.encryption_conversion_rest()
            zapi = netapp_utils.zapi.NaElement.create_node_with_children(
                'volume-encryption-conversion-start', **{'volume': self.parameters['name']})
            try:
                self.server.invoke_successfully(zapi, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error enabling encryption for volume %s: %s'
                                      % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())
            if self.parameters.get('wait_for_completion'):
                self.wait_for_volume_encryption_conversion()
        else:
            self.module.warn('disabling encryption requires cluster admin permissions.')
            # decryption is only possible through a move to an unencrypted aggregate
            self.move_volume(encrypt_destination)
+
    def change_volume_state(self, call_from_delete_vol=False):
        """
        Change volume's state (offline/online).

        :param call_from_delete_vol: when True, force offline (with unmount) and
            collect errors instead of failing, so that deletion can proceed.
        :return: tuple (targeted state 'online'/'offline', list of error strings).
        """
        if self.use_rest:
            return self.change_volume_state_rest()
        if self.parameters['is_online'] and not call_from_delete_vol:    # Desired state is online, setup zapi APIs respectively
            vol_state_zapi, vol_name_zapi, action = ['volume-online-async', 'volume-name', 'online']\
                if (self.parameters['is_infinite'] or self.volume_style == 'flexgroup')\
                else ['volume-online', 'name', 'online']
        else:   # Desired state is offline, setup zapi APIs respectively
            vol_state_zapi, vol_name_zapi, action = ['volume-offline-async', 'volume-name', 'offline']\
                if (self.parameters['is_infinite'] or self.volume_style == 'flexgroup')\
                else ['volume-offline', 'name', 'offline']
        volume_unmount = netapp_utils.zapi.NaElement.create_node_with_children(
            'volume-unmount', **{'volume-name': self.parameters['name']})
        volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
            vol_state_zapi, **{vol_name_zapi: self.parameters['name']})

        errors = []
        if not self.parameters['is_online'] or call_from_delete_vol:    # Unmount before offline
            try:
                self.server.invoke_successfully(volume_unmount, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                errors.append('Error unmounting volume %s: %s' % (self.parameters['name'], to_native(error)))
        state = "online" if self.parameters['is_online'] and not call_from_delete_vol else "offline"
        try:
            result = self.server.invoke_successfully(volume_change_state, enable_tunneling=True)
            if self.volume_style == 'flexgroup' or self.parameters['is_infinite']:
                # the async call returns a job; wait for it
                self.check_invoke_result(result, action)
        except netapp_utils.zapi.NaApiError as error:
            errors.append('Error changing the state of volume %s to %s: %s' % (self.parameters['name'], state, to_native(error)))
        if errors and not call_from_delete_vol:
            self.module.fail_json(msg=', '.join(errors), exception=traceback.format_exc())
        return state, errors
+
    def create_volume_attribute(self, zapi_object, parent_attribute, attribute, option_name, convert_from=None):
        """Add a ZAPI child element for a module option, when the option is set.

        :param zapi_object: None to add the attribute directly to parent_attribute;
            an NaElement to add the attribute into it; or a str naming a child of
            parent_attribute to reuse (or create when absent).
        :param parent_attribute: NaElement receiving the (possibly nested) attribute.
        :param attribute: name of the ZAPI child element to create.
        :param option_name: module parameter providing the value; no-op when unset.
        :param convert_from: int or bool to convert the value to its ZAPI string form.
        """
        value = self.parameters.get(option_name)
        if value is None:
            return
        if convert_from == int:
            value = str(value)
        elif convert_from == bool:
            value = self.na_helper.get_value_for_bool(False, value, option_name)

        if zapi_object is None:
            parent_attribute.add_new_child(attribute, value)
            return
        if isinstance(zapi_object, str):
            # retrieve existing in parent, or create a new one
            element = parent_attribute.get_child_by_name(zapi_object)
            zapi_object = netapp_utils.zapi.NaElement(zapi_object) if element is None else element
        # NOTE(review): when the same NaElement instance is passed in across several
        # calls, it is re-appended to the parent on every call - verify NaElement
        # tolerates duplicate add_child_elem.
        zapi_object.add_new_child(attribute, value)
        parent_attribute.add_child_elem(zapi_object)
+
    def build_zapi_volume_modify_iter(self, params):
        """Build the volume-modify-iter(-async) request for the desired changes.

        The attributes element mirrors ONTAP's split of volume-attributes into
        sub-categories; the query element selects the volume by name.
        :param params: computed modify dict; gates options (tiering_policy,
            volume_security_style) that must only be sent when actually changing.
        :return: NaElement ready to be invoked.
        """
        vol_mod_iter = netapp_utils.zapi.NaElement('volume-modify-iter-async' if self.volume_style == 'flexgroup' or self.parameters['is_infinite']
                                                   else 'volume-modify-iter')

        attributes = netapp_utils.zapi.NaElement('attributes')
        vol_mod_attributes = netapp_utils.zapi.NaElement('volume-attributes')
        # Volume-attributes is split in to 25 sub categories
        # volume-inode-attributes
        vol_inode_attributes = netapp_utils.zapi.NaElement('volume-inode-attributes')
        self.create_volume_attribute(vol_inode_attributes, vol_mod_attributes, 'files-total', 'max_files', int)
        # volume-space-attributes
        vol_space_attributes = netapp_utils.zapi.NaElement('volume-space-attributes')
        self.create_volume_attribute(vol_space_attributes, vol_mod_attributes, 'space-guarantee', 'space_guarantee')
        self.create_volume_attribute(vol_space_attributes, vol_mod_attributes, 'percentage-snapshot-reserve', 'percent_snapshot_space', int)
        self.create_volume_attribute(vol_space_attributes, vol_mod_attributes, 'space-slo', 'space_slo')
        # volume-snapshot-attributes
        vol_snapshot_attributes = netapp_utils.zapi.NaElement('volume-snapshot-attributes')
        self.create_volume_attribute(vol_snapshot_attributes, vol_mod_attributes, 'snapshot-policy', 'snapshot_policy')
        self.create_volume_attribute(vol_snapshot_attributes, vol_mod_attributes, 'snapdir-access-enabled', 'snapdir_access', bool)
        # volume-export-attributes
        self.create_volume_attribute('volume-export-attributes', vol_mod_attributes, 'policy', 'export_policy')
        # volume-security-attributes
        if self.parameters.get('unix_permissions') is not None or self.parameters.get('group_id') is not None or self.parameters.get('user_id') is not None:
            vol_security_attributes = netapp_utils.zapi.NaElement('volume-security-attributes')
            vol_security_unix_attributes = netapp_utils.zapi.NaElement('volume-security-unix-attributes')
            self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes, 'permissions', 'unix_permissions')
            self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes, 'group-id', 'group_id', int)
            self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes, 'user-id', 'user_id', int)
            vol_mod_attributes.add_child_elem(vol_security_attributes)
        if params and params.get('volume_security_style') is not None:
            self.create_volume_attribute('volume-security-attributes', vol_mod_attributes, 'style', 'volume_security_style')

        # volume-performance-attributes
        self.create_volume_attribute('volume-performance-attributes', vol_mod_attributes, 'is-atime-update-enabled', 'atime_update', bool)
        # volume-qos-attributes
        self.create_volume_attribute('volume-qos-attributes', vol_mod_attributes, 'policy-group-name', 'qos_policy_group')
        self.create_volume_attribute('volume-qos-attributes', vol_mod_attributes, 'adaptive-policy-group-name', 'qos_adaptive_policy_group')
        # volume-comp-aggr-attributes
        if params and params.get('tiering_policy') is not None:
            self.create_volume_attribute('volume-comp-aggr-attributes', vol_mod_attributes, 'tiering-policy', 'tiering_policy')
        # volume-state-attributes
        self.create_volume_attribute('volume-state-attributes', vol_mod_attributes, 'is-nvfail-enabled', 'nvfail_enabled', bool)
        # volume-dr-protection-attributes
        self.create_volume_attribute('volume-vserver-dr-protection-attributes', vol_mod_attributes, 'vserver-dr-protection', 'vserver_dr_protection')
        # volume-id-attributes
        self.create_volume_attribute('volume-id-attributes', vol_mod_attributes, 'comment', 'comment')
        # End of Volume-attributes sub attributes
        attributes.add_child_elem(vol_mod_attributes)

        # query selects the volume to modify by name
        query = netapp_utils.zapi.NaElement('query')
        vol_query_attributes = netapp_utils.zapi.NaElement('volume-attributes')
        self.create_volume_attribute('volume-id-attributes', vol_query_attributes, 'name', 'name')
        query.add_child_elem(vol_query_attributes)
        vol_mod_iter.add_child_elem(attributes)
        vol_mod_iter.add_child_elem(query)
        return vol_mod_iter
+
    def volume_modify_attributes(self, params):
        """
        modify volume parameter 'export_policy','unix_permissions','snapshot_policy','space_guarantee', 'percent_snapshot_space',
        'qos_policy_group', 'qos_adaptive_policy_group'

        ZAPI path: invokes volume-modify-iter(-async), surfaces per-volume failures
        from the failure-list, and for flexgroup/Infinite Volumes waits for the
        returned job (honoring time_out).
        """
        if self.use_rest:
            return self.volume_modify_attributes_rest(params)
        vol_mod_iter = self.build_zapi_volume_modify_iter(params)
        try:
            result = self.server.invoke_successfully(vol_mod_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            error_msg = to_native(error)
            if 'volume-comp-aggr-attributes' in error_msg:
                error_msg += ". Added info: tiering option requires 9.4 or later."
            self.wrap_fail_json(msg='Error modifying volume %s: %s'
                                % (self.parameters['name'], error_msg),
                                exception=traceback.format_exc())

        failures = result.get_child_by_name('failure-list')
        # handle error if modify space, policy, or unix-permissions parameter fails
        if failures is not None:
            error_msgs = [
                failures.get_child_by_name(return_info).get_child_content(
                    'error-message'
                )
                for return_info in (
                    'volume-modify-iter-info',
                    'volume-modify-iter-async-info',
                )
                if failures.get_child_by_name(return_info) is not None
            ]
            if error_msgs and any(x is not None for x in error_msgs):
                self.wrap_fail_json(msg="Error modifying volume %s: %s"
                                    % (self.parameters['name'], ' --- '.join(error_msgs)),
                                    exception=traceback.format_exc())
        if self.volume_style == 'flexgroup' or self.parameters['is_infinite']:
            # async flavor returns a job in the success-list; wait for it
            success = self.na_helper.safe_get(result, ['success-list', 'volume-modify-iter-async-info'])
            results = {}
            for key in ('status', 'jobid'):
                if success and success.get_child_by_name(key):
                    results[key] = success[key]

            status = results.get('status')
            if status == 'in_progress' and 'jobid' in results:
                if self.parameters['time_out'] == 0:
                    # fire and forget
                    return
                error = self.check_job_status(results['jobid'])
                if error is None:
                    return
                self.wrap_fail_json(msg='Error when modifying volume: %s' % error)
            self.wrap_fail_json(msg='Unexpected error when modifying volume: result is: %s' % str(result.to_string()))
+
+ def volume_mount(self):
+ """
+ Mount an existing volume in specified junction_path
+ :return: None
+ """
+ if self.use_rest:
+ return self.volume_mount_rest()
+ vol_mount = netapp_utils.zapi.NaElement('volume-mount')
+ vol_mount.add_new_child('volume-name', self.parameters['name'])
+ vol_mount.add_new_child('junction-path', self.parameters['junction_path'])
+ try:
+ self.server.invoke_successfully(vol_mount, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error mounting volume %s on path %s: %s'
+ % (self.parameters['name'], self.parameters['junction_path'],
+ to_native(error)), exception=traceback.format_exc())
+
+ def volume_unmount(self):
+ """
+ Unmount an existing volume
+ :return: None
+ """
+ if self.use_rest:
+ return self.volume_unmount_rest()
+ vol_unmount = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-unmount', **{'volume-name': self.parameters['name']})
+ try:
+ self.server.invoke_successfully(vol_unmount, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error unmounting volume %s: %s'
+ % (self.parameters['name'], to_native(error)), exception=traceback.format_exc())
+
    def modify_volume(self, modify):
        '''Modify volume action

        Applies changes in a safe order: unmount first when the junction path is
        being removed, then attribute changes (single call), snapshot autodelete
        options, mount, resize, and last the move (or encryption conversion).
        '''
        # snaplock requires volume in unmount state.
        if modify.get('junction_path') == '':
            self.volume_unmount()
        attributes = modify.keys()
        for attribute in attributes:
            if attribute in ['space_guarantee', 'export_policy', 'unix_permissions', 'group_id', 'user_id', 'tiering_policy',
                             'snapshot_policy', 'percent_snapshot_space', 'snapdir_access', 'atime_update', 'volume_security_style',
                             'nvfail_enabled', 'space_slo', 'qos_policy_group', 'qos_adaptive_policy_group', 'vserver_dr_protection',
                             'comment', 'logical_space_enforcement', 'logical_space_reporting', 'tiering_minimum_cooling_days',
                             'snaplock', 'max_files', 'analytics', 'tags']:
                # one volume_modify_attributes call covers all of these at once
                self.volume_modify_attributes(modify)
                break
        if 'snapshot_auto_delete' in attributes and not self.use_rest:
            # Rest doesn't support any snapshot_auto_delete option other than is_autodelete_enabled. For now i've completely
            # disabled this in rest
            self.set_snapshot_auto_delete()
        # don't mount or unmount when offline
        if modify.get('junction_path'):
            self.volume_mount()
        if 'size' in attributes:
            self.resize_volume()
        if 'aggregate_name' in attributes:
            # keep it last, as it may take some time
            # handle change in encryption as part of the move
            # allow for encrypt/decrypt only if encrypt present in attributes.
            self.move_volume(modify.get('encrypt'))
        elif 'encrypt' in attributes:
            self.start_encryption_conversion(self.parameters['encrypt'])
+
+ def get_volume_style(self, current):
+ '''Get volume style, infinite or standard flexvol'''
+ if current is not None:
+ return current.get('style_extended')
+ if self.parameters.get('aggr_list') or self.parameters.get('aggr_list_multiplier') or self.parameters.get('auto_provision_as'):
+ if self.use_rest and self.parameters.get('auto_provision_as') and self.parameters.get('aggr_list_multiplier') is None:
+ self.parameters['aggr_list_multiplier'] = 1
+ return 'flexgroup'
+ return None
+
    def get_job(self, jobid, server):
        """
        Get job details by id

        :return: dict with 'job-progress', 'job-state' and 'job-completion'
            (None when absent), or None when the job does not exist (ZAPI
            error code 15661).
        """
        job_get = netapp_utils.zapi.NaElement('job-get')
        job_get.add_new_child('job-id', jobid)
        try:
            result = server.invoke_successfully(job_get, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) == "15661":
                # Not found
                return None
            self.wrap_fail_json(msg='Error fetching job info: %s' % to_native(error),
                                exception=traceback.format_exc())
        job_info = result.get_child_by_name('attributes').get_child_by_name('job-info')
        return {
            'job-progress': job_info['job-progress'],
            'job-state': job_info['job-state'],
            'job-completion': job_info['job-completion'] if job_info.get_child_by_name('job-completion') is not None else None
        }
+
    def check_job_status(self, jobid):
        """
        Loop until job is complete

        Polls every 5 seconds up to self.parameters['time_out'] seconds.
        :return: None on success, otherwise an error string (job completion or
            progress message, timeout, or a lookup failure).
        """
        server = self.server
        sleep_time = 5
        time_out = self.parameters['time_out']
        error = 'timeout'

        if time_out <= 0:
            # no polling budget: look the job up a single time
            results = self.get_job(jobid, server)

        while time_out > 0:
            results = self.get_job(jobid, server)
            # If running as cluster admin, the job is owned by cluster vserver
            # rather than the target vserver.
            if results is None and server == self.server:
                # 'results' temporarily holds the cserver name; retry on a
                # connection tunneled to the cluster vserver (only once, since
                # 'server' is rebound here)
                results = netapp_utils.get_cserver(self.server)
                server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
                continue
            if results is None:
                error = 'cannot locate job with id: %d' % int(jobid)
                break
            if results['job-state'] in ('queued', 'running'):
                time.sleep(sleep_time)
                time_out -= sleep_time
                continue
            if results['job-state'] in ('success', 'failure'):
                break
            else:
                self.wrap_fail_json(msg='Unexpected job status in: %s' % repr(results))

        if results is not None:
            if results['job-state'] == 'success':
                error = None
            elif results['job-state'] in ('queued', 'running'):
                error = 'job completion exceeded expected timer of: %s seconds' % \
                        self.parameters['time_out']
            elif results['job-completion'] is not None:
                error = results['job-completion']
            else:
                error = results['job-progress']
        return error
+
    def check_invoke_result(self, result, action):
        '''
        check invoked api call back result.

        For async ZAPIs: when the call reports an in-progress job, wait for it
        (unless time_out is 0) and fail the module on job error or failed status.
        :param action: verb used in error messages (e.g. 'rename', 'resize').
        '''
        results = {}
        for key in ('result-status', 'result-jobid'):
            if result.get_child_by_name(key):
                results[key] = result[key]

        status = results.get('result-status')
        if status == 'in_progress' and 'result-jobid' in results:
            if self.parameters['time_out'] == 0:
                # fire and forget
                return
            error = self.check_job_status(results['result-jobid'])
            if error is None:
                return
            else:
                self.wrap_fail_json(msg='Error when %s volume: %s' % (action, error))
        if status == 'failed':
            self.wrap_fail_json(msg='Operation failed when %s volume.' % action)
+
+ def set_efficiency_attributes(self, options):
+ for key, attr in self.sis_keys2zapi_set.items():
+ value = self.parameters.get(key)
+ if value is not None:
+ if self.argument_spec[key]['type'] == 'bool':
+ value = self.na_helper.get_value_for_bool(False, value)
+ options[attr] = value
+ # ZAPI requires compression to be set for inline-compression
+ if options.get('enable-inline-compression') == 'true' and 'enable-compression' not in options:
+ options['enable-compression'] = 'true'
+
    def set_efficiency_config(self):
        '''Set efficiency policy and compression attributes

        First enables sis on the volume (already-enabled is tolerated), then
        pushes the efficiency attributes with sis-set-config.
        '''
        options = {'path': '/vol/' + self.parameters['name']}
        efficiency_enable = netapp_utils.zapi.NaElement.create_node_with_children('sis-enable', **options)
        try:
            self.server.invoke_successfully(efficiency_enable, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            # Error 40043 denotes an Operation has already been enabled.
            if to_native(error.code) != "40043":
                self.wrap_fail_json(msg='Error enable efficiency on volume %s: %s'
                                    % (self.parameters['name'], to_native(error)),
                                    exception=traceback.format_exc())

        # the same options dict (with 'path') is extended with the attributes
        self.set_efficiency_attributes(options)
        efficiency_start = netapp_utils.zapi.NaElement.create_node_with_children('sis-set-config', **options)
        try:
            self.server.invoke_successfully(efficiency_start, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.wrap_fail_json(msg='Error setting up efficiency attributes on volume %s: %s'
                                % (self.parameters['name'], to_native(error)),
                                exception=traceback.format_exc())
+
    def set_efficiency_config_async(self):
        """Set efficiency policy and compression attributes in asynchronous mode

        Async flavor used for flexgroup/Infinite Volumes: each call returns a
        job which is waited on via check_invoke_result (honors time_out).
        """
        options = {'volume-name': self.parameters['name']}
        efficiency_enable = netapp_utils.zapi.NaElement.create_node_with_children('sis-enable-async', **options)
        try:
            result = self.server.invoke_successfully(efficiency_enable, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.wrap_fail_json(msg='Error enable efficiency on volume %s: %s'
                                % (self.parameters['name'], to_native(error)),
                                exception=traceback.format_exc())
        self.check_invoke_result(result, 'enable efficiency on')

        # the same options dict (with 'volume-name') is extended with the attributes
        self.set_efficiency_attributes(options)
        efficiency_start = netapp_utils.zapi.NaElement.create_node_with_children('sis-set-config-async', **options)
        try:
            result = self.server.invoke_successfully(efficiency_start, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.wrap_fail_json(msg='Error setting up efficiency attributes on volume %s: %s'
                                % (self.parameters['name'], to_native(error)),
                                exception=traceback.format_exc())
        self.check_invoke_result(result, 'set efficiency policy on')
+
    def get_efficiency_info(self, return_value):
        """
        get the name of the efficiency policy assigned to volume, as well as compression values
        if attribute does not exist, set its value to None
        :return: update return_value dict.
        """
        sis_info = netapp_utils.zapi.NaElement('sis-get-iter')
        sis_status_info = netapp_utils.zapi.NaElement('sis-status-info')
        sis_status_info.add_new_child('path', '/vol/' + self.parameters['name'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(sis_status_info)
        sis_info.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(sis_info, True)
        except netapp_utils.zapi.NaApiError as error:
            # Don't error out if efficiency settings cannot be read. We'll fail if they need to be set.
            if error.message.startswith('Insufficient privileges: user ') and error.message.endswith(' does not have read access to this resource'):
                self.issues.append('cannot read volume efficiency options (as expected when running as vserver): %s' % to_native(error))
                return
            self.wrap_fail_json(msg='Error fetching efficiency policy for volume %s: %s'
                                % (self.parameters['name'], to_native(error)),
                                exception=traceback.format_exc())
        # default every efficiency key to None, then fill in what ONTAP reported
        for key in self.sis_keys2zapi_get:
            return_value[key] = None
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            sis_attributes = result.get_child_by_name('attributes-list'). get_child_by_name('sis-status-info')
            for key, attr in self.sis_keys2zapi_get.items():
                value = sis_attributes.get_child_content(attr)
                if self.argument_spec[key]['type'] == 'bool':
                    value = self.na_helper.get_value_for_bool(True, value)
                return_value[key] = value
+
+ def modify_volume_efficiency_config(self, efficiency_config_modify_value):
+ if self.use_rest:
+ return self.set_efficiency_rest()
+ if efficiency_config_modify_value == 'async':
+ self.set_efficiency_config_async()
+ else:
+ self.set_efficiency_config()
+
    def set_snapshot_auto_delete(self):
        """Apply the desired snapshot_auto_delete options, one ZAPI call per option."""
        options = {'volume': self.parameters['name']}
        desired_options = self.parameters['snapshot_auto_delete']
        for key, value in desired_options.items():
            # the options dict is reused; only option-name/option-value change per call
            options['option-name'] = key
            options['option-value'] = str(value)
            snapshot_auto_delete = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-autodelete-set-option', **options)
            try:
                self.server.invoke_successfully(snapshot_auto_delete, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.wrap_fail_json(msg='Error setting snapshot auto delete options for volume %s: %s'
                                    % (self.parameters['name'], to_native(error)),
                                    exception=traceback.format_exc())
+
    def rehost_volume(self):
        """Rehost the volume from 'from_vserver' to 'vserver' (cluster-scoped ZAPI).

        Optional auto_remap_luns / force_unmap_luns parameters are forwarded when set.
        """
        volume_rehost = netapp_utils.zapi.NaElement.create_node_with_children(
            'volume-rehost', **{'vserver': self.parameters['from_vserver'],
                                'destination-vserver': self.parameters['vserver'],
                                'volume': self.parameters['name']})
        if self.parameters.get('auto_remap_luns') is not None:
            volume_rehost.add_new_child('auto-remap-luns', str(self.parameters['auto_remap_luns']))
        if self.parameters.get('force_unmap_luns') is not None:
            volume_rehost.add_new_child('force-unmap-luns', str(self.parameters['force_unmap_luns']))
        try:
            # rehost is a cluster-level operation, so it goes through self.cluster
            self.cluster.invoke_successfully(volume_rehost, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error rehosting volume %s: %s'
                                  % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
+
    def snapshot_restore_volume(self):
        """Restore the volume from the snapshot named by 'snapshot_restore'.

        Optional force_restore / preserve_lun_ids parameters are forwarded when set.
        """
        if self.use_rest:
            return self.snapshot_restore_volume_rest()
        snapshot_restore = netapp_utils.zapi.NaElement.create_node_with_children(
            'snapshot-restore-volume', **{'snapshot': self.parameters['snapshot_restore'],
                                          'volume': self.parameters['name']})
        if self.parameters.get('force_restore') is not None:
            snapshot_restore.add_new_child('force', str(self.parameters['force_restore']))
        if self.parameters.get('preserve_lun_ids') is not None:
            snapshot_restore.add_new_child('preserve-lun-ids', str(self.parameters['preserve_lun_ids']))
        try:
            self.server.invoke_successfully(snapshot_restore, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error restoring volume %s: %s'
                                  % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
+
+ def ignore_small_change(self, current, attribute, threshold):
+ if attribute in current and current[attribute] != 0 and self.parameters.get(attribute) is not None:
+ # ignore a less than XX% difference
+ change = abs(current[attribute] - self.parameters[attribute]) * 100.0 / current[attribute]
+ if change < threshold:
+ self.parameters[attribute] = current[attribute]
+ if change > 0.1:
+ self.module.warn('resize request for %s ignored: %.1f%% is below the threshold: %.1f%%' % (attribute, change, threshold))
+
+ def adjust_sizes(self, current, after_create):
+ """
+ ignore small change in size by resetting expectations
+ """
+ if after_create:
+ # ignore change in size immediately after a create:
+ self.parameters['size'] = current['size']
+ # inodes are not set in create
+ return
+ self.ignore_small_change(current, 'size', self.parameters['size_change_threshold'])
+ self.ignore_small_change(current, 'max_files', netapp_utils.get_feature(self.module, 'max_files_change_threshold'))
+
    def validate_snaplock_changes(self, current, modify=None, after_create=False):
        """Validate snaplock options (REST only; ZAPI path returns immediately).

        With modify set: reject any change of snaplock type.
        Without modify (pre-checks): reject snaplock sub-options on non-snaplock
        volumes, and enforce the 9.10.1 minimum version for the snaplock type
        option (silently dropping the default 'non_snaplock' on older versions).
        """
        if not self.use_rest:
            return
        msg = None
        if modify:
            # prechecks when computing modify
            if 'type' in modify['snaplock']:
                msg = "Error: volume snaplock type was not set properly at creation time." if after_create else \
                    "Error: changing a volume snaplock type after creation is not allowed."
                msg += ' Current: %s, desired: %s.' % (current['snaplock']['type'], self.parameters['snaplock']['type'])
        elif self.parameters['state'] == 'present':
            # prechecks before computing modify
            sl_dict = self.na_helper.filter_out_none_entries(self.parameters.get('snaplock', {}))
            sl_type = sl_dict.pop('type', 'non_snaplock')
            # verify type is the only option when not enabling snaplock compliance or enterprise
            if sl_dict and (
                    (current is None and sl_type == 'non_snaplock') or (current and current['snaplock']['type'] == 'non_snaplock')):
                msg = "Error: snaplock options are not supported for non_snaplock volume, found: %s." % sl_dict
            # verify type is not used before 9.10.1, or allow non_snaplock as this is the default
            if not self.rest_api.meets_rest_minimum_version(True, 9, 10, 1):
                if sl_type == 'non_snaplock':
                    self.parameters.pop('snaplock', None)
                else:
                    msg = "Error: %s" % self.rest_api.options_require_ontap_version('snaplock type', '9.10.1', True)
        if msg:
            self.module.fail_json(msg=msg)
+
    def set_modify_dict(self, current, after_create=False):
        '''Fill modify dict with changes

        Compares current state against desired parameters, after neutralizing
        equivalent unix permissions and immaterial size differences, and fails
        the module for changes that are not allowed (type, backend style,
        snaplock type, tiering control).
        :return: dict of attributes to change (possibly empty).
        '''
        # NOTE(review): current is assumed to be a dict here (current.pop below),
        # although the first line also tolerates a falsy current - confirm callers.
        octal_value = current.get('unix_permissions') if current else None
        if self.parameters.get('unix_permissions') is not None and self.na_helper.compare_chmod_value(octal_value, self.parameters['unix_permissions']):
            # don't change if the values are the same
            # can't change permissions if not online
            del self.parameters['unix_permissions']
        # snapshot_auto_delete's value is a dict, get_modified_attributes function doesn't support dict as value.
        auto_delete_info = current.pop('snapshot_auto_delete', None)
        # ignore small changes in volume size or inode maximum by adjusting self.parameters['size'] or self.parameters['max_files']
        self.adjust_sizes(current, after_create)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if modify is not None and 'type' in modify:
            msg = "Error: volume type was not set properly at creation time." if after_create else \
                "Error: changing a volume from one type to another is not allowed."
            msg += ' Current: %s, desired: %s.' % (current['type'], self.parameters['type'])
            self.module.fail_json(msg=msg)
        if modify is not None and 'snaplock' in modify:
            self.validate_snaplock_changes(current, modify, after_create)
        desired_style = self.get_volume_style(None)
        if desired_style is not None and desired_style != self.volume_style:
            msg = "Error: volume backend was not set properly at creation time." if after_create else \
                "Error: changing a volume from one backend to another is not allowed."
            msg += ' Current: %s, desired: %s.' % (self.volume_style, desired_style)
            self.module.fail_json(msg=msg)
        desired_tcontrol = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering', 'control'])
        if desired_tcontrol in ('required', 'disallowed'):
            # a change in fabricpool backend is either ignored, warned about, or fatal
            warn_or_fail = netapp_utils.get_feature(self.module, 'warn_or_fail_on_fabricpool_backend_change')
            if warn_or_fail in ('warn', 'fail'):
                current_tcontrol = self.tiering_control(current)
                if desired_tcontrol != current_tcontrol:
                    msg = "Error: volume tiering control was not set properly at creation time." if after_create else \
                        "Error: changing a volume from one backend to another is not allowed."
                    msg += ' Current tiering control: %s, desired: %s.' % (current_tcontrol, desired_tcontrol)
                    if warn_or_fail == 'fail':
                        self.module.fail_json(msg=msg)
                    self.module.warn("Ignored " + msg)
            elif warn_or_fail not in (None, 'ignore'):
                self.module.warn("Unexpected value '%s' for warn_or_fail_on_fabricpool_backend_change, expecting: None, 'ignore', 'fail', 'warn'"
                                 % warn_or_fail)
        if self.parameters.get('snapshot_auto_delete') is not None:
            # compare the dict-valued option separately (popped from current above)
            auto_delete_modify = self.na_helper.get_modified_attributes(auto_delete_info,
                                                                        self.parameters['snapshot_auto_delete'])
            if len(auto_delete_modify) > 0:
                modify['snapshot_auto_delete'] = auto_delete_modify
        return modify
+
+ def take_modify_actions(self, modify):
+ self.modify_volume(modify)
+
+ if any(modify.get(key) is not None for key in self.sis_keys2zapi_get):
+ if self.parameters.get('is_infinite') or self.volume_style == 'flexgroup':
+ efficiency_config_modify = 'async'
+ else:
+ efficiency_config_modify = 'sync'
+ self.modify_volume_efficiency_config(efficiency_config_modify)
+
+ # offline volume last
+ if modify.get('is_online') is False:
+ self.change_volume_state()
+
+ """ MAPPING OF VOLUME FIELDS FROM ZAPI TO REST
+ ZAPI = REST
+ encrypt = encryption.enabled
+ volume-comp-aggr-attributes.tiering-policy = tiering.policy
+ 'volume-export-attributes.policy' = nas.export_policy.name
+ 'volume-id-attributes.containing-aggregate-name' = aggregates.name
+ 'volume-id-attributes.flexgroup-uuid' = uuid (Only for FlexGroup volumes)
+ 'volume-id-attributes.instance-uuid' = uuid (Only for FlexVols)
+ 'volume-id-attributes.junction-path' = nas.path
+ 'volume-id-attributes.style-extended' = style
+ 'volume-id-attributes.type' = type
+ 'volume-id-attributes.comment' = comment
+ 'volume-performance-attributes.is-atime-update-enabled' == NO REST VERSION
+ volume-qos-attributes.policy-group-name' = qos.policy.name
+ 'volume-qos-attributes.adaptive-policy-group-name' = qos.policy.name
+ 'volume-security-attributes.style = nas.security_style
+ volume-security-attributes.volume-security-unix-attributes.group-id' = nas.gid
+ 'volume-security-attributes.volume-security-unix-attributes.permissions' = nas.unix_permissions
+ 'volume-security-attributes.volume-security-unix-attributes.user-id' = nas.uid
+ 'volume-snapshot-attributes.snapdir-access-enabled' == NO REST VERSION
+ 'volume-snapshot-attributes,snapshot-policy' = snapshot_policy
+ volume-space-attributes.percentage-snapshot-reserve = space.snapshot.reserve_percent
+ volume-space-attributes.size' = space.size
+ 'volume-space-attributes.space-guarantee' = guarantee.type
+ volume-space-attributes.space-slo' == NO REST VERSION
+ 'volume-state-attributes.is-nvfail-enabled' == NO REST Version
+ 'volume-state-attributes.state' = state
+ 'volume-vserver-dr-protection-attributes.vserver-dr-protection' = == NO REST Version
+ volume-snapshot-autodelete-attributes.* None exist other than space.snapshot.autodelete_enabled
+ From get_efficiency_info function
+ efficiency_policy = efficiency.policy.name
+ compression = efficiency.compression
+ inline_compression = efficiency.compression
+ """
+
+ def get_volume_rest(self, vol_name):
+ """
+ This covers the zapi functions
+ get_volume
+ - volume_get_iter
+ - get_efficiency_info
+ """
+ api = 'storage/volumes'
+ params = {'name': vol_name,
+ 'svm.name': self.parameters['vserver'],
+ 'fields': 'encryption.enabled,'
+ 'tiering.policy,'
+ 'nas.export_policy.name,'
+ 'aggregates.name,'
+ 'aggregates.uuid,'
+ 'uuid,'
+ 'nas.path,'
+ 'style,'
+ 'type,'
+ 'comment,'
+ 'qos.policy.name,'
+ 'nas.security_style,'
+ 'nas.gid,'
+ 'nas.unix_permissions,'
+ 'nas.uid,'
+ 'snapshot_policy,'
+ 'space.snapshot.reserve_percent,'
+ 'space.size,'
+ 'guarantee.type,'
+ 'state,'
+ 'efficiency.compression,'
+ 'snaplock,'
+ 'files.maximum,'
+ 'space.logical_space.enforcement,'
+ 'space.logical_space.reporting,'}
+ if self.parameters.get('efficiency_policy'):
+ params['fields'] += 'efficiency.policy.name,'
+ if self.parameters.get('tiering_minimum_cooling_days'):
+ params['fields'] += 'tiering.min_cooling_days,'
+ if self.parameters.get('analytics'):
+ params['fields'] += 'analytics,'
+ if self.parameters.get('tags'):
+ params['fields'] += '_tags,'
+
+ record, error = rest_generic.get_one_record(self.rest_api, api, params)
+ if error:
+ self.module.fail_json(msg=error)
+ return self.format_get_volume_rest(record) if record else None
+
+ def rename_volume_rest(self):
+        # volume-rename-async and volume-rename are the same in REST.
+        # With ZAPI, you had to give both the old and the new name to rename a volume.
+        # With REST, you need only the old UUID and the new name.
+ current = self.get_volume_rest(self.parameters['from_name'])
+ body = {
+ 'name': self.parameters['name']
+ }
+ dummy, error = self.volume_rest_patch(body, uuid=current['uuid'])
+ if error:
+ self.module.fail_json(msg='Error changing name of volume %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def snapshot_restore_volume_rest(self):
+ # Rest does not have force_restore or preserve_lun_id
+ current = self.get_volume()
+ self.parameters['uuid'] = current['uuid']
+ body = {
+ 'restore_to.snapshot.name': self.parameters['snapshot_restore']
+ }
+ dummy, error = self.volume_rest_patch(body)
+ if error:
+ self.module.fail_json(msg='Error restoring snapshot %s in volume %s: %s' % (
+ self.parameters['snapshot_restore'],
+ self.parameters['name'],
+ to_native(error)), exception=traceback.format_exc())
+
+ def create_volume_rest(self):
+ body = self.create_volume_body_rest()
+ dummy, error = rest_generic.post_async(self.rest_api, 'storage/volumes', body, job_timeout=self.parameters['time_out'])
+ if error:
+ self.module.fail_json(msg='Error creating volume %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def create_volume_body_rest(self):
+ body = {
+ 'name': self.parameters['name'],
+ 'svm.name': self.parameters['vserver']
+ }
+ # Zapi's Space-guarantee and space-reserve are the same thing in Rest
+ if self.parameters.get('space_guarantee') is not None:
+ body['guarantee.type'] = self.parameters['space_guarantee']
+ # TODO: Check to see if there a difference in rest between flexgroup or not. might need to throw error
+ body = self.aggregates_rest(body)
+ if self.parameters.get('tags') is not None:
+ body['_tags'] = self.parameters['tags']
+ if self.parameters.get('size') is not None:
+ body['size'] = self.parameters['size']
+ if self.parameters.get('snapshot_policy') is not None:
+ body['snapshot_policy.name'] = self.parameters['snapshot_policy']
+ if self.parameters.get('unix_permissions') is not None:
+ body['nas.unix_permissions'] = self.parameters['unix_permissions']
+ if self.parameters.get('group_id') is not None:
+ body['nas.gid'] = self.parameters['group_id']
+ if self.parameters.get('user_id') is not None:
+ body['nas.uid'] = self.parameters['user_id']
+ if self.parameters.get('volume_security_style') is not None:
+ body['nas.security_style'] = self.parameters['volume_security_style']
+ if self.parameters.get('export_policy') is not None:
+ body['nas.export_policy.name'] = self.parameters['export_policy']
+ if self.parameters.get('junction_path') is not None:
+ body['nas.path'] = self.parameters['junction_path']
+ if self.parameters.get('comment') is not None:
+ body['comment'] = self.parameters['comment']
+ if self.parameters.get('type') is not None:
+ body['type'] = self.parameters['type']
+ if self.parameters.get('percent_snapshot_space') is not None:
+ body['space.snapshot.reserve_percent'] = self.parameters['percent_snapshot_space']
+ if self.parameters.get('language') is not None:
+ body['language'] = self.parameters['language']
+ if self.get_qos_policy_group() is not None:
+ body['qos.policy.name'] = self.get_qos_policy_group()
+ if self.parameters.get('tiering_policy') is not None:
+ body['tiering.policy'] = self.parameters['tiering_policy']
+ if self.parameters.get('encrypt') is not None:
+ body['encryption.enabled'] = self.parameters['encrypt']
+ if self.parameters.get('logical_space_enforcement') is not None:
+ body['space.logical_space.enforcement'] = self.parameters['logical_space_enforcement']
+ if self.parameters.get('logical_space_reporting') is not None:
+ body['space.logical_space.reporting'] = self.parameters['logical_space_reporting']
+ if self.parameters.get('tiering_minimum_cooling_days') is not None:
+ body['tiering.min_cooling_days'] = self.parameters['tiering_minimum_cooling_days']
+ if self.parameters.get('snaplock') is not None:
+ body['snaplock'] = self.na_helper.filter_out_none_entries(self.parameters['snaplock'])
+ if self.volume_style:
+ body['style'] = self.volume_style
+ if self.parameters.get('efficiency_policy') is not None:
+ body['efficiency.policy.name'] = self.parameters['efficiency_policy']
+ if self.get_compression():
+ body['efficiency.compression'] = self.get_compression()
+ if self.parameters.get('analytics'):
+ body['analytics.state'] = self.parameters['analytics']
+ body['state'] = self.bool_to_online(self.parameters['is_online'])
+ return body
+
+ def aggregates_rest(self, body):
+ if self.parameters.get('aggregate_name') is not None:
+ body['aggregates'] = [{'name': self.parameters['aggregate_name']}]
+ if self.parameters.get('aggr_list') is not None:
+ body['aggregates'] = [{'name': name} for name in self.parameters['aggr_list']]
+ if self.parameters.get('aggr_list_multiplier') is not None:
+ body['constituents_per_aggregate'] = self.parameters['aggr_list_multiplier']
+ return body
+
+ def volume_modify_attributes_rest(self, params):
+ body = self.modify_volume_body_rest(params)
+ dummy, error = self.volume_rest_patch(body)
+ if error:
+ self.module.fail_json(msg='Error modifying volume %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ @staticmethod
+ def bool_to_online(item):
+ return 'online' if item else 'offline'
+
+ def modify_volume_body_rest(self, params):
+ body = {}
+ for key, option, transform in [
+ ('analytics.state', 'analytics', None),
+ ('guarantee.type', 'space_guarantee', None),
+ ('space.snapshot.reserve_percent', 'percent_snapshot_space', None),
+ ('snapshot_policy.name', 'snapshot_policy', None),
+ ('nas.export_policy.name', 'export_policy', None),
+ ('nas.unix_permissions', 'unix_permissions', None),
+ ('nas.gid', 'group_id', None),
+ ('nas.uid', 'user_id', None),
+ # only one of these 2 options for QOS policy can be defined at most
+ ('qos.policy.name', 'qos_policy_group', None),
+ ('qos.policy.name', 'qos_adaptive_policy_group', None),
+ ('comment', 'comment', None),
+ ('space.logical_space.enforcement', 'logical_space_enforcement', None),
+ ('space.logical_space.reporting', 'logical_space_reporting', None),
+ ('tiering.min_cooling_days', 'tiering_minimum_cooling_days', None),
+ ('state', 'is_online', self.bool_to_online),
+ ('_tags', 'tags', None)
+ ]:
+ value = self.parameters.get(option)
+ if value is not None and transform:
+ value = transform(value)
+ if value is not None:
+ body[key] = value
+
+ # not too sure why we don't always set them
+ # one good reason are fields that are not supported on all releases
+ for key, option, transform in [
+ ('nas.security_style', 'volume_security_style', None),
+ ('tiering.policy', 'tiering_policy', None),
+ ('files.maximum', 'max_files', None),
+ ]:
+ if params and params.get(option) is not None:
+ body[key] = self.parameters[option]
+
+ if params and params.get('snaplock') is not None:
+ sl_dict = self.na_helper.filter_out_none_entries(self.parameters['snaplock']) or {}
+ # type is not allowed in patch, and we already prevented any change in type
+ sl_dict.pop('type', None)
+ if sl_dict:
+ body['snaplock'] = sl_dict
+ return body
+
+ def change_volume_state_rest(self):
+ body = {
+ 'state': self.bool_to_online(self.parameters['is_online']),
+ }
+ dummy, error = self.volume_rest_patch(body)
+ if error:
+ self.module.fail_json(msg='Error changing state of volume %s: %s' % (self.parameters['name'],
+ to_native(error)),
+ exception=traceback.format_exc())
+ return body['state'], None
+
+ def volume_unmount_rest(self, fail_on_error=True):
+ body = {
+ 'nas.path': '',
+ }
+ dummy, error = self.volume_rest_patch(body)
+ if error and fail_on_error:
+ self.module.fail_json(msg='Error unmounting volume %s with path "%s": %s' % (self.parameters['name'],
+ self.parameters.get('junction_path'),
+ to_native(error)),
+ exception=traceback.format_exc())
+ return error
+
+ def volume_mount_rest(self):
+ body = {
+ 'nas.path': self.parameters['junction_path']
+ }
+ dummy, error = self.volume_rest_patch(body)
+ if error:
+ self.module.fail_json(msg='Error mounting volume %s with path "%s": %s' % (self.parameters['name'],
+ self.parameters['junction_path'],
+ to_native(error)),
+ exception=traceback.format_exc())
+
+ def set_efficiency_rest(self):
+ body = {}
+ if self.parameters.get('efficiency_policy') is not None:
+ body['efficiency.policy.name'] = self.parameters['efficiency_policy']
+ if self.get_compression():
+ body['efficiency.compression'] = self.get_compression()
+ if not body:
+ return
+ dummy, error = self.volume_rest_patch(body)
+ if error:
+ self.module.fail_json(msg='Error setting efficiency for volume %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def encryption_conversion_rest(self):
+ # volume-encryption-conversion-start
+ # Set the "encryption.enabled" field to "true" to start the encryption conversion operation.
+ body = {
+ 'encryption.enabled': True
+ }
+ dummy, error = self.volume_rest_patch(body)
+ if error:
+ self.module.fail_json(msg='Error enabling encryption for volume %s: %s' % (self.parameters['name'],
+ to_native(error)),
+ exception=traceback.format_exc())
+ if self.parameters.get('wait_for_completion'):
+ self.wait_for_volume_encryption_conversion_rest()
+
+ def resize_volume_rest(self):
+ query = None
+ if self.parameters.get('sizing_method') is not None:
+ query = dict(sizing_method=self.parameters['sizing_method'])
+ body = {
+ 'size': self.parameters['size']
+ }
+ dummy, error = self.volume_rest_patch(body, query)
+ if error:
+ self.module.fail_json(msg='Error resizing volume %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def move_volume_rest(self, encrypt_destination):
+ body = {
+ 'movement.destination_aggregate.name': self.parameters['aggregate_name'],
+ }
+ if encrypt_destination is not None:
+ body['encryption.enabled'] = encrypt_destination
+ dummy, error = self.volume_rest_patch(body)
+ if error:
+ self.module.fail_json(msg='Error moving volume %s: %s' % (self.parameters['name'], to_native(error)),
+ exception=traceback.format_exc())
+ if self.parameters.get('wait_for_completion'):
+ self.wait_for_volume_move_rest()
+
+ def volume_rest_patch(self, body, query=None, uuid=None):
+ if not uuid:
+ uuid = self.parameters['uuid']
+ if not uuid:
+ self.module.fail_json(msg='Could not read UUID for volume %s in patch.' % self.parameters['name'])
+ return rest_generic.patch_async(self.rest_api, 'storage/volumes', uuid, body, query=query, job_timeout=self.parameters['time_out'])
+
+ def get_qos_policy_group(self):
+ if self.parameters.get('qos_policy_group') is not None:
+ return self.parameters['qos_policy_group']
+ if self.parameters.get('qos_adaptive_policy_group') is not None:
+ return self.parameters['qos_adaptive_policy_group']
+ return None
+
+ def get_compression(self):
+ if self.parameters.get('compression') and self.parameters.get('inline_compression'):
+ return 'both'
+ if self.parameters.get('compression'):
+ return 'background'
+ if self.parameters.get('inline_compression'):
+ return 'inline'
+ if self.parameters.get('compression') is False and self.parameters.get('inline_compression') is False:
+ return 'none'
+ return None
+
+ def rest_errors(self):
+        # For variables that have been merged together, we should fail before we do anything
+ if self.parameters.get('qos_policy_group') and self.parameters.get('qos_adaptive_policy_group'):
+ self.module.fail_json(msg='Error: With Rest API qos_policy_group and qos_adaptive_policy_group are now '
+ 'the same thing, and cannot be set at the same time')
+
+ ontap_97_options = ['nas_application_template']
+ if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 7) and any(x in self.parameters for x in ontap_97_options):
+ self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_97_options, version='9.7'))
+ if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9) and\
+ self.na_helper.safe_get(self.parameters, ['nas_application_template', 'flexcache', 'dr_cache']) is not None:
+ self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version('flexcache: dr_cache', version='9.9'))
+
+ def format_get_volume_rest(self, record):
+ is_online = record.get('state') == 'online'
+        # TODO FIX THIS!!!! ZAPI would only return a single aggr, REST can return more than 1.
+        # For now I'm going to hard code this, but we need a way to show all aggrs
+ aggregates = record.get('aggregates', None)
+ aggr_name = aggregates[0].get('name', None) if aggregates else None
+ rest_compression = self.na_helper.safe_get(record, ['efficiency', 'compression'])
+ junction_path = self.na_helper.safe_get(record, ['nas', 'path'])
+ if junction_path is None:
+ junction_path = ''
+ # if analytics.state is initializing it will be ON once completed.
+ state = self.na_helper.safe_get(record, ['analytics', 'state'])
+ analytics = 'on' if state == 'initializing' else state
+ return {
+ 'tags': record.get('_tags', []),
+ 'name': record.get('name', None),
+ 'analytics': analytics,
+ 'encrypt': self.na_helper.safe_get(record, ['encryption', 'enabled']),
+ 'tiering_policy': self.na_helper.safe_get(record, ['tiering', 'policy']),
+ 'export_policy': self.na_helper.safe_get(record, ['nas', 'export_policy', 'name']),
+ 'aggregate_name': aggr_name,
+ 'aggregates': aggregates,
+ 'flexgroup_uuid': record.get('uuid', None), # this might need some additional logic
+ 'instance_uuid': record.get('uuid', None), # this might need some additional logic
+ 'junction_path': junction_path,
+ 'style_extended': record.get('style', None),
+ 'type': record.get('type', None),
+ 'comment': record.get('comment', None),
+ 'qos_policy_group': self.na_helper.safe_get(record, ['qos', 'policy', 'name']),
+ 'qos_adaptive_policy_group': self.na_helper.safe_get(record, ['qos', 'policy', 'name']),
+ 'volume_security_style': self.na_helper.safe_get(record, ['nas', 'security_style']),
+ 'group_id': self.na_helper.safe_get(record, ['nas', 'gid']),
+            # REST returns an int while ZAPI returns a string; force the REST value to be a string
+ 'unix_permissions': str(self.na_helper.safe_get(record, ['nas', 'unix_permissions'])),
+ 'user_id': self.na_helper.safe_get(record, ['nas', 'uid']),
+ 'snapshot_policy': self.na_helper.safe_get(record, ['snapshot_policy', 'name']),
+ 'percent_snapshot_space': self.na_helper.safe_get(record, ['space', 'snapshot', 'reserve_percent']),
+ 'size': self.na_helper.safe_get(record, ['space', 'size']),
+ 'space_guarantee': self.na_helper.safe_get(record, ['guarantee', 'type']),
+ 'is_online': is_online,
+ 'uuid': record.get('uuid', None),
+ 'efficiency_policy': self.na_helper.safe_get(record, ['efficiency', 'policy', 'name']),
+ 'compression': rest_compression in ('both', 'background'),
+ 'inline_compression': rest_compression in ('both', 'inline'),
+ 'logical_space_enforcement': self.na_helper.safe_get(record, ['space', 'logical_space', 'enforcement']),
+ 'logical_space_reporting': self.na_helper.safe_get(record, ['space', 'logical_space', 'reporting']),
+ 'tiering_minimum_cooling_days': self.na_helper.safe_get(record, ['tiering', 'min_cooling_days']),
+ 'snaplock': self.na_helper.safe_get(record, ['snaplock']),
+ 'max_files': self.na_helper.safe_get(record, ['files', 'maximum']),
+
+ }
+
+ def is_fabricpool(self, name, aggregate_uuid):
+ '''whether the aggregate is associated with one or more object stores'''
+ api = 'storage/aggregates/%s/cloud-stores' % aggregate_uuid
+ records, error = rest_generic.get_0_or_more_records(self.rest_api, api)
+ if error:
+ self.module.fail_json(msg="Error getting object store for aggregate: %s: %s" % (name, error))
+ return records is not None and len(records) > 0
+
+ def tiering_control(self, current):
+ '''return whether the backend meets FabricPool requirements:
+ required: all aggregates are in a FabricPool
+ disallowed: all aggregates are not in a FabricPool
+ best_effort: mixed
+ '''
+ fabricpools = [self.is_fabricpool(aggregate['name'], aggregate['uuid'])
+ for aggregate in current.get('aggregates', [])]
+ if not fabricpools:
+ return None
+ if all(fabricpools):
+ return 'required'
+ if any(fabricpools):
+ return 'best_effort'
+ return 'disallowed'
+
+ def set_actions(self):
+ """define what needs to be done"""
+ actions = []
+ modify = {}
+
+ current = self.get_volume()
+ self.volume_style = self.get_volume_style(current)
+ if self.volume_style == 'flexgroup' and self.parameters.get('aggregate_name') is not None:
+ self.module.fail_json(msg='Error: aggregate_name option cannot be used with FlexGroups.')
+
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action == 'delete' or self.parameters['state'] == 'absent':
+ return ['delete'] if cd_action == 'delete' else [], current, modify
+ if cd_action == 'create':
+ # report an error if the vserver does not exist (it can be also be a cluster or node vserver with REST)
+ if self.use_rest:
+ rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True)
+ actions = ['create']
+ if self.parameters.get('from_name'):
+ # create by renaming
+ current = self.get_volume(self.parameters['from_name'])
+ rename = self.na_helper.is_rename_action(current, None)
+ if rename is None:
+ self.module.fail_json(msg="Error renaming volume: cannot find %s" % self.parameters['from_name'])
+ if rename:
+ cd_action = None
+ actions = ['rename']
+ elif self.parameters.get('from_vserver'):
+ # create by rehosting
+ if self.use_rest:
+ self.module.fail_json(msg='Error: ONTAP REST API does not support Rehosting Volumes')
+ actions = ['rehost']
+ self.na_helper.changed = True
+ if self.parameters.get('snapshot_restore'):
+ # update by restoring
+ if 'create' in actions:
+ self.module.fail_json(msg="Error restoring volume: cannot find parent: %s" % self.parameters['name'])
+ # let's allow restoring after a rename or rehost
+ actions.append('snapshot_restore')
+ self.na_helper.changed = True
+ self.validate_snaplock_changes(current)
+ if cd_action is None and 'rehost' not in actions:
+ # Ignoring modify after a rehost, as we can't read the volume properties on the remote volume
+ # or maybe we could, using a cluster ZAPI, but since ZAPI is going away, is it worth it?
+ modify = self.set_modify_dict(current)
+ if modify:
+ # ZAPI decrypts volume using volume move api and aggregate name is required.
+ if not self.use_rest and modify.get('encrypt') is False and not self.parameters.get('aggregate_name'):
+ self.parameters['aggregate_name'] = current['aggregate_name']
+ if self.use_rest and modify.get('encrypt') is False and not modify.get('aggregate_name'):
+ self.module.fail_json(msg="Error: unencrypting volume is only supported when moving the volume to another aggregate in REST.")
+ actions.append('modify')
+ if self.parameters.get('nas_application_template') is not None:
+ application = self.get_application()
+ changed = self.na_helper.changed
+ app_component = self.create_nas_application_component() if self.parameters['state'] == 'present' else None
+ modify_app = self.na_helper.get_modified_attributes(application, app_component)
+ # restore current change state, as we ignore this
+ if modify_app:
+ self.na_helper.changed = changed
+ self.module.warn('Modifying an app is not supported at present: ignoring: %s' % str(modify_app))
+ return actions, current, modify
+
+ def apply(self):
+ '''Call create/modify/delete operations'''
+ actions, current, modify = self.set_actions()
+ is_online = current.get('is_online') if current else None
+ response = None
+
+        # the rehost, snapshot_restore and modify actions require the volume state to be online.
+ online_modify_options = [x for x in actions if x in ['rehost', 'snapshot_restore', 'modify']]
+        # ignore options that require the volume to be online.
+ if not modify.get('is_online') and is_online is False and online_modify_options:
+ modify_keys = []
+ if 'modify' in online_modify_options:
+ online_modify_options.remove('modify')
+ modify_keys = [key for key in modify if key != 'is_online']
+ action_msg = 'perform action(s): %s' % online_modify_options if online_modify_options else ''
+ modify_msg = ' and modify: %s' % modify_keys if action_msg else 'modify: %s' % modify_keys
+ self.module.warn("Cannot %s%s when volume is offline." % (action_msg, modify_msg))
+ modify, actions = {}, []
+ if 'rename' in actions:
+ # rename can be done if volume is offline.
+ actions = ['rename']
+ else:
+ self.na_helper.changed = False
+
+ if self.na_helper.changed and not self.module.check_mode:
+ # always online volume first before other changes.
+ # rehost, snapshot_restore and modify requires volume in online state.
+ if modify.get('is_online'):
+ self.parameters['uuid'] = current['uuid']
+ # when moving to online, include parameters that get does not return when volume is offline
+ for field in ['volume_security_style', 'group_id', 'user_id', 'percent_snapshot_space']:
+ if self.parameters.get(field) is not None:
+ modify[field] = self.parameters[field]
+ self.change_volume_state()
+ if 'rename' in actions:
+ self.rename_volume()
+ if 'rehost' in actions:
+ # REST DOES NOT have a volume-rehost equivalent
+ self.rehost_volume()
+ if 'snapshot_restore' in actions:
+ self.snapshot_restore_volume()
+ if 'create' in actions:
+ response = self.create_volume()
+ # if we create using ZAPI and modify only options are set (snapdir_access or atime_update), we need to run a modify.
+ # The modify also takes care of efficiency (sis) parameters and snapshot_auto_delete.
+ # If we create using REST application, some options are not available, we may need to run a modify.
+ # volume should be online for modify.
+ current = self.get_volume()
+ if current:
+ self.volume_created = True
+ modify = self.set_modify_dict(current, after_create=True)
+ is_online = current.get('is_online')
+ if modify:
+ if is_online:
+ actions.append('modify')
+ else:
+ self.module.warn("Cannot perform actions: modify when volume is offline.")
+ # restore this, as set_modify_dict could set it to False
+ self.na_helper.changed = True
+ if 'delete' in actions:
+ self.parameters['uuid'] = current['uuid']
+ self.delete_volume(current)
+ if 'modify' in actions:
+ self.parameters['uuid'] = current['uuid']
+ self.take_modify_actions(modify)
+
+ result = netapp_utils.generate_result(self.na_helper.changed, actions, modify, response)
+ self.module.exit_json(**result)
+
+
+def main():
+ '''Apply volume operations from playbook'''
+ obj = NetAppOntapVolume()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py
new file mode 100644
index 000000000..0b40c5d45
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+
+# (c) 2019-2022, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_volume_autosize
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_volume_autosize
+short_description: NetApp ONTAP manage volume autosize
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modify Volume AutoSize
+options:
+ volume:
+ description:
+ - The name of the flexible volume for which we want to set autosize.
+ type: str
+ required: true
+
+ mode:
+ description:
+ - Specify the flexible volume's autosize mode of operation.
+ type: str
+ choices: ['grow', 'grow_shrink', 'off']
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+ type: str
+
+ grow_threshold_percent:
+ description:
+ - Specifies the percentage of the flexible volume's capacity at which autogrow is initiated.
+ - The default grow threshold varies from 85% to 98%, depending on the volume size.
+ - It is an error for the grow threshold to be less than or equal to the shrink threshold.
+ - Range between 0 and 100
+ type: int
+
+ increment_size:
+ description:
+ - Specify the flexible volume's increment size using the following format < number > [k|m|g|t]
+ - The amount is the absolute size to set.
+ - The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
+ type: str
+
+ maximum_size:
+ description:
+ - Specify the flexible volume's maximum allowed size using the following format < number > [k|m|g|t]
+ - The amount is the absolute size to set.
+ - The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
+ - The default value is 20% greater than the volume size at the time autosize was enabled.
+ - It is an error for the maximum volume size to be less than the current volume size.
+ - It is also an error for the maximum size to be less than or equal to the minimum size.
+ type: str
+
+ minimum_size:
+ description:
+ - Specify the flexible volume's minimum allowed size using the following format < number > [k|m|g|t] The amount is the absolute size to set.
+ - The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
+ - The default value is the size of the volume at the time the 'grow_shrink' mode was enabled.
+ - It is an error for the minimum size to be greater than or equal to the maximum size.
+ type: str
+
+ reset:
+ description:
+ - "Sets the values of maximum_size, increment_size, minimum_size, grow_threshold_percent, shrink_threshold_percent and mode to their defaults"
+      - If the reset parameter is present, the system will always perform the reset action, so idempotency is not supported.
+ type: bool
+
+ shrink_threshold_percent:
+ description:
+ - Specifies the percentage of the flexible volume's capacity at which autoshrink is initiated.
+      - The default shrink threshold is 50%. It is an error for the shrink threshold to be greater than or equal to the grow threshold.
+ - Range between 0 and 100
+ type: int
+'''
+
+EXAMPLES = """
+ - name: Modify volume autosize
+ netapp.ontap.na_ontap_volume_autosize:
+ hostname: 10.193.79.189
+ username: admin
+ password: netapp1!
+ volume: ansibleVolumesize12
+ mode: grow
+ grow_threshold_percent: 99
+ increment_size: 50m
+ maximum_size: 10g
+ minimum_size: 21m
+ shrink_threshold_percent: 40
+ vserver: ansible_vserver
+
+ - name: Reset volume autosize
+ netapp.ontap.na_ontap_volume_autosize:
+ hostname: 10.193.79.189
+ username: admin
+ password: netapp1!
+ volume: ansibleVolumesize12
+ reset: true
+ vserver: ansible_vserver
+"""
+
+RETURN = """
+"""
+import copy
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppOntapVolumeAutosize:
    ''' Enable, disable or tune the autosize settings of a volume, via REST (ONTAP 9.6+) or ZAPI. '''
    def __init__(self):
        self.use_rest = False
        # volume-autosize returns sizes in KB, not bytes like the volume APIs,
        # so each unit multiplier is shifted down by one factor of 1024.
        self._size_unit_map = dict(
            k=1,
            m=1024,
            g=1024 ** 2,
            t=1024 ** 3,
        )
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            volume=dict(required=True, type="str"),
            mode=dict(required=False, choices=['grow', 'grow_shrink', 'off']),
            vserver=dict(required=True, type='str'),
            grow_threshold_percent=dict(required=False, type='int'),
            increment_size=dict(required=False, type='str'),
            maximum_size=dict(required=False, type='str'),
            minimum_size=dict(required=False, type='str'),
            reset=dict(required=False, type='bool'),
            shrink_threshold_percent=dict(required=False, type='int')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            # reset restores defaults, so it cannot be combined with any explicit setting
            mutually_exclusive=[
                ['reset', 'maximum_size'],
                ['reset', 'increment_size'],
                ['reset', 'minimum_size'],
                ['reset', 'grow_threshold_percent'],
                ['reset', 'shrink_threshold_percent'],
                ['reset', 'mode']
            ]
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # API should be used for ONTAP 9.6 or higher, ZAPI for lower version
        self.rest_api = OntapRestAPI(self.module)
        if self.rest_api.is_rest():
            self.use_rest = True
            # increment_size and reset are not supported with the REST API
            if self.parameters.get('increment_size'):
                self.module.fail_json(msg="Rest API does not support increment size, please switch to ZAPI")
            if self.parameters.get('reset'):
                self.module.fail_json(msg="Rest API does not support reset, please switch to ZAPI")
        else:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_volume_autosize(self):
        """
        Get volume_autosize information from the ONTAP system.
        :return: dict of current autosize settings (plus 'uuid' with REST), or fail_json on error.
        """
        if self.use_rest:
            query = {
                'name': self.parameters['volume'],
                'svm.name': self.parameters['vserver'],
                'fields': 'autosize,uuid'
            }
            api = 'storage/volumes'
            response, error = rest_generic.get_one_record(self.rest_api, api, query)
            if error is not None:
                self.module.fail_json(msg='Error fetching volume autosize info for %s: %s' % (self.parameters['volume'], error))
            if response:
                return self._create_get_volume_return(response['autosize'], response['uuid'])
            self.module.fail_json(msg='Error fetching volume autosize info for %s: volume not found for vserver %s.'
                                  % (self.parameters['volume'], self.parameters['vserver']))
        else:
            volume_autosize_info = netapp_utils.zapi.NaElement('volume-autosize-get')
            volume_autosize_info.add_new_child('volume', self.parameters['volume'])
            try:
                result = self.server.invoke_successfully(volume_autosize_info, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error fetching volume autosize info for %s: %s.' % (self.parameters['volume'], to_native(error)),
                                      exception=traceback.format_exc())
            return self._create_get_volume_return(result)

    def _create_get_volume_return(self, results, uuid=None):
        """
        Map a REST 'autosize' dict or a ZAPI volume-autosize-get response to module parameter names.
        :param results: REST dict or ZAPI NaElement, depending on self.use_rest.
        :param uuid: volume UUID (REST only).
        :return: dict of current settings, or None if nothing was found.
        """
        return_value = {}
        if self.use_rest:
            return_value['uuid'] = uuid
            if 'mode' in results:
                return_value['mode'] = results['mode']
            if 'grow_threshold' in results:
                return_value['grow_threshold_percent'] = results['grow_threshold']
            if 'maximum' in results:
                return_value['maximum_size'] = results['maximum']
            if 'minimum' in results:
                return_value['minimum_size'] = results['minimum']
            if 'shrink_threshold' in results:
                return_value['shrink_threshold_percent'] = results['shrink_threshold']
        else:
            if results.get_child_by_name('mode'):
                return_value['mode'] = results.get_child_content('mode')
            if results.get_child_by_name('grow-threshold-percent'):
                return_value['grow_threshold_percent'] = int(results.get_child_content('grow-threshold-percent'))
            if results.get_child_by_name('increment-size'):
                return_value['increment_size'] = results.get_child_content('increment-size')
            if results.get_child_by_name('maximum-size'):
                return_value['maximum_size'] = results.get_child_content('maximum-size')
            if results.get_child_by_name('minimum-size'):
                return_value['minimum_size'] = results.get_child_content('minimum-size')
            if results.get_child_by_name('shrink-threshold-percent'):
                return_value['shrink_threshold_percent'] = int(results.get_child_content('shrink-threshold-percent'))
        if not return_value:
            return_value = None
        return return_value

    def modify_volume_autosize(self, uuid):
        """
        Modify a volume's autosize settings.
        :param uuid: volume UUID (REST only, ignored with ZAPI).
        :return: None, fail_json on error.
        """
        if self.use_rest:
            autosize = {}
            if self.parameters.get('mode'):
                autosize['mode'] = self.parameters['mode']
            # integer thresholds use 'is not None' so an explicit 0 (documented range is 0-100) is not ignored
            if self.parameters.get('grow_threshold_percent') is not None:
                autosize['grow_threshold'] = self.parameters['grow_threshold_percent']
            if self.parameters.get('maximum_size'):
                autosize['maximum'] = self.parameters['maximum_size']
            if self.parameters.get('minimum_size'):
                autosize['minimum'] = self.parameters['minimum_size']
            if self.parameters.get('shrink_threshold_percent') is not None:
                autosize['shrink_threshold'] = self.parameters['shrink_threshold_percent']
            if not autosize:
                return
            api = 'storage/volumes'
            body = {'autosize': autosize}
            dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
            if error is not None:
                self.module.fail_json(msg="Error modifying volume autosize for %s: %s" % (self.parameters["volume"], error))

        else:
            volume_autosize_info = netapp_utils.zapi.NaElement('volume-autosize-set')
            volume_autosize_info.add_new_child('volume', self.parameters['volume'])
            if self.parameters.get('mode'):
                volume_autosize_info.add_new_child('mode', self.parameters['mode'])
            # integer thresholds use 'is not None' so an explicit 0 (documented range is 0-100) is not ignored
            if self.parameters.get('grow_threshold_percent') is not None:
                volume_autosize_info.add_new_child('grow-threshold-percent', str(self.parameters['grow_threshold_percent']))
            if self.parameters.get('increment_size'):
                volume_autosize_info.add_new_child('increment-size', self.parameters['increment_size'])
            # reset is a bool, 'is not None' preserves an explicit False
            if self.parameters.get('reset') is not None:
                volume_autosize_info.add_new_child('reset', str(self.parameters['reset']))
            if self.parameters.get('maximum_size'):
                volume_autosize_info.add_new_child('maximum-size', self.parameters['maximum_size'])
            if self.parameters.get('minimum_size'):
                volume_autosize_info.add_new_child('minimum-size', self.parameters['minimum_size'])
            if self.parameters.get('shrink_threshold_percent') is not None:
                volume_autosize_info.add_new_child('shrink-threshold-percent', str(self.parameters['shrink_threshold_percent']))
            try:
                self.server.invoke_successfully(volume_autosize_info, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg="Error modifying volume autosize for %s: %s." % (self.parameters["volume"], to_native(error)),
                                      exception=traceback.format_exc())

    def modify_to_kb(self, converted_parameters):
        """
        Normalize the size parameters: bytes (int) for REST, KB (str) for ZAPI.
        :param converted_parameters: dict of all parameters (modified in place).
        :return: the same dict, with size parameters converted.
        """
        for attr in ['maximum_size', 'minimum_size', 'increment_size']:
            if converted_parameters.get(attr) is not None:
                if self.use_rest:
                    converted_parameters[attr] = self.convert_to_byte(attr, converted_parameters)
                else:
                    converted_parameters[attr] = str(self.convert_to_kb(attr, converted_parameters))
        return converted_parameters

    def convert_to_kb(self, variable, converted_parameters):
        """
        Convert a size string such as '10m' to its value in KB.
        :param variable: name of the parameter being converted (for error messages).
        :param converted_parameters: dict of all parameters.
        :return: size in KB as an int, fail_json on malformed input.
        """
        value = converted_parameters.get(variable)
        if len(value) < 2:
            self.module.fail_json(msg="%s must start with a number, and must end with a k, m, g or t, found '%s'." % (variable, value))
        if value[-1] not in ['k', 'm', 'g', 't']:
            self.module.fail_json(msg="%s must end with a k, m, g or t, found %s in %s." % (variable, value[-1], value))
        try:
            digits = int(value[:-1])
        except ValueError:
            self.module.fail_json(msg="%s must start with a number, found %s in %s." % (variable, value[:-1], value))
        return self._size_unit_map[value[-1]] * digits

    def convert_to_byte(self, variable, converted_parameters):
        """Convert a size string such as '10m' to its value in bytes (REST expects bytes)."""
        return self.convert_to_kb(variable, converted_parameters) * 1024

    def apply(self):
        """Compare current and desired autosize settings, and modify if needed."""
        current = self.get_volume_autosize()
        converted_parameters = copy.deepcopy(self.parameters)
        converted_parameters = self.modify_to_kb(converted_parameters)
        self.na_helper.get_modified_attributes(current, converted_parameters)
        # reset always triggers an action, so idempotency is not supported with reset
        if self.parameters.get('reset') is True:
            self.na_helper.changed = True
        if self.na_helper.changed and not self.module.check_mode:
            uuid = current.get('uuid') if current else None
            self.modify_volume_autosize(uuid=uuid)

        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Module entry point: build the autosize object and apply the playbook settings."""
    NetAppOntapVolumeAutosize().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py
new file mode 100644
index 000000000..a2b40e0b2
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_volume_clone
+short_description: NetApp ONTAP manage volume clones.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create NetApp ONTAP volume clones.
+- A FlexClone License is required to use this module
+options:
+ state:
+ description:
+ - Whether volume clone should be created.
+ choices: ['present']
+ type: str
+ default: 'present'
+ parent_volume:
+ description:
+ - The parent volume of the volume clone being created.
+ required: true
+ type: str
+ name:
+ description:
+ - The name of the volume clone being created.
+ required: true
+ type: str
+ aliases:
+ - volume
+ vserver:
+ description:
+ - Vserver in which the volume clone should be created.
+ required: true
+ type: str
+ parent_snapshot:
+ description:
+ - Parent snapshot in which volume clone is created off.
+ type: str
+ parent_vserver:
+ description:
+ - Vserver of parent volume in which clone is created off.
+ type: str
+ qos_policy_group_name:
+ description:
+ - The qos-policy-group-name which should be set for volume clone.
+ type: str
+ space_reserve:
+ description:
+ - The space_reserve setting which should be used for the volume clone.
+ choices: ['volume', 'none']
+ type: str
+ volume_type:
+ description:
+ - The volume-type setting which should be used for the volume clone.
+ choices: ['rw', 'dp']
+ type: str
+ junction_path:
+ version_added: 2.8.0
+ description:
+ - Junction path of the volume.
+ type: str
+ uid:
+ version_added: 2.9.0
+ description:
+ - The UNIX user ID for the clone volume.
+ type: int
+ gid:
+ version_added: 2.9.0
+ description:
+ - The UNIX group ID for the clone volume.
+ type: int
+ split:
+ version_added: '20.2.0'
+ description:
+ - Split clone volume from parent volume.
+ type: bool
+'''
+
+EXAMPLES = """
+ - name: create volume clone
+ na_ontap_volume_clone:
+ state: present
+ username: "{{ netapp username }}"
+ password: "{{ netapp password }}"
+ hostname: "{{ netapp hostname }}"
+ vserver: vs_hack
+ parent_volume: normal_volume
+ name: clone_volume_7
+ space_reserve: none
+ parent_snapshot: backup1
+ junction_path: /clone_volume_7
+ uid: 1
+ gid: 1
+"""
+
+RETURN = """
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
+
+
class NetAppONTAPVolumeClone:
    """
    Creates a volume clone (FlexClone), and optionally splits it from its parent volume.
    """

    def __init__(self):
        """
        Initialize the NetAppOntapVolumeClone class
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present'], default='present'),
            parent_volume=dict(required=True, type='str'),
            name=dict(required=True, type='str', aliases=["volume"]),
            vserver=dict(required=True, type='str'),
            parent_snapshot=dict(required=False, type='str', default=None),
            parent_vserver=dict(required=False, type='str', default=None),
            qos_policy_group_name=dict(required=False, type='str', default=None),
            space_reserve=dict(required=False, type='str', choices=['volume', 'none'], default=None),
            volume_type=dict(required=False, type='str', choices=['rw', 'dp']),
            junction_path=dict(required=False, type='str', default=None),
            uid=dict(required=False, type='int'),
            gid=dict(required=False, type='int'),
            split=dict(required=False, type='bool', default=None),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            required_together=[
                ['uid', 'gid']
            ],
            mutually_exclusive=[
                ('junction_path', 'parent_vserver'),
                ('uid', 'parent_vserver'),
                ('gid', 'parent_vserver')
            ]
        )

        self.uuid = None  # UUID of the FlexClone if it exists, or after creation
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = OntapRestAPI(self.module)
        unsupported_rest_properties = ['space_reserve']
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            if self.parameters.get('parent_vserver'):
                # use cluster ZAPI, as vserver ZAPI does not support parent-vserver for create
                self.create_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
                # keep vserver for ems log and clone-get
                self.vserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
            else:
                self.vserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
                self.create_server = self.vserver

    def create_volume_clone(self):
        """
        Creates a new volume clone.  Delegates to REST when supported, otherwise builds
        the volume-clone-create ZAPI request from the optional parameters.
        """
        if self.use_rest:
            return self.create_volume_clone_rest()
        clone_obj = netapp_utils.zapi.NaElement('volume-clone-create')
        clone_obj.add_new_child("parent-volume", self.parameters['parent_volume'])
        clone_obj.add_new_child("volume", self.parameters['name'])
        if self.parameters.get('qos_policy_group_name'):
            clone_obj.add_new_child("qos-policy-group-name", self.parameters['qos_policy_group_name'])
        if self.parameters.get('space_reserve'):
            clone_obj.add_new_child("space-reserve", self.parameters['space_reserve'])
        if self.parameters.get('parent_snapshot'):
            clone_obj.add_new_child("parent-snapshot", self.parameters['parent_snapshot'])
        if self.parameters.get('parent_vserver'):
            clone_obj.add_new_child("parent-vserver", self.parameters['parent_vserver'])
            clone_obj.add_new_child("vserver", self.parameters['vserver'])
        if self.parameters.get('volume_type'):
            clone_obj.add_new_child("volume-type", self.parameters['volume_type'])
        if self.parameters.get('junction_path'):
            clone_obj.add_new_child("junction-path", self.parameters['junction_path'])
        # 'is not None' so uid/gid 0 (root) is honored; uid and gid are required together
        if self.parameters.get('uid') is not None:
            clone_obj.add_new_child("uid", str(self.parameters['uid']))
            clone_obj.add_new_child("gid", str(self.parameters['gid']))
        try:
            self.create_server.invoke_successfully(clone_obj, True)
        except netapp_utils.zapi.NaApiError as exc:
            self.module.fail_json(msg='Error creating volume clone: %s: %s' % (self.parameters['name'], to_native(exc)))

    def modify_volume_clone(self):
        """
        Modify an existing volume clone.  The only supported modification is
        starting a split from the parent volume.
        """
        if 'split' in self.parameters and self.parameters['split']:
            self.start_volume_clone_split()

    def start_volume_clone_split(self):
        """
        Starts a volume clone split.
        """
        if self.use_rest:
            return self.start_volume_clone_split_rest()
        clone_obj = netapp_utils.zapi.NaElement('volume-clone-split-start')
        clone_obj.add_new_child("volume", self.parameters['name'])
        try:
            self.vserver.invoke_successfully(clone_obj, True)
        except netapp_utils.zapi.NaApiError as exc:
            self.module.fail_json(msg='Error starting volume clone split: %s: %s' % (self.parameters['name'], to_native(exc)))

    def get_volume_clone(self):
        """
        Return current clone state as a dict (or None when not found).
        With ZAPI, only the 'split' key is reported.
        """
        if self.use_rest:
            return self.get_volume_clone_rest()
        clone_obj = netapp_utils.zapi.NaElement('volume-clone-get')
        clone_obj.add_new_child("volume", self.parameters['name'])
        try:
            results = self.vserver.invoke_successfully(clone_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            # Error 15661 denotes a volume clone not being found.
            if to_native(error.code) == "15661":
                return None
            self.module.fail_json(msg='Error fetching volume clone information %s: %s' % (self.parameters['name'], to_native(error)))
        current = None
        if results.get_child_by_name('attributes'):
            attributes = results.get_child_by_name('attributes')
            info = attributes.get_child_by_name('volume-clone-info')
            # Check if clone is currently splitting. Whilst a split is in
            # progress, these attributes are present in 'volume-clone-info':
            # block-percentage-complete, blocks-scanned & blocks-updated.
            current = {
                'split': bool(
                    info.get_child_by_name('block-percentage-complete')
                    or info.get_child_by_name('blocks-scanned')
                    or info.get_child_by_name('blocks-updated')
                )
            }
        return current

    def get_volume_clone_rest(self):
        """Fetch the clone volume with REST; return a normalized dict or None when absent."""
        api = 'storage/volumes'
        params = {'name': self.parameters['name'],
                  'svm.name': self.parameters['vserver'],
                  'fields': 'clone.is_flexclone,uuid'}
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg='Error getting volume clone %s: %s' % (self.parameters['name'], to_native(error)))
        if record:
            return self.format_get_volume_clone_rest(record)
        return record

    def format_get_volume_clone_rest(self, record):
        """Map a REST volume record to the keys used by apply()."""
        return {
            'name': record.get('name', None),
            'uuid': record.get('uuid', None),
            'is_clone': self.na_helper.safe_get(record, ['clone', 'is_flexclone']),
            # if it is a FlexClone, it is not split.
            # if it is not a FlexClone, it can be either the result of a split, or a plain volume. We mark it as split,
            # as it cannot be split again.
            'split': self.na_helper.safe_get(record, ['clone', 'is_flexclone']) is not True
        }

    def create_volume_clone_rest(self):
        """Create the FlexClone with REST, and capture its UUID for a later split."""
        api = 'storage/volumes'
        body = {'name': self.parameters['name'],
                'clone.parent_volume.name': self.parameters['parent_volume'],
                "clone.is_flexclone": True,
                "svm.name": self.parameters['vserver']}
        if self.parameters.get('qos_policy_group_name'):
            body['qos.policy.name'] = self.parameters['qos_policy_group_name']
        if self.parameters.get('parent_snapshot'):
            body['clone.parent_snapshot.name'] = self.parameters['parent_snapshot']
        if self.parameters.get('parent_vserver'):
            body['clone.parent_svm.name'] = self.parameters['parent_vserver']
        if self.parameters.get('volume_type'):
            body['type'] = self.parameters['volume_type']
        if self.parameters.get('junction_path'):
            body['nas.path'] = self.parameters['junction_path']
        # 'is not None' so uid/gid 0 (root) is honored
        if self.parameters.get('uid') is not None:
            body['nas.uid'] = self.parameters['uid']
        if self.parameters.get('gid') is not None:
            body['nas.gid'] = self.parameters['gid']
        query = {'return_records': 'true'}  # in order to capture UUID
        response, error = rest_generic.post_async(self.rest_api, api, body, query, job_timeout=120)
        if error:
            self.module.fail_json(
                msg='Error creating volume clone %s: %s' % (self.parameters['name'], to_native(error)))
        if response:
            record, error = rrh.check_for_0_or_1_records(api, response, error, query)
            if not error and record and 'uuid' not in record:
                error = 'uuid key not present in %s:' % record
            if error:
                self.module.fail_json(msg='Error: failed to parse create clone response: %s' % error)
            if record:
                self.uuid = record['uuid']

    def start_volume_clone_split_rest(self):
        """Start the clone split with REST; requires self.uuid to be set."""
        if self.uuid is None:
            self.module.fail_json(msg='Error starting volume clone split %s: %s' % (self.parameters['name'],
                                                                                    'clone UUID is not set'))
        api = 'storage/volumes'
        body = {'clone.split_initiated': True}
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body, job_timeout=120)
        if error:
            self.module.fail_json(msg='Error starting volume clone split %s: %s' % (self.parameters['name'],
                                                                                    to_native(error)))

    def apply(self):
        """
        Run Module based on playbook
        """
        current = self.get_volume_clone()
        if self.use_rest and current:
            self.uuid = current['uuid']
        if self.use_rest and current and not current['is_clone'] and not self.parameters.get('split'):
            self.module.fail_json(
                msg="Error: a volume %s which is not a FlexClone already exists, and split not requested." % self.parameters['name'])
        modify = None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            # the only thing that is supported is split
            current_split = {'split': current.get('split')} if current else None
            modify = self.na_helper.get_modified_attributes(current_split, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_volume_clone()
                if self.parameters.get('split'):
                    self.modify_volume_clone()
            if modify:
                self.modify_volume_clone()
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+
+
def main():
    """
    Module entry point: instantiate the volume-clone object and run the requested task.
    """
    NetAppONTAPVolumeClone().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_efficiency.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_efficiency.py
new file mode 100644
index 000000000..9da58b0a9
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_efficiency.py
@@ -0,0 +1,715 @@
+#!/usr/bin/python
+
+# (c) 2021-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_volume_efficiency
+short_description: NetApp ONTAP enables, disables or modifies volume efficiency
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '21.2.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Enable, modify or disable volume efficiency.
+ - Either path or volume_name is required.
+ - Only admin user can modify volume efficiency.
+options:
+ state:
+ description:
+ - Whether the specified volume efficiency should be enabled or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vserver:
+ description:
+ - Specifies the vserver for the volume.
+ required: true
+ type: str
+
+ path:
+ description:
+ - Specifies the path for the volume.
+ - Either C(path) or C(volume_name) is required.
+ - Requires ONTAP 9.9.1 or later with REST.
+ type: str
+
+ volume_name:
+ description:
+ - Specifies the volume name.
+ version_added: 22.3.0
+ type: str
+
+ schedule:
+ description:
+ - Specifies the storage efficiency schedule.
+ - Only supported with ZAPI.
+ type: str
+
+ policy:
+ description:
+ - Specifies the storage efficiency policy to use.
+ - By default, the following names are available 'auto', 'default', 'inline-only', '-'.
+ - Requires ONTAP 9.7 or later with REST.
+ type: str
+
+ enable_compression:
+ description:
+ - Specifies if compression is to be enabled.
+ type: bool
+
+ enable_inline_compression:
+ description:
+ - Specifies if in-line compression is to be enabled.
+ type: bool
+
+ enable_inline_dedupe:
+ description:
+ - Specifies if in-line deduplication is to be enabled, only supported on AFF systems or hybrid aggregates.
+ type: bool
+
+ enable_data_compaction:
+ description:
+ - Specifies if compaction is to be enabled.
+ type: bool
+
+ enable_cross_volume_inline_dedupe:
+ description:
+ - Specifies if in-line cross volume inline deduplication is to be enabled, this can only be enabled when inline deduplication is enabled.
+ type: bool
+
+ enable_cross_volume_background_dedupe:
+ description:
+ - Specifies if cross volume background deduplication is to be enabled, this can only be enabled when inline deduplication is enabled.
+ type: bool
+
+ volume_efficiency:
+ description:
+ - Start or Stop a volume efficiency operation on a given volume path.
+ - Requires ONTAP 9.11.1 or later with REST.
+ choices: ['start', 'stop']
+ version_added: '21.4.0'
+ type: str
+
+ start_ve_scan_all:
+ description:
+ - Specifies the scanner to scan the entire volume without applying share block optimization.
+ - Only supported with ZAPI.
+ version_added: '21.4.0'
+ type: bool
+
+ start_ve_build_metadata:
+ description:
+      - Specifies the scanner to scan the entire volume and generate fingerprint database without attempting the sharing.
+ - Only supported with ZAPI.
+ version_added: '21.4.0'
+ type: bool
+
+ start_ve_delete_checkpoint:
+ description:
+      - Specifies the scanner to delete the existing checkpoint and start the operation from the beginning.
+ - Only supported with ZAPI.
+ version_added: '21.4.0'
+ type: bool
+
+ start_ve_queue_operation:
+ description:
+      - Specifies the operation to queue if an existing operation is already running on the volume and in the fingerprint verification phase.
+ - Only supported with ZAPI.
+ version_added: '21.4.0'
+ type: bool
+
+ start_ve_scan_old_data:
+ description:
+ - Specifies the operation to scan the file system to process all the existing data.
+ - Requires ONTAP 9.11.1 or later with REST.
+ version_added: '21.4.0'
+ type: bool
+
+ start_ve_qos_policy:
+ description:
+ - Specifies the QoS policy for the operation.
+ - Default is best-effort in ZAPI.
+ - Only supported with ZAPI.
+ choices: ['background', 'best-effort']
+ version_added: '21.4.0'
+ type: str
+
+ stop_ve_all_operations:
+ description:
+ - Specifies that all running and queued operations to be stopped.
+ - Only supported with ZAPI.
+ version_added: '21.4.0'
+ type: bool
+
+ storage_efficiency_mode:
+ description:
+ - Storage efficiency mode used by volume. This parameter is only supported on AFF platforms.
+ - Requires ONTAP 9.10.1 or later.
+ choices: ['default', 'efficient']
+ type: str
+ version_added: '21.14.0'
+
+notes:
+ - supports ZAPI and REST. REST requires ONTAP 9.6 or later.
+ - supports check mode.
+"""
+
+EXAMPLES = """
+ - name: Enable Volume efficiency
+ netapp.ontap.na_ontap_volume_efficiency:
+ state: present
+ vserver: "TESTSVM"
+ path: "/vol/test_sis"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Disable Volume efficiency test
+ netapp.ontap.na_ontap_volume_efficiency:
+ state: absent
+ vserver: "TESTSVM"
+ path: "/vol/test_sis"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Modify storage efficiency schedule with ZAPI.
+ netapp.ontap.na_ontap_volume_efficiency:
+ state: present
+ vserver: "TESTSVM"
+ path: "/vol/test_sis"
+ schedule: "mon-sun@0,1,23"
+ enable_compression: true
+ enable_inline_compression: true
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Start volume efficiency
+ netapp.ontap.na_ontap_volume_efficiency:
+ state: present
+ vserver: "TESTSVM"
+ path: "/vol/test_sis"
+ volume_efficiency: "start"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: Stop volume efficiency
+ netapp.ontap.na_ontap_volume_efficiency:
+ state: present
+ vserver: "TESTSVM"
+ path: "/vol/test_sis"
+ volume_efficiency: "stop"
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+ - name: modify volume efficiency with volume name in REST.
+ netapp.ontap.na_ontap_volume_efficiency:
+ state: present
+ vserver: "TESTSVM"
+ volume_name: "test_sis"
+ volume_efficiency: "stop"
+ enable_compression: True
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ https: true
+ validate_certs: false
+
+"""
+
+RETURN = """
+
+"""
+
+import copy
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapVolumeEfficiency(object):
+ """
+ Creates, Modifies and Disables a Volume Efficiency
+ """
+    def __init__(self):
+        """
+        Initialize the ONTAP Volume Efficiency class
+
+        Builds the argument spec, decides between REST and ZAPI, and
+        normalizes 'state' and 'volume_efficiency' into the values ONTAP reports.
+        """
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            path=dict(required=False, type='str'),
+            volume_name=dict(required=False, type='str'),
+            schedule=dict(required=False, type='str'),
+            policy=dict(required=False, type='str'),
+            enable_inline_compression=dict(required=False, type='bool'),
+            enable_compression=dict(required=False, type='bool'),
+            enable_inline_dedupe=dict(required=False, type='bool'),
+            enable_data_compaction=dict(required=False, type='bool'),
+            enable_cross_volume_inline_dedupe=dict(required=False, type='bool'),
+            enable_cross_volume_background_dedupe=dict(required=False, type='bool'),
+            storage_efficiency_mode=dict(required=False, choices=['default', 'efficient'], type='str'),
+            volume_efficiency=dict(required=False, choices=['start', 'stop'], type='str'),
+            start_ve_scan_all=dict(required=False, type='bool'),
+            start_ve_build_metadata=dict(required=False, type='bool'),
+            start_ve_delete_checkpoint=dict(required=False, type='bool'),
+            start_ve_queue_operation=dict(required=False, type='bool'),
+            start_ve_scan_old_data=dict(required=False, type='bool'),
+            start_ve_qos_policy=dict(required=False, choices=['background', 'best-effort'], type='str'),
+            stop_ve_all_operations=dict(required=False, type='bool')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True,
+            required_if=[('start_ve_scan_all', True, ['start_ve_scan_old_data'])],
+            required_one_of=[('path', 'volume_name')],
+            mutually_exclusive=[('policy', 'schedule'), ('path', 'volume_name')]
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # map the module 'state' onto the enabled/disabled strings ONTAP reports
+        if self.parameters['state'] == 'present':
+            self.parameters['enabled'] = 'enabled'
+        else:
+            self.parameters['enabled'] = 'disabled'
+
+        self.rest_api = netapp_utils.OntapRestAPI(self.module)
+        partially_supported_rest_properties = [
+            ['policy', (9, 7)], ['storage_efficiency_mode', (9, 10, 1)], ['path', (9, 9, 1)],
+            # make op_state active/idle is supported from 9.11.1 or later with REST.
+            ['volume_efficiency', (9, 11, 1)], ['start_ve_scan_old_data', (9, 11, 1)]
+        ]
+        unsupported_rest_properties = [
+            'schedule', 'start_ve_scan_all', 'start_ve_build_metadata', 'start_ve_delete_checkpoint',
+            'start_ve_queue_operation', 'start_ve_qos_policy', 'stop_ve_all_operations'
+        ]
+        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
+        self.volume_uuid = None
+        if 'volume_efficiency' in self.parameters:
+            # a running scanner is reported as 'running' by ZAPI but 'active' by REST
+            if self.parameters['volume_efficiency'] == 'start':
+                self.parameters['status'] = 'running' if not self.use_rest else 'active'
+            else:
+                self.parameters['status'] = 'idle'
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            self.validate_and_configure_zapi()
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def validate_and_configure_zapi(self):
+        """Reject REST-only options and normalize parameters for the ZAPI code path."""
+        if self.parameters.get('storage_efficiency_mode'):
+            self.module.fail_json(msg="Error: cannot set storage_efficiency_mode in ZAPI")
+        # set default value for ZAPI like before as REST currently not support this option.
+        if not self.parameters.get('start_ve_qos_policy'):
+            self.parameters['start_ve_qos_policy'] = 'best-effort'
+        if self.parameters.get('volume_name'):
+            # ZAPI addresses the volume by path; derive the path from the volume name
+            self.parameters['path'] = '/vol/' + self.parameters['volume_name']
+            self.module.warn("ZAPI requires '/vol/' present in the volume path, updated path: %s" % self.parameters['path'])
+
+    def get_volume_efficiency(self):
+        """
+        get the storage efficiency for a given path
+        :return: dict of sis if exist, None if not
+        """
+
+        return_value = None
+
+        if self.use_rest:
+            # REST: find the volume either by efficiency path or by name
+            api = 'storage/volumes'
+            query = {'svm.name': self.parameters['vserver'], 'fields': 'uuid,efficiency'}
+            if self.parameters.get('path'):
+                query['efficiency.volume_path'] = self.parameters['path']
+            else:
+                query['name'] = self.parameters['volume_name']
+            record, error = rest_generic.get_one_record(self.rest_api, api, query)
+            if error:
+                path_or_volume = self.parameters.get('path') or self.parameters.get('volume_name')
+                self.module.fail_json(msg='Error getting volume efficiency for path %s on vserver %s: %s' % (
+                    path_or_volume, self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+            if record:
+                # normalize the REST record to the same keys as the ZAPI branch below
+                return_value = self.format_rest_record(record)
+            return return_value
+
+        else:
+
+            # ZAPI: query sis status by volume path
+            sis_get_iter = netapp_utils.zapi.NaElement('sis-get-iter')
+            sis_status_info = netapp_utils.zapi.NaElement('sis-status-info')
+            sis_status_info.add_new_child('path', self.parameters['path'])
+            query = netapp_utils.zapi.NaElement('query')
+            query.add_child_elem(sis_status_info)
+            sis_get_iter.add_child_elem(query)
+            try:
+                result = self.server.invoke_successfully(sis_get_iter, True)
+                if result.get_child_by_name('attributes-list'):
+                    sis_status_attributes = result['attributes-list']['sis-status-info']
+                    return_value = {
+                        'path': sis_status_attributes['path'],
+                        'enabled': sis_status_attributes['state'],
+                        'status': sis_status_attributes['status'],
+                        'schedule': sis_status_attributes['schedule'],
+                        'enable_inline_compression': self.na_helper.get_value_for_bool(
+                            True, sis_status_attributes.get_child_content('is-inline-compression-enabled')
+                        ),
+                        'enable_compression': self.na_helper.get_value_for_bool(True, sis_status_attributes.get_child_content('is-compression-enabled')),
+                        'enable_inline_dedupe': self.na_helper.get_value_for_bool(True, sis_status_attributes.get_child_content('is-inline-dedupe-enabled')),
+                        'enable_data_compaction': self.na_helper.get_value_for_bool(
+                            True, sis_status_attributes.get_child_content('is-data-compaction-enabled')
+                        ),
+                        'enable_cross_volume_inline_dedupe': self.na_helper.get_value_for_bool(
+                            True, sis_status_attributes.get_child_content('is-cross-volume-inline-dedupe-enabled')
+                        ),
+                        'enable_cross_volume_background_dedupe': self.na_helper.get_value_for_bool(
+                            True, sis_status_attributes.get_child_content('is-cross-volume-background-dedupe-enabled')
+                        )
+                    }
+
+                    # '-' means no policy is assigned
+                    if sis_status_attributes.get_child_by_name('policy'):
+                        return_value['policy'] = sis_status_attributes['policy']
+                    else:
+                        return_value['policy'] = '-'
+
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error getting volume efficiency for path %s on vserver %s: %s' % (
+                    self.parameters['path'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
+                )
+            return return_value
+
+    def enable_volume_efficiency(self):
+        """
+        Enables Volume efficiency for a given volume by path
+
+        ZAPI only; the REST path enables efficiency through a PATCH in
+        modify_volume_efficiency().
+        """
+        sis_enable = netapp_utils.zapi.NaElement("sis-enable")
+        sis_enable.add_new_child("path", self.parameters['path'])
+
+        try:
+            self.server.invoke_successfully(sis_enable, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error enabling storage efficiency for path %s on vserver %s: %s' % (self.parameters['path'],
+                                  self.parameters['vserver'], to_native(error)), exception=traceback.format_exc())
+
+    def disable_volume_efficiency(self):
+        """
+        Disables Volume efficiency for a given volume by path
+
+        ZAPI only; the REST path disables efficiency through a PATCH in
+        modify_volume_efficiency().
+        """
+        sis_disable = netapp_utils.zapi.NaElement("sis-disable")
+        sis_disable.add_new_child("path", self.parameters['path'])
+
+        try:
+            self.server.invoke_successfully(sis_disable, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error disabling storage efficiency for path %s: %s' % (self.parameters['path'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def modify_volume_efficiency(self, body=None):
+        """
+        Modifies volume efficiency settings for a given volume by path
+
+        :param body: REST PATCH body built by form_modify_body_rest(); ignored
+                     by the ZAPI branch, which reads self.parameters directly.
+        """
+
+        if self.use_rest:
+            # nothing to change
+            if not body:
+                return
+            dummy, error = rest_generic.patch_async(self.rest_api, 'storage/volumes', self.volume_uuid, body)
+            if error:
+                # rewrite two known cryptic REST errors into actionable messages
+                if 'Unexpected argument "storage_efficiency_mode".' in error:
+                    error = "cannot modify storage_efficiency mode in non AFF platform."
+                if 'not authorized' in error:
+                    error = "%s user is not authorized to modify volume efficiency" % self.parameters.get('username')
+                self.module.fail_json(msg='Error in volume/efficiency patch: %s' % error)
+
+        else:
+
+            # ZAPI: only forward options the user actually supplied
+            sis_config_obj = netapp_utils.zapi.NaElement("sis-set-config")
+            sis_config_obj.add_new_child('path', self.parameters['path'])
+            if 'schedule' in self.parameters:
+                sis_config_obj.add_new_child('schedule', self.parameters['schedule'])
+            if 'policy' in self.parameters:
+                sis_config_obj.add_new_child('policy-name', self.parameters['policy'])
+            if 'enable_compression' in self.parameters:
+                sis_config_obj.add_new_child('enable-compression', self.na_helper.get_value_for_bool(False, self.parameters['enable_compression']))
+            if 'enable_inline_compression' in self.parameters:
+                sis_config_obj.add_new_child('enable-inline-compression', self.na_helper.get_value_for_bool(
+                    False, self.parameters['enable_inline_compression'])
+                )
+            if 'enable_inline_dedupe' in self.parameters:
+                sis_config_obj.add_new_child('enable-inline-dedupe', self.na_helper.get_value_for_bool(
+                    False, self.parameters['enable_inline_dedupe'])
+                )
+            if 'enable_data_compaction' in self.parameters:
+                sis_config_obj.add_new_child('enable-data-compaction', self.na_helper.get_value_for_bool(
+                    False, self.parameters['enable_data_compaction'])
+                )
+            if 'enable_cross_volume_inline_dedupe' in self.parameters:
+                sis_config_obj.add_new_child('enable-cross-volume-inline-dedupe', self.na_helper.get_value_for_bool(
+                    False, self.parameters['enable_cross_volume_inline_dedupe'])
+                )
+            if 'enable_cross_volume_background_dedupe' in self.parameters:
+                sis_config_obj.add_new_child('enable-cross-volume-background-dedupe', self.na_helper.get_value_for_bool(
+                    False, self.parameters['enable_cross_volume_background_dedupe'])
+                )
+
+            try:
+                self.server.invoke_successfully(sis_config_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error modifying storage efficiency for path %s: %s' % (self.parameters['path'], to_native(error)),
+                                      exception=traceback.format_exc())
+
+    def start_volume_efficiency(self):
+        """
+        Starts volume efficiency for a given flex volume by path
+
+        ZAPI only; REST starts the scanner via efficiency.scanner.state in the PATCH body.
+        Optional start_ve_* parameters are forwarded only when supplied.
+        """
+
+        sis_start = netapp_utils.zapi.NaElement('sis-start')
+        sis_start.add_new_child('path', self.parameters['path'])
+
+        if 'start_ve_scan_all' in self.parameters:
+            sis_start.add_new_child('scan-all', self.na_helper.get_value_for_bool(
+                False, self.parameters['start_ve_scan_all'])
+            )
+        if 'start_ve_build_metadata' in self.parameters:
+            sis_start.add_new_child('build-metadata', self.na_helper.get_value_for_bool(
+                False, self.parameters['start_ve_build_metadata'])
+            )
+        if 'start_ve_delete_checkpoint' in self.parameters:
+            sis_start.add_new_child('delete-checkpoint', self.na_helper.get_value_for_bool(
+                False, self.parameters['start_ve_delete_checkpoint'])
+            )
+        if 'start_ve_queue_operation' in self.parameters:
+            sis_start.add_new_child('queue-operation', self.na_helper.get_value_for_bool(
+                False, self.parameters['start_ve_queue_operation'])
+            )
+        if 'start_ve_scan_old_data' in self.parameters:
+            sis_start.add_new_child('scan', self.na_helper.get_value_for_bool(
+                False, self.parameters['start_ve_scan_old_data'])
+            )
+        if 'start_ve_qos_policy' in self.parameters:
+            sis_start.add_new_child('qos-policy', self.parameters['start_ve_qos_policy'])
+
+        try:
+            self.server.invoke_successfully(sis_start, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error starting storage efficiency for path %s on vserver %s: %s' % (self.parameters['path'],
+                                  self.parameters['vserver'], to_native(error)), exception=traceback.format_exc())
+
+    def stop_volume_efficiency(self):
+        """
+        Stops volume efficiency for a given flex volume by path
+
+        ZAPI only; REST stops the scanner via efficiency.scanner.state in the PATCH body.
+        """
+        sis_stop = netapp_utils.zapi.NaElement('sis-stop')
+        sis_stop.add_new_child('path', self.parameters['path'])
+        if 'stop_ve_all_operations' in self.parameters:
+            sis_stop.add_new_child('all-operations', self.na_helper.get_value_for_bool(
+                False, self.parameters['stop_ve_all_operations'])
+            )
+
+        try:
+            self.server.invoke_successfully(sis_stop, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error stopping storage efficiency for path %s on vserver %s: %s' % (self.parameters['path'],
+                                  self.parameters['vserver'], to_native(error)), exception=traceback.format_exc())
+
+    def format_rest_record(self, record):
+        """
+        returns current efficiency values.
+
+        Converts a REST storage/volumes record into the flat dict used by the
+        ZAPI branch: the REST background/inline/both/none enums are expanded
+        into individual booleans so get_modified_attributes can compare them
+        against the module's bool options. Also captures the volume UUID for
+        later PATCH calls.
+        """
+        self.volume_uuid = record['uuid']
+        return_value = {
+            'enabled': self.na_helper.safe_get(record, ['efficiency', 'state']),
+            'status': self.na_helper.safe_get(record, ['efficiency', 'op_state']),
+            'enable_compression': self.na_helper.safe_get(record, ['efficiency', 'compression']),
+            'enable_inline_dedupe': self.na_helper.safe_get(record, ['efficiency', 'dedupe']),
+            'enable_data_compaction': self.na_helper.safe_get(record, ['efficiency', 'compaction']),
+            'enable_cross_volume_inline_dedupe': self.na_helper.safe_get(record, ['efficiency', 'cross_volume_dedupe'])
+        }
+        if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+            # efficiency is enabled if dedupe is either background or both.
+            # it's disabled if both dedupe and compression is none.
+            dedupe = self.na_helper.safe_get(record, ['efficiency', 'dedupe'])
+            if dedupe in ['background', 'both']:
+                return_value['enabled'] = 'enabled'
+            elif dedupe == 'none' and self.na_helper.safe_get(record, ['efficiency', 'compression']) == 'none':
+                return_value['enabled'] = 'disabled'
+        # only report fields the user asked about, so they don't show up as spurious diffs
+        if self.parameters.get('storage_efficiency_mode'):
+            return_value['storage_efficiency_mode'] = self.na_helper.safe_get(record, ['efficiency', 'storage_efficiency_mode'])
+        if self.parameters.get('policy'):
+            return_value['policy'] = self.na_helper.safe_get(record, ['efficiency', 'policy', 'name'])
+        # expand the REST enums into the module's boolean options
+        compression, inline_compression, cross_volume_inline_dedupe, cross_volume_background_dedupe = False, False, False, False
+        inline_dedupe, compaction = False, False
+        if return_value['enable_compression'] in ['background', 'both']:
+            compression = True
+        if return_value['enable_compression'] in ['inline', 'both']:
+            inline_compression = True
+        if return_value['enable_cross_volume_inline_dedupe'] in ['inline', 'both']:
+            cross_volume_inline_dedupe = True
+        if return_value['enable_cross_volume_inline_dedupe'] in ['background', 'both']:
+            cross_volume_background_dedupe = True
+        if return_value['enable_inline_dedupe'] in ['inline', 'both']:
+            inline_dedupe = True
+        if return_value['enable_data_compaction'] == 'inline':
+            compaction = True
+        return_value['enable_compression'] = compression
+        return_value['enable_inline_compression'] = inline_compression
+        return_value['enable_cross_volume_inline_dedupe'] = cross_volume_inline_dedupe
+        return_value['enable_cross_volume_background_dedupe'] = cross_volume_background_dedupe
+        return_value['enable_inline_dedupe'] = inline_dedupe
+        return_value['enable_data_compaction'] = compaction
+        return return_value
+
+    def form_modify_body_rest(self, modify, current):
+        """
+        Build the REST PATCH body from the computed modify dict.
+
+        :param modify: attributes that differ from the desired state
+        :param current: current efficiency settings (booleans from format_rest_record)
+        :return: dict suitable for PATCH on storage/volumes
+        """
+        # disable volume efficiency requires dedupe and compression set to 'none'.
+        if modify.get('enabled') == 'disabled':
+            return {'efficiency': {'dedupe': 'none', 'compression': 'none', 'compaction': 'none', 'cross_volume_dedupe': 'none'}}
+        body = {}
+        if modify.get('enabled') == 'enabled':
+            body['efficiency.dedupe'] = 'background'
+        # there are cases where ZAPI allows setting cross_volume_background_dedupe and inline_dedupe and REST not.
+        if 'enable_compression' in modify or 'enable_inline_compression' in modify:
+            body['efficiency.compression'] = self.derive_efficiency_type(modify.get('enable_compression'), modify.get('enable_inline_compression'),
+                                                                         current.get('enable_compression'), current.get('enable_inline_compression'))
+
+        if 'enable_cross_volume_background_dedupe' in modify or 'enable_cross_volume_inline_dedupe' in modify:
+            body['efficiency.cross_volume_dedupe'] = self.derive_efficiency_type(modify.get('enable_cross_volume_background_dedupe'),
+                                                                                 modify.get('enable_cross_volume_inline_dedupe'),
+                                                                                 current.get('enable_cross_volume_background_dedupe'),
+                                                                                 current.get('enable_cross_volume_inline_dedupe'))
+
+        if modify.get('enable_data_compaction'):
+            body['efficiency.compaction'] = 'inline'
+        elif modify.get('enable_data_compaction') is False:
+            body['efficiency.compaction'] = 'none'
+
+        if modify.get('enable_inline_dedupe'):
+            body['efficiency.dedupe'] = 'both'
+        elif modify.get('enable_inline_dedupe') is False:
+            body['efficiency.dedupe'] = 'background'
+        # REST changes policy to default, so use policy in params.
+        if self.parameters.get('policy'):
+            body['efficiency.policy.name'] = self.parameters['policy']
+        if modify.get('storage_efficiency_mode'):
+            body['storage_efficiency_mode'] = modify['storage_efficiency_mode']
+
+        # start/stop vol efficiency
+        if modify.get('status'):
+            body['efficiency.scanner.state'] = modify['status']
+        if 'start_ve_scan_old_data' in self.parameters:
+            body['efficiency.scanner.scan_old_data'] = self.parameters['start_ve_scan_old_data']
+        return body
+
+    @staticmethod
+    def derive_efficiency_type(desired_background, desired_inline, current_background, current_inline):
+        """
+        Map two desired booleans (background / inline) onto the single REST enum.
+
+        A None desired value means 'not requested' and falls back to the current
+        setting. Resulting enum: both True -> 'both', background only ->
+        'background', inline only -> 'inline', both False -> 'none'.
+        Returns None when neither value is requested nor known (callers only
+        invoke this when at least one desired value is set).
+        """
+        if ((desired_background and desired_inline) or
+                (desired_background and desired_inline is None and current_inline) or
+                (desired_background is None and desired_inline and current_background)):
+            return 'both'
+        elif ((desired_background and desired_inline is False) or
+                (desired_background and desired_inline is None and not current_inline) or
+                (desired_background is None and desired_inline is False and current_background)):
+            return 'background'
+        elif ((desired_background is False and desired_inline) or
+                (desired_background is False and desired_inline is None and current_inline) or
+                (desired_background is None and desired_inline and not current_background)):
+            return 'inline'
+        elif ((desired_background is False and desired_inline is False) or
+                (desired_background is False and desired_inline is None and not current_inline) or
+                (desired_background is None and desired_inline is False and not current_background)):
+            return 'none'
+
+    def validate_efficiency_compression(self, modify):
+        """
+        validate:
+          - no efficiency keys are set when state is disabled.
+
+        :param modify: computed modify dict, used only to tailor the error message
+        """
+        if self.parameters['enabled'] == 'disabled':
+            # if any of the keys are set, efficiency gets enabled, error out if any of eff keys are set and state is absent.
+            unsupported_enable_eff_keys = [
+                'enable_compression', 'enable_inline_compression', 'enable_inline_dedupe',
+                'enable_cross_volume_inline_dedupe', 'enable_cross_volume_background_dedupe', 'enable_data_compaction'
+            ]
+            used_unsupported_enable_eff_keys = [key for key in unsupported_enable_eff_keys if self.parameters.get(key)]
+            if used_unsupported_enable_eff_keys:
+                disable_str = 'when volume efficiency already disabled, retry with state: present'
+                if modify.get('enabled') == 'disabled':
+                    disable_str = 'when trying to disable volume efficiency'
+                self.module.fail_json(msg="Error: cannot set compression keys: %s %s" % (used_unsupported_enable_eff_keys, disable_str))
+
+    def apply(self):
+        """
+        Run the module: enable/disable, modify, and start/stop volume efficiency.
+
+        REST handles everything in a single PATCH; ZAPI sequences the
+        individual enable/disable, set-config and start/stop calls.
+        """
+        current = self.get_volume_efficiency()
+        ve_status = None
+
+        # If the volume efficiency does not exist for a given path to create this current is set to disabled
+        # this is for ONTAP systems that do not enable efficiency by default.
+        if current is None:
+            current = {'enabled': 'disabled'}
+        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+        # keep a copy for reporting: 'modify' itself is consumed below
+        to_modify = copy.deepcopy(modify)
+        self.validate_efficiency_compression(modify)
+        if self.na_helper.changed and not self.module.check_mode:
+            # enable/disable, start/stop & modify vol efficiency handled in REST PATCH.
+            if self.use_rest:
+                self.modify_volume_efficiency(self.form_modify_body_rest(modify, current))
+            else:
+                if 'enabled' in modify:
+                    if modify['enabled'] == 'enabled':
+                        self.enable_volume_efficiency()
+                        # Checking to see if there are any additional parameters that need to be set after
+                        # enabling volume efficiency required for Non-AFF systems
+                        current = self.get_volume_efficiency()
+                        modify = self.na_helper.get_modified_attributes(current, self.parameters)
+                        to_modify['modify_after_enable'] = copy.deepcopy(modify)
+                    elif modify['enabled'] == 'disabled':
+                        self.disable_volume_efficiency()
+                # key may not exist anymore, if modify was refreshed above after enabling efficiency
+                modify.pop('enabled', None)
+
+                if 'status' in modify:
+                    ve_status = modify['status']
+                    del modify['status']
+
+                # Removed the enabled and volume efficiency status,
+                # if there is anything remaining in the modify dict we need to modify.
+                if modify:
+                    self.modify_volume_efficiency()
+
+                if ve_status == 'running':
+                    self.start_volume_efficiency()
+                elif ve_status == 'idle':
+                    self.stop_volume_efficiency()
+
+        result = netapp_utils.generate_result(self.na_helper.changed, modify=to_modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """
+    Enables, modifies or disables NetApp Ontap volume efficiency
+    """
+    # module entry point: apply() exits via exit_json/fail_json
+    obj = NetAppOntapVolumeEfficiency()
+    obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py
new file mode 100644
index 000000000..272d8bf92
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_ontap_volume_snaplock
+
+short_description: NetApp ONTAP manage volume snaplock retention.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+version_added: '20.2.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Modifies the snaplock retention of volumes on NetApp ONTAP.
+options:
+ name:
+ description:
+ - The name of the volume to manage.
+ type: str
+ required: true
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ type: str
+ required: true
+
+ default_retention_period:
+ description:
+ - Specifies the default retention period that will be applied.
+ - The format is "<number> <units>" for example "10 days", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ - If this option is specified as "max", then maximum_retention_period will be used as the default retention period.
+ type: str
+
+ autocommit_period:
+ description:
+ - Specifies the autocommit-period for the snaplock volume.
+ - The format is "<number> <units>" for example "8 hours", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ type: str
+
+ is_volume_append_mode_enabled:
+ description:
+ - Specifies if the volume append mode must be enabled or disabled.
+ - It can be modified only when the volume is not mounted and does not have any data or Snapshot copy.
+ - Volume append mode is not supported on SnapLock audit log volumes.
+ - When it is enabled, all files created with write permissions on the volume will be WORM appendable files by default.
+ - All WORM appendable files not modified for a period greater than the autocommit period of the volume are also committed to WORM read-only state.
+ type: bool
+
+ maximum_retention_period:
+ description:
+ - Specifies the allowed maximum retention period that will be applied.
+ - The format is "<number> <units>" for example "2 years", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ type: str
+
+ minimum_retention_period:
+ description:
+ - Specifies the allowed minimum retention period that will be applied.
+ - The format is "<number> <units>" for example "1 days", the following units are valid
+ - "seconds"
+ - "minutes"
+ - "hours"
+ - "days"
+ - "months"
+ - "years"
+ type: str
+
+notes:
+ - supports ZAPI only.
+ - for REST, snaplock is supported in na_ontap_volume starting with 21.18.0.
+'''
+
+EXAMPLES = """
+ - name: Set volume snaplock
+ na_ontap_volume_snaplock:
+ vserver: svm
+ name: ansibleVolume
+ default_retention_period: "5 days"
+ minimum_retention_period: "0 years"
+ maximum_retention_period: "10 days"
+ is_volume_append_mode_enabled: False
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapVolumeSnaplock(object):
    '''Manage the SnapLock attributes of an ONTAP volume (ZAPI only).'''

    def __init__(self):
        '''Build the argument spec, parse parameters and open the ZAPI connection.'''
        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
            default_retention_period=dict(required=False, type='str'),
            maximum_retention_period=dict(required=False, type='str'),
            minimum_retention_period=dict(required=False, type='str'),
            autocommit_period=dict(required=False, type='str'),
            is_volume_append_mode_enabled=dict(required=False, type='bool'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # notify users that na_ontap_volume supersedes this module (REST snaplock lives there)
        self.na_helper.module_replaces('na_ontap_volume', self.module)

        if not HAS_NETAPP_LIB:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_volume_snaplock_attrs(self):
        """
        Fetch the current SnapLock attributes of the volume.

        :return: dict of snaplock attributes, or None when ONTAP reports none
        """
        request = netapp_utils.zapi.NaElement('volume-get-snaplock-attrs')
        request.add_new_child('volume', self.parameters['name'])

        try:
            result = self.server.invoke_successfully(request, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching snaplock attributes for volume %s : %s'
                                      % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

        if not result.get_child_by_name('snaplock-attrs'):
            return None
        attrs = result['snaplock-attrs']['snaplock-attrs-info']
        return {
            'autocommit_period': attrs['autocommit-period'],
            'default_retention_period': attrs['default-retention-period'],
            'is_volume_append_mode_enabled': self.na_helper.get_value_for_bool(True, attrs['is-volume-append-mode-enabled']),
            'maximum_retention_period': attrs['maximum-retention-period'],
            'minimum_retention_period': attrs['minimum-retention-period'],
        }

    def set_volume_snaplock_attrs(self, modify):
        '''Push the modified SnapLock attributes to ONTAP through ZAPI.'''
        request = netapp_utils.zapi.NaElement('volume-set-snaplock-attrs')
        request.add_new_child('volume', self.parameters['name'])
        # one (option, zapi element) pair per modifiable attribute, in the original emission order
        for option, zapi_name in (
                ('autocommit_period', 'autocommit-period'),
                ('default_retention_period', 'default-retention-period'),
                ('is_volume_append_mode_enabled', 'is-volume-append-mode-enabled'),
                ('maximum_retention_period', 'maximum-retention-period'),
                ('minimum_retention_period', 'minimum-retention-period'),
        ):
            if modify.get(option) is None:
                continue
            value = self.parameters[option]
            if option == 'is_volume_append_mode_enabled':
                # booleans must be serialized the way ZAPI expects
                value = self.na_helper.get_value_for_bool(False, value)
            request.add_new_child(zapi_name, value)
        try:
            self.server.invoke_successfully(request, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error setting snaplock attributes for volume %s : %s'
                                      % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        '''Compute the delta against ONTAP and apply it (honoring check mode).'''
        current = self.get_volume_snaplock_attrs()
        modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed and not self.module.check_mode:
            self.set_volume_snaplock_attrs(modify)
        self.module.exit_json(**netapp_utils.generate_result(self.na_helper.changed, modify=modify))
+
+
def main():
    '''Set volume snaplock attributes from playbook'''
    NetAppOntapVolumeSnaplock().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py
new file mode 100644
index 000000000..e089d3b8a
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_vscan
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+module: na_ontap_vscan
+short_description: NetApp ONTAP Vscan enable/disable.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+notes:
+- on demand task, on_access_policy and scanner_pools must be set up before running this module
+description:
+- Enable and Disable Vscan
+options:
+ enable:
+ description:
+    - Whether to enable or disable Vscan
+ type: bool
+ default: True
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Enable Vscan
+ na_ontap_vscan:
+ enable: True
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: trident_svm
+
+ - name: Disable Vscan
+ na_ontap_vscan:
+ enable: False
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: trident_svm
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
class NetAppOntapVscan(object):
    '''Enable or disable Vscan on a vserver.

    Uses the REST API for ONTAP 9.6 and later, and falls back to ZAPI otherwise.
    '''
    def __init__(self):
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            enable=dict(type='bool', default=True),
            vserver=dict(required=True, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # API should be used for ONTAP 9.6 or higher, Zapi for lower version
        self.rest_api = OntapRestAPI(self.module)
        if self.rest_api.is_rest():
            self.use_rest = True
        else:
            if HAS_NETAPP_LIB is False:
                self.module.fail_json(msg="the python NetApp-Lib module is required")
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_vscan(self):
        """Return the current Vscan status record.

        REST: returns the first matching record, failing cleanly when the vserver
        has no Vscan record (the original code raised an unhandled IndexError here).
        ZAPI: returns the vscan-status-info element, or None when no record matches.
        """
        if self.use_rest:
            params = {'fields': 'svm,enabled',
                      "svm.name": self.parameters['vserver']}
            api = "protocols/vscan"
            message, error = self.rest_api.get(api, params)
            if error:
                self.module.fail_json(msg=error)
            # bug fix: guard against an empty record list instead of raising IndexError
            if not message.get('records'):
                self.module.fail_json(msg='Error getting Vscan info for Vserver %s: no record found' % self.parameters['vserver'])
            return message['records'][0]
        else:
            vscan_status_iter = netapp_utils.zapi.NaElement('vscan-status-get-iter')
            vscan_status_info = netapp_utils.zapi.NaElement('vscan-status-info')
            vscan_status_info.add_new_child('vserver', self.parameters['vserver'])
            query = netapp_utils.zapi.NaElement('query')
            query.add_child_elem(vscan_status_info)
            vscan_status_iter.add_child_elem(query)
            try:
                result = self.server.invoke_successfully(vscan_status_iter, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error getting Vscan info for Vserver %s: %s' %
                                          (self.parameters['vserver'], to_native(error)),
                                      exception=traceback.format_exc())
            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
                return result.get_child_by_name('attributes-list').get_child_by_name('vscan-status-info')

    def enable_vscan(self, uuid=None):
        """Set the Vscan enabled state; uuid identifies the SVM for the REST PATCH."""
        if self.use_rest:
            params = {"svm.name": self.parameters['vserver']}
            data = {"enabled": self.parameters['enable']}
            api = "protocols/vscan/" + uuid
            dummy, error = self.rest_api.patch(api, data, params)
            if error is not None:
                self.module.fail_json(msg=error)
        else:
            vscan_status_obj = netapp_utils.zapi.NaElement("vscan-status-modify")
            vscan_status_obj.add_new_child('is-vscan-enabled', str(self.parameters['enable']))
            try:
                self.server.invoke_successfully(vscan_status_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg="Error Enable/Disabling Vscan: %s" % to_native(error), exception=traceback.format_exc())

    def apply(self):
        """Compare current and desired state, toggling Vscan when they differ (check_mode aware)."""
        changed = False
        current = self.get_vscan()
        if self.use_rest:
            if current['enabled'] != self.parameters['enable']:
                if not self.module.check_mode:
                    self.enable_vscan(current['svm']['uuid'])
                changed = True
        else:
            # bug fix: the original dereferenced None when ZAPI returned no record
            if current is None:
                self.module.fail_json(msg='Error getting Vscan info for Vserver %s: no record found' % self.parameters['vserver'])
            if current.get_child_content('is-vscan-enabled') != str(self.parameters['enable']).lower():
                if not self.module.check_mode:
                    self.enable_vscan()
                changed = True
        self.module.exit_json(changed=changed)
+
+
def main():
    """
    Execute action from playbook
    """
    NetAppOntapVscan().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py
new file mode 100644
index 000000000..08da1fe7e
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_vscan_on_access_policy
+short_description: NetApp ONTAP Vscan on access policy configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Configure on access policy for Vscan (virus scan)
+options:
+ state:
+ description:
+ - Whether a Vscan on Access policy is present or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+
+ policy_name:
+ description:
+ - The name of the policy
+ required: true
+ type: str
+
+ file_ext_to_exclude:
+ description:
+ - File extensions for which On-Access scanning must not be performed.
+ type: list
+ elements: str
+
+ file_ext_to_include:
+ description:
+ - File extensions for which On-Access scanning is considered. The default value is '*', which means that all files are considered for scanning except
+ - those which are excluded from scanning.
+ type: list
+ elements: str
+
+ filters:
+ description:
+ - A list of filters which can be used to define the scope of the On-Access policy more precisely. The filters can be added in any order. Possible values
+ - scan_ro_volume Enable scans for read-only volume,
+ - scan_execute_access Scan only files opened with execute-access (CIFS only).
+ - deprecated with REST, use C(scan_readonly_volumes) or C(only_execute_access).
+ type: list
+ elements: str
+
+ is_scan_mandatory:
+ description:
+ - Specifies whether access to a file is allowed if there are no external virus-scanning servers available for virus scanning.
+ - If not specified, default value is False in ZAPI.
+ type: bool
+
+ max_file_size:
+ description:
+ - Max file-size (in bytes) allowed for scanning. The default value of 2147483648 (2GB) is taken if not provided at the time of creating a policy.
+ type: int
+
+ paths_to_exclude:
+ description:
+ - File paths for which On-Access scanning must not be performed.
+ type: list
+ elements: str
+
+ scan_files_with_no_ext:
+ description:
+ - Specifies whether files without any extension are considered for scanning or not.
+ - If not specified, default value is True in ZAPI.
+ type: bool
+
+ policy_status:
+ description:
+ - Status for the created policy
+ type: bool
+ version_added: 20.8.0
+
+ scan_readonly_volumes:
+ description:
+ - Specifies whether or not read-only volume can be scanned.
+ - If not specified, default value is False in creating policy.
+ type: bool
+ version_added: 21.20.0
+
+ only_execute_access:
+ description:
+ - Scan only files opened with execute-access.
+ - If not specified, default value is False in creating policy.
+ type: bool
+ version_added: 21.20.0
+'''
+
+EXAMPLES = """
+ - name: Create Vscan On Access Policy
+ netapp.ontap.na_ontap_vscan_on_access_policy:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+ file_ext_to_exclude: ['exe', 'yml']
+ - name: Create Vscan On Access Policy with Policy Status enabled
+ netapp.ontap.na_ontap_vscan_on_access_policy:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+ file_ext_to_exclude: ['exe', 'yml']
+ policy_status: True
+ - name: modify Vscan on Access Policy
+ netapp.ontap.na_ontap_vscan_on_access_policy:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+ file_ext_to_exclude: ['exe', 'yml', 'py']
+ - name: Delete On Access Policy
+ netapp.ontap.na_ontap_vscan_on_access_policy:
+ state: absent
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ policy_name: carchi_policy
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver
+
+
+class NetAppOntapVscanOnAccessPolicy:
+ """
+ Create/Modify/Delete a Vscan OnAccess policy
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ file_ext_to_exclude=dict(required=False, type='list', elements='str'),
+ file_ext_to_include=dict(required=False, type='list', elements='str'),
+ filters=dict(required=False, type='list', elements='str'),
+ is_scan_mandatory=dict(required=False, type='bool'),
+ max_file_size=dict(required=False, type="int"),
+ paths_to_exclude=dict(required=False, type='list', elements='str'),
+ scan_files_with_no_ext=dict(required=False, type='bool'),
+ policy_status=dict(required=False, type='bool'),
+ scan_readonly_volumes=dict(required=False, type='bool'),
+ only_execute_access=dict(required=False, type='bool')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['filters', 'scan_readonly_volumes'],
+ ['filters', 'only_execute_access']
+ ]
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # Set up Rest API
+ self.rest_api = OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+ self.svm_uuid = None
+
+ # validate list options not contains '' in it in REST.
+ if self.use_rest:
+ self.validate_options()
+
+ # file_ext_to_include cannot be empty in both ZAPI and REST.
+ if 'file_ext_to_include' in self.parameters and len(self.parameters['file_ext_to_include']) < 1:
+ self.module.fail_json(msg="Error: The value for file_ext_include cannot be empty")
+
+ # map filters options to rest equivalent options.
+ if self.use_rest and 'filters' in self.parameters:
+ self.parameters['only_execute_access'], self.parameters['scan_readonly_volumes'] = False, False
+ for filter in self.parameters['filters']:
+ if filter.lower() not in ['scan_execute_access', 'scan_ro_volume']:
+ self.module.fail_json(msg="Error: Invalid value %s specified for filters %s" % filter)
+ if filter.lower() == 'scan_execute_access':
+ self.parameters['only_execute_access'] = True
+ if filter.lower() == 'scan_ro_volume':
+ self.parameters['scan_readonly_volumes'] = True
+
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+ self.set_playbook_zapi_key_map()
+
+ # set default value for is_scan_mandatory and scan_files_with_no_ext if not set.
+ if self.parameters.get('is_scan_mandatory') is None:
+ self.parameters['is_scan_mandatory'] = False
+ if self.parameters.get('scan_files_with_no_ext') is None:
+ self.parameters['scan_files_with_no_ext'] = True
+
+ # form filters from REST options only_execute_access and scan_readonly_volumes.
+ filters = []
+ if self.parameters.get('only_execute_access'):
+ filters.append('scan_execute_access')
+ if self.parameters.get('scan_readonly_volumes'):
+ filters.append('scan_ro_volume')
+ if filters:
+ self.parameters['filters'] = filters
+
+ def validate_options(self):
+ list_options = ['filters', 'file_ext_to_exclude', 'file_ext_to_include', 'paths_to_exclude']
+ invalid_options = []
+ for option in list_options:
+ if option in self.parameters:
+ for value in self.parameters[option]:
+ # '' is an invalid value.
+ if len(value.strip()) < 1:
+ invalid_options.append(option)
+ if invalid_options:
+ self.module.fail_json(msg="Error: Invalid value specified for option(s): %s" % ', '.join(invalid_options))
+
+ def set_playbook_zapi_key_map(self):
+ self.na_helper.zapi_int_keys = {
+ 'max_file_size': 'max-file-size'
+ }
+ self.na_helper.zapi_str_keys = {
+ 'vserver': 'vserver',
+ 'policy_name': 'policy-name'
+ }
+ self.na_helper.zapi_bool_keys = {
+ 'is_scan_mandatory': 'is-scan-mandatory',
+ 'policy_status': 'is-policy-enabled',
+ 'scan_files_with_no_ext': 'scan-files-with-no-ext'
+ }
+ self.na_helper.zapi_list_keys = {
+ 'file_ext_to_exclude': 'file-ext-to-exclude',
+ 'file_ext_to_include': 'file-ext-to-include',
+ 'paths_to_exclude': 'paths-to-exclude',
+ 'filters': 'filters'
+ }
+
+ def get_on_access_policy(self):
+ """
+ Return a Vscan on Access Policy
+ :return: None if there is no access policy, return the policy if there is
+ """
+ if self.use_rest:
+ return self.get_on_access_policy_rest()
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-get-iter')
+ access_policy_info = netapp_utils.zapi.NaElement('vscan-on-access-policy-info')
+ access_policy_info.add_new_child('policy-name', self.parameters['policy_name'])
+ access_policy_info.add_new_child('vserver', self.parameters['vserver'])
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(access_policy_info)
+ access_policy_obj.add_child_elem(query)
+ try:
+ result = self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error searching Vscan on Access Policy %s: %s' %
+ (self.parameters['policy_name'], to_native(error)), exception=traceback.format_exc())
+ return_value = {}
+ if result.get_child_by_name('num-records'):
+ if int(result.get_child_content('num-records')) == 1:
+ attributes_list = result.get_child_by_name('attributes-list')
+ vscan_info = attributes_list.get_child_by_name('vscan-on-access-policy-info')
+ for option, zapi_key in self.na_helper.zapi_int_keys.items():
+ return_value[option] = self.na_helper.get_value_for_int(from_zapi=True, value=vscan_info.get_child_content(zapi_key))
+ for option, zapi_key in self.na_helper.zapi_bool_keys.items():
+ return_value[option] = self.na_helper.get_value_for_bool(from_zapi=True, value=vscan_info.get_child_content(zapi_key))
+ for option, zapi_key in self.na_helper.zapi_list_keys.items():
+ return_value[option] = self.na_helper.get_value_for_list(from_zapi=True, zapi_parent=vscan_info.get_child_by_name(zapi_key))
+ for option, zapi_key in self.na_helper.zapi_str_keys.items():
+ return_value[option] = vscan_info.get_child_content(zapi_key)
+ return return_value
+ elif int(result.get_child_content('num-records')) > 1:
+ self.module.fail_json(msg='Mutiple Vscan on Access Policy matching %s:' % self.parameters['policy_name'])
+ return None
+
+ def create_on_access_policy(self):
+ """
+ Create a Vscan on Access policy
+ :return: none
+ """
+ if self.use_rest:
+ return self.create_on_access_policy_rest()
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-create')
+ access_policy_obj.add_new_child('policy-name', self.parameters['policy_name'])
+ access_policy_obj.add_new_child('protocol', 'cifs')
+ access_policy_obj = self._fill_in_access_policy(access_policy_obj)
+
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating Vscan on Access Policy %s: %s' %
+ (self.parameters['policy_name'], to_native(error)), exception=traceback.format_exc())
+
+ def status_modify_on_access_policy(self):
+ """
+ Update the status of policy
+ """
+ if self.use_rest:
+ return self.modify_on_access_policy_rest({'policy_status': False})
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-status-modify')
+ access_policy_obj.add_new_child('policy-name', self.parameters['policy_name'])
+ access_policy_obj.add_new_child('policy-status', str(self.parameters['policy_status']).lower())
+
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error modifying status Vscan on Access Policy %s: %s' %
+ (self.parameters['policy_name'], to_native(error)), exception=traceback.format_exc())
+
+ def delete_on_access_policy(self):
+ """
+ Delete a Vscan On Access Policy
+ :return:
+ """
+ if self.use_rest:
+ return self.delete_on_access_policy_rest()
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-delete')
+ access_policy_obj.add_new_child('policy-name', self.parameters['policy_name'])
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error Deleting Vscan on Access Policy %s: %s' %
+ (self.parameters['policy_name'], to_native(error)), exception=traceback.format_exc())
+
+ def modify_on_access_policy(self, modify=None):
+ """
+ Modify a Vscan On Access policy
+ :return: nothing
+ """
+ if self.use_rest:
+ return self.modify_on_access_policy_rest(modify)
+ access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-modify')
+ access_policy_obj.add_new_child('policy-name', self.parameters['policy_name'])
+ access_policy_obj = self._fill_in_access_policy(access_policy_obj)
+ try:
+ self.server.invoke_successfully(access_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error Modifying Vscan on Access Policy %s: %s' %
+ (self.parameters['policy_name'], to_native(error)), exception=traceback.format_exc())
+
+ def _fill_in_access_policy(self, access_policy_obj):
+ if self.parameters.get('is_scan_mandatory') is not None:
+ access_policy_obj.add_new_child('is-scan-mandatory', str(self.parameters['is_scan_mandatory']).lower())
+ if self.parameters.get('max_file_size'):
+ access_policy_obj.add_new_child('max-file-size', str(self.parameters['max_file_size']))
+ if self.parameters.get('scan_files_with_no_ext') is not None:
+ access_policy_obj.add_new_child('scan-files-with-no-ext', str(self.parameters['scan_files_with_no_ext']))
+ if 'file_ext_to_exclude' in self.parameters:
+ ext_obj = netapp_utils.zapi.NaElement('file-ext-to-exclude')
+ access_policy_obj.add_child_elem(ext_obj)
+ if len(self.parameters['file_ext_to_exclude']) < 1:
+ ext_obj.add_new_child('file-extension', "")
+ else:
+ for extension in self.parameters['file_ext_to_exclude']:
+ ext_obj.add_new_child('file-extension', extension)
+ if 'file_ext_to_include' in self.parameters:
+ ext_obj = netapp_utils.zapi.NaElement('file-ext-to-include')
+ access_policy_obj.add_child_elem(ext_obj)
+ for extension in self.parameters['file_ext_to_include']:
+ ext_obj.add_new_child('file-extension', extension)
+ if 'filters' in self.parameters:
+ ui_filter_obj = netapp_utils.zapi.NaElement('filters')
+ access_policy_obj.add_child_elem(ui_filter_obj)
+ if len(self.parameters['filters']) < 1:
+ ui_filter_obj.add_new_child('vscan-on-access-policy-ui-filter', "")
+ else:
+ for filter in self.parameters['filters']:
+ ui_filter_obj.add_new_child('vscan-on-access-policy-ui-filter', filter)
+ if 'paths_to_exclude' in self.parameters:
+ path_obj = netapp_utils.zapi.NaElement('paths-to-exclude')
+ access_policy_obj.add_child_elem(path_obj)
+ if len(self.parameters['paths_to_exclude']) < 1:
+ path_obj.add_new_child('file-path', "")
+ else:
+ for path in self.parameters['paths_to_exclude']:
+ path_obj.add_new_child('file-path', path)
+ return access_policy_obj
+
+ def get_on_access_policy_rest(self):
+ self.svm_uuid = self.get_svm_uuid()
+ if self.svm_uuid is None:
+ self.module.fail_json(msg="Error: vserver %s not found" % self.parameters['vserver'])
+ api = "protocols/vscan/%s/on-access-policies" % self.svm_uuid
+ query = {'name': self.parameters['policy_name']}
+ fields = 'svm,name,mandatory,scope,enabled'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+ if error:
+ self.module.fail_json(msg='Error searching Vscan on Access Policy %s: %s' %
+ (self.parameters['policy_name'], to_native(error)))
+ if record:
+ return {
+ 'max_file_size': self.na_helper.safe_get(record, ['scope', 'max_file_size']),
+ 'vserver': self.na_helper.safe_get(record, ['svm', 'name']),
+ 'policy_name': record['name'],
+ 'is_scan_mandatory': record['mandatory'],
+ 'policy_status': record['enabled'],
+ 'scan_files_with_no_ext': self.na_helper.safe_get(record, ['scope', 'scan_without_extension']),
+ 'file_ext_to_exclude': self.na_helper.safe_get(record, ['scope', 'exclude_extensions']),
+ 'file_ext_to_include': self.na_helper.safe_get(record, ['scope', 'include_extensions']),
+ 'paths_to_exclude': self.na_helper.safe_get(record, ['scope', 'exclude_paths']),
+ 'scan_readonly_volumes': self.na_helper.safe_get(record, ['scope', 'scan_readonly_volumes']),
+ 'only_execute_access': self.na_helper.safe_get(record, ['scope', 'only_execute_access'])
+ }
+ return None
+
+ def get_svm_uuid(self):
+ uuid, error = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True)
+ return uuid
+
+ def create_on_access_policy_rest(self):
+ api = "protocols/vscan/%s/on-access-policies" % self.svm_uuid
+ body = {'name': self.parameters['policy_name']}
+ body.update(self.form_create_or_modify_body(self.parameters))
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg='Error creating Vscan on Access Policy %s: %s' %
+ (self.parameters['policy_name'], to_native(error)))
+
+ def modify_on_access_policy_rest(self, modify):
+ api = "protocols/vscan/%s/on-access-policies" % self.svm_uuid
+ body = self.form_create_or_modify_body(modify)
+ dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['policy_name'], body)
+ if error:
+ self.module.fail_json(msg='Error Modifying Vscan on Access Policy %s: %s' %
+ (self.parameters['policy_name'], to_native(error)))
+
+ def form_create_or_modify_body(self, params):
+ body = {}
+ if params.get('is_scan_mandatory') is not None:
+ body['mandatory'] = params['is_scan_mandatory']
+ if params.get('policy_status') is not None:
+ body['enabled'] = params['policy_status']
+ if params.get('max_file_size'):
+ body['scope.max_file_size'] = params['max_file_size']
+ if params.get('scan_files_with_no_ext') is not None:
+ body['scope.scan_without_extension'] = params['scan_files_with_no_ext']
+ if 'file_ext_to_exclude' in params:
+ body['scope.exclude_extensions'] = params['file_ext_to_exclude']
+ if 'file_ext_to_include' in params:
+ body['scope.include_extensions'] = params['file_ext_to_include']
+ if 'paths_to_exclude' in params:
+ body['scope.exclude_paths'] = params['paths_to_exclude']
+ if params.get('scan_readonly_volumes') is not None:
+ body['scope.scan_readonly_volumes'] = params['scan_readonly_volumes']
+ if params.get('only_execute_access') is not None:
+ body['scope.only_execute_access'] = params['only_execute_access']
+ return body
+
+ def delete_on_access_policy_rest(self):
+ api = "protocols/vscan/%s/on-access-policies" % self.svm_uuid
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['policy_name'])
+ if error:
+ self.module.fail_json(msg='Error Deleting Vscan on Access Policy %s: %s' %
+ (self.parameters['policy_name'], to_native(error)))
+
+ def apply(self):
+ modify_policy_state, modify = None, None
+ current = self.get_on_access_policy()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ # enable/disable policy handled in single modify api with REST.
+ if not self.use_rest and modify.get('policy_status') is not None:
+ modify_policy_state = True
+ # policy cannot be deleted unless its disabled, so disable it before delete.
+ if cd_action == 'delete' and current['policy_status'] is True and self.parameters.get('policy_status') is False:
+ modify_policy_state = True
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_on_access_policy()
+ # by default newly created policy will be in disabled state, enable if policy_status is set in ZAPI.
+ # REST enable policy on create itself.
+ if not self.use_rest and self.parameters.get('policy_status'):
+ modify_policy_state = True
+ if modify_policy_state:
+ self.status_modify_on_access_policy()
+ if cd_action == 'delete':
+ self.delete_on_access_policy()
+ if modify:
+ self.modify_on_access_policy(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify,
+ extra_responses={'modify_policy_state': modify_policy_state})
+ self.module.exit_json(**result)
+
+
def main():
    """
    Execute action from playbook
    """
    NetAppOntapVscanOnAccessPolicy().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py
new file mode 100644
index 000000000..b8391fa3b
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py
@@ -0,0 +1,407 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_vscan_on_demand_task
+short_description: NetApp ONTAP Vscan on demand task configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Configure on demand task for Vscan
+options:
+ state:
+ description:
+ - Whether a Vscan on demand task is present or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+
+ cross_junction:
+ description:
+ - Specifies whether the On-Demand task is allowed to cross volume junctions
+ - This option is not supported with REST.
+ - This option defaults to False for ZAPI.
+ type: bool
+
+ directory_recursion:
+ description:
+ - Specifies whether the On-Demand task is allowed to recursively scan through sub-directories.
+ - This option is not supported with REST.
+ - This option defaults to False for ZAPI.
+ type: bool
+
+ file_ext_to_exclude:
+ description:
+ - File-Extensions for which scanning must not be performed.
+ - File whose extension matches with both inclusion and exclusion list is not considered for scanning.
+ type: list
+ elements: str
+
+ file_ext_to_include:
+ description:
+ - File extensions for which scanning is considered.
+ - The default value is '*', which means that all files are considered for scanning except those which are excluded from scanning.
+ - File whose extension matches with both inclusion and exclusion list is not considered for scanning.
+ type: list
+ elements: str
+
+ max_file_size:
+ description:
+ - Max file-size (in bytes) allowed for scanning. The default value of 10737418240 (10GB) is taken if not provided at the time of creating a task.
+ type: int
+
+ paths_to_exclude:
+ description:
+ - File-paths for which scanning must not be performed.
+ type: list
+ elements: str
+
+ report_directory:
+ description:
+ - Path from the vserver root where task report is created. The path must be a directory and provided in unix-format from the root of the Vserver.
+ - Example /vol1/on-demand-reports.
+ type: str
+
+ report_log_level:
+ description:
+ - Log level for the On-Demand report.
+ - This option is not supported with REST.
+ - This option defaults to 'error' for ZAPI.
+ choices: ['verbose', 'info', 'error']
+ type: str
+
+ request_timeout:
+ description:
+ - Total request-service time-limit in seconds. If the virus-scanner does not respond within the provided time, scan will be timedout.
+ - This option is not supported with REST.
+ type: str
+
+ scan_files_with_no_ext:
+ description:
+ - Specifies whether files without any extension are considered for scanning or not.
+ type: bool
+ default: True
+
+ scan_paths:
+ description:
+ - List of paths that need to be scanned. The path must be provided in unix-format and from the root of the Vserver.
+ - Example /vol1/large_files.
+ type: list
+ elements: str
+
+ scan_priority:
+ description:
+ - Priority of the On-Demand scan requests generated by this task.
+ - This option is not supported with REST.
+    - This option defaults to 'low' for ZAPI.
+ choices: ['low', 'normal']
+ type: str
+
+ schedule:
+ description:
+ - Schedule of the task. The task will be run as per the schedule.
+ - For running the task immediately, vscan-on-demand-task-run api must be used after creating a task.
+ type: str
+
+ task_name:
+ description:
+ - Name of the task.
+ type: str
+ required: True
+'''
+
+EXAMPLES = """
+ - name: Create Vscan On Demand Task
+ netapp.ontap.na_ontap_vscan_on_demand_task:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ task_name: carchiOnDemand
+ scan_paths: /
+ report_directory: /
+ file_ext_to_exclude: ['py', 'yml']
+ max_file_size: 10737418241
+ paths_to_exclude: ['/tmp', '/var']
+ report_log_level: info
+ request_timeout: 60
+
+ - name: Delete Vscan On Demand Task
+ netapp.ontap.na_ontap_vscan_on_demand_task:
+ state: absent
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ task_name: carchiOnDemand
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVscanOnDemandTask:
+    def __init__(self):
+        """
+        Define the module's argument spec, validate parameters and decide
+        between the REST and ZAPI interfaces.
+        """
+        # UUID of the owning SVM; resolved lazily by get_svm_uuid() for REST calls.
+        self.svm_uuid = None
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            cross_junction=dict(required=False, type='bool'),
+            directory_recursion=dict(required=False, type='bool'),
+            file_ext_to_exclude=dict(required=False, type='list', elements='str'),
+            file_ext_to_include=dict(required=False, type='list', elements='str'),
+            max_file_size=dict(required=False, type="int"),
+            paths_to_exclude=dict(required=False, type='list', elements='str'),
+            report_directory=dict(required=False, type='str'),
+            report_log_level=dict(required=False, type='str', choices=['verbose', 'info', 'error']),
+            request_timeout=dict(required=False, type='str'),
+            scan_files_with_no_ext=dict(required=False, type='bool', default=True),
+            scan_paths=dict(required=False, type='list', elements='str'),
+            scan_priority=dict(required=False, type='str', choices=['low', 'normal']),
+            schedule=dict(required=False, type="str"),
+            task_name=dict(required=True, type="str")
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True,
+            required_if=[
+                ["state", "present", ["report_directory", "scan_paths"]]
+            ]
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = OntapRestAPI(self.module)
+        # These options have no REST equivalent; supplying any of them forces ZAPI.
+        unsupported_rest_properties = ['cross_junction', 'directory_recursion', 'report_log_level', 'request_timeout',
+                                       'scan_priority']
+        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
+        if not self.use_rest:
+            # ZAPI-only defaults, applied here so the documented defaults hold.
+            if self.parameters.get('cross_junction') is None:
+                self.parameters['cross_junction'] = False
+            if self.parameters.get('directory_recursion') is None:
+                self.parameters['directory_recursion'] = False
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            else:
+                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def get_demand_task(self):
+        """
+        Fetch the current on-demand task by name, via REST when available,
+        otherwise via the ZAPI vscan-on-demand-task-get-iter call.
+        :return: a vscan-on-demand-task-info NaElement (ZAPI), a formatted dict (REST), or None if absent
+        """
+        if self.use_rest:
+            # REST endpoints are scoped by SVM UUID, so resolve it first.
+            self.get_svm_uuid()
+            return self.get_demand_task_rest()
+        demand_task_iter = netapp_utils.zapi.NaElement("vscan-on-demand-task-get-iter")
+        demand_task_info = netapp_utils.zapi.NaElement("vscan-on-demand-task-info")
+        demand_task_info.add_new_child('task-name', self.parameters['task_name'])
+        query = netapp_utils.zapi.NaElement('query')
+        query.add_child_elem(demand_task_info)
+        demand_task_iter.add_child_elem(query)
+        try:
+            result = self.server.invoke_successfully(demand_task_iter, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error searching for Vscan on demand task %s: %s' %
+                                      (self.parameters['task_name'], to_native(error)),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            return result.get_child_by_name('attributes-list').get_child_by_name('vscan-on-demand-task-info')
+        return None
+
+ def create_demand_task(self):
+ """
+ Create a Demand Task
+ :return: None
+ """
+ if self.use_rest:
+ return self.create_demand_task_rest()
+ demand_task_obj = netapp_utils.zapi.NaElement("vscan-on-demand-task-create")
+ # Required items first
+ demand_task_obj.add_new_child('report-directory', self.parameters['report_directory'])
+ demand_task_obj.add_new_child('task-name', self.parameters['task_name'])
+ scan_paths = netapp_utils.zapi.NaElement("scan-paths")
+ for scan_path in self.parameters['scan_paths']:
+ scan_paths.add_new_child('string', scan_path)
+ demand_task_obj.add_child_elem(scan_paths)
+ # Optional items next
+ if self.parameters.get('cross_junction'):
+ demand_task_obj.add_new_child('cross-junction', str(self.parameters['cross_junction']).lower())
+ if self.parameters.get('directory_recursion'):
+ demand_task_obj.add_new_child('directory-recursion', str(self.parameters['directory_recursion']).lower())
+ if self.parameters.get('file_ext_to_exclude'):
+ ext_to_exclude_obj = netapp_utils.zapi.NaElement('file-ext-to-exclude')
+ for exclude_file in self.parameters['file_ext_to_exclude']:
+ ext_to_exclude_obj.add_new_child('file-extension', exclude_file)
+ demand_task_obj.add_child_elem(ext_to_exclude_obj)
+ if self.parameters.get('file_ext_to_include'):
+ ext_to_include_obj = netapp_utils.zapi.NaElement('file-ext-to-include')
+ for include_file in self.parameters['file_ext_to_exclude']:
+ ext_to_include_obj.add_child_elem(include_file)
+ demand_task_obj.add_child_elem(ext_to_include_obj)
+ if self.parameters.get('max_file_size'):
+ demand_task_obj.add_new_child('max-file-size', str(self.parameters['max_file_size']))
+ if self.parameters.get('paths_to_exclude'):
+ exclude_paths = netapp_utils.zapi.NaElement('paths-to-exclude')
+ for path in self.parameters['paths_to_exclude']:
+ exclude_paths.add_new_child('string', path)
+ demand_task_obj.add_child_elem(exclude_paths)
+ if self.parameters.get('report_log_level'):
+ demand_task_obj.add_new_child('report-log-level', self.parameters['report_log_level'])
+ if self.parameters.get('request_timeout'):
+ demand_task_obj.add_new_child('request-timeout', self.parameters['request_timeout'])
+ if self.parameters.get('scan_files_with_no_ext'):
+ demand_task_obj.add_new_child('scan-files-with-no-ext',
+ str(self.parameters['scan_files_with_no_ext']).lower())
+ if self.parameters.get('scan_priority'):
+ demand_task_obj.add_new_child('scan-priority', self.parameters['scan_priority'].lower())
+ if self.parameters.get('schedule'):
+ demand_task_obj.add_new_child('schedule', self.parameters['schedule'])
+ try:
+ self.server.invoke_successfully(demand_task_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating on demand task %s: %s' %
+ (self.parameters['task_name'], to_native(error)),
+ exception=traceback.format_exc())
+
+    def delete_demand_task(self):
+        """
+        Delete a Vscan on-demand task, using REST when available, ZAPI otherwise.
+        :return: None
+        """
+        if self.use_rest:
+            return self.delete_demand_task_rest()
+        demand_task_obj = netapp_utils.zapi.NaElement('vscan-on-demand-task-delete')
+        demand_task_obj.add_new_child('task-name', self.parameters['task_name'])
+        try:
+            self.server.invoke_successfully(demand_task_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error deleting on demand task, %s: %s' %
+                                      (self.parameters['task_name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+ def get_svm_uuid(self):
+ api = 'svm/svms'
+ query = {'name': self.parameters['vserver']}
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg='Error fetching svm uuid: %s' % to_native(error))
+ if not record:
+ self.module.fail_json(msg='Could not find svm uuid for %s' % self.parameters['vserver'])
+ self.svm_uuid = record['uuid']
+
+    def get_demand_task_rest(self):
+        """
+        Fetch the on-demand policy from REST for the cached SVM UUID.
+        :return: a dict keyed by this module's option names, or None if not found
+        """
+        api = 'protocols/vscan/%s/on-demand-policies' % self.svm_uuid
+        # The adjacent string literals below concatenate into one comma-separated 'fields' value.
+        query = {'name': self.parameters['task_name'],
+                 'fields': 'scope.exclude_extensions,'
+                           'scope.include_extensions,'
+                           'scope.max_file_size,'
+                           'scope.exclude_paths,'
+                           'log_path,'
+                           'scope.scan_without_extension,'
+                           'scan_paths,'
+                           'schedule.name,'
+                           'name'
+                 }
+        record, error = rest_generic.get_one_record(self.rest_api, api, query)
+        if error:
+            self.module.fail_json(msg='Error fetching on demand task %s: %s' % (self.parameters['task_name'], to_native(error)))
+        if record:
+            return self.format_on_demand_task(record)
+        return None
+
+ def format_on_demand_task(self, record):
+ return {
+ 'task_name': record['name'],
+ 'file_ext_to_exclude': self.na_helper.safe_get(record, ['scope', 'exclude_extensions']),
+ 'file_ext_to_include': self.na_helper.safe_get(record, ['scope', 'include_extensions']),
+ 'max_file_size': self.na_helper.safe_get(record, ['scope', 'max_file_size']),
+ 'paths_to_exclude': self.na_helper.safe_get(record, ['scope', 'exclude_paths']),
+ 'report_directory': self.na_helper.safe_get(record, ['log_path']),
+ 'scan_files_with_no_ext': self.na_helper.safe_get(record, ['scope', 'scan_without_extension']),
+ 'scan_paths': self.na_helper.safe_get(record, ['scan_paths']),
+ 'schedule': self.na_helper.safe_get(record, ['schedule', 'name']),
+ }
+
+ def create_demand_task_rest(self):
+ api = 'protocols/vscan/%s/on-demand-policies' % self.svm_uuid
+ body = {
+ 'name': self.parameters['task_name'],
+ 'log_path': self.parameters['report_directory'],
+ 'scan_paths': self.parameters['scan_paths'],
+ }
+ if self.parameters.get('file_ext_to_exclude'):
+ body['scope.exclude_extensions'] = self.parameters['file_ext_to_exclude']
+ if self.parameters.get('file_ext_to_include'):
+ body['scope.include_extensions'] = self.parameters['file_ext_to_include']
+ if self.parameters.get('max_file_size'):
+ body['scope.max_file_size'] = self.parameters['max_file_size']
+ if self.parameters.get('paths_to_exclude'):
+ body['scope.exclude_paths'] = self.parameters['paths_to_exclude']
+ if self.parameters.get('scan_files_with_no_ext'):
+ body['scope.scan_without_extension'] = self.parameters['scan_files_with_no_ext']
+ if self.parameters.get('schedule'):
+ body['schedule.name'] = self.parameters['schedule']
+ dummy, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg='Error creating on demand task %s: %s' % (self.parameters['task_name'], to_native(error)))
+
+ def delete_demand_task_rest(self):
+ api = 'protocols/vscan/%s/on-demand-policies' % self.svm_uuid
+ dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['task_name'])
+ if error:
+ self.module.fail_json(msg='Error deleting on demand task %s: %s' % (self.parameters['task_name'], to_native(error)))
+
+ def apply(self):
+ current = self.get_demand_task()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_demand_task()
+ elif cd_action == 'delete':
+ self.delete_demand_task()
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppOntapVscanOnDemandTask()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py
new file mode 100644
index 000000000..20e480637
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_vscan_scanner_pool
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: na_ontap_vscan_scanner_pool
+short_description: NetApp ONTAP Vscan Scanner Pools Configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create/Modify/Delete a Vscan Scanner Pool
+options:
+ state:
+ description:
+ - Whether a Vscan Scanner pool is present or not
+ choices: ['present', 'absent']
+ type: str
+ default: present
+
+ vserver:
+ description:
+ - the name of the data vserver to use.
+ required: true
+ type: str
+
+ hostnames:
+ description:
+ - List of hostnames of Vscan servers which are allowed to connect to Data ONTAP
+ type: list
+ elements: str
+
+ privileged_users:
+ description:
+ - List of privileged usernames. Username must be in the form "domain-name\\user-name"
+ type: list
+ elements: str
+
+ scanner_pool:
+ description:
+ - the name of the virus scanner pool
+ required: true
+ type: str
+
+ scanner_policy:
+ description:
+ - The name of the Virus scanner Policy
+ choices: ['primary', 'secondary', 'idle']
+ type: str
+'''
+
+EXAMPLES = """
+- name: Create and enable Scanner pool
+ na_ontap_vscan_scanner_pool:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ hostnames: ['name', 'name2']
+ privileged_users: ['sim.rtp.openeng.netapp.com\\admin', 'sim.rtp.openeng.netapp.com\\carchi']
+ scanner_pool: Scanner1
+ scanner_policy: primary
+
+- name: Modify scanner pool
+ na_ontap_vscan_scanner_pool:
+ state: present
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ hostnames: ['name', 'name2', 'name3']
+ privileged_users: ['sim.rtp.openeng.netapp.com\\admin', 'sim.rtp.openeng.netapp.com\\carchi', 'sim.rtp.openeng.netapp.com\\chuyic']
+ scanner_pool: Scanner1
+
+- name: Delete a scanner pool
+ na_ontap_vscan_scanner_pool:
+ state: absent
+ username: '{{ netapp_username }}'
+ password: '{{ netapp_password }}'
+ hostname: '{{ netapp_hostname }}'
+ vserver: carchi-vsim2
+ scanner_pool: Scanner1
+"""
+
+RETURN = """
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppOntapVscanScannerPool(object):
+ ''' create, modify, delete vscan scanner pool '''
+    def __init__(self):
+        """
+        Define the module's argument spec, validate parameters and set up the ZAPI server connection.
+        """
+        # This module is ZAPI-only; use_rest stays False.
+        self.use_rest = False
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            hostnames=dict(required=False, type='list', elements='str'),
+            privileged_users=dict(required=False, type='list', elements='str'),
+            scanner_pool=dict(required=True, type='str'),
+            scanner_policy=dict(required=False, type='str', choices=['primary', 'secondary', 'idle'])
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = OntapRestAPI(self.module)
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+ def create_scanner_pool(self):
+ """
+ Create a Vscan Scanner Pool
+ :return: nothing
+ """
+ scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-create')
+ if self.parameters['hostnames']:
+ string_obj = netapp_utils.zapi.NaElement('hostnames')
+ scanner_pool_obj.add_child_elem(string_obj)
+ for hostname in self.parameters['hostnames']:
+ string_obj.add_new_child('string', hostname)
+ if self.parameters['privileged_users']:
+ users_obj = netapp_utils.zapi.NaElement('privileged-users')
+ scanner_pool_obj.add_child_elem(users_obj)
+ for user in self.parameters['privileged_users']:
+ users_obj.add_new_child('privileged-user', user)
+ scanner_pool_obj.add_new_child('scanner-pool', self.parameters['scanner_pool'])
+ try:
+ self.server.invoke_successfully(scanner_pool_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error creating Vscan Scanner Pool %s: %s' %
+ (self.parameters['scanner_policy'], to_native(error)),
+ exception=traceback.format_exc())
+
+ def apply_policy(self):
+ """
+ Apply a Scanner policy to a Scanner pool
+ :return: nothing
+ """
+ apply_policy_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-apply-policy')
+ apply_policy_obj.add_new_child('scanner-policy', self.parameters['scanner_policy'])
+ apply_policy_obj.add_new_child('scanner-pool', self.parameters['scanner_pool'])
+ try:
+ self.server.invoke_successfully(apply_policy_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error appling policy %s to pool %s: %s' %
+ (self.parameters['scanner_policy'], self.parameters['scanner_policy'], to_native(error)),
+ exception=traceback.format_exc())
+
+    def get_scanner_pool(self):
+        """
+        Check to see if a scanner pool exists or not.
+        :return: a dict describing the pool (hostnames, enable, privileged_users,
+                 scanner_pool, scanner_policy) if found, None otherwise
+        """
+        return_value = None
+        if self.use_rest:
+            # use_rest is always False in this module; the REST branch is a placeholder.
+            pass
+        else:
+            scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-get-iter')
+            scanner_pool_info = netapp_utils.zapi.NaElement('vscan-scanner-pool-info')
+            scanner_pool_info.add_new_child('scanner-pool', self.parameters['scanner_pool'])
+            scanner_pool_info.add_new_child('vserver', self.parameters['vserver'])
+            query = netapp_utils.zapi.NaElement('query')
+            query.add_child_elem(scanner_pool_info)
+            scanner_pool_obj.add_child_elem(query)
+            try:
+                result = self.server.invoke_successfully(scanner_pool_obj, True)
+            except netapp_utils.zapi.NaApiError as error:
+                self.module.fail_json(msg='Error searching for Vscan Scanner Pool %s: %s' %
+                                          (self.parameters['scanner_pool'], to_native(error)), exception=traceback.format_exc())
+            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+                # Double-check the returned record really matches the requested pool name.
+                if result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info').get_child_content(
+                        'scanner-pool') == self.parameters['scanner_pool']:
+                    scanner_pool_obj = result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info')
+                    # NOTE(review): assumes 'hostnames' and 'privileged-users' elements are always
+                    # present in the reply; a pool created without privileged users may return
+                    # None here and raise AttributeError — verify against ONTAP behavior.
+                    hostname = [host.get_content() for host in
+                                scanner_pool_obj.get_child_by_name('hostnames').get_children()]
+                    privileged_users = [user.get_content() for user in
+                                        scanner_pool_obj.get_child_by_name('privileged-users').get_children()]
+                    return_value = {
+                        'hostnames': hostname,
+                        'enable': scanner_pool_obj.get_child_content('is-currently-active'),
+                        'privileged_users': privileged_users,
+                        'scanner_pool': scanner_pool_obj.get_child_content('scanner-pool'),
+                        'scanner_policy': scanner_pool_obj.get_child_content('scanner-policy')
+                    }
+        return return_value
+
+ def delete_scanner_pool(self):
+ """
+ Delete a Scanner pool
+ :return: nothing
+ """
+ scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-delete')
+ scanner_pool_obj.add_new_child('scanner-pool', self.parameters['scanner_pool'])
+ try:
+ self.server.invoke_successfully(scanner_pool_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg='Error deleting Vscan Scanner Pool %s: %s' %
+ (self.parameters['scanner_pool'], to_native(error)),
+ exception=traceback.format_exc())
+
+    def modify_scanner_pool(self, modify):
+        """
+        Modify a scanner pool.
+        :param modify: dict of changed attributes as computed by get_modified_attributes
+        :return: nothing
+        """
+        vscan_pool_modify = netapp_utils.zapi.NaElement('vscan-scanner-pool-modify')
+        vscan_pool_modify.add_new_child('scanner-pool', self.parameters['scanner_pool'])
+        for key in modify:
+            if key == 'privileged_users':
+                users_obj = netapp_utils.zapi.NaElement('privileged-users')
+                vscan_pool_modify.add_child_elem(users_obj)
+                for user in modify['privileged_users']:
+                    users_obj.add_new_child('privileged-user', user)
+            elif key == 'hostnames':
+                string_obj = netapp_utils.zapi.NaElement('hostnames')
+                vscan_pool_modify.add_child_elem(string_obj)
+                for hostname in modify['hostnames']:
+                    string_obj.add_new_child('string', hostname)
+            elif key != 'scanner_policy':
+                # scanner_policy is excluded: it is applied separately via apply_policy().
+                vscan_pool_modify.add_new_child(self.attribute_to_name(key), str(modify[key]))
+
+        try:
+            self.server.invoke_successfully(vscan_pool_modify, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying Vscan Scanner Pool %s: %s' %
+                                      (self.parameters['scanner_pool'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+ @staticmethod
+ def attribute_to_name(attribute):
+ return str.replace(attribute, '_', '-')
+
+ def apply(self):
+ scanner_pool_obj = self.get_scanner_pool()
+ cd_action = self.na_helper.get_cd_action(scanner_pool_obj, self.parameters)
+ modify = None
+ if self.parameters['state'] == 'present' and cd_action is None:
+ modify = self.na_helper.get_modified_attributes(scanner_pool_obj, self.parameters)
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_scanner_pool()
+ if self.parameters.get('scanner_policy') is not None:
+ self.apply_policy()
+ elif cd_action == 'delete':
+ self.delete_scanner_pool()
+ elif modify:
+ self.modify_scanner_pool(modify)
+ if self.parameters.get('scanner_policy') is not None:
+ self.apply_policy()
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppOntapVscanScannerPool()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_audit.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_audit.py
new file mode 100644
index 000000000..fc3dc3bed
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_audit.py
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+
+# (c) 2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_vserver_audit
+short_description: NetApp Ontap - create, delete or modify vserver audit configuration.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '22.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create, delete or modify vserver audit configuration.
+options:
+ state:
+ description:
+ - Whether the specified vserver audit configuration should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ vserver:
+ description:
+ - Specifies name of the Vserver.
+ required: true
+ type: str
+ log_path:
+ description:
+ - The audit log destination path where consolidated audit logs are stored.
+ type: str
+ guarantee:
+ description:
+ - Indicates whether there is a strict Guarantee of Auditing.
+ - This option requires ONTAP 9.10.1 or later.
+ type: bool
+ enabled:
+ description:
+ - Specifies whether or not auditing is enabled on the SVM.
+ type: bool
+ events:
+ description:
+ - Specifies events for which auditing is enabled on the SVM.
+ type: dict
+ suboptions:
+ authorization_policy:
+ description:
+ - Authorization policy change events.
+ type: bool
+ cap_staging:
+ description:
+ - Central access policy staging events.
+ type: bool
+ cifs_logon_logoff:
+ description:
+ - CIFS logon and logoff events.
+ type: bool
+ file_operations:
+ description:
+ - File operation events.
+ type: bool
+ file_share:
+ description:
+ - File share category events.
+ type: bool
+ security_group:
+ description:
+ - Local security group management events.
+ type: bool
+ user_account:
+ description:
+ - Local user account management events.
+ type: bool
+ log:
+ description:
+      - Specifies the audit log configuration for the SVM, such as the log format, retention and rotation settings.
+ type: dict
+ suboptions:
+ format:
+ description:
+ - This option describes the format in which the logs are generated by consolidation process.
+ Possible values are,
+ - xml - Data ONTAP-specific XML log format
+ - evtx - Microsoft Windows EVTX log format
+ choices: ['xml', 'evtx']
+ type: str
+ retention:
+ description:
+ - This option describes the count and time to retain the audit log file.
+ type: dict
+ suboptions:
+ count:
+ description:
+ - Determines how many audit log files to retain before rotating the oldest log file out.
+ - This is mutually exclusive with duration.
+ type: int
+ duration:
+ description:
+ - Specifies an ISO-8601 format date and time to retain the audit log file.
+ - The audit log files are deleted once they reach the specified date/time.
+ - This is mutually exclusive with count.
+ type: str
+ rotation:
+ description:
+ - Audit event log files are rotated when they reach a configured threshold log size or are on a configured schedule.
+ - When an event log file is rotated, the scheduled consolidation task first renames the active converted file to a time-stamped archive file,
+ and then creates a new active converted event log file.
+ type: dict
+ suboptions:
+ size:
+ description:
+ - Rotates logs based on log size in bytes.
+ - Default value is 104857600.
+ type: int
+
+notes:
+ - This module supports REST only.
+ - At least one event should be enabled.
+ - No other fields can be specified when enabled is specified for modify.
+"""
+
+EXAMPLES = """
+
+ - name: Create vserver audit configuration
+ netapp.ontap.na_ontap_vserver_audit:
+ state: present
+ vserver: ansible
+ enabled: True
+ events:
+ authorization_policy: False
+ cap_staging: False
+ cifs_logon_logoff: True
+ file_operations: True
+ file_share: False
+ security_group: False
+ user_account: False
+ log_path: "/"
+ log:
+ format: xml
+ retention:
+ count: 4
+ rotation:
+ size: "1048576"
+ guarantee: False
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify vserver audit configuration
+ netapp.ontap.na_ontap_vserver_audit:
+ state: present
+ vserver: ansible
+ enabled: True
+ events:
+ authorization_policy: True
+ cap_staging: True
+ cifs_logon_logoff: True
+ file_operations: True
+ file_share: True
+ security_group: True
+ user_account: True
+ log_path: "/tmp"
+ log:
+ format: evtx
+ retention:
+ count: 5
+ rotation:
+ size: "104857600"
+ guarantee: True
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete vserver audit configuration
+ netapp.ontap.na_ontap_vserver_audit:
+ state: absent
+ vserver: ansible
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+import time
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppONTAPVserverAudit:
+ """
+ Class with vserver audit configuration methods
+ """
+
+    def __init__(self):
+        """
+        Define the module's argument spec, validate parameters, require REST (>= ONTAP 9.6)
+        and enforce that at least one audit event is enabled when state is present.
+        """
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            vserver=dict(required=True, type='str'),
+            enabled=dict(required=False, type='bool'),
+            guarantee=dict(required=False, type='bool'),
+            log_path=dict(required=False, type='str'),
+            log=dict(type='dict', options=dict(
+                format=dict(type='str', choices=['xml', 'evtx']),
+                retention=dict(type='dict', options=dict(
+                    count=dict(type='int'),
+                    duration=dict(type='str'),
+                )),
+                rotation=dict(type='dict', options=dict(
+                    size=dict(type='int'),
+                )),
+            )),
+            events=dict(type='dict', options=dict(
+                authorization_policy=dict(type='bool'),
+                cap_staging=dict(type='bool'),
+                cifs_logon_logoff=dict(type='bool'),
+                file_operations=dict(type='bool'),
+                file_share=dict(type='bool'),
+                security_group=dict(type='bool'),
+                user_account=dict(type='bool'),
+            ))
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        # set up variables
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.filter_out_none_entries(self.na_helper.set_parameters(self.module.params))
+
+        # This module is REST-only; fail fast if the cluster does not support REST >= 9.6.
+        self.rest_api = netapp_utils.OntapRestAPI(self.module)
+        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_vserver_audit', 9, 6)
+        # 'guarantee' needs ONTAP 9.10.1 or later.
+        partially_supported_rest_properties = [['guarantee', (9, 10, 1)]]
+        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties)
+        # UUID of the owning SVM; resolved later by the REST lookups.
+        self.svm_uuid = None
+        if 'events' in self.parameters and self.parameters['state'] == 'present':
+            # ONTAP requires at least one enabled event category in an audit configuration.
+            if all(self.parameters['events'][value] is False for value in self.parameters['events']) is True:
+                self.module.fail_json(msg="Error: At least one event should be enabled")
+
+ def get_vserver_audit_configuration_rest(self):
+ """
+ Retrieves audit configurations.
+ """
+ api = "protocols/audit"
+ query = {
+ 'svm.name': self.parameters['vserver'],
+ 'fields': 'svm.uuid,enabled,events,log,log_path,'
+ }
+ if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
+ query['fields'] += 'guarantee,'
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg="Error on fetching vserver audit configuration: %s" % error)
+ if record:
+ self.svm_uuid = self.na_helper.safe_get(record, ['svm', 'uuid'])
+ return {
+ 'enabled': self.na_helper.safe_get(record, ['enabled']),
+ 'events': self.na_helper.safe_get(record, ['events']),
+ 'log': self.na_helper.safe_get(record, ['log']),
+ 'log_path': self.na_helper.safe_get(record, ['log_path']),
+ 'guarantee': record.get('guarantee', False),
+ }
+ return record
+
+ def create_vserver_audit_config_body_rest(self):
+ """
+ Vserver audit config body for create and modify with rest API.
+ """
+ body = {}
+ if 'events' in self.parameters:
+ body['events'] = self.parameters['events']
+ if 'guarantee' in self.parameters:
+ body['guarantee'] = self.parameters['guarantee']
+ if self.na_helper.safe_get(self.parameters, ['log', 'retention', 'count']):
+ body['log.retention.count'] = self.parameters['log']['retention']['count']
+ if self.na_helper.safe_get(self.parameters, ['log', 'retention', 'duration']):
+ body['log.retention.duration'] = self.parameters['log']['retention']['duration']
+ if self.na_helper.safe_get(self.parameters, ['log', 'rotation', 'size']):
+ body['log.rotation.size'] = self.parameters['log']['rotation']['size']
+ if self.na_helper.safe_get(self.parameters, ['log', 'format']):
+ body['log.format'] = self.parameters['log']['format']
+ if 'log_path' in self.parameters:
+ body['log_path'] = self.parameters['log_path']
+ return body
+
+ def create_vserver_audit_configuration_rest(self):
+ """
+ Creates an audit configuration.
+ """
+ api = "protocols/audit"
+ body = self.create_vserver_audit_config_body_rest()
+ if 'vserver' in self.parameters:
+ body['svm.name'] = self.parameters.get('vserver')
+ if 'enabled' in self.parameters:
+ body['enabled'] = self.parameters['enabled']
+ record, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg="Error on creating vserver audit configuration: %s" % error)
+
+ def delete_vserver_audit_configuration_rest(self, current):
+ """
+ Deletes an audit configuration.
+ """
+ api = "protocols/audit/%s" % self.svm_uuid
+ if current['enabled'] is True:
+ modify = {'enabled': False}
+ self.modify_vserver_audit_configuration_rest(modify)
+ current = self.get_vserver_audit_configuration_rest()
+ retry = 2
+ while retry > 0:
+ record, error = rest_generic.delete_async(self.rest_api, api, None)
+ # Delete throws retry after sometime error during first run by default, hence retrying after sometime.
+ if error and '9699350' in error:
+ time.sleep(120)
+ retry -= 1
+ elif error:
+ self.module.fail_json(msg="Error on deleting vserver audit configuration: %s" % error)
+ else:
+ return
+
+ def modify_vserver_audit_configuration_rest(self, modify):
+ """
+ Updates audit configuration.
+ """
+ body = {}
+ if 'enabled' in modify:
+ body['enabled'] = modify['enabled']
+ else:
+ body = self.create_vserver_audit_config_body_rest()
+ api = "protocols/audit"
+ record, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body)
+ if error:
+ self.module.fail_json(msg="Error on modifying vserver audit configuration: %s" % error)
+
+ def apply(self):
+ current = self.get_vserver_audit_configuration_rest()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_vserver_audit_configuration_rest()
+ elif cd_action == 'delete':
+ self.delete_vserver_audit_configuration_rest(current)
+ elif modify:
+ # No other fields can be specified when enabled is specified for modify
+ if 'enabled' in modify:
+ self.modify_vserver_audit_configuration_rest(modify)
+ modify.pop('enabled')
+ if modify:
+ # This method will be called to modify fields other than enabled
+ self.modify_vserver_audit_configuration_rest(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+ self.module.exit_json(**result)
+
+
def main():
    """Entry point: instantiate the vserver audit module object and apply the requested state."""
    audit_module = NetAppONTAPVserverAudit()
    audit_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py
new file mode 100644
index 000000000..35eaf18c9
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+---
+module: na_ontap_vserver_cifs_security
+short_description: NetApp ONTAP vserver CIFS security modification
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+version_added: 2.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - modify vserver CIFS security.
+
+options:
+
+ vserver:
+ description:
+ - name of the vserver.
+ required: true
+ type: str
+
+ kerberos_clock_skew:
+ description:
+ - The clock skew in minutes is the tolerance for accepting tickets with time stamps that do not exactly match the host's system clock.
+ type: int
+
+ kerberos_ticket_age:
+ description:
+ - Determine the maximum amount of time in hours that a user's ticket may be used for the purpose of Kerberos authentication.
+ type: int
+
+ kerberos_renew_age:
+ description:
+ - Determine the maximum amount of time in days for which a ticket can be renewed.
+ type: int
+
+ kerberos_kdc_timeout:
+ description:
+ - Determine the timeout value in seconds for KDC connections.
+ type: int
+
+ is_signing_required:
+ description:
+ - Determine whether signing is required for incoming CIFS traffic.
+ type: bool
+
+ is_password_complexity_required:
+ description:
+ - Determine whether password complexity is required for local users.
+ type: bool
+
+ is_aes_encryption_enabled:
+ description:
+ - Determine whether AES-128 and AES-256 encryption mechanisms are enabled for Kerberos-related CIFS communication.
+ type: bool
+
+ is_smb_encryption_required:
+ description:
+ - Determine whether SMB encryption is required for incoming CIFS traffic.
+ type: bool
+
+ lm_compatibility_level:
+ description:
+ - Determine the LM compatibility level.
+ choices: ['lm_ntlm_ntlmv2_krb', 'ntlm_ntlmv2_krb', 'ntlmv2_krb', 'krb']
+ type: str
+
+ referral_enabled_for_ad_ldap:
+ description:
+ - Determine whether LDAP referral chasing is enabled or not for AD LDAP connections.
+ type: bool
+
+ session_security_for_ad_ldap:
+ description:
+ - Determine the level of security required for LDAP communications.
+ choices: ['none', 'sign', 'seal']
+ type: str
+
+ smb1_enabled_for_dc_connections:
+ description:
+ - Determine if SMB version 1 is used for connections to domain controllers.
+ choices: ['false', 'true', 'system_default']
+ type: str
+
+ smb2_enabled_for_dc_connections:
+ description:
+ - Determine if SMB version 2 is used for connections to domain controllers.
+ choices: ['false', 'true', 'system_default']
+ type: str
+
+ use_start_tls_for_ad_ldap:
+ description:
+ - Determine whether to use start_tls for AD LDAP connections.
+ type: bool
+
+ encryption_required_for_dc_connections:
+ description:
+ - Specifies whether encryption is required for domain controller connections.
+ type: bool
+ version_added: 21.20.0
+
+ use_ldaps_for_ad_ldap:
+ description:
+ - Determine whether to use LDAPS for secure Active Directory LDAP connections.
+ type: bool
+ version_added: 21.20.0
+
+'''
+
+EXAMPLES = '''
+ - name: modify cifs security
+ netapp.ontap.na_ontap_vserver_cifs_security:
+ hostname: "{{ hostname }}"
+ username: username
+ password: password
+ vserver: ansible
+ is_aes_encryption_enabled: false
+ lm_compatibility_level: lm_ntlm_ntlmv2_krb
+ smb1_enabled_for_dc_connections: system_default
+ smb2_enabled_for_dc_connections: system_default
+ use_start_tls_for_ad_ldap: false
+ referral_enabled_for_ad_ldap: false
+ session_security_for_ad_ldap: none
+ is_signing_required: false
+ is_password_complexity_required: false
+ encryption_required_for_dc_connections: false
+ use_ldaps_for_ad_ldap: false
+
+ - name: modify cifs security is_smb_encryption_required
+ netapp.ontap.na_ontap_vserver_cifs_security:
+ hostname: "{{ hostname }}"
+ username: username
+ password: password
+ vserver: ansible
+ is_smb_encryption_required: false
+
+ - name: modify cifs security int options
+ netapp.ontap.na_ontap_vserver_cifs_security:
+ hostname: "{{ hostname }}"
+ username: username
+ password: password
+ vserver: ansible
+ kerberos_clock_skew: 10
+ kerberos_ticket_age: 10
+ kerberos_renew_age: 5
+ kerberos_kdc_timeout: 3
+'''
+
+RETURN = '''
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+
class NetAppONTAPCifsSecurity:
    '''
    Modify vserver CIFS security settings.

    This module is ZAPI-only; na_ontap_cifs_server should be used with REST.
    '''
    def __init__(self):
        """Build the argument spec, validate ZAPI availability and connect to the vserver."""
        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
        self.argument_spec.update(dict(
            vserver=dict(required=True, type='str'),
            kerberos_clock_skew=dict(required=False, type='int'),
            kerberos_ticket_age=dict(required=False, type='int'),
            kerberos_renew_age=dict(required=False, type='int'),
            kerberos_kdc_timeout=dict(required=False, type='int'),
            is_signing_required=dict(required=False, type='bool'),
            is_password_complexity_required=dict(required=False, type='bool'),
            is_aes_encryption_enabled=dict(required=False, type='bool'),
            is_smb_encryption_required=dict(required=False, type='bool'),
            lm_compatibility_level=dict(required=False, choices=['lm_ntlm_ntlmv2_krb', 'ntlm_ntlmv2_krb', 'ntlmv2_krb', 'krb']),
            referral_enabled_for_ad_ldap=dict(required=False, type='bool'),
            session_security_for_ad_ldap=dict(required=False, choices=['none', 'sign', 'seal']),
            smb1_enabled_for_dc_connections=dict(required=False, choices=['false', 'true', 'system_default']),
            smb2_enabled_for_dc_connections=dict(required=False, choices=['false', 'true', 'system_default']),
            use_start_tls_for_ad_ldap=dict(required=False, type='bool'),
            encryption_required_for_dc_connections=dict(required=False, type='bool'),
            use_ldaps_for_ad_ldap=dict(required=False, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            # LDAPS and start_tls are alternative ways to secure AD LDAP; only one may be set.
            mutually_exclusive=[('use_ldaps_for_ad_ldap', 'use_start_tls_for_ad_ldap')]
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.na_helper.module_replaces('na_ontap_cifs_server', self.module)
        # Fix: the original message was missing the space between the two sentences.
        msg = 'Error: na_ontap_vserver_cifs_security only supports ZAPI. netapp.ontap.na_ontap_cifs_server should be used instead.'
        self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)

        self.set_playbook_zapi_key_map()
        if not netapp_utils.has_netapp_lib():
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def set_playbook_zapi_key_map(self):
        """Populate the module-option -> ZAPI-key maps on the helper, grouped by value type."""
        self.na_helper.zapi_int_keys = {
            'kerberos_clock_skew': 'kerberos-clock-skew',
            'kerberos_ticket_age': 'kerberos-ticket-age',
            'kerberos_renew_age': 'kerberos-renew-age',
            'kerberos_kdc_timeout': 'kerberos-kdc-timeout'
        }
        self.na_helper.zapi_bool_keys = {
            'is_signing_required': 'is-signing-required',
            'is_password_complexity_required': 'is-password-complexity-required',
            'is_aes_encryption_enabled': 'is-aes-encryption-enabled',
            'is_smb_encryption_required': 'is-smb-encryption-required',
            'referral_enabled_for_ad_ldap': 'referral-enabled-for-ad-ldap',
            'use_start_tls_for_ad_ldap': 'use-start-tls-for-ad-ldap',
            'encryption_required_for_dc_connections': 'encryption-required-for-dc-connections',
            'use_ldaps_for_ad_ldap': 'use-ldaps-for-ad-ldap'
        }
        self.na_helper.zapi_str_keys = {
            'lm_compatibility_level': 'lm-compatibility-level',
            'session_security_for_ad_ldap': 'session-security-for-ad-ldap',
            'smb1_enabled_for_dc_connections': 'smb1-enabled-for-dc-connections',
            'smb2_enabled_for_dc_connections': 'smb2-enabled-for-dc-connections'
        }

    def cifs_security_get_iter(self):
        """
        Get the current vserver CIFS security settings.

        :return: dict of current CIFS security settings, or None if not found
        """
        cifs_security_get = netapp_utils.zapi.NaElement('cifs-security-get-iter')
        query = netapp_utils.zapi.NaElement('query')
        cifs_security = netapp_utils.zapi.NaElement('cifs-security')
        cifs_security.add_new_child('vserver', self.parameters['vserver'])
        query.add_child_elem(cifs_security)
        cifs_security_get.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(cifs_security_get, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching cifs security from %s: %s'
                                      % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
            cifs_security_details = {}
            cifs_security_info = result.get_child_by_name('attributes-list').get_child_by_name('cifs-security')
            # Convert ZAPI string values back to the module's int/bool/str types.
            for option, zapi_key in self.na_helper.zapi_int_keys.items():
                cifs_security_details[option] = self.na_helper.get_value_for_int(from_zapi=True, value=cifs_security_info.get_child_content(zapi_key))
            for option, zapi_key in self.na_helper.zapi_bool_keys.items():
                cifs_security_details[option] = self.na_helper.get_value_for_bool(from_zapi=True, value=cifs_security_info.get_child_content(zapi_key))
            for option, zapi_key in self.na_helper.zapi_str_keys.items():
                # get_child_content already returns None for absent keys.
                cifs_security_details[option] = cifs_security_info.get_child_content(zapi_key)
            return cifs_security_details
        return None

    def cifs_security_modify(self, modify):
        """
        Apply the modified CIFS security attributes via cifs-security-modify.

        :param modify: attributes to modify (module option names)
        :return: None
        """
        cifs_security_modify = netapp_utils.zapi.NaElement('cifs-security-modify')
        for attribute in modify:
            cifs_security_modify.add_new_child(self.attribute_to_name(attribute), str(self.parameters[attribute]))
        try:
            self.server.invoke_successfully(cifs_security_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error modifying cifs security on %s: %s'
                                      % (self.parameters['vserver'], to_native(e)),
                                  exception=traceback.format_exc())

    @staticmethod
    def attribute_to_name(attribute):
        """Map a module option name to its ZAPI element name (underscores to dashes)."""
        return attribute.replace('_', '-')

    def apply(self):
        """Call modify operations."""
        current = self.cifs_security_get_iter()
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode and modify:
            self.cifs_security_modify(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
        self.module.exit_json(**result)
+
+
def main():
    """Entry point: build the CIFS security module object and run the modify workflow."""
    cifs_security = NetAppONTAPCifsSecurity()
    cifs_security.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py
new file mode 100644
index 000000000..3c34ccf08
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py
@@ -0,0 +1,446 @@
+#!/usr/bin/python
+
+# (c) 2018-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create/Delete vserver peer
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+ - netapp.ontap.netapp.na_ontap_peer
+module: na_ontap_vserver_peer
+options:
+ state:
+ choices: ['present', 'absent']
+ type: str
+ description:
+ - Whether the specified vserver peer should exist or not.
+ default: present
+ vserver:
+ description:
+ - Specifies name of the source Vserver in the relationship.
+ required: true
+ type: str
+ applications:
+ type: list
+ elements: str
+ description:
+ - List of applications which can make use of the peering relationship.
+ - FlexCache supported from ONTAP 9.5 onwards.
+ peer_vserver:
+ description:
+ - Specifies name of the peer Vserver in the relationship.
+ required: true
+ type: str
+ peer_cluster:
+ description:
+ - Specifies name of the peer Cluster.
+ - Required for creating the vserver peer relationship with a remote cluster
+ type: str
+ local_name_for_peer:
+ description:
+ - Specifies local name of the peer Vserver in the relationship.
+ - Use this if you see "Error creating vserver peer ... Vserver name conflicts with one of the following".
+ type: str
+ local_name_for_source:
+ description:
+ - Specifies local name of the source Vserver in the relationship.
+ - Use this if you see "Error accepting vserver peer ... System generated a name for the peer Vserver because of a naming conflict".
+ type: str
+ dest_hostname:
+ description:
+ - DEPRECATED - please use C(peer_options).
+ - Destination hostname or IP address.
+ - Required for creating the vserver peer relationship with a remote cluster.
+ type: str
+ dest_username:
+ description:
+ - DEPRECATED - please use C(peer_options).
+ - Destination username.
+ - Optional if this is same as source username.
+ type: str
+ dest_password:
+ description:
+ - DEPRECATED - please use C(peer_options).
+ - Destination password.
+ - Optional if this is same as source password.
+ type: str
+short_description: NetApp ONTAP Vserver peering
+version_added: 2.7.0
+'''
+
+EXAMPLES = """
+
+ - name: Source vserver peer create
+ netapp.ontap.na_ontap_vserver_peer:
+ state: present
+ peer_vserver: ansible2
+ peer_cluster: ansibleCluster
+ local_name_for_peer: peername
+ local_name_for_source: sourcename
+ vserver: ansible
+ applications: ['snapmirror']
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ peer_options:
+ hostname: "{{ netapp_dest_hostname }}"
+
+ - name: vserver peer delete
+ netapp.ontap.na_ontap_vserver_peer:
+ state: absent
+ peer_vserver: ansible2
+ vserver: ansible
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Source vserver peer create - different credentials
+ netapp.ontap.na_ontap_vserver_peer:
+ state: present
+ peer_vserver: ansible2
+ peer_cluster: ansibleCluster
+ local_name_for_peer: peername
+ local_name_for_source: sourcename
+ vserver: ansible
+ applications: ['snapmirror']
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ peer_options:
+ hostname: "{{ netapp_dest_hostname }}"
+ cert_filepath: "{{ cert_filepath }}"
+ key_filepath: "{{ key_filepath }}"
+"""
+
+RETURN = """
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
class NetAppONTAPVserverPeer:
    """
    Class with vserver peer methods.

    Manages create/delete/accept of SVM peer relationships.  For inter-cluster
    peering it talks to both the local (source) cluster and the remote (peer)
    cluster; REST is used when both clusters support it, otherwise ZAPI.
    """

    def __init__(self):

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            peer_vserver=dict(required=True, type='str'),
            peer_cluster=dict(required=False, type='str'),
            local_name_for_peer=dict(required=False, type='str'),
            local_name_for_source=dict(required=False, type='str'),
            applications=dict(required=False, type='list', elements='str'),
            peer_options=dict(type='dict', options=netapp_utils.na_ontap_host_argument_spec_peer()),
            # dest_* options are deprecated in favor of peer_options (see DOCUMENTATION).
            dest_hostname=dict(required=False, type='str'),
            dest_username=dict(required=False, type='str'),
            dest_password=dict(required=False, type='str', no_log=True)
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            mutually_exclusive=[
                ['peer_options', 'dest_hostname'],
                ['peer_options', 'dest_username'],
                ['peer_options', 'dest_password']
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Normalize connection info: peer_options and dest_* are mutually exclusive,
        # and when neither is given the peer is assumed to be on the local cluster.
        if self.parameters.get('dest_hostname') is None and self.parameters.get('peer_options') is None:
            self.parameters['dest_hostname'] = self.parameters.get('hostname')
        if self.parameters.get('dest_hostname') is not None:
            # if dest_hostname is present, peer_options is absent
            self.parameters['peer_options'] = dict(
                hostname=self.parameters.get('dest_hostname'),
                username=self.parameters.get('dest_username'),
                password=self.parameters.get('dest_password'),
            )
        else:
            # dest_hostname is kept in sync so is_remote_peer() can compare hostnames.
            self.parameters['dest_hostname'] = self.parameters['peer_options']['hostname']
        # Fill any missing peer_options entries from the source connection options.
        netapp_utils.setup_host_options_from_module_params(
            self.parameters['peer_options'], self.module,
            netapp_utils.na_ontap_host_argument_spec_peer().keys())
        # Rest API objects - REST is only used when BOTH clusters support it.
        self.use_rest = False
        self.rest_api = OntapRestAPI(self.module)
        self.src_use_rest = self.rest_api.is_rest()
        self.dst_rest_api = OntapRestAPI(self.module, host_options=self.parameters['peer_options'])
        self.dst_use_rest = self.dst_rest_api.is_rest()
        self.use_rest = bool(self.src_use_rest and self.dst_use_rest)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            # Separate ZAPI connections for the source and destination clusters.
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
            self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module, host_options=self.parameters['peer_options'])

    def vserver_peer_get_iter(self, target):
        """
        Compose NaElement object to query current vserver using remote-vserver-name and vserver parameters.

        :param target: 'source' or 'peer' - selects which side's view is queried
        :return: NaElement object for vserver-peer-get-iter with query
        """
        vserver_peer_get = netapp_utils.zapi.NaElement('vserver-peer-get-iter')
        query = netapp_utils.zapi.NaElement('query')
        vserver_peer_info = netapp_utils.zapi.NaElement('vserver-peer-info')
        vserver, remote_vserver = self.get_local_and_peer_vserver(target)
        vserver_peer_info.add_new_child('remote-vserver-name', remote_vserver)
        vserver_peer_info.add_new_child('vserver', vserver)
        query.add_child_elem(vserver_peer_info)
        vserver_peer_get.add_child_elem(query)
        return vserver_peer_get

    def get_local_and_peer_vserver(self, target):
        """Return (local vserver, remote vserver) as seen from the given side ('source' or 'peer')."""
        if target == 'source':
            return self.parameters['vserver'], self.parameters['peer_vserver']
        # else for target peer.
        return self.parameters['peer_vserver'], self.parameters['vserver']

    def vserver_peer_get(self, target='source'):
        """
        Get current vserver peer info.

        :param target: 'source' queries the local cluster, 'peer' queries the destination cluster
        :return: Dictionary of current vserver peer details if query successful, else return None
        """
        if self.use_rest:
            return self.vserver_peer_get_rest(target)

        vserver_peer_get_iter = self.vserver_peer_get_iter(target)
        vserver_info = {}
        try:
            if target == 'source':
                result = self.server.invoke_successfully(vserver_peer_get_iter, enable_tunneling=True)
            elif target == 'peer':
                result = self.dest_server.invoke_successfully(vserver_peer_get_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching vserver peer %s: %s'
                                      % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())
        # return vserver peer details
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) > 0:
            vserver_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('vserver-peer-info')
            vserver_info['peer_vserver'] = vserver_peer_info.get_child_content('remote-vserver-name')
            vserver_info['vserver'] = vserver_peer_info.get_child_content('vserver')
            vserver_info['local_peer_vserver'] = vserver_peer_info.get_child_content('peer-vserver')  # required for delete and accept
            vserver_info['peer_state'] = vserver_peer_info.get_child_content('peer-state')
            return vserver_info
        return None

    def vserver_peer_delete(self, current):
        """
        Delete a vserver peer.

        :param current: dict returned by vserver_peer_get - supplies the local peer vserver name
        """
        if self.use_rest:
            return self.vserver_peer_delete_rest(current)

        # Use the local (possibly auto-generated) peer vserver name, not the remote name.
        vserver_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'vserver-peer-delete', **{'peer-vserver': current['local_peer_vserver'],
                                      'vserver': self.parameters['vserver']})
        try:
            self.server.invoke_successfully(vserver_peer_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting vserver peer %s: %s'
                                      % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_peer_cluster_name(self):
        """
        Get the cluster name to use as peer_cluster when the user did not provide one.

        :return: cluster name
        """
        if self.use_rest:
            return self.get_peer_cluster_name_rest()

        cluster_info = netapp_utils.zapi.NaElement('cluster-identity-get')
        # if remote peer exist , get remote cluster name else local cluster name
        server = self.dest_server if self.is_remote_peer() else self.server
        try:
            result = server.invoke_successfully(cluster_info, enable_tunneling=True)
            return result.get_child_by_name('attributes').get_child_by_name(
                'cluster-identity-info').get_child_content('cluster-name')
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching peer cluster name for peer vserver %s: %s'
                                      % (self.parameters['peer_vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def vserver_peer_create(self):
        """
        Create a vserver peer on the source cluster.
        """
        # 'applications' has no default - fail with a clear message instead of a ZAPI error.
        if self.parameters.get('applications') is None:
            self.module.fail_json(msg='applications parameter is missing')
        if self.parameters.get('peer_cluster') is None:
            self.parameters['peer_cluster'] = self.get_peer_cluster_name()
        if self.use_rest:
            return self.vserver_peer_create_rest()

        vserver_peer_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'vserver-peer-create', **{'peer-vserver': self.parameters['peer_vserver'],
                                      'vserver': self.parameters['vserver'],
                                      'peer-cluster': self.parameters['peer_cluster']})
        if 'local_name_for_peer' in self.parameters:
            # Alternate local name to avoid vserver name conflicts on this cluster.
            vserver_peer_create.add_new_child('local-name', self.parameters['local_name_for_peer'])
        applications = netapp_utils.zapi.NaElement('applications')
        for application in self.parameters['applications']:
            applications.add_new_child('vserver-peer-application', application)
        vserver_peer_create.add_child_elem(applications)
        try:
            self.server.invoke_successfully(vserver_peer_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating vserver peer %s: %s'
                                      % (self.parameters['vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def is_remote_peer(self):
        """Return True when the peer vserver lives on a different cluster (different hostname)."""
        return (
            self.parameters.get('dest_hostname') is not None
            and self.parameters['dest_hostname'] != self.parameters['hostname']
        )

    def vserver_peer_accept(self):
        """
        Accept a vserver peer at destination.
        """
        # peer-vserver -> remote (source vserver is provided)
        # vserver -> local (destination vserver is provided)
        if self.use_rest:
            return self.vserver_peer_accept_rest('peer')
        # Re-read on the destination to obtain the (possibly auto-generated) local peer name.
        vserver_peer_info = self.vserver_peer_get('peer')
        if vserver_peer_info is None:
            self.module.fail_json(msg='Error retrieving vserver peer information while accepting')
        vserver_peer_accept = netapp_utils.zapi.NaElement.create_node_with_children(
            'vserver-peer-accept', **{'peer-vserver': vserver_peer_info['local_peer_vserver'], 'vserver': self.parameters['peer_vserver']})
        if 'local_name_for_source' in self.parameters:
            # Alternate local name to avoid vserver name conflicts on the destination cluster.
            vserver_peer_accept.add_new_child('local-name', self.parameters['local_name_for_source'])
        try:
            self.dest_server.invoke_successfully(vserver_peer_accept, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error accepting vserver peer %s: %s'
                                      % (self.parameters['peer_vserver'], to_native(error)),
                                  exception=traceback.format_exc())

    def check_and_report_rest_error(self, error, action, where):
        """Fail on a REST error, except for a known harmless FSx job-status issue which is only warned about."""
        if error:
            if "job reported error:" in error and "entry doesn't exist" in error:
                # ignore RBAC issue with FSx - BURT1467620 (fixed in 9.11.0) - GitHub #45
                self.module.warn('Ignoring job status, assuming success - Issue #45.')
                return
            self.module.fail_json(msg='Error %s vserver peer relationship on %s: %s' % (action, where, error))

    def vserver_peer_accept_rest(self, target):
        """Accept (PATCH state to 'peered') the relationship on the destination cluster via REST."""
        # NOTE(review): 'target' is unused here; the destination view is always queried.
        vserver_peer_info = self.vserver_peer_get_rest('peer')
        if not vserver_peer_info:
            self.module.fail_json(msg='Error reading vserver peer information on peer %s' % self.parameters['peer_vserver'])
        api = 'svm/peers'
        body = {"state": "peered"}
        if 'local_name_for_source' in self.parameters:
            body['name'] = self.parameters['local_name_for_source']
        dummy, error = rest_generic.patch_async(self.dst_rest_api, api, vserver_peer_info['local_peer_vserver_uuid'], body)
        self.check_and_report_rest_error(error, 'accepting', self.parameters['peer_vserver'])

    def vserver_peer_get_rest(self, target):
        """
        Get current vserver peer info via REST.

        :param target: 'source' queries the local cluster, 'peer' queries the destination cluster
        :return: Dictionary of current vserver peer details if query successful, else return None
        """
        api = 'svm/peers'
        vserver_info = {}
        vserver, remote_vserver = self.get_local_and_peer_vserver(target)
        restapi = self.rest_api if target == 'source' else self.dst_rest_api
        options = {'svm.name': vserver, 'peer.svm.name': remote_vserver, 'fields': 'name,svm.name,peer.svm.name,state,uuid'}
        record, error = rest_generic.get_one_record(restapi, api, options)
        if error:
            self.module.fail_json(msg='Error fetching vserver peer %s: %s' % (self.parameters['vserver'], error))
        if record is not None:
            vserver_info['vserver'] = self.na_helper.safe_get(record, ['svm', 'name'])
            vserver_info['peer_vserver'] = self.na_helper.safe_get(record, ['peer', 'svm', 'name'])
            vserver_info['peer_state'] = record.get('state')
            # required local_peer_vserver_uuid to delete the peer relationship
            vserver_info['local_peer_vserver_uuid'] = record.get('uuid')
            vserver_info['local_peer_vserver'] = record['name']
            return vserver_info
        return None

    def vserver_peer_delete_rest(self, current):
        """
        Delete a vserver peer using rest.
        """
        dummy, error = rest_generic.delete_async(self.rest_api, 'svm/peers', current['local_peer_vserver_uuid'])
        self.check_and_report_rest_error(error, 'deleting', self.parameters['vserver'])

    def get_peer_cluster_name_rest(self):
        """
        Get the cluster name via REST (remote cluster when peering across clusters, else local).

        :return: cluster name
        """
        api = 'cluster'
        options = {'fields': 'name'}
        # if remote peer exist , get remote cluster name else local cluster name
        restapi = self.dst_rest_api if self.is_remote_peer() else self.rest_api
        record, error = rest_generic.get_one_record(restapi, api, options)
        if error:
            self.module.fail_json(msg='Error fetching peer cluster name for peer vserver %s: %s'
                                      % (self.parameters['peer_vserver'], error))
        if record is not None:
            return record.get('name')
        return None

    def vserver_peer_create_rest(self):
        """
        Create a vserver peer using rest.
        """
        api = 'svm/peers'
        params = {
            'svm.name': self.parameters['vserver'],
            'peer.cluster.name': self.parameters['peer_cluster'],
            'peer.svm.name': self.parameters['peer_vserver'],
            'applications': self.parameters['applications']
        }
        if 'local_name_for_peer' in self.parameters:
            params['name'] = self.parameters['local_name_for_peer']
        dummy, error = rest_generic.post_async(self.rest_api, api, params)
        self.check_and_report_rest_error(error, 'creating', self.parameters['vserver'])

    def apply(self):
        """
        Apply action to create/delete or accept vserver peer.
        """
        current = self.vserver_peer_get()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.vserver_peer_create()
                # accept only if the peer relationship is on a remote cluster
                if self.is_remote_peer():
                    self.vserver_peer_accept()
            elif cd_action == 'delete':
                self.vserver_peer_delete(current)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
        self.module.exit_json(**result)
+
+
def main():
    """Execute action"""
    peer_module = NetAppONTAPVserverPeer()
    peer_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer_permissions.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer_permissions.py
new file mode 100644
index 000000000..9ed54e96f
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer_permissions.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+
+# (c) 2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_ontap_vserver_peer_permissions
+short_description: NetApp Ontap - create, delete or modify vserver peer permission.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '22.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create, delete or modify vserver peer permission.
+options:
+ state:
+ description:
+ - Whether the specified vserver peer permission should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ vserver:
+ description:
+ - Specifies name of the source Vserver in the relationship.
+ required: true
+ type: str
+ applications:
+ type: list
+ elements: str
+ required: true
+ description:
+ - List of applications which can make use of the peering relationship.
+ - FlexCache supported from ONTAP 9.5 onwards.
+ cluster_peer:
+ description:
+ - Specifies name of the peer Cluster.
+ type: str
+ required: true
+"""
+
+EXAMPLES = """
+
+ - name: Create vserver peer permission for an SVM
+ netapp.ontap.na_ontap_vserver_peer_permissions:
+ state: present
+ vserver: ansible
+ cluster_peer: test_cluster
+ applications: ['snapmirror']
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Modify vserver peer permission for an SVM
+ netapp.ontap.na_ontap_vserver_peer_permissions:
+ state: present
+ vserver: ansible
+ cluster_peer: test_cluster
+ applications: ['snapmirror', 'flexcache']
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete vserver peer permission for an SVM
+ netapp.ontap.na_ontap_vserver_peer_permissions:
+ state: absent
+ vserver: ansible
+ cluster_peer: test_cluster
+ applications: ['snapmirror']
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppONTAPVserverPeerPermissions:
+ """
+ Class with vserver peer permission methods
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ vserver=dict(required=True, type='str'),
+ applications=dict(required=True, type='list', elements='str'),
+ cluster_peer=dict(required=True, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ # set up variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.rest_api.fail_if_not_rest_minimum_version('na_ontap_vserver_peer_permissions', 9, 6)
+ self.input_validation()
+ self.svm_uuid = None
+ self.cluster_peer_uuid = None
+
+ def input_validation(self):
+ if self.parameters.get('vserver') == '*':
+ self.module.fail_json(msg='As svm name * represents all svms and created by default, please provide a specific SVM name')
+ if self.parameters.get('applications') == [''] and self.parameters.get('state') == 'present':
+ self.module.fail_json(msg='Applications field cannot be empty, at least one application must be specified')
+
+ def get_vserver_peer_permission_rest(self):
+ """
+ Retrieves SVM peer permissions.
+ """
+ api = "svm/peer-permissions"
+ query = {
+ 'svm.name': self.parameters['vserver'],
+ "cluster_peer.name": self.parameters['cluster_peer'],
+ 'fields': 'svm.uuid,cluster_peer.uuid,applications'
+ }
+ record, error = rest_generic.get_one_record(self.rest_api, api, query)
+ if error:
+ self.module.fail_json(msg="Error on fetching vserver peer permissions: %s" % error)
+ if record:
+ self.svm_uuid = self.na_helper.safe_get(record, ['svm', 'uuid'])
+ self.cluster_peer_uuid = self.na_helper.safe_get(record, ['cluster_peer', 'uuid'])
+ return {
+ 'applications': self.na_helper.safe_get(record, ['applications']),
+ }
+ return None
+
+ def create_vserver_peer_permission_rest(self):
+ """
+ Creates an SVM peer permission.
+ """
+ api = "svm/peer-permissions"
+ body = {
+ 'svm.name': self.parameters['vserver'],
+ 'cluster_peer.name': self.parameters['cluster_peer'],
+ 'applications': self.parameters['applications']
+ }
+ record, error = rest_generic.post_async(self.rest_api, api, body)
+ if error:
+ self.module.fail_json(msg="Error on creating vserver peer permissions: %s" % error)
+
+ def delete_vserver_peer_permission_rest(self):
+ """
+ Deletes the SVM peer permissions.
+ """
+ api = "svm/peer-permissions/%s/%s" % (self.cluster_peer_uuid, self.svm_uuid)
+ record, error = rest_generic.delete_async(self.rest_api, api, None)
+ if error:
+ self.module.fail_json(msg="Error on deleting vserver peer permissions: %s" % error)
+
+ def modify_vserver_peer_permission_rest(self, modify):
+ """
+ Updates the SVM peer permissions.
+ """
+ body = {}
+ if 'applications' in modify:
+ body['applications'] = self.parameters['applications']
+ api = "svm/peer-permissions/%s/%s" % (self.cluster_peer_uuid, self.svm_uuid)
+ record, error = rest_generic.patch_async(self.rest_api, api, None, body)
+ if error:
+ self.module.fail_json(msg="Error on modifying vserver peer permissions: %s" % error)
+
+ def apply(self):
+ current = self.get_vserver_peer_permission_rest()
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_vserver_peer_permission_rest()
+ elif cd_action == 'delete':
+ self.delete_vserver_peer_permission_rest()
+ elif modify:
+ self.modify_vserver_peer_permission_rest(modify)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+ self.module.exit_json(**result)
+
+
+def main():
+ """
+ Creates the NetApp Ontap vserver peer permission object and runs the correct play task
+ """
+ obj = NetAppONTAPVserverPeerPermissions()
+ obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py
new file mode 100644
index 000000000..b834f210d
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py
@@ -0,0 +1,402 @@
+#!/usr/bin/python
+'''
+# (c) 2020-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Loop over an ONTAP get status request until a condition is satisfied.
+ - Report a timeout error if C(timeout) is exceeded while waiting for the condition.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+module: na_ontap_wait_for_condition
+short_description: NetApp ONTAP wait_for_condition. Loop over a get status request until a condition is met.
+version_added: 20.8.0
+options:
+ name:
+ description:
+ - The name of the event to check for.
+ - snapmirror_relationship was added in 21.22.0.
+ choices: ['snapmirror_relationship', 'sp_upgrade', 'sp_version']
+ type: str
+ required: true
+ state:
+ description:
+ - whether the conditions should be present or absent.
+ - if C(present), the module exits when any of the conditions is observed.
+      - if C(absent), the module exits with success when none of the conditions is observed.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ conditions:
+ description:
+ - one or more conditions to match
+ - C(state) and/or C(transfer_state) for C(snapmirror_relationship),
+ - C(is_in_progress) for C(sp_upgrade),
+ - C(firmware_version) for C(sp_version).
+ type: list
+ elements: str
+ required: true
+ polling_interval:
+ description:
+      - how often to check for the conditions, in seconds.
+ default: 5
+ type: int
+ timeout:
+ description:
+ - how long to wait for the conditions, in seconds.
+ default: 180
+ type: int
+ attributes:
+ description:
+ - a dictionary of custom attributes for the condition.
+ - C(sp_upgrade), C(sp_version) require C(node).
+ - C(sp_version) requires C(expected_version).
+ - C(snapmirror_relationship) requires C(destination_path) and C(expected_state) or C(expected_transfer_state) to match the condition(s).
+ type: dict
+'''
+
+EXAMPLES = """
+ - name: wait for sp_upgrade in progress
+ netapp.ontap.na_ontap_wait_for_condition:
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: true
+ validate_certs: no
+ name: sp_upgrade
+ conditions: is_in_progress
+ attributes:
+ node: "{{ node }}"
+ polling_interval: 30
+ timeout: 1800
+
+ - name: wait for sp_upgrade not in progress
+ netapp.ontap.na_ontap_wait_for_condition:
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: true
+ validate_certs: no
+ name: sp_upgrade
+ conditions: is_in_progress
+ state: absent
+ attributes:
+ node: "{{ ontap_admin_ip }}"
+ polling_interval: 30
+ timeout: 1800
+
+ - name: wait for sp_version to match 3.9
+ netapp.ontap.na_ontap_wait_for_condition:
+ hostname: "{{ ontap_admin_ip }}"
+ username: "{{ ontap_admin_username }}"
+ password: "{{ ontap_admin_password }}"
+ https: true
+ validate_certs: no
+ name: sp_version
+ conditions: firmware_version
+ state: present
+ attributes:
+ node: "{{ ontap_admin_ip }}"
+ expected_version: 3.9
+ polling_interval: 30
+ timeout: 1800
+"""
+
+RETURN = """
+states:
+ description:
+ - summarized list of observed states while waiting for completion
+ - reported for success or timeout error
+ returned: always
+ type: str
+last_state:
+ description: last observed state for event
+ returned: always
+ type: str
+"""
+
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppONTAPWFC:
+ ''' wait for a resource to match a condition or not '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str', choices=['snapmirror_relationship', 'sp_upgrade', 'sp_version']),
+ conditions=dict(required=True, type='list', elements='str'),
+ polling_interval=dict(required=False, type='int', default=5),
+ timeout=dict(required=False, type='int', default=180),
+ attributes=dict(required=False, type='dict')
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('name', 'sp_upgrade', ['attributes']),
+ ('name', 'sp_version', ['attributes']),
+ ],
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.states = []
+ self.rest_api = netapp_utils.OntapRestAPI(self.module)
+ self.use_rest = self.rest_api.is_rest()
+
+ if not self.use_rest:
+ if not netapp_utils.has_netapp_lib():
+ self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True)
+
+ self.resource_configuration = {
+ 'snapmirror_relationship': {
+ 'required_attributes': ['destination_path'],
+ 'conditions': {
+ 'state': ('state' if self.use_rest else 'not_supported', None),
+ 'transfer_state': ('transfer.state' if self.use_rest else 'not_supported', None)
+ }
+ },
+ 'sp_upgrade': {
+ 'required_attributes': ['node'],
+ 'conditions': {
+ 'is_in_progress': ('service_processor.state', 'updating') if self.use_rest else ('is-in-progress', 'true')
+ }
+ },
+ 'sp_version': {
+ 'required_attributes': ['node', 'expected_version'],
+ 'conditions': {
+ 'firmware_version': ('service_processor.firmware_version' if self.use_rest else 'firmware-version',
+ self.parameters['attributes'].get('expected_version'))
+ }
+ }
+ }
+
+ name = 'snapmirror_relationship'
+ if self.parameters['name'] == name:
+ for condition in self.resource_configuration[name]['conditions']:
+ if condition in self.parameters['conditions']:
+ self.update_condition_value(name, condition)
+
+ def update_condition_value(self, name, condition):
+ '''requires an expected value for a condition and sets it'''
+ expected_value = 'expected_%s' % condition
+ self.resource_configuration[name]['required_attributes'].append(expected_value)
+ # we can't update a tuple value, so rebuild the tuple
+ self.resource_configuration[name]['conditions'][condition] = (
+ self.resource_configuration[name]['conditions'][condition][0],
+ self.parameters['attributes'].get(expected_value))
+
+ def get_fields(self, name):
+ return ','.join([field for (field, dummy) in self.resource_configuration[name]['conditions'].values()])
+
+ def get_key_value(self, record, key):
+ if self.use_rest:
+ # with REST, we can have nested dictionaries
+ key = key.split('.')
+ return self.na_helper.safe_get(record, key)
+ return self.get_key_value_zapi(record, key)
+
+ def get_key_value_zapi(self, xml, key):
+ for child in xml.get_children():
+ value = xml.get_child_content(key)
+ if value is not None:
+ return value
+ value = self.get_key_value(child, key)
+ if value is not None:
+ return value
+ return None
+
+ def build_zapi(self, name):
+ ''' build ZAPI request based on resource name '''
+ if name == 'sp_upgrade':
+ zapi_obj = netapp_utils.zapi.NaElement("service-processor-image-update-progress-get")
+ zapi_obj.add_new_child('node', self.parameters['attributes']['node'])
+ return zapi_obj
+ if name == 'sp_version':
+ zapi_obj = netapp_utils.zapi.NaElement("service-processor-get")
+ zapi_obj.add_new_child('node', self.parameters['attributes']['node'])
+ return zapi_obj
+ if name in self.resource_configuration:
+ self.module.fail_json(msg='Error: event %s is not supported with ZAPI. It requires REST.' % name)
+ raise KeyError(name)
+
+ def build_rest_api_kwargs(self, name):
+ if name in ['sp_upgrade', 'sp_version']:
+ return {
+ 'api': 'cluster/nodes',
+ 'query': {'name': self.parameters['attributes']['node']},
+ 'fields': self.get_fields(name)
+ }
+ if name == 'snapmirror_relationship':
+ return {
+ 'api': 'snapmirror/relationships',
+ 'query': {'destination.path': self.parameters['attributes']['destination_path']},
+ 'fields': self.get_fields(name)
+ }
+ raise KeyError(name)
+
+ def extract_condition(self, name, results):
+ ''' check if any of the conditions is present
+ return:
+ None, error if key is not found
+ condition, None if a key is found with expected value
+ None, None if every key does not match the expected values
+ '''
+ for condition, (key, value) in self.resource_configuration[name]['conditions'].items():
+ status = self.get_key_value(results, key)
+ if status is None and name == 'snapmirror_relationship' and results and condition == 'transfer_state':
+ # key is absent when not transferring. We convert this to 'idle'
+ status = 'idle'
+ self.states.append(str(status))
+ if status == str(value):
+ return condition, None
+ if status is None:
+ return None, 'Cannot find element with name: %s in results: %s' % (key, results if self.use_rest else results.to_string())
+ # not found, or no match
+ return None, None
+
+ def get_condition(self, name, rest_or_zapi_args):
+ '''calls ZAPI or REST and extract condition value'''
+ record, error = self.get_record_rest(name, rest_or_zapi_args) if self.use_rest else self.get_record_zapi(name, rest_or_zapi_args)
+ if error:
+ return None, error
+ condition, error = self.extract_condition(name, record)
+ if error is not None:
+ return condition, error
+ if self.parameters['state'] == 'present':
+ if condition in self.parameters['conditions']:
+ return 'matched condition: %s' % condition, None
+ else:
+ if condition is None:
+ return 'conditions not matched', None
+ if condition not in self.parameters['conditions']:
+ return 'conditions not matched: found other condition: %s' % condition, None
+ return None, None
+
+ def get_record_zapi(self, name, zapi_obj):
+ ''' calls the ZAPI and extract condition value'''
+ try:
+ results = self.server.invoke_successfully(zapi_obj, True)
+ except netapp_utils.zapi.NaApiError as error:
+ return None, 'Error running command %s: %s' % (self.parameters['name'], to_native(error))
+ return results, None
+
+ def get_record_rest(self, name, rest_api_kwargs):
+ record, error = rest_generic.get_one_record(self.rest_api, **rest_api_kwargs)
+ if error:
+ return None, 'Error running command %s: %s' % (self.parameters['name'], error)
+ if not record:
+ return None, "no record for node: %s" % rest_api_kwargs['query']
+ return record, None
+
+ def summarize_states(self):
+ ''' replaces a long list of states with multipliers
+ eg 'false*5' or 'false*2,true'
+ return:
+ state_list as str
+ last_state
+ '''
+ previous_state = None
+ count = 0
+ summaries = []
+ for state in self.states:
+ if state == previous_state:
+ count += 1
+ else:
+ if previous_state is not None:
+ summaries.append('%s%s' % (previous_state, '' if count == 1 else '*%d' % count))
+ count = 1
+ previous_state = state
+ if previous_state is not None:
+ summaries.append('%s%s' % (previous_state, '' if count == 1 else '*%d' % count))
+ last_state = self.states[-1] if self.states else ''
+ return ','.join(summaries), last_state
+
+ def wait_for_condition(self, name):
+ ''' calls the ZAPI and extract condition value - loop until found '''
+ time_left = self.parameters['timeout']
+ max_consecutive_error_count = 3
+ error_count = 0
+ rest_or_zapi_args = self.build_rest_api_kwargs(name) if self.use_rest else self.build_zapi(name)
+
+ while time_left > 0:
+ condition, error = self.get_condition(name, rest_or_zapi_args)
+ if error is not None:
+ error_count += 1
+ if error_count >= max_consecutive_error_count:
+ self.module.fail_json(msg='Error: %s - count: %d' % (error, error_count))
+ elif condition is not None:
+ return condition
+ time.sleep(self.parameters['polling_interval'])
+ time_left -= self.parameters['polling_interval']
+
+ conditions = ["%s==%s" % (condition, self.resource_configuration[name]['conditions'][condition][1]) for condition in self.parameters['conditions']]
+ error = 'Error: timeout waiting for condition%s: %s.' %\
+ ('s' if len(conditions) > 1 else '',
+ ', '.join(conditions))
+ states, last_state = self.summarize_states()
+ self.module.fail_json(msg=error, states=states, last_state=last_state)
+
+ def validate_resource(self, name):
+ if name not in self.resource_configuration:
+ raise KeyError('%s - configuration entry missing for resource' % name)
+
+ def validate_attributes(self, name):
+ required = self.resource_configuration[name].get('required_attributes', list())
+ msgs = [
+ 'attributes: %s is required for resource name: %s' % (attribute, name)
+ for attribute in required
+ if attribute not in self.parameters['attributes']
+ ]
+
+ if msgs:
+ self.module.fail_json(msg='Error: %s' % ', '.join(msgs))
+
+ def validate_conditions(self, name):
+ conditions = self.resource_configuration[name].get('conditions')
+ msgs = [
+ 'condition: %s is not valid for resource name: %s' % (condition, name)
+ for condition in self.parameters['conditions']
+ if condition not in conditions
+ ]
+
+ if msgs:
+ msgs.append('valid condition%s: %s' %
+ ('s are' if len(conditions) > 1 else ' is', ', '.join(conditions.keys())))
+ self.module.fail_json(msg='Error: %s' % ', '.join(msgs))
+
+ def apply(self):
+ ''' calls the ZAPI and check conditions '''
+ changed = False
+ name = self.parameters['name']
+ self.validate_resource(name)
+ self.validate_attributes(name)
+ self.validate_conditions(name)
+ output = self.wait_for_condition(name)
+ states, last_state = self.summarize_states()
+ self.module.exit_json(changed=changed, msg=output, states=states, last_state=last_state)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ command = NetAppONTAPWFC()
+ command.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py
new file mode 100644
index 000000000..49844622c
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_wwpn_alias
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+
+module: na_ontap_wwpn_alias
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+short_description: NetApp ONTAP set FCP WWPN Alias
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap
+version_added: '20.4.0'
+description:
+ - Create/Delete FCP WWPN Alias
+
+options:
+ state:
+ description:
+ - Whether the specified alias should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ name:
+ description:
+ - The name of the alias to create or delete.
+ required: true
+ type: str
+
+ wwpn:
+ description:
+ - WWPN of the alias.
+ type: str
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Create FCP Alias
+ na_ontap_wwpn_alias:
+ state: present
+ name: alias1
+ wwpn: 01:02:03:04:0a:0b:0c:0d
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+ - name: Delete FCP Alias
+ na_ontap_wwpn_alias:
+ state: absent
+ name: alias1
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+
+class NetAppOntapWwpnAlias(object):
+ ''' ONTAP WWPN alias operations '''
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=[
+ 'present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ wwpn=dict(required=False, type='str'),
+ vserver=dict(required=True, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[('state', 'present', ['wwpn'])],
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ # REST API should be used for ONTAP 9.6 or higher.
+ self.rest_api = OntapRestAPI(self.module)
+ if self.rest_api.is_rest():
+ self.use_rest = True
+ else:
+ self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_wwpn_alias'))
+
+ def get_alias(self, uuid):
+ params = {'fields': 'alias,wwpn',
+ 'alias': self.parameters['name'],
+ 'svm.uuid': uuid}
+ api = 'network/fc/wwpn-aliases'
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching wwpn alias: %s" % error)
+ if message['num_records'] > 0:
+ return {'name': message['records'][0]['alias'],
+ 'wwpn': message['records'][0]['wwpn'],
+ }
+ else:
+ return None
+
+ def create_alias(self, uuid, is_modify=False):
+ params = {'alias': self.parameters['name'],
+ 'wwpn': self.parameters['wwpn'],
+ 'svm.uuid': uuid}
+ api = 'network/fc/wwpn-aliases'
+ dummy, error = self.rest_api.post(api, params)
+ if error is not None:
+ if is_modify:
+ self.module.fail_json(msg="Error on modifying wwpn alias when trying to re-create alias: %s." % error)
+ else:
+ self.module.fail_json(msg="Error on creating wwpn alias: %s." % error)
+
+ def delete_alias(self, uuid, is_modify=False):
+ api = 'network/fc/wwpn-aliases/%s/%s' % (uuid, self.parameters['name'])
+ dummy, error = self.rest_api.delete(api)
+ if error is not None:
+ if is_modify:
+ self.module.fail_json(msg="Error on modifying wwpn alias when trying to delete alias: %s." % error)
+ else:
+ self.module.fail_json(msg="Error on deleting wwpn alias: %s." % error)
+
+ def get_svm_uuid(self):
+ """
+ Get a svm's UUID
+ :return: uuid of the svm.
+ """
+ params = {'fields': 'uuid', 'name': self.parameters['vserver']}
+ api = "svm/svms"
+ message, error = self.rest_api.get(api, params)
+ if error is not None:
+ self.module.fail_json(msg="Error on fetching svm uuid: %s" % error)
+ return message['records'][0]['uuid']
+
+ def apply(self):
+ cd_action, uuid, modify = None, None, None
+ uuid = self.get_svm_uuid()
+ current = self.get_alias(uuid)
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None and self.parameters['state'] == 'present':
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+
+ if self.na_helper.changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if cd_action == 'create':
+ self.create_alias(uuid)
+ elif cd_action == 'delete':
+ self.delete_alias(uuid)
+ elif modify:
+ self.delete_alias(uuid, is_modify=True)
+ self.create_alias(uuid, is_modify=True)
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+ self.module.exit_json(**result)
+
+
+def main():
+ alias = NetAppOntapWwpnAlias()
+ alias.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py
new file mode 100644
index 000000000..d928ff941
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+'''
+# (c) 2020-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Call a ZAPI on ONTAP.
+ - Cluster ZAPIs are run using a cluster admin account.
+ - Vserver ZAPIs can be run using a vsadmin account or using vserver tunneling (cluster admin with I(vserver option)).
+ - In case of success, a json dictionary is returned as C(response).
+ - In case of a ZAPI error, C(status), C(errno), C(reason) are set to help with diagnosing the issue,
+ - and the call is reported as an error ('failed').
+ - Other errors (eg connection issues) are reported as Ansible error.
+extends_documentation_fragment:
+ - netapp.ontap.netapp.na_ontap_zapi
+module: na_ontap_zapit
+short_description: NetApp ONTAP Run any ZAPI on ONTAP
+version_added: "20.4.0"
+options:
+ zapi:
+ description:
+ - A dictionary for the zapi and arguments.
+ - An XML tag I(<tag>value</tag>) is a dictionary with tag as the key.
+ - Value can be another dictionary, a list of dictionaries, a string, or nothing.
+ - eg I(<tag/>) is represented as I(tag:)
+ - A single zapi can be called at a time. Ansible warns if duplicate keys are found and only uses the last entry.
+ required: true
+ type: dict
+ vserver:
+ description:
+ - if provided, forces vserver tunneling. username identifies a cluster admin account.
+ type: str
+'''
+
+EXAMPLES = """
+-
+ name: Ontap ZAPI
+ hosts: localhost
+ gather_facts: False
+ collections:
+ - netapp.ontap
+ vars:
+ login: &login
+ hostname: "{{ admin_ip }}"
+ username: "{{ admin_username }}"
+ password: "{{ admin_password }}"
+ https: true
+ validate_certs: false
+ svm_login: &svm_login
+ hostname: "{{ svm_admin_ip }}"
+ username: "{{ svm_admin_username }}"
+ password: "{{ svm_admin_password }}"
+ https: true
+ validate_certs: false
+
+ tasks:
+ - name: run ontap ZAPI command as cluster admin
+ netapp.ontap.na_ontap_zapit:
+ <<: *login
+ zapi:
+ system-get-version:
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as cluster admin
+ netapp.ontap.na_ontap_zapit:
+ <<: *login
+ zapi:
+ vserver-get-iter:
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as cluster admin
+ netapp.ontap.na_ontap_zapit:
+ <<: *login
+ zapi:
+ vserver-get-iter:
+ desired-attributes:
+ vserver-info:
+ - aggr-list:
+ - aggr-name
+ - allowed-protocols:
+ - protocols
+ - vserver-aggr-info-list:
+            - vserver-aggr-info
+ - uuid
+ query:
+ vserver-info:
+ vserver-name: trident_svm
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as vsadmin
+ netapp.ontap.na_ontap_zapit:
+ <<: *svm_login
+ zapi:
+ vserver-get-iter:
+ desired-attributes:
+ vserver-info:
+ - uuid
+ register: output
+ - debug: var=output
+
+ - name: run ontap ZAPI command as vserver tunneling
+ netapp.ontap.na_ontap_zapit:
+ <<: *login
+ vserver: trident_svm
+ zapi:
+ vserver-get-iter:
+ desired-attributes:
+ vserver-info:
+ - uuid
+ register: output
+ - debug: var=output
+
+ - name: run ontap active-directory ZAPI command
+ netapp.ontap.na_ontap_zapit:
+ <<: *login
+ vserver: trident_svm
+ zapi:
+ active-directory-account-create:
+ account-name: testaccount
+ admin-username: testuser
+ admin-password: testpass
+ domain: testdomain
+ organizational-unit: testou
+ register: output
+ ignore_errors: True
+ - debug: var=output
+
+"""
+
+RETURN = """
+response:
+ description:
+ - If successful, a json dictionary representing the data returned by the ZAPI.
+ - If the ZAPI was executed but failed, an empty dictionary.
+ - Not present if the ZAPI call cannot be performed.
+ returned: On success
+ type: dict
+status:
+ description:
+ - If the ZAPI was executed but failed, the status set by the ZAPI.
+ - Not present if successful, or if the ZAPI call cannot be performed.
+ returned: On error
+ type: str
+errno:
+ description:
+ - If the ZAPI was executed but failed, the error code set by the ZAPI.
+ - Not present if successful, or if the ZAPI call cannot be performed.
+ returned: On error
+ type: str
+reason:
+ description:
+ - If the ZAPI was executed but failed, the error reason set by the ZAPI.
+ - Not present if successful, or if the ZAPI call cannot be performed.
+ returned: On error
+ type: str
+"""
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+except ImportError:
+ HAS_XMLTODICT = False
+
+try:
+ import json
+ HAS_JSON = True
+except ImportError:
+ HAS_JSON = False
+
+
+class NetAppONTAPZapi:
+    ''' Executes a single, arbitrary ZAPI command and returns its XML output as JSON. '''
+
+    def __init__(self):
+        ''' Build the argument spec, fail fast on missing python libraries,
+            and open a ZAPI connection (vserver-scoped when vserver is set).
+        '''
+        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
+        self.argument_spec.update(dict(
+            zapi=dict(required=True, type='dict'),
+            vserver=dict(required=False, type='str'),
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=False
+        )
+        parameters = self.module.params
+        # set up state variables
+        self.zapi = parameters['zapi']
+        self.vserver = parameters['vserver']
+
+        # fail with an explicit message for each missing optional library
+        if not HAS_JSON:
+            self.module.fail_json(msg="the python json module is required")
+        if not netapp_utils.has_netapp_lib():
+            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+        if not HAS_XMLTODICT:
+            self.module.fail_json(msg="the python xmltodict module is required")
+
+        # with a vserver, the connection is scoped to that SVM; otherwise cluster scope
+        if self.vserver is not None:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def jsonify_and_parse_output(self, xml_data):
+        ''' Convert the ZAPI XML response to a JSON-compatible dict.
+            Extract the status and, on failure, the errno/reason fields if present,
+            removing those attributes from the returned response.
+            Returns a (response, status, errno, reason) tuple; errno and reason
+            are None when status is 'passed'.
+        '''
+        try:
+            as_str = xml_data.to_string()
+        except Exception as exc:
+            self.module.fail_json(msg='Error running zapi in to_string: %s' %
+                                  str(exc))
+        try:
+            # xml_attribs=True keeps XML attributes (status/errno/reason) as '@'-prefixed keys
+            as_dict = xmltodict.parse(as_str, xml_attribs=True)
+        except Exception as exc:
+            self.module.fail_json(msg='Error running zapi in xmltodict: %s: %s' %
+                                  (as_str, str(exc)))
+        try:
+            # round-trip through json to ensure the structure is json-serializable
+            as_json = json.loads(json.dumps(as_dict))
+        except Exception as exc:
+            self.module.fail_json(msg='Error running zapi in json load/dump: %s: %s' %
+                                  (as_dict, str(exc)))
+
+        if 'results' not in as_json:
+            self.module.fail_json(msg='Error running zapi, no results field: %s: %s' %
+                                  (as_str, repr(as_json)))
+
+        # set status, and if applicable errno/reason, and remove attribute fields
+        errno = None
+        reason = None
+        response = as_json.pop('results')
+        status = response.get('@status', 'no_status_attr')
+        if status != 'passed':
+            # collect errno and reason, falling back from attribute to element
+            # form, then to a fixed placeholder when neither is present
+            errno = response.get('@errno', None)
+            if errno is None:
+                errno = response.get('errorno', None)
+            if errno is None:
+                errno = 'ESTATUSFAILED'
+            reason = response.get('@reason', None)
+            if reason is None:
+                reason = response.get('reason', None)
+            if reason is None:
+                reason = 'Execution failure with unknown reason.'
+
+        for key in ('@status', '@errno', '@reason', '@xmlns'):
+            try:
+                # remove irrelevant info
+                del response[key]
+            except KeyError:
+                pass
+        return response, status, errno, reason
+
+    def run_zapi(self):
+        ''' Validate the zapi option, invoke the ZAPI on the server, and
+            return the parsed (response, status, errno, reason) tuple. '''
+        zapi_struct = self.zapi
+        error = None
+        # the option must be a dict with exactly one key: the ZAPI name
+        if not isinstance(zapi_struct, dict):
+            error = 'A directory entry is expected, eg: system-get-version: '
+            zapi = zapi_struct
+        else:
+            zapi = list(zapi_struct.keys())
+            if len(zapi) != 1:
+                error = 'A single ZAPI can be called at a time'
+            else:
+                zapi = zapi[0]
+
+        # log first, then error out as needed
+        if error:
+            self.module.fail_json(msg='%s, received: %s' % (error, zapi))
+
+        zapi_obj = netapp_utils.zapi.NaElement(zapi)
+        attributes = zapi_struct[zapi]
+        # 'None' (string) can show up when YAML leaves the value empty; treat it as no attributes
+        if attributes is not None and attributes != 'None':
+            zapi_obj.translate_struct(attributes)
+
+        try:
+            output = self.server.invoke_elem(zapi_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error running zapi %s: %s' %
+                                  (zapi, to_native(error)),
+                                  exception=traceback.format_exc())
+
+        return self.jsonify_and_parse_output(output)
+
+    def apply(self):
+        ''' calls the zapi and returns json output '''
+        response, status, errno, reason = self.run_zapi()
+        if status == 'passed':
+            # changed is always reported as True on success: the module cannot
+            # tell whether the arbitrary ZAPI modified anything
+            self.module.exit_json(changed=True, response=response)
+        msg = 'ZAPI failure: check errno and reason.'
+        self.module.fail_json(changed=False, response=response, status=status, errno=errno, reason=reason, msg=msg)
+
+
+def main():
+ """
+ Execute action from playbook
+ """
+ zapi = NetAppONTAPZapi()
+ zapi.apply()
+
+
+if __name__ == '__main__':
+ main()